-rw-r--r--  Documentation/block/barrier.txt | 6
-rw-r--r--  Documentation/block/biodoc.txt | 10
-rw-r--r--  Documentation/block/request.txt | 2
-rw-r--r--  Documentation/gpio.txt | 4
-rw-r--r--  Documentation/iostats.txt | 2
-rw-r--r--  Documentation/lguest/Makefile | 3
-rw-r--r--  Documentation/lguest/lguest.c | 84
-rw-r--r--  arch/arm/plat-omap/mailbox.c | 8
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h | 5
-rw-r--r--  arch/sparc/defconfig | 2
-rw-r--r--  arch/sparc/kernel/sparc_ksyms.c | 3
-rw-r--r--  arch/sparc/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/sparc/lib/memset.S | 2
-rw-r--r--  arch/sparc/prom/printf.c | 2
-rw-r--r--  arch/sparc64/defconfig | 32
-rw-r--r--  arch/sparc64/kernel/head.S | 11
-rw-r--r--  arch/sparc64/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/um/drivers/mconsole_kern.c | 4
-rw-r--r--  arch/um/drivers/net_kern.c | 2
-rw-r--r--  arch/um/drivers/ubd_kern.c | 4
-rw-r--r--  arch/um/kernel/mem.c | 2
-rw-r--r--  arch/um/kernel/physmem.c | 15
-rw-r--r--  arch/um/kernel/skas/process.c | 4
-rw-r--r--  arch/um/os-Linux/aio.c | 47
-rw-r--r--  arch/um/os-Linux/process.c | 2
-rw-r--r--  arch/um/os-Linux/user_syms.c | 5
-rw-r--r--  arch/um/sys-i386/Makefile | 2
-rw-r--r--  block/as-iosched.c | 26
-rw-r--r--  block/blktrace.c | 10
-rw-r--r--  block/bsg.c | 12
-rw-r--r--  block/cfq-iosched.c | 39
-rw-r--r--  block/deadline-iosched.c | 18
-rw-r--r--  block/elevator.c | 75
-rw-r--r--  block/ll_rw_blk.c | 215
-rw-r--r--  block/noop-iosched.c | 14
-rw-r--r--  block/scsi_ioctl.c | 24
-rw-r--r--  drivers/acorn/block/fd1772.c | 4
-rw-r--r--  drivers/acorn/block/mfmhd.c | 2
-rw-r--r--  drivers/ata/libata-scsi.c | 2
-rw-r--r--  drivers/block/amiflop.c | 2
-rw-r--r--  drivers/block/aoe/aoe.h | 2
-rw-r--r--  drivers/block/aoe/aoeblk.c | 2
-rw-r--r--  drivers/block/ataflop.c | 2
-rw-r--r--  drivers/block/cciss.c | 10
-rw-r--r--  drivers/block/cpqarray.c | 6
-rw-r--r--  drivers/block/floppy.c | 4
-rw-r--r--  drivers/block/lguest_blk.c | 2
-rw-r--r--  drivers/block/loop.c | 4
-rw-r--r--  drivers/block/nbd.c | 4
-rw-r--r--  drivers/block/paride/pcd.c | 4
-rw-r--r--  drivers/block/paride/pd.c | 2
-rw-r--r--  drivers/block/paride/pf.c | 4
-rw-r--r--  drivers/block/pktcdvd.c | 12
-rw-r--r--  drivers/block/ps2esdi.c | 4
-rw-r--r--  drivers/block/ps3disk.c | 8
-rw-r--r--  drivers/block/rd.c | 2
-rw-r--r--  drivers/block/sunvdc.c | 2
-rw-r--r--  drivers/block/swim3.c | 4
-rw-r--r--  drivers/block/sx8.c | 20
-rw-r--r--  drivers/block/ub.c | 6
-rw-r--r--  drivers/block/umem.c | 6
-rw-r--r--  drivers/block/viodasd.c | 2
-rw-r--r--  drivers/block/xd.c | 2
-rw-r--r--  drivers/block/xd.h | 2
-rw-r--r--  drivers/block/xen-blkfront.c | 4
-rw-r--r--  drivers/block/xsysace.c | 4
-rw-r--r--  drivers/block/z2ram.c | 2
-rw-r--r--  drivers/cdrom/cdrom.c | 2
-rw-r--r--  drivers/cdrom/viocd.c | 2
-rw-r--r--  drivers/char/Kconfig | 24
-rw-r--r--  drivers/char/Makefile | 3
-rw-r--r--  drivers/ide/ide-cd.c | 4
-rw-r--r--  drivers/ide/ide-disk.c | 4
-rw-r--r--  drivers/ide/ide-io.c | 2
-rw-r--r--  drivers/ide/ide-probe.c | 2
-rw-r--r--  drivers/ide/legacy/hd.c | 2
-rw-r--r--  drivers/md/dm-table.c | 8
-rw-r--r--  drivers/md/dm.c | 10
-rw-r--r--  drivers/md/faulty.c | 2
-rw-r--r--  drivers/md/linear.c | 14
-rw-r--r--  drivers/md/md.c | 2
-rw-r--r--  drivers/md/multipath.c | 12
-rw-r--r--  drivers/md/raid0.c | 14
-rw-r--r--  drivers/md/raid1.c | 12
-rw-r--r--  drivers/md/raid10.c | 14
-rw-r--r--  drivers/md/raid5.c | 18
-rw-r--r--  drivers/message/i2o/i2o_block.c | 4
-rw-r--r--  drivers/misc/asus-laptop.c | 32
-rw-r--r--  drivers/mmc/card/queue.c | 8
-rw-r--r--  drivers/net/82596.c | 1
-rw-r--r--  drivers/net/Makefile | 2
-rw-r--r--  drivers/net/acenic.c | 6
-rw-r--r--  drivers/net/atl1/atl1_hw.h | 9
-rw-r--r--  drivers/net/atl1/atl1_main.c | 28
-rw-r--r--  drivers/net/defxx.c | 17
-rw-r--r--  drivers/net/ehea/ehea.h | 2
-rw-r--r--  drivers/net/ehea/ehea_main.c | 22
-rw-r--r--  drivers/net/forcedeth.c | 16
-rw-r--r--  drivers/net/netxen/netxen_nic.h | 3
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 48
-rw-r--r--  drivers/net/phy/vitesse.c | 2
-rw-r--r--  drivers/net/ps3_gelic_net.c | 215
-rw-r--r--  drivers/net/ps3_gelic_net.h | 24
-rw-r--r--  drivers/net/ucc_geth.c | 334
-rw-r--r--  drivers/net/ucc_geth.h | 6
-rw-r--r--  drivers/net/ucc_geth_ethtool.c | 388
-rw-r--r--  drivers/net/ucc_geth_mii.c | 6
-rw-r--r--  drivers/s390/block/dasd.c | 4
-rw-r--r--  drivers/s390/block/dasd_int.h | 2
-rw-r--r--  drivers/s390/block/dcssblk.c | 2
-rw-r--r--  drivers/s390/block/xpram.c | 2
-rw-r--r--  drivers/s390/char/tape.h | 2
-rw-r--r--  drivers/s390/char/tape_block.c | 4
-rw-r--r--  drivers/sbus/char/Kconfig | 1
-rw-r--r--  drivers/sbus/char/jsflash.c | 2
-rw-r--r--  drivers/scsi/aacraid/linit.c | 4
-rw-r--r--  drivers/scsi/scsi_lib.c | 12
-rw-r--r--  drivers/scsi/sd.c | 4
-rw-r--r--  drivers/scsi/sr.c | 2
-rw-r--r--  drivers/serial/8250.c | 5
-rw-r--r--  drivers/serial/8250_early.c | 10
-rw-r--r--  drivers/serial/serial_core.c | 9
-rw-r--r--  drivers/video/Kconfig | 9
-rw-r--r--  fs/bio.c | 30
-rw-r--r--  fs/open.c | 2
-rw-r--r--  include/asm-arm/arch-omap/mailbox.h | 2
-rw-r--r--  include/asm-xtensa/io.h | 1
-rw-r--r--  include/linux/blkdev.h | 141
-rw-r--r--  include/linux/blktrace_api.h | 2
-rw-r--r--  include/linux/elevator.h | 76
-rw-r--r--  include/linux/ide.h | 4
-rw-r--r--  include/linux/loop.h | 2
-rw-r--r--  include/linux/pci_ids.h | 4
-rw-r--r--  include/linux/raid/md_k.h | 4
-rw-r--r--  include/linux/serial_8250.h | 2
-rw-r--r--  include/linux/serial_core.h | 2
-rw-r--r--  include/scsi/sd.h | 2
-rw-r--r--  lib/fault-inject.c | 4
-rw-r--r--  mm/bounce.c | 4
-rw-r--r--  mm/hugetlb.c | 1
-rw-r--r--  mm/slab.c | 2
-rw-r--r--  net/8021q/vlan.c | 2
-rw-r--r--  net/bridge/netfilter/ebt_log.c | 7
-rw-r--r--  net/bridge/netfilter/ebt_ulog.c | 9
-rw-r--r--  net/ipv4/netfilter/ipt_LOG.c | 6
-rw-r--r--  net/ipv6/netfilter/ip6t_LOG.c | 6
-rw-r--r--  net/ipv6/tcp_ipv6.c | 1
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 1
-rw-r--r--  net/netfilter/nf_conntrack_proto_udp.c | 1
-rw-r--r--  net/netfilter/nf_conntrack_proto_udplite.c | 1
-rw-r--r--  net/netfilter/xt_physdev.c | 1
-rw-r--r--  net/netlink/genetlink.c | 28
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c | 9
-rw-r--r--  security/selinux/ss/services.c | 4
154 files changed, 1573 insertions(+), 1022 deletions(-)
diff --git a/Documentation/block/barrier.txt b/Documentation/block/barrier.txt
index 7d279f2f5bb..2c2f24f634e 100644
--- a/Documentation/block/barrier.txt
+++ b/Documentation/block/barrier.txt
@@ -79,9 +79,9 @@ and how to prepare flush requests. Note that the term 'ordered' is
 used to indicate the whole sequence of performing barrier requests
 including draining and flushing.

-typedef void (prepare_flush_fn)(request_queue_t *q, struct request *rq);
+typedef void (prepare_flush_fn)(struct request_queue *q, struct request *rq);

-int blk_queue_ordered(request_queue_t *q, unsigned ordered,
+int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 		      prepare_flush_fn *prepare_flush_fn);

 @q			: the queue in question
@@ -92,7 +92,7 @@ int blk_queue_ordered(request_queue_t *q, unsigned ordered,
 For example, SCSI disk driver's prepare_flush_fn looks like the
 following.

-static void sd_prepare_flush(request_queue_t *q, struct request *rq)
+static void sd_prepare_flush(struct request_queue *q, struct request *rq)
 {
 	memset(rq->cmd, 0, sizeof(rq->cmd));
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 3adaace328a..8af392fc6ef 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -740,12 +740,12 @@ Block now offers some simple generic functionality to help support command
 queueing (typically known as tagged command queueing), ie manage more than
 one outstanding command on a queue at any given time.

-	blk_queue_init_tags(request_queue_t *q, int depth)
+	blk_queue_init_tags(struct request_queue *q, int depth)

 	Initialize internal command tagging structures for a maximum
 	depth of 'depth'.

-	blk_queue_free_tags((request_queue_t *q)
+	blk_queue_free_tags((struct request_queue *q)

 	Teardown tag info associated with the queue. This will be done
 	automatically by block if blk_queue_cleanup() is called on a queue
@@ -754,7 +754,7 @@ one outstanding command on a queue at any given time.
 The above are initialization and exit management, the main helpers during
 normal operations are:

-	blk_queue_start_tag(request_queue_t *q, struct request *rq)
+	blk_queue_start_tag(struct request_queue *q, struct request *rq)

 	Start tagged operation for this request. A free tag number between
 	0 and 'depth' is assigned to the request (rq->tag holds this number),
@@ -762,7 +762,7 @@ normal operations are:
 	for this queue is already achieved (or if the tag wasn't started for
 	some other reason), 1 is returned. Otherwise 0 is returned.

-	blk_queue_end_tag(request_queue_t *q, struct request *rq)
+	blk_queue_end_tag(struct request_queue *q, struct request *rq)

 	End tagged operation on this request. 'rq' is removed from the internal
 	book keeping structures.
@@ -781,7 +781,7 @@ queue. For instance, on IDE any tagged request error needs to clear both
 the hardware and software block queue and enable the driver to sanely restart
 all the outstanding requests. There's a third helper to do that:

-	blk_queue_invalidate_tags(request_queue_t *q)
+	blk_queue_invalidate_tags(struct request_queue *q)

 	Clear the internal block tag queue and re-add all the pending requests
 	to the request queue. The driver will receive them again on the
diff --git a/Documentation/block/request.txt b/Documentation/block/request.txt
index 75924e2a697..fff58acb40a 100644
--- a/Documentation/block/request.txt
+++ b/Documentation/block/request.txt
@@ -83,6 +83,6 @@ struct bio *bio DBI First bio in request

 struct bio *biotail	DBI	Last bio in request

-request_queue_t *q	DB	Request queue this request belongs to
+struct request_queue *q	DB	Request queue this request belongs to

 struct request_list *rl	B	Request list this request came from
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index 218a8650f48..6bc2ba215df 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -148,7 +148,7 @@ pin ... that won't always match the specified output value, because of
 issues including wire-OR and output latencies.

 The get/set calls have no error returns because "invalid GPIO" should have
-been reported earlier in gpio_set_direction().  However, note that not all
+been reported earlier from gpio_direction_*().  However, note that not all
 platforms can read the value of output pins; those that can't should always
 return zero.  Also, using these calls for GPIOs that can't safely be accessed
 without sleeping (see below) is an error.
@@ -239,7 +239,7 @@ map between them using calls like:
 Those return either the corresponding number in the other namespace, or
 else a negative errno code if the mapping can't be done.  (For example,
 some GPIOs can't used as IRQs.)  It is an unchecked error to use a GPIO
-number that hasn't been marked as an input using gpio_set_direction(), or
+number that wasn't set up as an input using gpio_direction_input(), or
 to use an IRQ number that didn't originally come from gpio_to_irq().

 These two mapping calls are expected to cost on the order of a single
diff --git a/Documentation/iostats.txt b/Documentation/iostats.txt
index 09a1bafe252..b963c3b4afa 100644
--- a/Documentation/iostats.txt
+++ b/Documentation/iostats.txt
@@ -79,7 +79,7 @@ Field 8 -- # of milliseconds spent writing
     measured from __make_request() to end_that_request_last()).
 Field  9 -- # of I/Os currently in progress
     The only field that should go to zero. Incremented as requests are
-    given to appropriate request_queue_t and decremented as they finish.
+    given to appropriate struct request_queue and decremented as they finish.
 Field 10 -- # of milliseconds spent doing I/Os
     This field is increases so long as field 9 is nonzero.
 Field 11 -- weighted # of milliseconds spent doing I/Os
diff --git a/Documentation/lguest/Makefile b/Documentation/lguest/Makefile
index b9b9427376e..31e794ef5f9 100644
--- a/Documentation/lguest/Makefile
+++ b/Documentation/lguest/Makefile
@@ -11,8 +11,7 @@ endif
 include $(KBUILD_OUTPUT)/.config
 LGUEST_GUEST_TOP := ($(CONFIG_PAGE_OFFSET) - 0x08000000)

-CFLAGS:=-Wall -Wmissing-declarations -Wmissing-prototypes -O3 \
-	-static -DLGUEST_GUEST_TOP="$(LGUEST_GUEST_TOP)" -Wl,-T,lguest.lds
+CFLAGS:=-Wall -Wmissing-declarations -Wmissing-prototypes -O3 -Wl,-T,lguest.lds
 LDLIBS:=-lz

 all: lguest.lds lguest
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index 1432b502a2d..62a8133393e 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -47,12 +47,14 @@ static bool verbose;
 #define verbose(args...) \
 	do { if (verbose) printf(args); } while(0)
 static int waker_fd;
+static u32 top;

 struct device_list
 {
 	fd_set infds;
 	int max_infd;

+	struct lguest_device_desc *descs;
 	struct device *dev;
 	struct device **lastdev;
 };
@@ -324,8 +326,7 @@ static void concat(char *dst, char *args[])
 static int tell_kernel(u32 pgdir, u32 start, u32 page_offset)
 {
 	u32 args[] = { LHREQ_INITIALIZE,
-		       LGUEST_GUEST_TOP/getpagesize(), /* Just below us */
-		       pgdir, start, page_offset };
+		       top/getpagesize(), pgdir, start, page_offset };
 	int fd;

 	fd = open_or_die("/dev/lguest", O_RDWR);
@@ -382,7 +383,7 @@ static int setup_waker(int lguest_fd, struct device_list *device_list)
 static void *_check_pointer(unsigned long addr, unsigned int size,
 			    unsigned int line)
 {
-	if (addr >= LGUEST_GUEST_TOP || addr + size >= LGUEST_GUEST_TOP)
+	if (addr >= top || addr + size >= top)
 		errx(1, "%s:%i: Invalid address %li", __FILE__, line, addr);
 	return (void *)addr;
 }
@@ -629,24 +630,26 @@ static void handle_input(int fd, struct device_list *devices)
 	}
 }

-static struct lguest_device_desc *new_dev_desc(u16 type, u16 features,
-					       u16 num_pages)
+static struct lguest_device_desc *
+new_dev_desc(struct lguest_device_desc *descs,
+	     u16 type, u16 features, u16 num_pages)
 {
-	static unsigned long top = LGUEST_GUEST_TOP;
-	struct lguest_device_desc *desc;
+	unsigned int i;

-	desc = malloc(sizeof(*desc));
-	desc->type = type;
-	desc->num_pages = num_pages;
-	desc->features = features;
-	desc->status = 0;
-	if (num_pages) {
-		top -= num_pages*getpagesize();
-		map_zeroed_pages(top, num_pages);
-		desc->pfn = top / getpagesize();
-	} else
-		desc->pfn = 0;
-	return desc;
+	for (i = 0; i < LGUEST_MAX_DEVICES; i++) {
+		if (!descs[i].type) {
+			descs[i].type = type;
+			descs[i].features = features;
+			descs[i].num_pages = num_pages;
+			if (num_pages) {
+				map_zeroed_pages(top, num_pages);
+				descs[i].pfn = top/getpagesize();
+				top += num_pages*getpagesize();
+			}
+			return &descs[i];
+		}
+	}
+	errx(1, "too many devices");
 }

 static struct device *new_device(struct device_list *devices,
@@ -669,7 +672,7 @@ static struct device *new_device(struct device_list *devices,
 	dev->fd = fd;
 	if (handle_input)
 		set_fd(dev->fd, devices);
-	dev->desc = new_dev_desc(type, features, num_pages);
+	dev->desc = new_dev_desc(devices->descs, type, features, num_pages);
 	dev->mem = (void *)(dev->desc->pfn * getpagesize());
 	dev->handle_input = handle_input;
 	dev->watch_key = (unsigned long)dev->mem + watch_off;
@@ -866,30 +869,6 @@ static void setup_tun_net(const char *arg, struct device_list *devices)
 	verbose("attached to bridge: %s\n", br_name);
 }

-/* Now we know how much memory we have, we copy in device descriptors */
-static void map_device_descriptors(struct device_list *devs, unsigned long mem)
-{
-	struct device *i;
-	unsigned int num;
-	struct lguest_device_desc *descs;
-
-	/* Device descriptor array sits just above top of normal memory */
-	descs = map_zeroed_pages(mem, 1);
-
-	for (i = devs->dev, num = 0; i; i = i->next, num++) {
-		if (num == LGUEST_MAX_DEVICES)
-			errx(1, "too many devices");
-		verbose("Device %i: %s\n", num,
-			i->desc->type == LGUEST_DEVICE_T_NET ? "net"
-			: i->desc->type == LGUEST_DEVICE_T_CONSOLE ? "console"
-			: i->desc->type == LGUEST_DEVICE_T_BLOCK ? "block"
-			: "unknown");
-		descs[num] = *i->desc;
-		free(i->desc);
-		i->desc = &descs[num];
-	}
-}
-
 static void __attribute__((noreturn))
 run_guest(int lguest_fd, struct device_list *device_list)
 {
@@ -934,8 +913,8 @@ static void usage(void)

 int main(int argc, char *argv[])
 {
-	unsigned long mem, pgdir, start, page_offset, initrd_size = 0;
-	int c, lguest_fd;
+	unsigned long mem = 0, pgdir, start, page_offset, initrd_size = 0;
+	int i, c, lguest_fd;
 	struct device_list device_list;
 	void *boot = (void *)0;
 	const char *initrd_name = NULL;
@@ -945,6 +924,15 @@ int main(int argc, char *argv[])
 	device_list.lastdev = &device_list.dev;
 	FD_ZERO(&device_list.infds);

+	/* We need to know how much memory so we can allocate devices. */
+	for (i = 1; i < argc; i++) {
+		if (argv[i][0] != '-') {
+			mem = top = atoi(argv[i]) * 1024 * 1024;
+			device_list.descs = map_zeroed_pages(top, 1);
+			top += getpagesize();
+			break;
+		}
+	}
 	while ((c = getopt_long(argc, argv, "v", opts, NULL)) != EOF) {
 		switch (c) {
 		case 'v':
@@ -974,16 +962,12 @@ int main(int argc, char *argv[])
 	setup_console(&device_list);

 	/* First we map /dev/zero over all of guest-physical memory. */
-	mem = atoi(argv[optind]) * 1024 * 1024;
 	map_zeroed_pages(0, mem / getpagesize());

 	/* Now we load the kernel */
 	start = load_kernel(open_or_die(argv[optind+1], O_RDONLY),
 			    &page_offset);

-	/* Write the device descriptors into memory. */
-	map_device_descriptors(&device_list, mem);
-
 	/* Map the initrd image if requested */
 	if (initrd_name) {
 		initrd_size = load_initrd(initrd_name, mem);
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index de7e6ef48bd..0360b1f14d1 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -161,11 +161,11 @@ static void mbox_rx_work(struct work_struct *work)
 /*
  * Mailbox interrupt handler
  */
-static void mbox_txq_fn(request_queue_t * q)
+static void mbox_txq_fn(struct request_queue * q)
 {
 }

-static void mbox_rxq_fn(request_queue_t * q)
+static void mbox_rxq_fn(struct request_queue * q)
 {
 }

@@ -180,7 +180,7 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
 {
 	struct request *rq;
 	mbox_msg_t msg;
-	request_queue_t *q = mbox->rxq->queue;
+	struct request_queue *q = mbox->rxq->queue;

 	disable_mbox_irq(mbox, IRQ_RX);

@@ -297,7 +297,7 @@ static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
 					request_fn_proc * proc,
 					void (*work) (struct work_struct *))
 {
-	request_queue_t *q;
+	struct request_queue *q;
 	struct omap_mbox_queue *mq;

 	mq = kzalloc(sizeof(struct omap_mbox_queue), GFP_KERNEL);
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 8b20c0c1556..2bfdeb8ea8b 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -40,13 +40,10 @@ enum {
 struct spu_context_ops;
 struct spu_gang;

-enum {
-	SPU_SCHED_WAS_ACTIVE,	/* was active upon spu_acquire_saved()  */
-};
-
 /* ctx->sched_flags */
 enum {
 	SPU_SCHED_NOTIFY_ACTIVE,
+	SPU_SCHED_WAS_ACTIVE,	/* was active upon spu_acquire_saved()  */
 };

 struct spu_context {
diff --git a/arch/sparc/defconfig b/arch/sparc/defconfig
index 38bd79fe6e7..fdc67238408 100644
--- a/arch/sparc/defconfig
+++ b/arch/sparc/defconfig
@@ -600,7 +600,7 @@ CONFIG_LEGACY_PTY_COUNT=256
 # CONFIG_IPMI_HANDLER is not set
 # CONFIG_WATCHDOG is not set
 CONFIG_HW_RANDOM=m
-CONFIG_RTC=m
+CONFIG_JS_RTC=m
 # CONFIG_R3964 is not set
 # CONFIG_APPLICOM is not set
 # CONFIG_DRM is not set
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index 55bac516dfe..7b4abde4302 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -161,6 +161,8 @@ EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_one));
 EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_sgl));
 EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_one));

+EXPORT_SYMBOL(BTFIXUP_CALL(pgprot_noncached));
+
 #ifdef CONFIG_SBUS
 EXPORT_SYMBOL(sbus_root);
 EXPORT_SYMBOL(dma_chain);
@@ -260,6 +262,7 @@ EXPORT_SYMBOL(__memmove);
 /* Moving data to/from userspace. */
 EXPORT_SYMBOL(__copy_user);
 EXPORT_SYMBOL(__strncpy_from_user);
+EXPORT_SYMBOL(__strnlen_user);

 /* Networking helper routines. */
 EXPORT_SYMBOL(__csum_partial_copy_sparc_generic);
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 47583887abc..15109c156e8 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -35,6 +35,8 @@ SECTIONS
 	__ex_table : { *(__ex_table) }
 	__stop___ex_table = .;

+	NOTES
+
 	. = ALIGN(4096);
 	__init_begin = .;
 	_sinittext = .;
diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
index a65eba41097..1c37ea892de 100644
--- a/arch/sparc/lib/memset.S
+++ b/arch/sparc/lib/memset.S
@@ -162,7 +162,7 @@ __bzero:
 8:
 	 add		%o0, 1, %o0
 	subcc		%o1, 1, %o1
-	bne,a		8b
+	bne		8b
 	 EX(stb	%g3, [%o0 - 1], add %o1, 1)
 0:
 	retl
diff --git a/arch/sparc/prom/printf.c b/arch/sparc/prom/printf.c
index 27fdac99f79..a36ab9c5ee0 100644
--- a/arch/sparc/prom/printf.c
+++ b/arch/sparc/prom/printf.c
@@ -13,6 +13,7 @@
  */

 #include <linux/kernel.h>
+#include <linux/module.h>

 #include <asm/openprom.h>
 #include <asm/oplib.h>
@@ -44,3 +45,4 @@ prom_printf(char *fmt, ...)

 	prom_write(ppbuf, i);
 }
+EXPORT_SYMBOL(prom_printf);
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 10e301970a4..68338a601f7 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,11 +1,12 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.22
-# Thu Jul 19 21:30:37 2007
+# Linux kernel version: 2.6.23-rc1
+# Sun Jul 22 19:24:37 2007
 #
 CONFIG_SPARC=y
 CONFIG_SPARC64=y
 CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CMOS_UPDATE=y
 CONFIG_GENERIC_CLOCKEVENTS=y
 CONFIG_64BIT=y
 CONFIG_MMU=y
@@ -17,6 +18,7 @@ CONFIG_ARCH_MAY_HAVE_PC_FDC=y
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
 CONFIG_AUDIT_ARCH=y
 CONFIG_ARCH_NO_VIRT_TO_BUS=y
+CONFIG_OF=y
 CONFIG_SPARC64_PAGE_SIZE_8KB=y
 # CONFIG_SPARC64_PAGE_SIZE_64KB is not set
 # CONFIG_SPARC64_PAGE_SIZE_512KB is not set
@@ -314,6 +316,7 @@ CONFIG_FW_LOADER=y
 # CONFIG_SYS_HYPERVISOR is not set
 CONFIG_CONNECTOR=m
 # CONFIG_MTD is not set
+CONFIG_OF_DEVICE=y
 # CONFIG_PARPORT is not set
 CONFIG_BLK_DEV=y
 # CONFIG_BLK_DEV_FD is not set
@@ -433,10 +436,7 @@ CONFIG_SCSI_FC_ATTRS=y
 CONFIG_SCSI_ISCSI_ATTRS=m
 # CONFIG_SCSI_SAS_ATTRS is not set
 # CONFIG_SCSI_SAS_LIBSAS is not set
-
-#
-# SCSI low-level drivers
-#
+CONFIG_SCSI_LOWLEVEL=y
 CONFIG_ISCSI_TCP=m
 # CONFIG_BLK_DEV_3W_XXXX_RAID is not set
 # CONFIG_SCSI_3W_9XXX is not set
@@ -701,7 +701,6 @@ CONFIG_UNIX98_PTYS=y
 # CONFIG_IPMI_HANDLER is not set
 # CONFIG_WATCHDOG is not set
 # CONFIG_HW_RANDOM is not set
-CONFIG_RTC=y
 # CONFIG_R3964 is not set
 # CONFIG_APPLICOM is not set
 # CONFIG_DRM is not set
@@ -844,6 +843,7 @@ CONFIG_HWMON=y
 #
 # CONFIG_DISPLAY_SUPPORT is not set
 # CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
 CONFIG_FB=y
 # CONFIG_FIRMWARE_EDID is not set
 CONFIG_FB_DDC=y
@@ -937,7 +937,6 @@ CONFIG_SND_MIXER_OSS=m
 CONFIG_SND_PCM_OSS=m
 CONFIG_SND_PCM_OSS_PLUGINS=y
 CONFIG_SND_SEQUENCER_OSS=y
-# CONFIG_SND_RTCTIMER is not set
 # CONFIG_SND_DYNAMIC_MINORS is not set
 CONFIG_SND_SUPPORT_OLD_API=y
 CONFIG_SND_VERBOSE_PROCFS=y
@@ -1034,6 +1033,10 @@ CONFIG_SND_SUN_CS4231=m
 # CONFIG_SND_SOC is not set

 #
+# SoC Audio support for SuperH
+#
+
+#
 # Open Sound System
 #
 # CONFIG_SOUND_PRIME is not set
@@ -1157,19 +1160,7 @@ CONFIG_USB_STORAGE=m
 #
 # CONFIG_USB_GADGET is not set
 # CONFIG_MMC is not set
-
-#
-# LED devices
-#
 # CONFIG_NEW_LEDS is not set
-
-#
-# LED drivers
-#
-
-#
-# LED Triggers
-#
 # CONFIG_INFINIBAND is not set

 #
@@ -1199,7 +1190,6 @@ CONFIG_USB_STORAGE=m
 # Misc Linux/SPARC drivers
 #
 CONFIG_SUN_OPENPROMIO=m
-CONFIG_SUN_MOSTEK_RTC=y
 # CONFIG_OBP_FLASH is not set
 # CONFIG_SUN_BPP is not set
 # CONFIG_BBC_I2C is not set
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 35feacb6b8e..9dbd833d79d 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -1,15 +1,15 @@
-/* $Id: head.S,v 1.87 2002/02/09 19:49:31 davem Exp $
- * head.S: Initial boot code for the Sparc64 port of Linux.
+/* head.S: Initial boot code for the Sparc64 port of Linux.
  *
- * Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996, 1997, 2007 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1996 David Sitsky (David.Sitsky@anu.edu.au)
- * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  * Copyright (C) 1997 Miguel de Icaza (miguel@nuclecu.unam.mx)
  */

 #include <linux/version.h>
 #include <linux/errno.h>
 #include <linux/threads.h>
+#include <linux/init.h>
 #include <asm/thread_info.h>
 #include <asm/asi.h>
 #include <asm/pstate.h>
@@ -374,6 +374,7 @@ jump_to_sun4u_init:
 	jmpl	%g2 + %g0, %g0
 	 nop

+	.section	.text.init.refok
 sun4u_init:
 	BRANCH_IF_SUN4V(g1, sun4v_init)

@@ -529,6 +530,8 @@ tlb_fixup_done:
 	nop
 	/* Not reached... */

+	.previous
+
 	/* This is meant to allow the sharing of this code between
 	 * boot processor invocation (via setup_tba() below) and
 	 * secondary processor startup (via trampoline.S).  The
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index 481861764de..b982fa3dd74 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -45,6 +45,8 @@ SECTIONS
 	__ex_table : { *(__ex_table) }
 	__stop___ex_table = .;

+	NOTES
+
 	. = ALIGN(PAGE_SIZE);
 	__init_begin = .;
 	.init.text : {
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 542c9ef858f..d8709050740 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -499,7 +499,7 @@ static struct mc_device mem_mc = {
 	.remove		= mem_remove,
 };

-static int mem_mc_init(void)
+static int __init mem_mc_init(void)
 {
 	if(can_drop_memory())
 		mconsole_register_dev(&mem_mc);
@@ -798,7 +798,7 @@ void mconsole_stack(struct mc_request *req)
  */
 static char *notify_socket = NULL;

-static int mconsole_init(void)
+static int __init mconsole_init(void)
 {
 	/* long to avoid size mismatch warnings from gcc */
 	long sock;
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 72773dd5442..d35d0c1ee7f 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -623,7 +623,7 @@ static int eth_setup_common(char *str, int index)
 	return found;
 }

-static int eth_setup(char *str)
+static int __init eth_setup(char *str)
 {
 	struct eth_init *new;
 	char *error;
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index fc27f6c72b4..aff661fe2ee 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -469,7 +469,7 @@ __uml_help(fakehd,
469" Change the ubd device name to \"hd\".\n\n" 469" Change the ubd device name to \"hd\".\n\n"
470); 470);
471 471
472static void do_ubd_request(request_queue_t * q); 472static void do_ubd_request(struct request_queue * q);
473 473
474/* Only changed by ubd_init, which is an initcall. */ 474/* Only changed by ubd_init, which is an initcall. */
475int thread_fd = -1; 475int thread_fd = -1;
@@ -1081,7 +1081,7 @@ static void prepare_request(struct request *req, struct io_thread_req *io_req,
 }

 /* Called with dev->lock held */
-static void do_ubd_request(request_queue_t *q)
+static void do_ubd_request(struct request_queue *q)
 {
 	struct io_thread_req *io_req;
 	struct request *req;
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 72ff85693a3..d2b11f24269 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -62,7 +62,7 @@ static void setup_highmem(unsigned long highmem_start,
 }
 #endif

-void mem_init(void)
+void __init mem_init(void)
 {
 	/* clear the zero-page */
 	memset((void *) empty_zero_page, 0, PAGE_SIZE);
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
index 3ba6e4c841d..5ee7e851bbc 100644
--- a/arch/um/kernel/physmem.c
+++ b/arch/um/kernel/physmem.c
@@ -28,7 +28,8 @@ unsigned long high_physmem;

 extern unsigned long long physmem_size;

-int init_maps(unsigned long physmem, unsigned long iomem, unsigned long highmem)
+int __init init_maps(unsigned long physmem, unsigned long iomem,
+		     unsigned long highmem)
 {
 	struct page *p, *map;
 	unsigned long phys_len, phys_pages, highmem_len, highmem_pages;
@@ -47,13 +48,7 @@ int init_maps(unsigned long physmem, unsigned long iomem, unsigned long highmem)
 	total_pages = phys_pages + iomem_pages + highmem_pages;
 	total_len = phys_len + iomem_len + highmem_len;

-	if(kmalloc_ok){
-		map = kmalloc(total_len, GFP_KERNEL);
-		if(map == NULL)
-			map = vmalloc(total_len);
-	}
-	else map = alloc_bootmem_low_pages(total_len);
-
+	map = alloc_bootmem_low_pages(total_len);
 	if(map == NULL)
 		return -ENOMEM;

@@ -98,8 +93,8 @@ void map_memory(unsigned long virt, unsigned long phys, unsigned long len,

 extern int __syscall_stub_start;

-void setup_physmem(unsigned long start, unsigned long reserve_end,
-		   unsigned long len, unsigned long long highmem)
+void __init setup_physmem(unsigned long start, unsigned long reserve_end,
+			  unsigned long len, unsigned long long highmem)
 {
 	unsigned long reserve = reserve_end - start;
 	int pfn = PFN_UP(__pa(reserve_end));
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 2a69a7ce579..48051a98525 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -145,7 +145,7 @@ void init_idle_skas(void)

 extern void start_kernel(void);

-static int start_kernel_proc(void *unused)
+static int __init start_kernel_proc(void *unused)
 {
 	int pid;

@@ -165,7 +165,7 @@ extern int userspace_pid[];

 extern char cpu0_irqstack[];

-int start_uml_skas(void)
+int __init start_uml_skas(void)
 {
 	stack_protections((unsigned long) &cpu0_irqstack);
 	set_sigstack(cpu0_irqstack, THREAD_SIZE);
diff --git a/arch/um/os-Linux/aio.c b/arch/um/os-Linux/aio.c
index b126df4ea16..59348359f9a 100644
--- a/arch/um/os-Linux/aio.c
+++ b/arch/um/os-Linux/aio.c
@@ -14,6 +14,7 @@
 #include "init.h"
 #include "user.h"
 #include "mode.h"
+#include "kern_constants.h"

 struct aio_thread_req {
 	enum aio_type type;
@@ -65,47 +66,33 @@ static long io_getevents(aio_context_t ctx_id, long min_nr, long nr,
 static int do_aio(aio_context_t ctx, enum aio_type type, int fd, char *buf,
 		  int len, unsigned long long offset, struct aio_context *aio)
 {
-	struct iocb iocb, *iocbp = &iocb;
+	struct iocb *iocbp = & ((struct iocb) {
+		.aio_data = (unsigned long) aio,
+		.aio_fildes = fd,
+		.aio_buf = (unsigned long) buf,
+		.aio_nbytes = len,
+		.aio_offset = offset
+	});
 	char c;
-	int err;

-	iocb = ((struct iocb) { .aio_data = (unsigned long) aio,
-				.aio_reqprio = 0,
-				.aio_fildes = fd,
-				.aio_buf = (unsigned long) buf,
-				.aio_nbytes = len,
-				.aio_offset = offset,
-				.aio_reserved1 = 0,
-				.aio_reserved2 = 0,
-				.aio_reserved3 = 0 });
-
-	switch(type){
+	switch (type) {
 	case AIO_READ:
-		iocb.aio_lio_opcode = IOCB_CMD_PREAD;
-		err = io_submit(ctx, 1, &iocbp);
+		iocbp->aio_lio_opcode = IOCB_CMD_PREAD;
 		break;
 	case AIO_WRITE:
-		iocb.aio_lio_opcode = IOCB_CMD_PWRITE;
-		err = io_submit(ctx, 1, &iocbp);
+		iocbp->aio_lio_opcode = IOCB_CMD_PWRITE;
 		break;
 	case AIO_MMAP:
-		iocb.aio_lio_opcode = IOCB_CMD_PREAD;
-		iocb.aio_buf = (unsigned long) &c;
-		iocb.aio_nbytes = sizeof(c);
-		err = io_submit(ctx, 1, &iocbp);
+		iocbp->aio_lio_opcode = IOCB_CMD_PREAD;
+		iocbp->aio_buf = (unsigned long) &c;
+		iocbp->aio_nbytes = sizeof(c);
 		break;
 	default:
-		printk("Bogus op in do_aio - %d\n", type);
-		err = -EINVAL;
-		break;
+		printk(UM_KERN_ERR "Bogus op in do_aio - %d\n", type);
+		return -EINVAL;
 	}

-	if(err > 0)
-		err = 0;
-	else
-		err = -errno;
-
-	return err;
+	return (io_submit(ctx, 1, &iocbp) > 0) ? 0 : -errno;
 }

 /* Initialized in an initcall and unchanged thereafter */
diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
index 2d9d2ca3929..e9c14329751 100644
--- a/arch/um/os-Linux/process.c
+++ b/arch/um/os-Linux/process.c
@@ -194,7 +194,7 @@ int os_unmap_memory(void *addr, int len)
 #define MADV_REMOVE KERNEL_MADV_REMOVE
 #endif

-int __init os_drop_memory(void *addr, int length)
+int os_drop_memory(void *addr, int length)
 {
 	int err;

diff --git a/arch/um/os-Linux/user_syms.c b/arch/um/os-Linux/user_syms.c
index 419b2d5ff6d..4c37b1b1d0b 100644
--- a/arch/um/os-Linux/user_syms.c
+++ b/arch/um/os-Linux/user_syms.c
@@ -19,10 +19,7 @@ extern void *memmove(void *, const void *, size_t);
 extern void *memset(void *, int, size_t);
 extern int printf(const char *, ...);

-/* If they're not defined, the export is included in lib/string.c.*/
-#ifdef __HAVE_ARCH_STRLEN
-EXPORT_SYMBOL(strlen);
-#endif
+/* If it's not defined, the export is included in lib/string.c.*/
 #ifdef __HAVE_ARCH_STRSTR
 EXPORT_SYMBOL(strstr);
 #endif
diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile
index 098720be019..d6b3ecd4b77 100644
--- a/arch/um/sys-i386/Makefile
+++ b/arch/um/sys-i386/Makefile
@@ -4,7 +4,7 @@ obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \

 obj-$(CONFIG_MODE_SKAS) += stub.o stub_segv.o

-subarch-obj-y = lib/bitops.o lib/semaphore.o
+subarch-obj-y = lib/bitops.o lib/semaphore.o lib/string.o
 subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem.o
 subarch-obj-$(CONFIG_MODULES) += kernel/module.o

diff --git a/block/as-iosched.c b/block/as-iosched.c
index 3e316dd7252..dc715a562e1 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -796,7 +796,7 @@ static void update_write_batch(struct as_data *ad)
  * as_completed_request is to be called when a request has completed and
  * returned something to the requesting process, be it an error or data.
  */
-static void as_completed_request(request_queue_t *q, struct request *rq)
+static void as_completed_request(struct request_queue *q, struct request *rq)
 {
 	struct as_data *ad = q->elevator->elevator_data;

@@ -853,7 +853,8 @@ out:
  * reference unless it replaces the request at somepart of the elevator
  * (ie. the dispatch queue)
  */
-static void as_remove_queued_request(request_queue_t *q, struct request *rq)
+static void as_remove_queued_request(struct request_queue *q,
+				     struct request *rq)
 {
 	const int data_dir = rq_is_sync(rq);
 	struct as_data *ad = q->elevator->elevator_data;
@@ -978,7 +979,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
  * read/write expire, batch expire, etc, and moves it to the dispatch
  * queue. Returns 1 if a request was found, 0 otherwise.
  */
-static int as_dispatch_request(request_queue_t *q, int force)
+static int as_dispatch_request(struct request_queue *q, int force)
 {
 	struct as_data *ad = q->elevator->elevator_data;
 	const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
@@ -1139,7 +1140,7 @@ fifo_expired:
 /*
  * add rq to rbtree and fifo
  */
-static void as_add_request(request_queue_t *q, struct request *rq)
+static void as_add_request(struct request_queue *q, struct request *rq)
 {
 	struct as_data *ad = q->elevator->elevator_data;
 	int data_dir;
@@ -1167,7 +1168,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 	RQ_SET_STATE(rq, AS_RQ_QUEUED);
 }

-static void as_activate_request(request_queue_t *q, struct request *rq)
+static void as_activate_request(struct request_queue *q, struct request *rq)
 {
 	WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED);
 	RQ_SET_STATE(rq, AS_RQ_REMOVED);
@@ -1175,7 +1176,7 @@ static void as_activate_request(request_queue_t *q, struct request *rq)
 		atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched);
 }

-static void as_deactivate_request(request_queue_t *q, struct request *rq)
+static void as_deactivate_request(struct request_queue *q, struct request *rq)
 {
 	WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED);
 	RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
@@ -1189,7 +1190,7 @@ static void as_deactivate_request(request_queue_t *q, struct request *rq)
  * is not empty - it is used in the block layer to check for plugging and
  * merging opportunities
  */
-static int as_queue_empty(request_queue_t *q)
+static int as_queue_empty(struct request_queue *q)
 {
 	struct as_data *ad = q->elevator->elevator_data;

@@ -1198,7 +1199,7 @@ static int as_queue_empty(request_queue_t *q)
 }

 static int
-as_merge(request_queue_t *q, struct request **req, struct bio *bio)
+as_merge(struct request_queue *q, struct request **req, struct bio *bio)
 {
 	struct as_data *ad = q->elevator->elevator_data;
 	sector_t rb_key = bio->bi_sector + bio_sectors(bio);
@@ -1216,7 +1217,8 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio)
 	return ELEVATOR_NO_MERGE;
 }

-static void as_merged_request(request_queue_t *q, struct request *req, int type)
+static void as_merged_request(struct request_queue *q, struct request *req,
+			      int type)
 {
 	struct as_data *ad = q->elevator->elevator_data;

@@ -1234,7 +1236,7 @@ static void as_merged_request(request_queue_t *q, struct request *req, int type)
 	}
 }

-static void as_merged_requests(request_queue_t *q, struct request *req,
+static void as_merged_requests(struct request_queue *q, struct request *req,
 			       struct request *next)
 {
 	/*
@@ -1285,7 +1287,7 @@ static void as_work_handler(struct work_struct *work)
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }

-static int as_may_queue(request_queue_t *q, int rw)
+static int as_may_queue(struct request_queue *q, int rw)
 {
 	int ret = ELV_MQUEUE_MAY;
 	struct as_data *ad = q->elevator->elevator_data;
@@ -1318,7 +1320,7 @@ static void as_exit_queue(elevator_t *e)
 /*
  * initialize elevator private data (as_data).
  */
-static void *as_init_queue(request_queue_t *q)
+static void *as_init_queue(struct request_queue *q)
 {
 	struct as_data *ad;

diff --git a/block/blktrace.c b/block/blktrace.c
index 3f0e7c37c05..20c3e22587b 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -231,7 +231,7 @@ static void blk_trace_cleanup(struct blk_trace *bt)
 	kfree(bt);
 }

-static int blk_trace_remove(request_queue_t *q)
+static int blk_trace_remove(struct request_queue *q)
 {
 	struct blk_trace *bt;

@@ -312,7 +312,7 @@ static struct rchan_callbacks blk_relay_callbacks = {
 /*
  * Setup everything required to start tracing
  */
-static int blk_trace_setup(request_queue_t *q, struct block_device *bdev,
+static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
 			   char __user *arg)
 {
 	struct blk_user_trace_setup buts;
@@ -401,7 +401,7 @@ err:
 	return ret;
 }

-static int blk_trace_startstop(request_queue_t *q, int start)
+static int blk_trace_startstop(struct request_queue *q, int start)
 {
 	struct blk_trace *bt;
 	int ret;
@@ -444,7 +444,7 @@ static int blk_trace_startstop(request_queue_t *q, int start)
  **/
 int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 {
-	request_queue_t *q;
+	struct request_queue *q;
 	int ret, start = 0;

 	q = bdev_get_queue(bdev);
@@ -479,7 +479,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
  * @q:    the request queue associated with the device
  *
  **/
-void blk_trace_shutdown(request_queue_t *q)
+void blk_trace_shutdown(struct request_queue *q)
 {
 	if (q->blk_trace) {
 		blk_trace_startstop(q, 0);
diff --git a/block/bsg.c b/block/bsg.c
index b571869928a..3b2f05258a9 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -37,7 +37,7 @@
 #define BSG_VERSION	"0.4"

 struct bsg_device {
-	request_queue_t *queue;
+	struct request_queue *queue;
 	spinlock_t lock;
 	struct list_head busy_list;
 	struct list_head done_list;
@@ -180,7 +180,7 @@ unlock:
 	return ret;
 }

-static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
+static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
 				struct sg_io_v4 *hdr, int has_write_perm)
 {
 	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
@@ -214,7 +214,7 @@ static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
  * Check if sg_io_v4 from user is allowed and valid
  */
 static int
-bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
+bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
 {
 	int ret = 0;

@@ -250,7 +250,7 @@ bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
 static struct request *
 bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
 {
-	request_queue_t *q = bd->queue;
+	struct request_queue *q = bd->queue;
 	struct request *rq, *next_rq = NULL;
 	int ret, rw;
 	unsigned int dxfer_len;
@@ -345,7 +345,7 @@ static void bsg_rq_end_io(struct request *rq, int uptodate)
  * do final setup of a 'bc' and submit the matching 'rq' to the block
  * layer for io
  */
-static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
+static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
 			    struct bsg_command *bc, struct request *rq)
 {
 	rq->sense = bc->sense;
@@ -611,7 +611,7 @@ static int __bsg_write(struct bsg_device *bd, const char __user *buf,
 	bc = NULL;
 	ret = 0;
 	while (nr_commands) {
-		request_queue_t *q = bd->queue;
+		struct request_queue *q = bd->queue;

 		bc = bsg_alloc_command(bd);
 		if (IS_ERR(bc)) {
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index d148ccbc36d..54dc0543900 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -71,7 +71,7 @@ struct cfq_rb_root {
  * Per block device queue structure
  */
 struct cfq_data {
-	request_queue_t *queue;
+	struct request_queue *queue;

 	/*
 	 * rr list of queues with requests and the count of them
@@ -197,7 +197,7 @@ CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 #undef CFQ_CFQQ_FNS

-static void cfq_dispatch_insert(request_queue_t *, struct request *);
+static void cfq_dispatch_insert(struct request_queue *, struct request *);
 static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
 				       struct task_struct *, gfp_t);
 static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *,
@@ -237,7 +237,7 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
237 kblockd_schedule_work(&cfqd->unplug_work); 237 kblockd_schedule_work(&cfqd->unplug_work);
238} 238}
239 239
240static int cfq_queue_empty(request_queue_t *q) 240static int cfq_queue_empty(struct request_queue *q)
241{ 241{
242 struct cfq_data *cfqd = q->elevator->elevator_data; 242 struct cfq_data *cfqd = q->elevator->elevator_data;
243 243
@@ -623,7 +623,7 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
623 return NULL; 623 return NULL;
624} 624}
625 625
626static void cfq_activate_request(request_queue_t *q, struct request *rq) 626static void cfq_activate_request(struct request_queue *q, struct request *rq)
627{ 627{
628 struct cfq_data *cfqd = q->elevator->elevator_data; 628 struct cfq_data *cfqd = q->elevator->elevator_data;
629 629
@@ -641,7 +641,7 @@ static void cfq_activate_request(request_queue_t *q, struct request *rq)
641 cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors; 641 cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
642} 642}
643 643
644static void cfq_deactivate_request(request_queue_t *q, struct request *rq) 644static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
645{ 645{
646 struct cfq_data *cfqd = q->elevator->elevator_data; 646 struct cfq_data *cfqd = q->elevator->elevator_data;
647 647
@@ -665,7 +665,8 @@ static void cfq_remove_request(struct request *rq)
665 } 665 }
666} 666}
667 667
668static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio) 668static int cfq_merge(struct request_queue *q, struct request **req,
669 struct bio *bio)
669{ 670{
670 struct cfq_data *cfqd = q->elevator->elevator_data; 671 struct cfq_data *cfqd = q->elevator->elevator_data;
671 struct request *__rq; 672 struct request *__rq;
@@ -679,7 +680,7 @@ static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
679 return ELEVATOR_NO_MERGE; 680 return ELEVATOR_NO_MERGE;
680} 681}
681 682
682static void cfq_merged_request(request_queue_t *q, struct request *req, 683static void cfq_merged_request(struct request_queue *q, struct request *req,
683 int type) 684 int type)
684{ 685{
685 if (type == ELEVATOR_FRONT_MERGE) { 686 if (type == ELEVATOR_FRONT_MERGE) {
@@ -690,7 +691,7 @@ static void cfq_merged_request(request_queue_t *q, struct request *req,
690} 691}
691 692
692static void 693static void
693cfq_merged_requests(request_queue_t *q, struct request *rq, 694cfq_merged_requests(struct request_queue *q, struct request *rq,
694 struct request *next) 695 struct request *next)
695{ 696{
696 /* 697 /*
@@ -703,7 +704,7 @@ cfq_merged_requests(request_queue_t *q, struct request *rq,
703 cfq_remove_request(next); 704 cfq_remove_request(next);
704} 705}
705 706
706static int cfq_allow_merge(request_queue_t *q, struct request *rq, 707static int cfq_allow_merge(struct request_queue *q, struct request *rq,
707 struct bio *bio) 708 struct bio *bio)
708{ 709{
709 struct cfq_data *cfqd = q->elevator->elevator_data; 710 struct cfq_data *cfqd = q->elevator->elevator_data;
@@ -913,7 +914,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
913/* 914/*
914 * Move request from internal lists to the request queue dispatch list. 915 * Move request from internal lists to the request queue dispatch list.
915 */ 916 */
916static void cfq_dispatch_insert(request_queue_t *q, struct request *rq) 917static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
917{ 918{
918 struct cfq_data *cfqd = q->elevator->elevator_data; 919 struct cfq_data *cfqd = q->elevator->elevator_data;
919 struct cfq_queue *cfqq = RQ_CFQQ(rq); 920 struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -1093,7 +1094,7 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
1093 return dispatched; 1094 return dispatched;
1094} 1095}
1095 1096
1096static int cfq_dispatch_requests(request_queue_t *q, int force) 1097static int cfq_dispatch_requests(struct request_queue *q, int force)
1097{ 1098{
1098 struct cfq_data *cfqd = q->elevator->elevator_data; 1099 struct cfq_data *cfqd = q->elevator->elevator_data;
1099 struct cfq_queue *cfqq; 1100 struct cfq_queue *cfqq;
@@ -1214,7 +1215,7 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic)
1214 struct cfq_data *cfqd = cic->key; 1215 struct cfq_data *cfqd = cic->key;
1215 1216
1216 if (cfqd) { 1217 if (cfqd) {
1217 request_queue_t *q = cfqd->queue; 1218 struct request_queue *q = cfqd->queue;
1218 1219
1219 spin_lock_irq(q->queue_lock); 1220 spin_lock_irq(q->queue_lock);
1220 __cfq_exit_single_io_context(cfqd, cic); 1221 __cfq_exit_single_io_context(cfqd, cic);
@@ -1775,7 +1776,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1775 } 1776 }
1776} 1777}
1777 1778
1778static void cfq_insert_request(request_queue_t *q, struct request *rq) 1779static void cfq_insert_request(struct request_queue *q, struct request *rq)
1779{ 1780{
1780 struct cfq_data *cfqd = q->elevator->elevator_data; 1781 struct cfq_data *cfqd = q->elevator->elevator_data;
1781 struct cfq_queue *cfqq = RQ_CFQQ(rq); 1782 struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -1789,7 +1790,7 @@ static void cfq_insert_request(request_queue_t *q, struct request *rq)
1789 cfq_rq_enqueued(cfqd, cfqq, rq); 1790 cfq_rq_enqueued(cfqd, cfqq, rq);
1790} 1791}
1791 1792
1792static void cfq_completed_request(request_queue_t *q, struct request *rq) 1793static void cfq_completed_request(struct request_queue *q, struct request *rq)
1793{ 1794{
1794 struct cfq_queue *cfqq = RQ_CFQQ(rq); 1795 struct cfq_queue *cfqq = RQ_CFQQ(rq);
1795 struct cfq_data *cfqd = cfqq->cfqd; 1796 struct cfq_data *cfqd = cfqq->cfqd;
@@ -1868,7 +1869,7 @@ static inline int __cfq_may_queue(struct cfq_queue *cfqq)
1868 return ELV_MQUEUE_MAY; 1869 return ELV_MQUEUE_MAY;
1869} 1870}
1870 1871
1871static int cfq_may_queue(request_queue_t *q, int rw) 1872static int cfq_may_queue(struct request_queue *q, int rw)
1872{ 1873{
1873 struct cfq_data *cfqd = q->elevator->elevator_data; 1874 struct cfq_data *cfqd = q->elevator->elevator_data;
1874 struct task_struct *tsk = current; 1875 struct task_struct *tsk = current;
@@ -1922,7 +1923,7 @@ static void cfq_put_request(struct request *rq)
1922 * Allocate cfq data structures associated with this request. 1923 * Allocate cfq data structures associated with this request.
1923 */ 1924 */
1924static int 1925static int
1925cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask) 1926cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
1926{ 1927{
1927 struct cfq_data *cfqd = q->elevator->elevator_data; 1928 struct cfq_data *cfqd = q->elevator->elevator_data;
1928 struct task_struct *tsk = current; 1929 struct task_struct *tsk = current;
@@ -1974,7 +1975,7 @@ static void cfq_kick_queue(struct work_struct *work)
1974{ 1975{
1975 struct cfq_data *cfqd = 1976 struct cfq_data *cfqd =
1976 container_of(work, struct cfq_data, unplug_work); 1977 container_of(work, struct cfq_data, unplug_work);
1977 request_queue_t *q = cfqd->queue; 1978 struct request_queue *q = cfqd->queue;
1978 unsigned long flags; 1979 unsigned long flags;
1979 1980
1980 spin_lock_irqsave(q->queue_lock, flags); 1981 spin_lock_irqsave(q->queue_lock, flags);
@@ -2072,7 +2073,7 @@ static void cfq_put_async_queues(struct cfq_data *cfqd)
2072static void cfq_exit_queue(elevator_t *e) 2073static void cfq_exit_queue(elevator_t *e)
2073{ 2074{
2074 struct cfq_data *cfqd = e->elevator_data; 2075 struct cfq_data *cfqd = e->elevator_data;
2075 request_queue_t *q = cfqd->queue; 2076 struct request_queue *q = cfqd->queue;
2076 2077
2077 cfq_shutdown_timer_wq(cfqd); 2078 cfq_shutdown_timer_wq(cfqd);
2078 2079
@@ -2098,7 +2099,7 @@ static void cfq_exit_queue(elevator_t *e)
2098 kfree(cfqd); 2099 kfree(cfqd);
2099} 2100}
2100 2101
2101static void *cfq_init_queue(request_queue_t *q) 2102static void *cfq_init_queue(struct request_queue *q)
2102{ 2103{
2103 struct cfq_data *cfqd; 2104 struct cfq_data *cfqd;
2104 2105
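Taken together, the cfq hunks above cover most of the scheduler-facing callback set. For orientation, a condensed sketch of how these functions are wired into the elevator core through an ops table (field names follow the 2.6.22-era elevator API that the elevator.c hunks below dispatch through; the table is abridged, not the full registration from cfq-iosched.c):

/* Abridged sketch; the real iosched_cfq table registers more hooks. */
static struct elevator_type iosched_cfq_sketch = {
        .ops = {
                .elevator_merge_fn        = cfq_merge,
                .elevator_allow_merge_fn  = cfq_allow_merge,
                .elevator_dispatch_fn     = cfq_dispatch_requests,
                .elevator_add_req_fn      = cfq_insert_request,
                .elevator_activate_req_fn = cfq_activate_request,
                .elevator_init_fn         = cfq_init_queue,
        },
        .elevator_name = "cfq",
};

The core never calls a scheduler directly; it goes through e->ops->elevator_*_fn, which is why this commit can retype every callback in one sweep without disturbing the indirection.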
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 87ca02ac84c..1a511ffaf8a 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -106,7 +106,7 @@ deadline_add_request(struct request_queue *q, struct request *rq)
106/* 106/*
107 * remove rq from rbtree and fifo. 107 * remove rq from rbtree and fifo.
108 */ 108 */
109static void deadline_remove_request(request_queue_t *q, struct request *rq) 109static void deadline_remove_request(struct request_queue *q, struct request *rq)
110{ 110{
111 struct deadline_data *dd = q->elevator->elevator_data; 111 struct deadline_data *dd = q->elevator->elevator_data;
112 112
@@ -115,7 +115,7 @@ static void deadline_remove_request(request_queue_t *q, struct request *rq)
115} 115}
116 116
117static int 117static int
118deadline_merge(request_queue_t *q, struct request **req, struct bio *bio) 118deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
119{ 119{
120 struct deadline_data *dd = q->elevator->elevator_data; 120 struct deadline_data *dd = q->elevator->elevator_data;
121 struct request *__rq; 121 struct request *__rq;
@@ -144,8 +144,8 @@ out:
144 return ret; 144 return ret;
145} 145}
146 146
147static void deadline_merged_request(request_queue_t *q, struct request *req, 147static void deadline_merged_request(struct request_queue *q,
148 int type) 148 struct request *req, int type)
149{ 149{
150 struct deadline_data *dd = q->elevator->elevator_data; 150 struct deadline_data *dd = q->elevator->elevator_data;
151 151
@@ -159,7 +159,7 @@ static void deadline_merged_request(request_queue_t *q, struct request *req,
159} 159}
160 160
161static void 161static void
162deadline_merged_requests(request_queue_t *q, struct request *req, 162deadline_merged_requests(struct request_queue *q, struct request *req,
163 struct request *next) 163 struct request *next)
164{ 164{
165 /* 165 /*
@@ -185,7 +185,7 @@ deadline_merged_requests(request_queue_t *q, struct request *req,
185static inline void 185static inline void
186deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq) 186deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
187{ 187{
188 request_queue_t *q = rq->q; 188 struct request_queue *q = rq->q;
189 189
190 deadline_remove_request(q, rq); 190 deadline_remove_request(q, rq);
191 elv_dispatch_add_tail(q, rq); 191 elv_dispatch_add_tail(q, rq);
@@ -236,7 +236,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
236 * deadline_dispatch_requests selects the best request according to 236 * deadline_dispatch_requests selects the best request according to
237 * read/write expire, fifo_batch, etc 237 * read/write expire, fifo_batch, etc
238 */ 238 */
239static int deadline_dispatch_requests(request_queue_t *q, int force) 239static int deadline_dispatch_requests(struct request_queue *q, int force)
240{ 240{
241 struct deadline_data *dd = q->elevator->elevator_data; 241 struct deadline_data *dd = q->elevator->elevator_data;
242 const int reads = !list_empty(&dd->fifo_list[READ]); 242 const int reads = !list_empty(&dd->fifo_list[READ]);
@@ -335,7 +335,7 @@ dispatch_request:
335 return 1; 335 return 1;
336} 336}
337 337
338static int deadline_queue_empty(request_queue_t *q) 338static int deadline_queue_empty(struct request_queue *q)
339{ 339{
340 struct deadline_data *dd = q->elevator->elevator_data; 340 struct deadline_data *dd = q->elevator->elevator_data;
341 341
@@ -356,7 +356,7 @@ static void deadline_exit_queue(elevator_t *e)
356/* 356/*
357 * initialize elevator private data (deadline_data). 357 * initialize elevator private data (deadline_data).
358 */ 358 */
359static void *deadline_init_queue(request_queue_t *q) 359static void *deadline_init_queue(struct request_queue *q)
360{ 360{
361 struct deadline_data *dd; 361 struct deadline_data *dd;
362 362
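The deadline hunks above all orbit one data structure: a FIFO list per data direction (dd->fifo_list[READ] and dd->fifo_list[WRITE]), backed by an rbtree for sector ordering. A condensed paraphrase of the dispatch decision, not the kernel function itself (rq_entry_fifo() is deadline-iosched.c's list_entry wrapper):

/* Hedged paraphrase of the choice deadline_dispatch_requests() makes. */
static struct request *deadline_pick_sketch(struct deadline_data *dd)
{
        int dir = READ;                 /* prefer reads by default... */

        if (list_empty(&dd->fifo_list[READ]))
                dir = WRITE;            /* ...unless only writes remain */

        if (list_empty(&dd->fifo_list[dir]))
                return NULL;            /* nothing to dispatch */

        /*
         * deadline_check_fifo() (above) additionally decides whether the
         * oldest request's deadline has expired; expired requests are
         * served ahead of the batched sector-order scan.
         */
        return rq_entry_fifo(dd->fifo_list[dir].next);
}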
diff --git a/block/elevator.c b/block/elevator.c
index d265963d1ed..c6d153de9fd 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -56,7 +56,7 @@ static const int elv_hash_shift = 6;
56 */ 56 */
57static int elv_iosched_allow_merge(struct request *rq, struct bio *bio) 57static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
58{ 58{
59 request_queue_t *q = rq->q; 59 struct request_queue *q = rq->q;
60 elevator_t *e = q->elevator; 60 elevator_t *e = q->elevator;
61 61
62 if (e->ops->elevator_allow_merge_fn) 62 if (e->ops->elevator_allow_merge_fn)
@@ -141,12 +141,13 @@ static struct elevator_type *elevator_get(const char *name)
141 return e; 141 return e;
142} 142}
143 143
144static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq) 144static void *elevator_init_queue(struct request_queue *q,
145 struct elevator_queue *eq)
145{ 146{
146 return eq->ops->elevator_init_fn(q); 147 return eq->ops->elevator_init_fn(q);
147} 148}
148 149
149static void elevator_attach(request_queue_t *q, struct elevator_queue *eq, 150static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
150 void *data) 151 void *data)
151{ 152{
152 q->elevator = eq; 153 q->elevator = eq;
@@ -172,7 +173,8 @@ __setup("elevator=", elevator_setup);
172 173
173static struct kobj_type elv_ktype; 174static struct kobj_type elv_ktype;
174 175
175static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e) 176static elevator_t *elevator_alloc(struct request_queue *q,
177 struct elevator_type *e)
176{ 178{
177 elevator_t *eq; 179 elevator_t *eq;
178 int i; 180 int i;
@@ -212,7 +214,7 @@ static void elevator_release(struct kobject *kobj)
212 kfree(e); 214 kfree(e);
213} 215}
214 216
215int elevator_init(request_queue_t *q, char *name) 217int elevator_init(struct request_queue *q, char *name)
216{ 218{
217 struct elevator_type *e = NULL; 219 struct elevator_type *e = NULL;
218 struct elevator_queue *eq; 220 struct elevator_queue *eq;
@@ -264,7 +266,7 @@ void elevator_exit(elevator_t *e)
264 266
265EXPORT_SYMBOL(elevator_exit); 267EXPORT_SYMBOL(elevator_exit);
266 268
267static void elv_activate_rq(request_queue_t *q, struct request *rq) 269static void elv_activate_rq(struct request_queue *q, struct request *rq)
268{ 270{
269 elevator_t *e = q->elevator; 271 elevator_t *e = q->elevator;
270 272
@@ -272,7 +274,7 @@ static void elv_activate_rq(request_queue_t *q, struct request *rq)
272 e->ops->elevator_activate_req_fn(q, rq); 274 e->ops->elevator_activate_req_fn(q, rq);
273} 275}
274 276
275static void elv_deactivate_rq(request_queue_t *q, struct request *rq) 277static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
276{ 278{
277 elevator_t *e = q->elevator; 279 elevator_t *e = q->elevator;
278 280
@@ -285,13 +287,13 @@ static inline void __elv_rqhash_del(struct request *rq)
285 hlist_del_init(&rq->hash); 287 hlist_del_init(&rq->hash);
286} 288}
287 289
288static void elv_rqhash_del(request_queue_t *q, struct request *rq) 290static void elv_rqhash_del(struct request_queue *q, struct request *rq)
289{ 291{
290 if (ELV_ON_HASH(rq)) 292 if (ELV_ON_HASH(rq))
291 __elv_rqhash_del(rq); 293 __elv_rqhash_del(rq);
292} 294}
293 295
294static void elv_rqhash_add(request_queue_t *q, struct request *rq) 296static void elv_rqhash_add(struct request_queue *q, struct request *rq)
295{ 297{
296 elevator_t *e = q->elevator; 298 elevator_t *e = q->elevator;
297 299
@@ -299,13 +301,13 @@ static void elv_rqhash_add(request_queue_t *q, struct request *rq)
299 hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]); 301 hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
300} 302}
301 303
302static void elv_rqhash_reposition(request_queue_t *q, struct request *rq) 304static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
303{ 305{
304 __elv_rqhash_del(rq); 306 __elv_rqhash_del(rq);
305 elv_rqhash_add(q, rq); 307 elv_rqhash_add(q, rq);
306} 308}
307 309
308static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset) 310static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
309{ 311{
310 elevator_t *e = q->elevator; 312 elevator_t *e = q->elevator;
311 struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)]; 313 struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
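The elv_rqhash_* helpers above maintain the hash through which the core finds back-merge candidates: requests are hashed by the sector at which they end (rq_hash_key()), so a bio that starts at sector S can be glued onto a request ending at S. A simplified sketch of the lookup, as elv_merge() performs it in the hunk below (bi_sector is the 2.6-era start sector of a bio):

/* Simplified sketch of the back-merge probe inside elv_merge(). */
static struct request *back_merge_candidate(struct request_queue *q,
                                            struct bio *bio)
{
        /* a request ending exactly where this bio begins can absorb it */
        return elv_rqhash_find(q, bio->bi_sector);
}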
@@ -391,7 +393,7 @@ EXPORT_SYMBOL(elv_rb_find);
391 * entry. rq is sorted into the dispatch queue. To be used by 393 * entry. rq is sorted into the dispatch queue. To be used by
392 * specific elevators. 394 * specific elevators.
393 */ 395 */
394void elv_dispatch_sort(request_queue_t *q, struct request *rq) 396void elv_dispatch_sort(struct request_queue *q, struct request *rq)
395{ 397{
396 sector_t boundary; 398 sector_t boundary;
397 struct list_head *entry; 399 struct list_head *entry;
@@ -449,7 +451,7 @@ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
449 451
450EXPORT_SYMBOL(elv_dispatch_add_tail); 452EXPORT_SYMBOL(elv_dispatch_add_tail);
451 453
452int elv_merge(request_queue_t *q, struct request **req, struct bio *bio) 454int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
453{ 455{
454 elevator_t *e = q->elevator; 456 elevator_t *e = q->elevator;
455 struct request *__rq; 457 struct request *__rq;
@@ -481,7 +483,7 @@ int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
481 return ELEVATOR_NO_MERGE; 483 return ELEVATOR_NO_MERGE;
482} 484}
483 485
484void elv_merged_request(request_queue_t *q, struct request *rq, int type) 486void elv_merged_request(struct request_queue *q, struct request *rq, int type)
485{ 487{
486 elevator_t *e = q->elevator; 488 elevator_t *e = q->elevator;
487 489
@@ -494,7 +496,7 @@ void elv_merged_request(request_queue_t *q, struct request *rq, int type)
494 q->last_merge = rq; 496 q->last_merge = rq;
495} 497}
496 498
497void elv_merge_requests(request_queue_t *q, struct request *rq, 499void elv_merge_requests(struct request_queue *q, struct request *rq,
498 struct request *next) 500 struct request *next)
499{ 501{
500 elevator_t *e = q->elevator; 502 elevator_t *e = q->elevator;
@@ -509,7 +511,7 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
509 q->last_merge = rq; 511 q->last_merge = rq;
510} 512}
511 513
512void elv_requeue_request(request_queue_t *q, struct request *rq) 514void elv_requeue_request(struct request_queue *q, struct request *rq)
513{ 515{
514 /* 516 /*
515 * it already went through dequeue, we need to decrement the 517 * it already went through dequeue, we need to decrement the
@@ -526,7 +528,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
526 elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE); 528 elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
527} 529}
528 530
529static void elv_drain_elevator(request_queue_t *q) 531static void elv_drain_elevator(struct request_queue *q)
530{ 532{
531 static int printed; 533 static int printed;
532 while (q->elevator->ops->elevator_dispatch_fn(q, 1)) 534 while (q->elevator->ops->elevator_dispatch_fn(q, 1))
@@ -540,7 +542,7 @@ static void elv_drain_elevator(request_queue_t *q)
540 } 542 }
541} 543}
542 544
543void elv_insert(request_queue_t *q, struct request *rq, int where) 545void elv_insert(struct request_queue *q, struct request *rq, int where)
544{ 546{
545 struct list_head *pos; 547 struct list_head *pos;
546 unsigned ordseq; 548 unsigned ordseq;
@@ -638,7 +640,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
638 } 640 }
639} 641}
640 642
641void __elv_add_request(request_queue_t *q, struct request *rq, int where, 643void __elv_add_request(struct request_queue *q, struct request *rq, int where,
642 int plug) 644 int plug)
643{ 645{
644 if (q->ordcolor) 646 if (q->ordcolor)
@@ -676,7 +678,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
676 678
677EXPORT_SYMBOL(__elv_add_request); 679EXPORT_SYMBOL(__elv_add_request);
678 680
679void elv_add_request(request_queue_t *q, struct request *rq, int where, 681void elv_add_request(struct request_queue *q, struct request *rq, int where,
680 int plug) 682 int plug)
681{ 683{
682 unsigned long flags; 684 unsigned long flags;
@@ -688,7 +690,7 @@ void elv_add_request(request_queue_t *q, struct request *rq, int where,
688 690
689EXPORT_SYMBOL(elv_add_request); 691EXPORT_SYMBOL(elv_add_request);
690 692
691static inline struct request *__elv_next_request(request_queue_t *q) 693static inline struct request *__elv_next_request(struct request_queue *q)
692{ 694{
693 struct request *rq; 695 struct request *rq;
694 696
@@ -704,7 +706,7 @@ static inline struct request *__elv_next_request(request_queue_t *q)
704 } 706 }
705} 707}
706 708
707struct request *elv_next_request(request_queue_t *q) 709struct request *elv_next_request(struct request_queue *q)
708{ 710{
709 struct request *rq; 711 struct request *rq;
710 int ret; 712 int ret;
@@ -770,7 +772,7 @@ struct request *elv_next_request(request_queue_t *q)
770 772
771EXPORT_SYMBOL(elv_next_request); 773EXPORT_SYMBOL(elv_next_request);
772 774
773void elv_dequeue_request(request_queue_t *q, struct request *rq) 775void elv_dequeue_request(struct request_queue *q, struct request *rq)
774{ 776{
775 BUG_ON(list_empty(&rq->queuelist)); 777 BUG_ON(list_empty(&rq->queuelist));
776 BUG_ON(ELV_ON_HASH(rq)); 778 BUG_ON(ELV_ON_HASH(rq));
@@ -788,7 +790,7 @@ void elv_dequeue_request(request_queue_t *q, struct request *rq)
788 790
789EXPORT_SYMBOL(elv_dequeue_request); 791EXPORT_SYMBOL(elv_dequeue_request);
790 792
791int elv_queue_empty(request_queue_t *q) 793int elv_queue_empty(struct request_queue *q)
792{ 794{
793 elevator_t *e = q->elevator; 795 elevator_t *e = q->elevator;
794 796
@@ -803,7 +805,7 @@ int elv_queue_empty(request_queue_t *q)
803 805
804EXPORT_SYMBOL(elv_queue_empty); 806EXPORT_SYMBOL(elv_queue_empty);
805 807
806struct request *elv_latter_request(request_queue_t *q, struct request *rq) 808struct request *elv_latter_request(struct request_queue *q, struct request *rq)
807{ 809{
808 elevator_t *e = q->elevator; 810 elevator_t *e = q->elevator;
809 811
@@ -812,7 +814,7 @@ struct request *elv_latter_request(request_queue_t *q, struct request *rq)
812 return NULL; 814 return NULL;
813} 815}
814 816
815struct request *elv_former_request(request_queue_t *q, struct request *rq) 817struct request *elv_former_request(struct request_queue *q, struct request *rq)
816{ 818{
817 elevator_t *e = q->elevator; 819 elevator_t *e = q->elevator;
818 820
@@ -821,7 +823,7 @@ struct request *elv_former_request(request_queue_t *q, struct request *rq)
821 return NULL; 823 return NULL;
822} 824}
823 825
824int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask) 826int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
825{ 827{
826 elevator_t *e = q->elevator; 828 elevator_t *e = q->elevator;
827 829
@@ -832,7 +834,7 @@ int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
832 return 0; 834 return 0;
833} 835}
834 836
835void elv_put_request(request_queue_t *q, struct request *rq) 837void elv_put_request(struct request_queue *q, struct request *rq)
836{ 838{
837 elevator_t *e = q->elevator; 839 elevator_t *e = q->elevator;
838 840
@@ -840,7 +842,7 @@ void elv_put_request(request_queue_t *q, struct request *rq)
840 e->ops->elevator_put_req_fn(rq); 842 e->ops->elevator_put_req_fn(rq);
841} 843}
842 844
843int elv_may_queue(request_queue_t *q, int rw) 845int elv_may_queue(struct request_queue *q, int rw)
844{ 846{
845 elevator_t *e = q->elevator; 847 elevator_t *e = q->elevator;
846 848
@@ -850,7 +852,7 @@ int elv_may_queue(request_queue_t *q, int rw)
850 return ELV_MQUEUE_MAY; 852 return ELV_MQUEUE_MAY;
851} 853}
852 854
853void elv_completed_request(request_queue_t *q, struct request *rq) 855void elv_completed_request(struct request_queue *q, struct request *rq)
854{ 856{
855 elevator_t *e = q->elevator; 857 elevator_t *e = q->elevator;
856 858
@@ -1006,7 +1008,7 @@ EXPORT_SYMBOL_GPL(elv_unregister);
1006 * need for the new one. this way we have a chance of going back to the old 1008 * need for the new one. this way we have a chance of going back to the old
1007 * one, if the new one fails init for some reason. 1009 * one, if the new one fails init for some reason.
1008 */ 1010 */
1009static int elevator_switch(request_queue_t *q, struct elevator_type *new_e) 1011static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
1010{ 1012{
1011 elevator_t *old_elevator, *e; 1013 elevator_t *old_elevator, *e;
1012 void *data; 1014 void *data;
@@ -1078,7 +1080,8 @@ fail_register:
1078 return 0; 1080 return 0;
1079} 1081}
1080 1082
1081ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count) 1083ssize_t elv_iosched_store(struct request_queue *q, const char *name,
1084 size_t count)
1082{ 1085{
1083 char elevator_name[ELV_NAME_MAX]; 1086 char elevator_name[ELV_NAME_MAX];
1084 size_t len; 1087 size_t len;
@@ -1107,7 +1110,7 @@ ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
1107 return count; 1110 return count;
1108} 1111}
1109 1112
1110ssize_t elv_iosched_show(request_queue_t *q, char *name) 1113ssize_t elv_iosched_show(struct request_queue *q, char *name)
1111{ 1114{
1112 elevator_t *e = q->elevator; 1115 elevator_t *e = q->elevator;
1113 struct elevator_type *elv = e->elevator_type; 1116 struct elevator_type *elv = e->elevator_type;
@@ -1127,7 +1130,8 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
1127 return len; 1130 return len;
1128} 1131}
1129 1132
1130struct request *elv_rb_former_request(request_queue_t *q, struct request *rq) 1133struct request *elv_rb_former_request(struct request_queue *q,
1134 struct request *rq)
1131{ 1135{
1132 struct rb_node *rbprev = rb_prev(&rq->rb_node); 1136 struct rb_node *rbprev = rb_prev(&rq->rb_node);
1133 1137
@@ -1139,7 +1143,8 @@ struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
1139 1143
1140EXPORT_SYMBOL(elv_rb_former_request); 1144EXPORT_SYMBOL(elv_rb_former_request);
1141 1145
1142struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq) 1146struct request *elv_rb_latter_request(struct request_queue *q,
1147 struct request *rq)
1143{ 1148{
1144 struct rb_node *rbnext = rb_next(&rq->rb_node); 1149 struct rb_node *rbnext = rb_next(&rq->rb_node);
1145 1150
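The elv_iosched_store()/elv_iosched_show() pair above backs the sysfs file through which a scheduler is switched at runtime (conventionally /sys/block/<dev>/queue/scheduler); elevator_switch() drains the old elevator and, if the new one fails to initialize, falls back as its comment describes. At queue-creation time the entry point is elevator_init(); a hedged sketch of a caller (hypothetical wrapper, error handling elided):

static int mydev_attach_elevator(struct request_queue *q)
{
        /*
         * A NULL name lets the core fall back to the boot-time
         * "elevator=" choice registered via elevator_setup() above,
         * or to the compiled-in default scheduler.
         */
        return elevator_init(q, NULL);
}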
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 66056ca5e63..8c2caff87cc 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -40,7 +40,7 @@ static void blk_unplug_work(struct work_struct *work);
40static void blk_unplug_timeout(unsigned long data); 40static void blk_unplug_timeout(unsigned long data);
41static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); 41static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
42static void init_request_from_bio(struct request *req, struct bio *bio); 42static void init_request_from_bio(struct request *req, struct bio *bio);
43static int __make_request(request_queue_t *q, struct bio *bio); 43static int __make_request(struct request_queue *q, struct bio *bio);
44static struct io_context *current_io_context(gfp_t gfp_flags, int node); 44static struct io_context *current_io_context(gfp_t gfp_flags, int node);
45 45
46/* 46/*
@@ -121,7 +121,7 @@ static void blk_queue_congestion_threshold(struct request_queue *q)
121struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) 121struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
122{ 122{
123 struct backing_dev_info *ret = NULL; 123 struct backing_dev_info *ret = NULL;
124 request_queue_t *q = bdev_get_queue(bdev); 124 struct request_queue *q = bdev_get_queue(bdev);
125 125
126 if (q) 126 if (q)
127 ret = &q->backing_dev_info; 127 ret = &q->backing_dev_info;
@@ -140,7 +140,7 @@ EXPORT_SYMBOL(blk_get_backing_dev_info);
140 * cdb from the request data for instance. 140 * cdb from the request data for instance.
141 * 141 *
142 */ 142 */
143void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn) 143void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
144{ 144{
145 q->prep_rq_fn = pfn; 145 q->prep_rq_fn = pfn;
146} 146}
@@ -163,14 +163,14 @@ EXPORT_SYMBOL(blk_queue_prep_rq);
163 * no merge_bvec_fn is defined for a queue, and only the fixed limits are 163 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
164 * honored. 164 * honored.
165 */ 165 */
166void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn) 166void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
167{ 167{
168 q->merge_bvec_fn = mbfn; 168 q->merge_bvec_fn = mbfn;
169} 169}
170 170
171EXPORT_SYMBOL(blk_queue_merge_bvec); 171EXPORT_SYMBOL(blk_queue_merge_bvec);
172 172
173void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn) 173void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
174{ 174{
175 q->softirq_done_fn = fn; 175 q->softirq_done_fn = fn;
176} 176}
@@ -199,7 +199,7 @@ EXPORT_SYMBOL(blk_queue_softirq_done);
199 * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling 199 * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
200 * blk_queue_bounce() to create a buffer in normal memory. 200 * blk_queue_bounce() to create a buffer in normal memory.
201 **/ 201 **/
202void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn) 202void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
203{ 203{
204 /* 204 /*
205 * set defaults 205 * set defaults
@@ -235,7 +235,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
235 235
236EXPORT_SYMBOL(blk_queue_make_request); 236EXPORT_SYMBOL(blk_queue_make_request);
237 237
238static void rq_init(request_queue_t *q, struct request *rq) 238static void rq_init(struct request_queue *q, struct request *rq)
239{ 239{
240 INIT_LIST_HEAD(&rq->queuelist); 240 INIT_LIST_HEAD(&rq->queuelist);
241 INIT_LIST_HEAD(&rq->donelist); 241 INIT_LIST_HEAD(&rq->donelist);
@@ -272,7 +272,7 @@ static void rq_init(request_queue_t *q, struct request *rq)
272 * feature should call this function and indicate so. 272 * feature should call this function and indicate so.
273 * 273 *
274 **/ 274 **/
275int blk_queue_ordered(request_queue_t *q, unsigned ordered, 275int blk_queue_ordered(struct request_queue *q, unsigned ordered,
276 prepare_flush_fn *prepare_flush_fn) 276 prepare_flush_fn *prepare_flush_fn)
277{ 277{
278 if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) && 278 if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
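blk_queue_ordered() is how a driver declares the barrier discipline its hardware supports; the check above rejects QUEUE_ORDERED_PREFLUSH/QUEUE_ORDERED_POSTFLUSH modes unless a prepare_flush_fn callback is supplied to build the flush commands. A hedged sketch of a driver opting in (QUEUE_ORDERED_DRAIN_FLUSH names the 2.6-era drain-plus-flush mode; the callback is hypothetical):

/* Hypothetical callback: turn rq into a cache-flush command. */
static void mydev_prepare_flush(struct request_queue *q, struct request *rq);

static void mydev_setup_barriers(struct request_queue *q)
{
        /* drain the queue, flushing the write cache around the barrier */
        blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, mydev_prepare_flush);
}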
@@ -311,7 +311,7 @@ EXPORT_SYMBOL(blk_queue_ordered);
311 * to the block layer by defining it through this call. 311 * to the block layer by defining it through this call.
312 * 312 *
313 **/ 313 **/
314void blk_queue_issue_flush_fn(request_queue_t *q, issue_flush_fn *iff) 314void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff)
315{ 315{
316 q->issue_flush_fn = iff; 316 q->issue_flush_fn = iff;
317} 317}
@@ -321,7 +321,7 @@ EXPORT_SYMBOL(blk_queue_issue_flush_fn);
321/* 321/*
322 * Cache flushing for ordered writes handling 322 * Cache flushing for ordered writes handling
323 */ 323 */
324inline unsigned blk_ordered_cur_seq(request_queue_t *q) 324inline unsigned blk_ordered_cur_seq(struct request_queue *q)
325{ 325{
326 if (!q->ordseq) 326 if (!q->ordseq)
327 return 0; 327 return 0;
@@ -330,7 +330,7 @@ inline unsigned blk_ordered_cur_seq(request_queue_t *q)
330 330
331unsigned blk_ordered_req_seq(struct request *rq) 331unsigned blk_ordered_req_seq(struct request *rq)
332{ 332{
333 request_queue_t *q = rq->q; 333 struct request_queue *q = rq->q;
334 334
335 BUG_ON(q->ordseq == 0); 335 BUG_ON(q->ordseq == 0);
336 336
@@ -357,7 +357,7 @@ unsigned blk_ordered_req_seq(struct request *rq)
357 return QUEUE_ORDSEQ_DONE; 357 return QUEUE_ORDSEQ_DONE;
358} 358}
359 359
360void blk_ordered_complete_seq(request_queue_t *q, unsigned seq, int error) 360void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
361{ 361{
362 struct request *rq; 362 struct request *rq;
363 int uptodate; 363 int uptodate;
@@ -401,7 +401,7 @@ static void post_flush_end_io(struct request *rq, int error)
401 blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error); 401 blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
402} 402}
403 403
404static void queue_flush(request_queue_t *q, unsigned which) 404static void queue_flush(struct request_queue *q, unsigned which)
405{ 405{
406 struct request *rq; 406 struct request *rq;
407 rq_end_io_fn *end_io; 407 rq_end_io_fn *end_io;
@@ -425,7 +425,7 @@ static void queue_flush(request_queue_t *q, unsigned which)
425 elv_insert(q, rq, ELEVATOR_INSERT_FRONT); 425 elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
426} 426}
427 427
428static inline struct request *start_ordered(request_queue_t *q, 428static inline struct request *start_ordered(struct request_queue *q,
429 struct request *rq) 429 struct request *rq)
430{ 430{
431 q->bi_size = 0; 431 q->bi_size = 0;
@@ -476,7 +476,7 @@ static inline struct request *start_ordered(request_queue_t *q,
476 return rq; 476 return rq;
477} 477}
478 478
479int blk_do_ordered(request_queue_t *q, struct request **rqp) 479int blk_do_ordered(struct request_queue *q, struct request **rqp)
480{ 480{
481 struct request *rq = *rqp; 481 struct request *rq = *rqp;
482 int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq); 482 int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
@@ -527,7 +527,7 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
527 527
528static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error) 528static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
529{ 529{
530 request_queue_t *q = bio->bi_private; 530 struct request_queue *q = bio->bi_private;
531 531
532 /* 532 /*
533 * This is dry run, restore bio_sector and size. We'll finish 533 * This is dry run, restore bio_sector and size. We'll finish
@@ -551,7 +551,7 @@ static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
551static int ordered_bio_endio(struct request *rq, struct bio *bio, 551static int ordered_bio_endio(struct request *rq, struct bio *bio,
552 unsigned int nbytes, int error) 552 unsigned int nbytes, int error)
553{ 553{
554 request_queue_t *q = rq->q; 554 struct request_queue *q = rq->q;
555 bio_end_io_t *endio; 555 bio_end_io_t *endio;
556 void *private; 556 void *private;
557 557
@@ -588,7 +588,7 @@ static int ordered_bio_endio(struct request *rq, struct bio *bio,
588 * blk_queue_bounce_limit to have lower memory pages allocated as bounce 588 * blk_queue_bounce_limit to have lower memory pages allocated as bounce
589 * buffers for doing I/O to pages residing above @page. 589 * buffers for doing I/O to pages residing above @page.
590 **/ 590 **/
591void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr) 591void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
592{ 592{
593 unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT; 593 unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
594 int dma = 0; 594 int dma = 0;
@@ -624,7 +624,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
624 * Enables a low level driver to set an upper limit on the size of 624 * Enables a low level driver to set an upper limit on the size of
625 * received requests. 625 * received requests.
626 **/ 626 **/
627void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors) 627void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
628{ 628{
629 if ((max_sectors << 9) < PAGE_CACHE_SIZE) { 629 if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
630 max_sectors = 1 << (PAGE_CACHE_SHIFT - 9); 630 max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -651,7 +651,8 @@ EXPORT_SYMBOL(blk_queue_max_sectors);
651 * physical data segments in a request. This would be the largest sized 651 * physical data segments in a request. This would be the largest sized
652 * scatter list the driver could handle. 652 * scatter list the driver could handle.
653 **/ 653 **/
654void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments) 654void blk_queue_max_phys_segments(struct request_queue *q,
655 unsigned short max_segments)
655{ 656{
656 if (!max_segments) { 657 if (!max_segments) {
657 max_segments = 1; 658 max_segments = 1;
@@ -674,7 +675,8 @@ EXPORT_SYMBOL(blk_queue_max_phys_segments);
674 * address/length pairs the host adapter can actually give at once 675 * address/length pairs the host adapter can actually give at once
675 * to the device. 676 * to the device.
676 **/ 677 **/
677void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments) 678void blk_queue_max_hw_segments(struct request_queue *q,
679 unsigned short max_segments)
678{ 680{
679 if (!max_segments) { 681 if (!max_segments) {
680 max_segments = 1; 682 max_segments = 1;
@@ -695,7 +697,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_segments);
695 * Enables a low level driver to set an upper limit on the size of a 697 * Enables a low level driver to set an upper limit on the size of a
696 * coalesced segment 698 * coalesced segment
697 **/ 699 **/
698void blk_queue_max_segment_size(request_queue_t *q, unsigned int max_size) 700void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
699{ 701{
700 if (max_size < PAGE_CACHE_SIZE) { 702 if (max_size < PAGE_CACHE_SIZE) {
701 max_size = PAGE_CACHE_SIZE; 703 max_size = PAGE_CACHE_SIZE;
@@ -718,7 +720,7 @@ EXPORT_SYMBOL(blk_queue_max_segment_size);
718 * even internal read-modify-write operations). Usually the default 720 * even internal read-modify-write operations). Usually the default
719 * of 512 covers most hardware. 721 * of 512 covers most hardware.
720 **/ 722 **/
721void blk_queue_hardsect_size(request_queue_t *q, unsigned short size) 723void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
722{ 724{
723 q->hardsect_size = size; 725 q->hardsect_size = size;
724} 726}
@@ -735,7 +737,7 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
735 * @t: the stacking driver (top) 737 * @t: the stacking driver (top)
736 * @b: the underlying device (bottom) 738 * @b: the underlying device (bottom)
737 **/ 739 **/
738void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b) 740void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
739{ 741{
740 /* zero is "infinity" */ 742 /* zero is "infinity" */
741 t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors); 743 t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
@@ -756,7 +758,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
756 * @q: the request queue for the device 758 * @q: the request queue for the device
757 * @mask: the memory boundary mask 759 * @mask: the memory boundary mask
758 **/ 760 **/
759void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask) 761void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
760{ 762{
761 if (mask < PAGE_CACHE_SIZE - 1) { 763 if (mask < PAGE_CACHE_SIZE - 1) {
762 mask = PAGE_CACHE_SIZE - 1; 764 mask = PAGE_CACHE_SIZE - 1;
@@ -778,7 +780,7 @@ EXPORT_SYMBOL(blk_queue_segment_boundary);
778 * this is used when building direct io requests for the queue. 780 * this is used when building direct io requests for the queue.
779 * 781 *
780 **/ 782 **/
781void blk_queue_dma_alignment(request_queue_t *q, int mask) 783void blk_queue_dma_alignment(struct request_queue *q, int mask)
782{ 784{
783 q->dma_alignment = mask; 785 q->dma_alignment = mask;
784} 786}
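The setters above make up the queue-limits half of the interface this commit retypes. For orientation, a hedged sketch of a driver's probe-time configuration; the values are illustrative, not taken from any driver in this diff:

static void mydev_set_limits(struct request_queue *q)
{
        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);  /* bounce highmem pages */
        blk_queue_max_sectors(q, 256);               /* cap requests at 128 KiB */
        blk_queue_max_phys_segments(q, 32);          /* longest sg list we build */
        blk_queue_max_hw_segments(q, 32);            /* longest sg list the HBA takes */
        blk_queue_max_segment_size(q, 65536);        /* cap a coalesced segment */
        blk_queue_hardsect_size(q, 512);             /* device's atomic sector size */
        blk_queue_segment_boundary(q, 0xffffffff);   /* DMA must not cross this mask */
        blk_queue_dma_alignment(q, 511);             /* direct-io buffer alignment */
}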
@@ -796,7 +798,7 @@ EXPORT_SYMBOL(blk_queue_dma_alignment);
796 * 798 *
797 * no locks need be held. 799 * no locks need be held.
798 **/ 800 **/
799struct request *blk_queue_find_tag(request_queue_t *q, int tag) 801struct request *blk_queue_find_tag(struct request_queue *q, int tag)
800{ 802{
801 return blk_map_queue_find_tag(q->queue_tags, tag); 803 return blk_map_queue_find_tag(q->queue_tags, tag);
802} 804}
@@ -840,7 +842,7 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
840 * blk_cleanup_queue() will take care of calling this function, if tagging 842 * blk_cleanup_queue() will take care of calling this function, if tagging
841 * has been used. So there's no need to call this directly. 843 * has been used. So there's no need to call this directly.
842 **/ 844 **/
843static void __blk_queue_free_tags(request_queue_t *q) 845static void __blk_queue_free_tags(struct request_queue *q)
844{ 846{
845 struct blk_queue_tag *bqt = q->queue_tags; 847 struct blk_queue_tag *bqt = q->queue_tags;
846 848
@@ -877,7 +879,7 @@ EXPORT_SYMBOL(blk_free_tags);
877 * This is used to disable tagged queuing to a device, yet leave 879 * This is used to disable tagged queuing to a device, yet leave
878 * queue in function. 880 * queue in function.
879 **/ 881 **/
880void blk_queue_free_tags(request_queue_t *q) 882void blk_queue_free_tags(struct request_queue *q)
881{ 883{
882 clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); 884 clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
883} 885}
@@ -885,7 +887,7 @@ void blk_queue_free_tags(request_queue_t *q)
885EXPORT_SYMBOL(blk_queue_free_tags); 887EXPORT_SYMBOL(blk_queue_free_tags);
886 888
887static int 889static int
888init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth) 890init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
889{ 891{
890 struct request **tag_index; 892 struct request **tag_index;
891 unsigned long *tag_map; 893 unsigned long *tag_map;
@@ -955,7 +957,7 @@ EXPORT_SYMBOL(blk_init_tags);
955 * @depth: the maximum queue depth supported 957 * @depth: the maximum queue depth supported
956 * @tags: the tag to use 958 * @tags: the tag to use
957 **/ 959 **/
958int blk_queue_init_tags(request_queue_t *q, int depth, 960int blk_queue_init_tags(struct request_queue *q, int depth,
959 struct blk_queue_tag *tags) 961 struct blk_queue_tag *tags)
960{ 962{
961 int rc; 963 int rc;
@@ -996,7 +998,7 @@ EXPORT_SYMBOL(blk_queue_init_tags);
996 * Notes: 998 * Notes:
997 * Must be called with the queue lock held. 999 * Must be called with the queue lock held.
998 **/ 1000 **/
999int blk_queue_resize_tags(request_queue_t *q, int new_depth) 1001int blk_queue_resize_tags(struct request_queue *q, int new_depth)
1000{ 1002{
1001 struct blk_queue_tag *bqt = q->queue_tags; 1003 struct blk_queue_tag *bqt = q->queue_tags;
1002 struct request **tag_index; 1004 struct request **tag_index;
@@ -1059,7 +1061,7 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
1059 * Notes: 1061 * Notes:
1060 * queue lock must be held. 1062 * queue lock must be held.
1061 **/ 1063 **/
1062void blk_queue_end_tag(request_queue_t *q, struct request *rq) 1064void blk_queue_end_tag(struct request_queue *q, struct request *rq)
1063{ 1065{
1064 struct blk_queue_tag *bqt = q->queue_tags; 1066 struct blk_queue_tag *bqt = q->queue_tags;
1065 int tag = rq->tag; 1067 int tag = rq->tag;
@@ -1111,7 +1113,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
1111 * Notes: 1113 * Notes:
1112 * queue lock must be held. 1114 * queue lock must be held.
1113 **/ 1115 **/
1114int blk_queue_start_tag(request_queue_t *q, struct request *rq) 1116int blk_queue_start_tag(struct request_queue *q, struct request *rq)
1115{ 1117{
1116 struct blk_queue_tag *bqt = q->queue_tags; 1118 struct blk_queue_tag *bqt = q->queue_tags;
1117 int tag; 1119 int tag;
@@ -1158,7 +1160,7 @@ EXPORT_SYMBOL(blk_queue_start_tag);
1158 * Notes: 1160 * Notes:
1159 * queue lock must be held. 1161 * queue lock must be held.
1160 **/ 1162 **/
1161void blk_queue_invalidate_tags(request_queue_t *q) 1163void blk_queue_invalidate_tags(struct request_queue *q)
1162{ 1164{
1163 struct blk_queue_tag *bqt = q->queue_tags; 1165 struct blk_queue_tag *bqt = q->queue_tags;
1164 struct list_head *tmp, *n; 1166 struct list_head *tmp, *n;
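The blk_queue_*_tag functions above cover the whole lifecycle of tagged command queueing. A hedged sketch of the flow from a driver's point of view; per the notes above, the start/end calls run under q->queue_lock, and passing a NULL map to blk_queue_init_tags() asks the block layer to allocate one:

static int mydev_issue_tagged(struct request_queue *q, struct request *rq)
{
        /* once, at init: tag map sized for 64 outstanding commands */
        if (blk_queue_init_tags(q, 64, NULL))
                return -ENOMEM;

        /* per request, queue lock held: assigns rq->tag or refuses */
        if (blk_queue_start_tag(q, rq))
                return -EBUSY;          /* no free tag, retry later */

        /* ...hand rq to the hardware; on completion, lock held again: */
        blk_queue_end_tag(q, rq);
        return 0;
}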
@@ -1205,7 +1207,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
1205 1207
1206EXPORT_SYMBOL(blk_dump_rq_flags); 1208EXPORT_SYMBOL(blk_dump_rq_flags);
1207 1209
1208void blk_recount_segments(request_queue_t *q, struct bio *bio) 1210void blk_recount_segments(struct request_queue *q, struct bio *bio)
1209{ 1211{
1210 struct bio_vec *bv, *bvprv = NULL; 1212 struct bio_vec *bv, *bvprv = NULL;
1211 int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster; 1213 int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
@@ -1267,7 +1269,7 @@ new_hw_segment:
1267} 1269}
1268EXPORT_SYMBOL(blk_recount_segments); 1270EXPORT_SYMBOL(blk_recount_segments);
1269 1271
1270static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio, 1272static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
1271 struct bio *nxt) 1273 struct bio *nxt)
1272{ 1274{
1273 if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER))) 1275 if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
@@ -1288,7 +1290,7 @@ static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
1288 return 0; 1290 return 0;
1289} 1291}
1290 1292
1291static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio, 1293static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
1292 struct bio *nxt) 1294 struct bio *nxt)
1293{ 1295{
1294 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) 1296 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
@@ -1308,7 +1310,8 @@ static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
1308 * map a request to scatterlist, return number of sg entries setup. Caller 1310 * map a request to scatterlist, return number of sg entries setup. Caller
1309 * must make sure sg can hold rq->nr_phys_segments entries 1311 * must make sure sg can hold rq->nr_phys_segments entries
1310 */ 1312 */
1311int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg) 1313int blk_rq_map_sg(struct request_queue *q, struct request *rq,
1314 struct scatterlist *sg)
1312{ 1315{
1313 struct bio_vec *bvec, *bvprv; 1316 struct bio_vec *bvec, *bvprv;
1314 struct bio *bio; 1317 struct bio *bio;
@@ -1361,7 +1364,7 @@ EXPORT_SYMBOL(blk_rq_map_sg);
1361 * specific ones if so desired 1364 * specific ones if so desired
1362 */ 1365 */
1363 1366
1364static inline int ll_new_mergeable(request_queue_t *q, 1367static inline int ll_new_mergeable(struct request_queue *q,
1365 struct request *req, 1368 struct request *req,
1366 struct bio *bio) 1369 struct bio *bio)
1367{ 1370{
@@ -1382,7 +1385,7 @@ static inline int ll_new_mergeable(request_queue_t *q,
1382 return 1; 1385 return 1;
1383} 1386}
1384 1387
1385static inline int ll_new_hw_segment(request_queue_t *q, 1388static inline int ll_new_hw_segment(struct request_queue *q,
1386 struct request *req, 1389 struct request *req,
1387 struct bio *bio) 1390 struct bio *bio)
1388{ 1391{
@@ -1406,7 +1409,7 @@ static inline int ll_new_hw_segment(request_queue_t *q,
1406 return 1; 1409 return 1;
1407} 1410}
1408 1411
1409int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio) 1412int ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *bio)
1410{ 1413{
1411 unsigned short max_sectors; 1414 unsigned short max_sectors;
1412 int len; 1415 int len;
@@ -1444,7 +1447,7 @@ int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
1444} 1447}
1445EXPORT_SYMBOL(ll_back_merge_fn); 1448EXPORT_SYMBOL(ll_back_merge_fn);
1446 1449
1447static int ll_front_merge_fn(request_queue_t *q, struct request *req, 1450static int ll_front_merge_fn(struct request_queue *q, struct request *req,
1448 struct bio *bio) 1451 struct bio *bio)
1449{ 1452{
1450 unsigned short max_sectors; 1453 unsigned short max_sectors;
@@ -1483,7 +1486,7 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req,
1483 return ll_new_hw_segment(q, req, bio); 1486 return ll_new_hw_segment(q, req, bio);
1484} 1487}
1485 1488
1486static int ll_merge_requests_fn(request_queue_t *q, struct request *req, 1489static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
1487 struct request *next) 1490 struct request *next)
1488{ 1491{
1489 int total_phys_segments; 1492 int total_phys_segments;
@@ -1539,7 +1542,7 @@ static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
1539 * This is called with interrupts off and no requests on the queue and 1542 * This is called with interrupts off and no requests on the queue and
1540 * with the queue lock held. 1543 * with the queue lock held.
1541 */ 1544 */
1542void blk_plug_device(request_queue_t *q) 1545void blk_plug_device(struct request_queue *q)
1543{ 1546{
1544 WARN_ON(!irqs_disabled()); 1547 WARN_ON(!irqs_disabled());
1545 1548
@@ -1562,7 +1565,7 @@ EXPORT_SYMBOL(blk_plug_device);
1562 * remove the queue from the plugged list, if present. called with 1565 * remove the queue from the plugged list, if present. called with
1563 * queue lock held and interrupts disabled. 1566 * queue lock held and interrupts disabled.
1564 */ 1567 */
1565int blk_remove_plug(request_queue_t *q) 1568int blk_remove_plug(struct request_queue *q)
1566{ 1569{
1567 WARN_ON(!irqs_disabled()); 1570 WARN_ON(!irqs_disabled());
1568 1571
@@ -1578,7 +1581,7 @@ EXPORT_SYMBOL(blk_remove_plug);
1578/* 1581/*
1579 * remove the plug and let it rip.. 1582 * remove the plug and let it rip..
1580 */ 1583 */
1581void __generic_unplug_device(request_queue_t *q) 1584void __generic_unplug_device(struct request_queue *q)
1582{ 1585{
1583 if (unlikely(blk_queue_stopped(q))) 1586 if (unlikely(blk_queue_stopped(q)))
1584 return; 1587 return;
@@ -1592,7 +1595,7 @@ EXPORT_SYMBOL(__generic_unplug_device);
1592 1595
1593/** 1596/**
1594 * generic_unplug_device - fire a request queue 1597 * generic_unplug_device - fire a request queue
1595 * @q: The &request_queue_t in question 1598 * @q: The &struct request_queue in question
1596 * 1599 *
1597 * Description: 1600 * Description:
1598 * Linux uses plugging to build bigger requests queues before letting 1601 * Linux uses plugging to build bigger requests queues before letting
@@ -1601,7 +1604,7 @@ EXPORT_SYMBOL(__generic_unplug_device);
1601 * gets unplugged, the request_fn defined for the queue is invoked and 1604 * gets unplugged, the request_fn defined for the queue is invoked and
1602 * transfers started. 1605 * transfers started.
1603 **/ 1606 **/
1604void generic_unplug_device(request_queue_t *q) 1607void generic_unplug_device(struct request_queue *q)
1605{ 1608{
1606 spin_lock_irq(q->queue_lock); 1609 spin_lock_irq(q->queue_lock);
1607 __generic_unplug_device(q); 1610 __generic_unplug_device(q);
@@ -1612,7 +1615,7 @@ EXPORT_SYMBOL(generic_unplug_device);
1612static void blk_backing_dev_unplug(struct backing_dev_info *bdi, 1615static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
1613 struct page *page) 1616 struct page *page)
1614{ 1617{
1615 request_queue_t *q = bdi->unplug_io_data; 1618 struct request_queue *q = bdi->unplug_io_data;
1616 1619
1617 /* 1620 /*
1618 * devices don't necessarily have an ->unplug_fn defined 1621 * devices don't necessarily have an ->unplug_fn defined
@@ -1627,7 +1630,8 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
1627 1630
1628static void blk_unplug_work(struct work_struct *work) 1631static void blk_unplug_work(struct work_struct *work)
1629{ 1632{
1630 request_queue_t *q = container_of(work, request_queue_t, unplug_work); 1633 struct request_queue *q =
1634 container_of(work, struct request_queue, unplug_work);
1631 1635
1632 blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, 1636 blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
1633 q->rq.count[READ] + q->rq.count[WRITE]); 1637 q->rq.count[READ] + q->rq.count[WRITE]);
@@ -1637,7 +1641,7 @@ static void blk_unplug_work(struct work_struct *work)
1637 1641
1638static void blk_unplug_timeout(unsigned long data) 1642static void blk_unplug_timeout(unsigned long data)
1639{ 1643{
1640 request_queue_t *q = (request_queue_t *)data; 1644 struct request_queue *q = (struct request_queue *)data;
1641 1645
1642 blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL, 1646 blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
1643 q->rq.count[READ] + q->rq.count[WRITE]); 1647 q->rq.count[READ] + q->rq.count[WRITE]);
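The plugging code above forms a small state machine: blk_plug_device() marks the queue plugged and arms a timer, blk_unplug_timeout() fires and punts to kblockd through unplug_work, and blk_unplug_work() invokes the queue's unplug_fn, normally generic_unplug_device(). A condensed paraphrase of the final step, not the kernel source (compare __generic_unplug_device() above):

static void unplug_sketch(struct request_queue *q)
{
        if (blk_queue_stopped(q))
                return;                 /* driver asked for quiescence */
        if (blk_remove_plug(q))         /* unplugged; pending timer deleted */
                q->request_fn(q);       /* let the driver drain the queue */
}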
@@ -1647,14 +1651,14 @@ static void blk_unplug_timeout(unsigned long data)
1647 1651
1648/** 1652/**
1649 * blk_start_queue - restart a previously stopped queue 1653 * blk_start_queue - restart a previously stopped queue
1650 * @q: The &request_queue_t in question 1654 * @q: The &struct request_queue in question
1651 * 1655 *
1652 * Description: 1656 * Description:
1653 * blk_start_queue() will clear the stop flag on the queue, and call 1657 * blk_start_queue() will clear the stop flag on the queue, and call
1654 * the request_fn for the queue if it was in a stopped state when 1658 * the request_fn for the queue if it was in a stopped state when
1655 * entered. Also see blk_stop_queue(). Queue lock must be held. 1659 * entered. Also see blk_stop_queue(). Queue lock must be held.
1656 **/ 1660 **/
1657void blk_start_queue(request_queue_t *q) 1661void blk_start_queue(struct request_queue *q)
1658{ 1662{
1659 WARN_ON(!irqs_disabled()); 1663 WARN_ON(!irqs_disabled());
1660 1664
@@ -1677,7 +1681,7 @@ EXPORT_SYMBOL(blk_start_queue);
1677 1681
1678/** 1682/**
1679 * blk_stop_queue - stop a queue 1683 * blk_stop_queue - stop a queue
1680 * @q: The &request_queue_t in question 1684 * @q: The &struct request_queue in question
1681 * 1685 *
1682 * Description: 1686 * Description:
1683 * The Linux block layer assumes that a block driver will consume all 1687 * The Linux block layer assumes that a block driver will consume all
@@ -1689,7 +1693,7 @@ EXPORT_SYMBOL(blk_start_queue);
1689 * the driver has signalled it's ready to go again. This happens by calling 1693 * the driver has signalled it's ready to go again. This happens by calling
1690 * blk_start_queue() to restart queue operations. Queue lock must be held. 1694 * blk_start_queue() to restart queue operations. Queue lock must be held.
1691 **/ 1695 **/
1692void blk_stop_queue(request_queue_t *q) 1696void blk_stop_queue(struct request_queue *q)
1693{ 1697{
1694 blk_remove_plug(q); 1698 blk_remove_plug(q);
1695 set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags); 1699 set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
@@ -1746,7 +1750,7 @@ void blk_run_queue(struct request_queue *q)
1746EXPORT_SYMBOL(blk_run_queue); 1750EXPORT_SYMBOL(blk_run_queue);
1747 1751
1748/** 1752/**
1749 * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed 1753 * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed
1750 * @kobj: the kobj belonging to the request queue to be released 1754 * @kobj: the kobj belonging to the request queue to be released
1751 * 1755 *
1752 * Description: 1756 * Description:
@@ -1762,7 +1766,8 @@ EXPORT_SYMBOL(blk_run_queue);
1762 **/ 1766 **/
1763static void blk_release_queue(struct kobject *kobj) 1767static void blk_release_queue(struct kobject *kobj)
1764{ 1768{
1765 request_queue_t *q = container_of(kobj, struct request_queue, kobj); 1769 struct request_queue *q =
1770 container_of(kobj, struct request_queue, kobj);
1766 struct request_list *rl = &q->rq; 1771 struct request_list *rl = &q->rq;
1767 1772
1768 blk_sync_queue(q); 1773 blk_sync_queue(q);
@@ -1778,13 +1783,13 @@ static void blk_release_queue(struct kobject *kobj)
1778 kmem_cache_free(requestq_cachep, q); 1783 kmem_cache_free(requestq_cachep, q);
1779} 1784}
1780 1785
1781void blk_put_queue(request_queue_t *q) 1786void blk_put_queue(struct request_queue *q)
1782{ 1787{
1783 kobject_put(&q->kobj); 1788 kobject_put(&q->kobj);
1784} 1789}
1785EXPORT_SYMBOL(blk_put_queue); 1790EXPORT_SYMBOL(blk_put_queue);
1786 1791
1787void blk_cleanup_queue(request_queue_t * q) 1792void blk_cleanup_queue(struct request_queue * q)
1788{ 1793{
1789 mutex_lock(&q->sysfs_lock); 1794 mutex_lock(&q->sysfs_lock);
1790 set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); 1795 set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
@@ -1798,7 +1803,7 @@ void blk_cleanup_queue(request_queue_t * q)
1798 1803
1799EXPORT_SYMBOL(blk_cleanup_queue); 1804EXPORT_SYMBOL(blk_cleanup_queue);
1800 1805
1801static int blk_init_free_list(request_queue_t *q) 1806static int blk_init_free_list(struct request_queue *q)
1802{ 1807{
1803 struct request_list *rl = &q->rq; 1808 struct request_list *rl = &q->rq;
1804 1809
@@ -1817,7 +1822,7 @@ static int blk_init_free_list(request_queue_t *q)
1817 return 0; 1822 return 0;
1818} 1823}
1819 1824
1820request_queue_t *blk_alloc_queue(gfp_t gfp_mask) 1825struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
1821{ 1826{
1822 return blk_alloc_queue_node(gfp_mask, -1); 1827 return blk_alloc_queue_node(gfp_mask, -1);
1823} 1828}
@@ -1825,9 +1830,9 @@ EXPORT_SYMBOL(blk_alloc_queue);
1825 1830
1826static struct kobj_type queue_ktype; 1831static struct kobj_type queue_ktype;
1827 1832
1828request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) 1833struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
1829{ 1834{
1830 request_queue_t *q; 1835 struct request_queue *q;
1831 1836
1832 q = kmem_cache_alloc_node(requestq_cachep, 1837 q = kmem_cache_alloc_node(requestq_cachep,
1833 gfp_mask | __GFP_ZERO, node_id); 1838 gfp_mask | __GFP_ZERO, node_id);
@@ -1882,16 +1887,16 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
1882 * when the block device is deactivated (such as at module unload). 1887 * when the block device is deactivated (such as at module unload).
1883 **/ 1888 **/
1884 1889
1885request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) 1890struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
1886{ 1891{
1887 return blk_init_queue_node(rfn, lock, -1); 1892 return blk_init_queue_node(rfn, lock, -1);
1888} 1893}
1889EXPORT_SYMBOL(blk_init_queue); 1894EXPORT_SYMBOL(blk_init_queue);
1890 1895
1891request_queue_t * 1896struct request_queue *
1892blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) 1897blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
1893{ 1898{
1894 request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id); 1899 struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
1895 1900
1896 if (!q) 1901 if (!q)
1897 return NULL; 1902 return NULL;
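
As a consumer's-eye view of these constructors (a minimal sketch in the post-patch spelling; my_request_fn, my_lock and the module hooks are illustrative names):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static struct request_queue *my_queue;

static void my_request_fn(struct request_queue *q)
{
	/* drain the queue with elv_next_request() here */
}

static int __init my_init(void)
{
	my_queue = blk_init_queue(my_request_fn, &my_lock);
	if (!my_queue)
		return -ENOMEM;
	return 0;
}

static void __exit my_exit(void)
{
	/* "deactivated (such as at module unload)", per the comment above */
	blk_cleanup_queue(my_queue);
}

module_init(my_init);
module_exit(my_exit);
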
@@ -1940,7 +1945,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
1940} 1945}
1941EXPORT_SYMBOL(blk_init_queue_node); 1946EXPORT_SYMBOL(blk_init_queue_node);
1942 1947
1943int blk_get_queue(request_queue_t *q) 1948int blk_get_queue(struct request_queue *q)
1944{ 1949{
1945 if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { 1950 if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
1946 kobject_get(&q->kobj); 1951 kobject_get(&q->kobj);
@@ -1952,7 +1957,7 @@ int blk_get_queue(request_queue_t *q)
1952 1957
1953EXPORT_SYMBOL(blk_get_queue); 1958EXPORT_SYMBOL(blk_get_queue);
1954 1959
1955static inline void blk_free_request(request_queue_t *q, struct request *rq) 1960static inline void blk_free_request(struct request_queue *q, struct request *rq)
1956{ 1961{
1957 if (rq->cmd_flags & REQ_ELVPRIV) 1962 if (rq->cmd_flags & REQ_ELVPRIV)
1958 elv_put_request(q, rq); 1963 elv_put_request(q, rq);
@@ -1960,7 +1965,7 @@ static inline void blk_free_request(request_queue_t *q, struct request *rq)
1960} 1965}
1961 1966
1962static struct request * 1967static struct request *
1963blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask) 1968blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
1964{ 1969{
1965 struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); 1970 struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
1966 1971
@@ -1988,7 +1993,7 @@ blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask)
1988 * ioc_batching returns true if the ioc is a valid batching request and 1993 * ioc_batching returns true if the ioc is a valid batching request and
1989 * should be given priority access to a request. 1994 * should be given priority access to a request.
1990 */ 1995 */
1991static inline int ioc_batching(request_queue_t *q, struct io_context *ioc) 1996static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
1992{ 1997{
1993 if (!ioc) 1998 if (!ioc)
1994 return 0; 1999 return 0;
@@ -2009,7 +2014,7 @@ static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
2009 * is the behaviour we want though - once it gets a wakeup it should be given 2014 * is the behaviour we want though - once it gets a wakeup it should be given
2010 * a nice run. 2015 * a nice run.
2011 */ 2016 */
2012static void ioc_set_batching(request_queue_t *q, struct io_context *ioc) 2017static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
2013{ 2018{
2014 if (!ioc || ioc_batching(q, ioc)) 2019 if (!ioc || ioc_batching(q, ioc))
2015 return; 2020 return;
@@ -2018,7 +2023,7 @@ static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
2018 ioc->last_waited = jiffies; 2023 ioc->last_waited = jiffies;
2019} 2024}
2020 2025
2021static void __freed_request(request_queue_t *q, int rw) 2026static void __freed_request(struct request_queue *q, int rw)
2022{ 2027{
2023 struct request_list *rl = &q->rq; 2028 struct request_list *rl = &q->rq;
2024 2029
@@ -2037,7 +2042,7 @@ static void __freed_request(request_queue_t *q, int rw)
2037 * A request has just been released. Account for it, update the full and 2042 * A request has just been released. Account for it, update the full and
2038 * congestion status, wake up any waiters. Called under q->queue_lock. 2043 * congestion status, wake up any waiters. Called under q->queue_lock.
2039 */ 2044 */
2040static void freed_request(request_queue_t *q, int rw, int priv) 2045static void freed_request(struct request_queue *q, int rw, int priv)
2041{ 2046{
2042 struct request_list *rl = &q->rq; 2047 struct request_list *rl = &q->rq;
2043 2048
@@ -2057,7 +2062,7 @@ static void freed_request(request_queue_t *q, int rw, int priv)
2057 * Returns NULL on failure, with queue_lock held. 2062 * Returns NULL on failure, with queue_lock held.
2058 * Returns !NULL on success, with queue_lock *not held*. 2063 * Returns !NULL on success, with queue_lock *not held*.
2059 */ 2064 */
2060static struct request *get_request(request_queue_t *q, int rw_flags, 2065static struct request *get_request(struct request_queue *q, int rw_flags,
2061 struct bio *bio, gfp_t gfp_mask) 2066 struct bio *bio, gfp_t gfp_mask)
2062{ 2067{
2063 struct request *rq = NULL; 2068 struct request *rq = NULL;
@@ -2162,7 +2167,7 @@ out:
2162 * 2167 *
2163 * Called with q->queue_lock held, and returns with it unlocked. 2168 * Called with q->queue_lock held, and returns with it unlocked.
2164 */ 2169 */
2165static struct request *get_request_wait(request_queue_t *q, int rw_flags, 2170static struct request *get_request_wait(struct request_queue *q, int rw_flags,
2166 struct bio *bio) 2171 struct bio *bio)
2167{ 2172{
2168 const int rw = rw_flags & 0x01; 2173 const int rw = rw_flags & 0x01;
@@ -2204,7 +2209,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw_flags,
2204 return rq; 2209 return rq;
2205} 2210}
2206 2211
2207struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask) 2212struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
2208{ 2213{
2209 struct request *rq; 2214 struct request *rq;
2210 2215
@@ -2234,7 +2239,7 @@ EXPORT_SYMBOL(blk_get_request);
2234 * 2239 *
2235 * The queue lock must be held with interrupts disabled. 2240 * The queue lock must be held with interrupts disabled.
2236 */ 2241 */
2237void blk_start_queueing(request_queue_t *q) 2242void blk_start_queueing(struct request_queue *q)
2238{ 2243{
2239 if (!blk_queue_plugged(q)) 2244 if (!blk_queue_plugged(q))
2240 q->request_fn(q); 2245 q->request_fn(q);
@@ -2253,7 +2258,7 @@ EXPORT_SYMBOL(blk_start_queueing);
2253 * more, when that condition happens we need to put the request back 2258 * more, when that condition happens we need to put the request back
2254 * on the queue. Must be called with queue lock held. 2259 * on the queue. Must be called with queue lock held.
2255 */ 2260 */
2256void blk_requeue_request(request_queue_t *q, struct request *rq) 2261void blk_requeue_request(struct request_queue *q, struct request *rq)
2257{ 2262{
2258 blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); 2263 blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
2259 2264
@@ -2284,7 +2289,7 @@ EXPORT_SYMBOL(blk_requeue_request);
2284 * of the queue for things like a QUEUE_FULL message from a device, or a 2289 * of the queue for things like a QUEUE_FULL message from a device, or a
2285 * host that is unable to accept a particular command. 2290 * host that is unable to accept a particular command.
2286 */ 2291 */
2287void blk_insert_request(request_queue_t *q, struct request *rq, 2292void blk_insert_request(struct request_queue *q, struct request *rq,
2288 int at_head, void *data) 2293 int at_head, void *data)
2289{ 2294{
2290 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; 2295 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
@@ -2330,7 +2335,7 @@ static int __blk_rq_unmap_user(struct bio *bio)
2330 return ret; 2335 return ret;
2331} 2336}
2332 2337
2333static int __blk_rq_map_user(request_queue_t *q, struct request *rq, 2338static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
2334 void __user *ubuf, unsigned int len) 2339 void __user *ubuf, unsigned int len)
2335{ 2340{
2336 unsigned long uaddr; 2341 unsigned long uaddr;
@@ -2403,8 +2408,8 @@ unmap_bio:
2403 * original bio must be passed back in to blk_rq_unmap_user() for proper 2408 * original bio must be passed back in to blk_rq_unmap_user() for proper
2404 * unmapping. 2409 * unmapping.
2405 */ 2410 */
2406int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf, 2411int blk_rq_map_user(struct request_queue *q, struct request *rq,
2407 unsigned long len) 2412 void __user *ubuf, unsigned long len)
2408{ 2413{
2409 unsigned long bytes_read = 0; 2414 unsigned long bytes_read = 0;
2410 struct bio *bio = NULL; 2415 struct bio *bio = NULL;
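
The pairing the comment above insists on, as a hedged sketch (the caller and its arguments are hypothetical; only the blk_* calls are the real API):

#include <linux/blkdev.h>

static int my_user_io(struct request_queue *q, struct gendisk *disk,
		      void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	err = blk_rq_map_user(q, rq, ubuf, len);
	if (err)
		goto out;
	bio = rq->bio;			/* keep the original bio */

	err = blk_execute_rq(q, disk, rq, 0);

	blk_rq_unmap_user(bio);		/* hand the original bio back */
out:
	blk_put_request(rq);
	return err;
}
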
@@ -2470,7 +2475,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
2470 * original bio must be passed back in to blk_rq_unmap_user() for proper 2475 * original bio must be passed back in to blk_rq_unmap_user() for proper
2471 * unmapping. 2476 * unmapping.
2472 */ 2477 */
2473int blk_rq_map_user_iov(request_queue_t *q, struct request *rq, 2478int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
2474 struct sg_iovec *iov, int iov_count, unsigned int len) 2479 struct sg_iovec *iov, int iov_count, unsigned int len)
2475{ 2480{
2476 struct bio *bio; 2481 struct bio *bio;
@@ -2540,7 +2545,7 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
2540 * @len: length of user data 2545 * @len: length of user data
2541 * @gfp_mask: memory allocation flags 2546 * @gfp_mask: memory allocation flags
2542 */ 2547 */
2543int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf, 2548int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
2544 unsigned int len, gfp_t gfp_mask) 2549 unsigned int len, gfp_t gfp_mask)
2545{ 2550{
2546 struct bio *bio; 2551 struct bio *bio;
@@ -2577,7 +2582,7 @@ EXPORT_SYMBOL(blk_rq_map_kern);
2577 * Insert a fully prepared request at the back of the io scheduler queue 2582 * Insert a fully prepared request at the back of the io scheduler queue
2578 * for execution. Don't wait for completion. 2583 * for execution. Don't wait for completion.
2579 */ 2584 */
2580void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk, 2585void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
2581 struct request *rq, int at_head, 2586 struct request *rq, int at_head,
2582 rq_end_io_fn *done) 2587 rq_end_io_fn *done)
2583{ 2588{
@@ -2605,7 +2610,7 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
2605 * Insert a fully prepared request at the back of the io scheduler queue 2610 * Insert a fully prepared request at the back of the io scheduler queue
2606 * for execution and wait for completion. 2611 * for execution and wait for completion.
2607 */ 2612 */
2608int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk, 2613int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
2609 struct request *rq, int at_head) 2614 struct request *rq, int at_head)
2610{ 2615{
2611 DECLARE_COMPLETION_ONSTACK(wait); 2616 DECLARE_COMPLETION_ONSTACK(wait);
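
The _nowait variant above pairs with an rq_end_io_fn callback instead of an on-stack completion; a sketch with illustrative my_* names:

/* runs when the request finishes, possibly in interrupt context */
static void my_done(struct request *rq, int error)
{
	if (error)
		printk(KERN_WARNING "my_submit: request failed: %d\n", error);
	blk_put_request(rq);	/* drop the reference taken at submit time */
}

static int my_submit(struct request_queue *q, struct gendisk *disk)
{
	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);

	if (!rq)
		return -ENOMEM;
	blk_execute_rq_nowait(q, disk, rq, 0, my_done);
	return 0;		/* my_done() fires later */
}
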
@@ -2648,7 +2653,7 @@ EXPORT_SYMBOL(blk_execute_rq);
2648 */ 2653 */
2649int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector) 2654int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
2650{ 2655{
2651 request_queue_t *q; 2656 struct request_queue *q;
2652 2657
2653 if (bdev->bd_disk == NULL) 2658 if (bdev->bd_disk == NULL)
2654 return -ENXIO; 2659 return -ENXIO;
@@ -2684,7 +2689,7 @@ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
2684 * queue lock is held and interrupts disabled, as we muck with the 2689 * queue lock is held and interrupts disabled, as we muck with the
2685 * request queue list. 2690 * request queue list.
2686 */ 2691 */
2687static inline void add_request(request_queue_t * q, struct request * req) 2692static inline void add_request(struct request_queue * q, struct request * req)
2688{ 2693{
2689 drive_stat_acct(req, req->nr_sectors, 1); 2694 drive_stat_acct(req, req->nr_sectors, 1);
2690 2695
@@ -2730,7 +2735,7 @@ EXPORT_SYMBOL_GPL(disk_round_stats);
2730/* 2735/*
2731 * queue lock must be held 2736 * queue lock must be held
2732 */ 2737 */
2733void __blk_put_request(request_queue_t *q, struct request *req) 2738void __blk_put_request(struct request_queue *q, struct request *req)
2734{ 2739{
2735 if (unlikely(!q)) 2740 if (unlikely(!q))
2736 return; 2741 return;
@@ -2760,7 +2765,7 @@ EXPORT_SYMBOL_GPL(__blk_put_request);
2760void blk_put_request(struct request *req) 2765void blk_put_request(struct request *req)
2761{ 2766{
2762 unsigned long flags; 2767 unsigned long flags;
2763 request_queue_t *q = req->q; 2768 struct request_queue *q = req->q;
2764 2769
2765 /* 2770 /*
2766 * Gee, IDE calls in w/ NULL q. Fix IDE and remove the 2771 * Gee, IDE calls in w/ NULL q. Fix IDE and remove the
@@ -2798,7 +2803,7 @@ EXPORT_SYMBOL(blk_end_sync_rq);
2798/* 2803/*
2799 * Has to be called with the request spinlock acquired 2804 * Has to be called with the request spinlock acquired
2800 */ 2805 */
2801static int attempt_merge(request_queue_t *q, struct request *req, 2806static int attempt_merge(struct request_queue *q, struct request *req,
2802 struct request *next) 2807 struct request *next)
2803{ 2808{
2804 if (!rq_mergeable(req) || !rq_mergeable(next)) 2809 if (!rq_mergeable(req) || !rq_mergeable(next))
@@ -2851,7 +2856,8 @@ static int attempt_merge(request_queue_t *q, struct request *req,
2851 return 1; 2856 return 1;
2852} 2857}
2853 2858
2854static inline int attempt_back_merge(request_queue_t *q, struct request *rq) 2859static inline int attempt_back_merge(struct request_queue *q,
2860 struct request *rq)
2855{ 2861{
2856 struct request *next = elv_latter_request(q, rq); 2862 struct request *next = elv_latter_request(q, rq);
2857 2863
@@ -2861,7 +2867,8 @@ static inline int attempt_back_merge(request_queue_t *q, struct request *rq)
2861 return 0; 2867 return 0;
2862} 2868}
2863 2869
2864static inline int attempt_front_merge(request_queue_t *q, struct request *rq) 2870static inline int attempt_front_merge(struct request_queue *q,
2871 struct request *rq)
2865{ 2872{
2866 struct request *prev = elv_former_request(q, rq); 2873 struct request *prev = elv_former_request(q, rq);
2867 2874
@@ -2905,7 +2912,7 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
2905 req->start_time = jiffies; 2912 req->start_time = jiffies;
2906} 2913}
2907 2914
2908static int __make_request(request_queue_t *q, struct bio *bio) 2915static int __make_request(struct request_queue *q, struct bio *bio)
2909{ 2916{
2910 struct request *req; 2917 struct request *req;
2911 int el_ret, nr_sectors, barrier, err; 2918 int el_ret, nr_sectors, barrier, err;
@@ -3119,7 +3126,7 @@ static inline int should_fail_request(struct bio *bio)
3119 */ 3126 */
3120static inline void __generic_make_request(struct bio *bio) 3127static inline void __generic_make_request(struct bio *bio)
3121{ 3128{
3122 request_queue_t *q; 3129 struct request_queue *q;
3123 sector_t maxsector; 3130 sector_t maxsector;
3124 sector_t old_sector; 3131 sector_t old_sector;
3125 int ret, nr_sectors = bio_sectors(bio); 3132 int ret, nr_sectors = bio_sectors(bio);
@@ -3312,7 +3319,7 @@ static void blk_recalc_rq_segments(struct request *rq)
3312 struct bio *bio, *prevbio = NULL; 3319 struct bio *bio, *prevbio = NULL;
3313 int nr_phys_segs, nr_hw_segs; 3320 int nr_phys_segs, nr_hw_segs;
3314 unsigned int phys_size, hw_size; 3321 unsigned int phys_size, hw_size;
3315 request_queue_t *q = rq->q; 3322 struct request_queue *q = rq->q;
3316 3323
3317 if (!rq->bio) 3324 if (!rq->bio)
3318 return; 3325 return;
@@ -3658,7 +3665,8 @@ void end_request(struct request *req, int uptodate)
3658 3665
3659EXPORT_SYMBOL(end_request); 3666EXPORT_SYMBOL(end_request);
3660 3667
3661void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio) 3668void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
3669 struct bio *bio)
3662{ 3670{
3663 /* first two bits are identical in rq->cmd_flags and bio->bi_rw */ 3671 /* first two bits are identical in rq->cmd_flags and bio->bi_rw */
3664 rq->cmd_flags |= (bio->bi_rw & 3); 3672 rq->cmd_flags |= (bio->bi_rw & 3);
@@ -3701,7 +3709,7 @@ int __init blk_dev_init(void)
3701 sizeof(struct request), 0, SLAB_PANIC, NULL); 3709 sizeof(struct request), 0, SLAB_PANIC, NULL);
3702 3710
3703 requestq_cachep = kmem_cache_create("blkdev_queue", 3711 requestq_cachep = kmem_cache_create("blkdev_queue",
3704 sizeof(request_queue_t), 0, SLAB_PANIC, NULL); 3712 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
3705 3713
3706 iocontext_cachep = kmem_cache_create("blkdev_ioc", 3714 iocontext_cachep = kmem_cache_create("blkdev_ioc",
3707 sizeof(struct io_context), 0, SLAB_PANIC, NULL); 3715 sizeof(struct io_context), 0, SLAB_PANIC, NULL);
@@ -4021,7 +4029,8 @@ static ssize_t
4021queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 4029queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
4022{ 4030{
4023 struct queue_sysfs_entry *entry = to_queue(attr); 4031 struct queue_sysfs_entry *entry = to_queue(attr);
4024 request_queue_t *q = container_of(kobj, struct request_queue, kobj); 4032 struct request_queue *q =
4033 container_of(kobj, struct request_queue, kobj);
4025 ssize_t res; 4034 ssize_t res;
4026 4035
4027 if (!entry->show) 4036 if (!entry->show)
@@ -4041,7 +4050,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
4041 const char *page, size_t length) 4050 const char *page, size_t length)
4042{ 4051{
4043 struct queue_sysfs_entry *entry = to_queue(attr); 4052 struct queue_sysfs_entry *entry = to_queue(attr);
4044 request_queue_t *q = container_of(kobj, struct request_queue, kobj); 4053 struct request_queue *q = container_of(kobj, struct request_queue, kobj);
4045 4054
4046 ssize_t res; 4055 ssize_t res;
4047 4056
@@ -4072,7 +4081,7 @@ int blk_register_queue(struct gendisk *disk)
4072{ 4081{
4073 int ret; 4082 int ret;
4074 4083
4075 request_queue_t *q = disk->queue; 4084 struct request_queue *q = disk->queue;
4076 4085
4077 if (!q || !q->request_fn) 4086 if (!q || !q->request_fn)
4078 return -ENXIO; 4087 return -ENXIO;
@@ -4097,7 +4106,7 @@ int blk_register_queue(struct gendisk *disk)
4097 4106
4098void blk_unregister_queue(struct gendisk *disk) 4107void blk_unregister_queue(struct gendisk *disk)
4099{ 4108{
4100 request_queue_t *q = disk->queue; 4109 struct request_queue *q = disk->queue;
4101 4110
4102 if (q && q->request_fn) { 4111 if (q && q->request_fn) {
4103 elv_unregister_queue(q); 4112 elv_unregister_queue(q);
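
The ll_rw_blk.c hunks above are all the same mechanical substitution: the request_queue_t typedef is spelled out as struct request_queue. Schematically, with an illustrative driver prototype that is not part of this patch:

/* before: <linux/blkdev.h> still provides the compatibility typedef */
typedef struct request_queue request_queue_t;
static void do_foo_request(request_queue_t *q);

/* after: name the struct directly; the typedef can then be retired */
static void do_foo_request(struct request_queue *q);

Both spellings denote the same type, so the tree can be converted file by file, as the remainder of this patch does, before the typedef itself is removed.
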
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 1c3de2b9a6b..7563d8aa394 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -11,13 +11,13 @@ struct noop_data {
11 struct list_head queue; 11 struct list_head queue;
12}; 12};
13 13
14static void noop_merged_requests(request_queue_t *q, struct request *rq, 14static void noop_merged_requests(struct request_queue *q, struct request *rq,
15 struct request *next) 15 struct request *next)
16{ 16{
17 list_del_init(&next->queuelist); 17 list_del_init(&next->queuelist);
18} 18}
19 19
20static int noop_dispatch(request_queue_t *q, int force) 20static int noop_dispatch(struct request_queue *q, int force)
21{ 21{
22 struct noop_data *nd = q->elevator->elevator_data; 22 struct noop_data *nd = q->elevator->elevator_data;
23 23
@@ -31,14 +31,14 @@ static int noop_dispatch(request_queue_t *q, int force)
31 return 0; 31 return 0;
32} 32}
33 33
34static void noop_add_request(request_queue_t *q, struct request *rq) 34static void noop_add_request(struct request_queue *q, struct request *rq)
35{ 35{
36 struct noop_data *nd = q->elevator->elevator_data; 36 struct noop_data *nd = q->elevator->elevator_data;
37 37
38 list_add_tail(&rq->queuelist, &nd->queue); 38 list_add_tail(&rq->queuelist, &nd->queue);
39} 39}
40 40
41static int noop_queue_empty(request_queue_t *q) 41static int noop_queue_empty(struct request_queue *q)
42{ 42{
43 struct noop_data *nd = q->elevator->elevator_data; 43 struct noop_data *nd = q->elevator->elevator_data;
44 44
@@ -46,7 +46,7 @@ static int noop_queue_empty(request_queue_t *q)
46} 46}
47 47
48static struct request * 48static struct request *
49noop_former_request(request_queue_t *q, struct request *rq) 49noop_former_request(struct request_queue *q, struct request *rq)
50{ 50{
51 struct noop_data *nd = q->elevator->elevator_data; 51 struct noop_data *nd = q->elevator->elevator_data;
52 52
@@ -56,7 +56,7 @@ noop_former_request(request_queue_t *q, struct request *rq)
56} 56}
57 57
58static struct request * 58static struct request *
59noop_latter_request(request_queue_t *q, struct request *rq) 59noop_latter_request(struct request_queue *q, struct request *rq)
60{ 60{
61 struct noop_data *nd = q->elevator->elevator_data; 61 struct noop_data *nd = q->elevator->elevator_data;
62 62
@@ -65,7 +65,7 @@ noop_latter_request(request_queue_t *q, struct request *rq)
65 return list_entry(rq->queuelist.next, struct request, queuelist); 65 return list_entry(rq->queuelist.next, struct request, queuelist);
66} 66}
67 67
68static void *noop_init_queue(request_queue_t *q) 68static void *noop_init_queue(struct request_queue *q)
69{ 69{
70 struct noop_data *nd; 70 struct noop_data *nd;
71 71
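
For completeness: the hooks touched above only take effect once wired into an elevator_type and registered, which the unchanged tail of this file does along the lines of the sketch below. Field names follow the elevator API of this era; noop_exit_queue is the matching teardown hook not shown in the hunks above:

static struct elevator_type elevator_noop = {
	.ops = {
		.elevator_merge_req_fn	 = noop_merged_requests,
		.elevator_dispatch_fn	 = noop_dispatch,
		.elevator_add_req_fn	 = noop_add_request,
		.elevator_queue_empty_fn = noop_queue_empty,
		.elevator_former_req_fn	 = noop_former_request,
		.elevator_latter_req_fn	 = noop_latter_request,
		.elevator_init_fn	 = noop_init_queue,
		.elevator_exit_fn	 = noop_exit_queue,
	},
	.elevator_name = "noop",
	.elevator_owner = THIS_MODULE,
};

static int __init noop_init(void)
{
	elv_register(&elevator_noop);
	return 0;
}
module_init(noop_init);
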
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index d359a715bbc..91c73224f4c 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -49,22 +49,22 @@ static int sg_get_version(int __user *p)
49 return put_user(sg_version_num, p); 49 return put_user(sg_version_num, p);
50} 50}
51 51
52static int scsi_get_idlun(request_queue_t *q, int __user *p) 52static int scsi_get_idlun(struct request_queue *q, int __user *p)
53{ 53{
54 return put_user(0, p); 54 return put_user(0, p);
55} 55}
56 56
57static int scsi_get_bus(request_queue_t *q, int __user *p) 57static int scsi_get_bus(struct request_queue *q, int __user *p)
58{ 58{
59 return put_user(0, p); 59 return put_user(0, p);
60} 60}
61 61
62static int sg_get_timeout(request_queue_t *q) 62static int sg_get_timeout(struct request_queue *q)
63{ 63{
64 return q->sg_timeout / (HZ / USER_HZ); 64 return q->sg_timeout / (HZ / USER_HZ);
65} 65}
66 66
67static int sg_set_timeout(request_queue_t *q, int __user *p) 67static int sg_set_timeout(struct request_queue *q, int __user *p)
68{ 68{
69 int timeout, err = get_user(timeout, p); 69 int timeout, err = get_user(timeout, p);
70 70
@@ -74,14 +74,14 @@ static int sg_set_timeout(request_queue_t *q, int __user *p)
74 return err; 74 return err;
75} 75}
76 76
77static int sg_get_reserved_size(request_queue_t *q, int __user *p) 77static int sg_get_reserved_size(struct request_queue *q, int __user *p)
78{ 78{
79 unsigned val = min(q->sg_reserved_size, q->max_sectors << 9); 79 unsigned val = min(q->sg_reserved_size, q->max_sectors << 9);
80 80
81 return put_user(val, p); 81 return put_user(val, p);
82} 82}
83 83
84static int sg_set_reserved_size(request_queue_t *q, int __user *p) 84static int sg_set_reserved_size(struct request_queue *q, int __user *p)
85{ 85{
86 int size, err = get_user(size, p); 86 int size, err = get_user(size, p);
87 87
@@ -101,7 +101,7 @@ static int sg_set_reserved_size(request_queue_t *q, int __user *p)
101 * will always return that we are ATAPI even for a real SCSI drive; I'm not 101 * will always return that we are ATAPI even for a real SCSI drive; I'm not
102 * so sure this is worth doing anything about (why would you care??) 102 * so sure this is worth doing anything about (why would you care??)
103 */ 103 */
104static int sg_emulated_host(request_queue_t *q, int __user *p) 104static int sg_emulated_host(struct request_queue *q, int __user *p)
105{ 105{
106 return put_user(1, p); 106 return put_user(1, p);
107} 107}
@@ -214,7 +214,7 @@ int blk_verify_command(unsigned char *cmd, int has_write_perm)
214} 214}
215EXPORT_SYMBOL_GPL(blk_verify_command); 215EXPORT_SYMBOL_GPL(blk_verify_command);
216 216
217static int blk_fill_sghdr_rq(request_queue_t *q, struct request *rq, 217static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
218 struct sg_io_hdr *hdr, int has_write_perm) 218 struct sg_io_hdr *hdr, int has_write_perm)
219{ 219{
220 memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */ 220 memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
@@ -286,7 +286,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
286 return r; 286 return r;
287} 287}
288 288
289static int sg_io(struct file *file, request_queue_t *q, 289static int sg_io(struct file *file, struct request_queue *q,
290 struct gendisk *bd_disk, struct sg_io_hdr *hdr) 290 struct gendisk *bd_disk, struct sg_io_hdr *hdr)
291{ 291{
292 unsigned long start_time; 292 unsigned long start_time;
@@ -519,7 +519,8 @@ error:
519EXPORT_SYMBOL_GPL(sg_scsi_ioctl); 519EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
520 520
521/* Send basic block requests */ 521/* Send basic block requests */
522static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int cmd, int data) 522static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
523 int cmd, int data)
523{ 524{
524 struct request *rq; 525 struct request *rq;
525 int err; 526 int err;
@@ -539,7 +540,8 @@ static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int c
539 return err; 540 return err;
540} 541}
541 542
542static inline int blk_send_start_stop(request_queue_t *q, struct gendisk *bd_disk, int data) 543static inline int blk_send_start_stop(struct request_queue *q,
544 struct gendisk *bd_disk, int data)
543{ 545{
544 return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data); 546 return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data);
545} 547}
diff --git a/drivers/acorn/block/fd1772.c b/drivers/acorn/block/fd1772.c
index 423ed08fb6f..d7e18ce8dad 100644
--- a/drivers/acorn/block/fd1772.c
+++ b/drivers/acorn/block/fd1772.c
@@ -372,7 +372,7 @@ static int fd_test_drive_present(int drive);
372static void config_types(void); 372static void config_types(void);
373static int floppy_open(struct inode *inode, struct file *filp); 373static int floppy_open(struct inode *inode, struct file *filp);
374static int floppy_release(struct inode *inode, struct file *filp); 374static int floppy_release(struct inode *inode, struct file *filp);
375static void do_fd_request(request_queue_t *); 375static void do_fd_request(struct request_queue *);
376 376
377/************************* End of Prototypes **************************/ 377/************************* End of Prototypes **************************/
378 378
@@ -1271,7 +1271,7 @@ static void fd1772_checkint(void)
1271 } 1271 }
1272} 1272}
1273 1273
1274static void do_fd_request(request_queue_t* q) 1274static void do_fd_request(struct request_queue* q)
1275{ 1275{
1276 unsigned long flags; 1276 unsigned long flags;
1277 1277
diff --git a/drivers/acorn/block/mfmhd.c b/drivers/acorn/block/mfmhd.c
index d85520f78e6..74058db674d 100644
--- a/drivers/acorn/block/mfmhd.c
+++ b/drivers/acorn/block/mfmhd.c
@@ -924,7 +924,7 @@ static void mfm_request(void)
924 DBG("mfm_request: Dropping out bottom\n"); 924 DBG("mfm_request: Dropping out bottom\n");
925} 925}
926 926
927static void do_mfm_request(request_queue_t *q) 927static void do_mfm_request(struct request_queue *q)
928{ 928{
929 DBG("do_mfm_request: about to mfm_request\n"); 929 DBG("do_mfm_request: about to mfm_request\n");
930 mfm_request(); 930 mfm_request();
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 12ac0b511f7..e83647651b3 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -768,7 +768,7 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
768 * Decrement max hw segments accordingly. 768 * Decrement max hw segments accordingly.
769 */ 769 */
770 if (dev->class == ATA_DEV_ATAPI) { 770 if (dev->class == ATA_DEV_ATAPI) {
771 request_queue_t *q = sdev->request_queue; 771 struct request_queue *q = sdev->request_queue;
772 blk_queue_max_hw_segments(q, q->max_hw_segments - 1); 772 blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
773 } 773 }
774 774
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 6ce8b897e26..c9751b2b57e 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1422,7 +1422,7 @@ static void redo_fd_request(void)
1422 goto repeat; 1422 goto repeat;
1423} 1423}
1424 1424
1425static void do_fd_request(request_queue_t * q) 1425static void do_fd_request(struct request_queue * q)
1426{ 1426{
1427 redo_fd_request(); 1427 redo_fd_request();
1428} 1428}
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 1d846681794..ba07f762c4c 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -138,7 +138,7 @@ struct aoedev {
138 u16 maxbcnt; 138 u16 maxbcnt;
139 struct work_struct work;/* disk create work struct */ 139 struct work_struct work;/* disk create work struct */
140 struct gendisk *gd; 140 struct gendisk *gd;
141 request_queue_t blkq; 141 struct request_queue blkq;
142 struct hd_geometry geo; 142 struct hd_geometry geo;
143 sector_t ssize; 143 sector_t ssize;
144 struct timer_list timer; 144 struct timer_list timer;
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 4f598270fa3..007faaf008e 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -125,7 +125,7 @@ aoeblk_release(struct inode *inode, struct file *filp)
125} 125}
126 126
127static int 127static int
128aoeblk_make_request(request_queue_t *q, struct bio *bio) 128aoeblk_make_request(struct request_queue *q, struct bio *bio)
129{ 129{
130 struct aoedev *d; 130 struct aoedev *d;
131 struct buf *buf; 131 struct buf *buf;
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 14d6b949275..94268c75d04 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1466,7 +1466,7 @@ repeat:
1466} 1466}
1467 1467
1468 1468
1469void do_fd_request(request_queue_t * q) 1469void do_fd_request(struct request_queue * q)
1470{ 1470{
1471 unsigned long flags; 1471 unsigned long flags;
1472 1472
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index a2d6612b80d..1be82d544dc 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -139,7 +139,7 @@ static struct board_type products[] = {
139 139
140static ctlr_info_t *hba[MAX_CTLR]; 140static ctlr_info_t *hba[MAX_CTLR];
141 141
142static void do_cciss_request(request_queue_t *q); 142static void do_cciss_request(struct request_queue *q);
143static irqreturn_t do_cciss_intr(int irq, void *dev_id); 143static irqreturn_t do_cciss_intr(int irq, void *dev_id);
144static int cciss_open(struct inode *inode, struct file *filep); 144static int cciss_open(struct inode *inode, struct file *filep);
145static int cciss_release(struct inode *inode, struct file *filep); 145static int cciss_release(struct inode *inode, struct file *filep);
@@ -1584,7 +1584,7 @@ static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1584 */ 1584 */
1585 if (h->gendisk[0] != disk) { 1585 if (h->gendisk[0] != disk) {
1586 if (disk) { 1586 if (disk) {
1587 request_queue_t *q = disk->queue; 1587 struct request_queue *q = disk->queue;
1588 if (disk->flags & GENHD_FL_UP) 1588 if (disk->flags & GENHD_FL_UP)
1589 del_gendisk(disk); 1589 del_gendisk(disk);
1590 if (q) { 1590 if (q) {
@@ -2511,7 +2511,7 @@ after_error_processing:
2511/* 2511/*
2512 * Get a request and submit it to the controller. 2512 * Get a request and submit it to the controller.
2513 */ 2513 */
2514static void do_cciss_request(request_queue_t *q) 2514static void do_cciss_request(struct request_queue *q)
2515{ 2515{
2516 ctlr_info_t *h = q->queuedata; 2516 ctlr_info_t *h = q->queuedata;
2517 CommandList_struct *c; 2517 CommandList_struct *c;
@@ -3380,7 +3380,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3380 do { 3380 do {
3381 drive_info_struct *drv = &(hba[i]->drv[j]); 3381 drive_info_struct *drv = &(hba[i]->drv[j]);
3382 struct gendisk *disk = hba[i]->gendisk[j]; 3382 struct gendisk *disk = hba[i]->gendisk[j];
3383 request_queue_t *q; 3383 struct request_queue *q;
3384 3384
3385 /* Check if the disk was allocated already */ 3385 /* Check if the disk was allocated already */
3386 if (!disk){ 3386 if (!disk){
@@ -3523,7 +3523,7 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
3523 for (j = 0; j < CISS_MAX_LUN; j++) { 3523 for (j = 0; j < CISS_MAX_LUN; j++) {
3524 struct gendisk *disk = hba[i]->gendisk[j]; 3524 struct gendisk *disk = hba[i]->gendisk[j];
3525 if (disk) { 3525 if (disk) {
3526 request_queue_t *q = disk->queue; 3526 struct request_queue *q = disk->queue;
3527 3527
3528 if (disk->flags & GENHD_FL_UP) 3528 if (disk->flags & GENHD_FL_UP)
3529 del_gendisk(disk); 3529 del_gendisk(disk);
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index b94cd1c3213..be4e3477d83 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -161,7 +161,7 @@ static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
161static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo); 161static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
162static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io); 162static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
163 163
164static void do_ida_request(request_queue_t *q); 164static void do_ida_request(struct request_queue *q);
165static void start_io(ctlr_info_t *h); 165static void start_io(ctlr_info_t *h);
166 166
167static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c); 167static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
@@ -391,7 +391,7 @@ static void __devexit cpqarray_remove_one_eisa (int i)
391/* pdev is NULL for eisa */ 391/* pdev is NULL for eisa */
392static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev) 392static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
393{ 393{
394 request_queue_t *q; 394 struct request_queue *q;
395 int j; 395 int j;
396 396
397 /* 397 /*
@@ -886,7 +886,7 @@ static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
886 * are in here (either via the dummy do_ida_request functions or by being 886 * are in here (either via the dummy do_ida_request functions or by being
887 * called from the interrupt handler 887 * called from the interrupt handler
888 */ 888 */
889static void do_ida_request(request_queue_t *q) 889static void do_ida_request(struct request_queue *q)
890{ 890{
891 ctlr_info_t *h = q->queuedata; 891 ctlr_info_t *h = q->queuedata;
892 cmdlist_t *c; 892 cmdlist_t *c;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index fe088045dd0..085b7794fb3 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -251,7 +251,7 @@ static int irqdma_allocated;
251 251
252static struct request *current_req; 252static struct request *current_req;
253static struct request_queue *floppy_queue; 253static struct request_queue *floppy_queue;
254static void do_fd_request(request_queue_t * q); 254static void do_fd_request(struct request_queue * q);
255 255
256#ifndef fd_get_dma_residue 256#ifndef fd_get_dma_residue
257#define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA) 257#define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA)
@@ -2981,7 +2981,7 @@ static void process_fd_request(void)
2981 schedule_bh(redo_fd_request); 2981 schedule_bh(redo_fd_request);
2982} 2982}
2983 2983
2984static void do_fd_request(request_queue_t * q) 2984static void do_fd_request(struct request_queue * q)
2985{ 2985{
2986 if (max_buffer_sectors == 0) { 2986 if (max_buffer_sectors == 0) {
2987 printk("VFS: do_fd_request called on non-open device\n"); 2987 printk("VFS: do_fd_request called on non-open device\n");
diff --git a/drivers/block/lguest_blk.c b/drivers/block/lguest_blk.c
index 1634c2dd25e..5b79d072417 100644
--- a/drivers/block/lguest_blk.c
+++ b/drivers/block/lguest_blk.c
@@ -137,7 +137,7 @@ static void do_read(struct blockdev *bd, struct request *req)
137 lguest_send_dma(bd->phys_addr, &ping); 137 lguest_send_dma(bd->phys_addr, &ping);
138} 138}
139 139
140static void do_lgb_request(request_queue_t *q) 140static void do_lgb_request(struct request_queue *q)
141{ 141{
142 struct blockdev *bd; 142 struct blockdev *bd;
143 struct request *req; 143 struct request *req;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index e425daa1eac..9f015fce413 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -529,7 +529,7 @@ static struct bio *loop_get_bio(struct loop_device *lo)
529 return bio; 529 return bio;
530} 530}
531 531
532static int loop_make_request(request_queue_t *q, struct bio *old_bio) 532static int loop_make_request(struct request_queue *q, struct bio *old_bio)
533{ 533{
534 struct loop_device *lo = q->queuedata; 534 struct loop_device *lo = q->queuedata;
535 int rw = bio_rw(old_bio); 535 int rw = bio_rw(old_bio);
@@ -558,7 +558,7 @@ out:
558/* 558/*
559 * kick off io on the underlying address space 559 * kick off io on the underlying address space
560 */ 560 */
561static void loop_unplug(request_queue_t *q) 561static void loop_unplug(struct request_queue *q)
562{ 562{
563 struct loop_device *lo = q->queuedata; 563 struct loop_device *lo = q->queuedata;
564 564
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index c1295102409..be92c658f06 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -100,7 +100,7 @@ static const char *nbdcmd_to_ascii(int cmd)
100static void nbd_end_request(struct request *req) 100static void nbd_end_request(struct request *req)
101{ 101{
102 int uptodate = (req->errors == 0) ? 1 : 0; 102 int uptodate = (req->errors == 0) ? 1 : 0;
103 request_queue_t *q = req->q; 103 struct request_queue *q = req->q;
104 unsigned long flags; 104 unsigned long flags;
105 105
106 dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name, 106 dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
@@ -410,7 +410,7 @@ static void nbd_clear_que(struct nbd_device *lo)
410 * { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); } 410 * { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
411 */ 411 */
412 412
413static void do_nbd_request(request_queue_t * q) 413static void do_nbd_request(struct request_queue * q)
414{ 414{
415 struct request *req; 415 struct request *req;
416 416
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 1eeb8f2cde7..b8a994a2b01 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -183,7 +183,7 @@ static int pcd_packet(struct cdrom_device_info *cdi,
183static int pcd_detect(void); 183static int pcd_detect(void);
184static void pcd_probe_capabilities(void); 184static void pcd_probe_capabilities(void);
185static void do_pcd_read_drq(void); 185static void do_pcd_read_drq(void);
186static void do_pcd_request(request_queue_t * q); 186static void do_pcd_request(struct request_queue * q);
187static void do_pcd_read(void); 187static void do_pcd_read(void);
188 188
189struct pcd_unit { 189struct pcd_unit {
@@ -713,7 +713,7 @@ static int pcd_detect(void)
713/* I/O request processing */ 713/* I/O request processing */
714static struct request_queue *pcd_queue; 714static struct request_queue *pcd_queue;
715 715
716static void do_pcd_request(request_queue_t * q) 716static void do_pcd_request(struct request_queue * q)
717{ 717{
718 if (pcd_busy) 718 if (pcd_busy)
719 return; 719 return;
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 31e01488eb5..df819f8a95a 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -698,7 +698,7 @@ static enum action pd_identify(struct pd_unit *disk)
698 698
699/* end of io request engine */ 699/* end of io request engine */
700 700
701static void do_pd_request(request_queue_t * q) 701static void do_pd_request(struct request_queue * q)
702{ 702{
703 if (pd_req) 703 if (pd_req)
704 return; 704 return;
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 5826508f673..ceffa6034e2 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -202,7 +202,7 @@ module_param_array(drive3, int, NULL, 0);
202#define ATAPI_WRITE_10 0x2a 202#define ATAPI_WRITE_10 0x2a
203 203
204static int pf_open(struct inode *inode, struct file *file); 204static int pf_open(struct inode *inode, struct file *file);
205static void do_pf_request(request_queue_t * q); 205static void do_pf_request(struct request_queue * q);
206static int pf_ioctl(struct inode *inode, struct file *file, 206static int pf_ioctl(struct inode *inode, struct file *file,
207 unsigned int cmd, unsigned long arg); 207 unsigned int cmd, unsigned long arg);
208static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo); 208static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@@ -760,7 +760,7 @@ static void pf_end_request(int uptodate)
760 } 760 }
761} 761}
762 762
763static void do_pf_request(request_queue_t * q) 763static void do_pf_request(struct request_queue * q)
764{ 764{
765 if (pf_busy) 765 if (pf_busy)
766 return; 766 return;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 31be33e4f11..fadbfd880ba 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -752,7 +752,7 @@ static inline struct bio *pkt_get_list_first(struct bio **list_head, struct bio
752 */ 752 */
753static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc) 753static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
754{ 754{
755 request_queue_t *q = bdev_get_queue(pd->bdev); 755 struct request_queue *q = bdev_get_queue(pd->bdev);
756 struct request *rq; 756 struct request *rq;
757 int ret = 0; 757 int ret = 0;
758 758
@@ -979,7 +979,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
979 * Special care is needed if the underlying block device has a small 979 * Special care is needed if the underlying block device has a small
980 * max_phys_segments value. 980 * max_phys_segments value.
981 */ 981 */
982static int pkt_set_segment_merging(struct pktcdvd_device *pd, request_queue_t *q) 982static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
983{ 983{
984 if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) { 984 if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
985 /* 985 /*
@@ -2314,7 +2314,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, int write)
2314{ 2314{
2315 int ret; 2315 int ret;
2316 long lba; 2316 long lba;
2317 request_queue_t *q; 2317 struct request_queue *q;
2318 2318
2319 /* 2319 /*
2320 * We need to re-open the cdrom device without O_NONBLOCK to be able 2320 * We need to re-open the cdrom device without O_NONBLOCK to be able
@@ -2477,7 +2477,7 @@ static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int
2477 return 0; 2477 return 0;
2478} 2478}
2479 2479
2480static int pkt_make_request(request_queue_t *q, struct bio *bio) 2480static int pkt_make_request(struct request_queue *q, struct bio *bio)
2481{ 2481{
2482 struct pktcdvd_device *pd; 2482 struct pktcdvd_device *pd;
2483 char b[BDEVNAME_SIZE]; 2483 char b[BDEVNAME_SIZE];
@@ -2626,7 +2626,7 @@ end_io:
2626 2626
2627 2627
2628 2628
2629static int pkt_merge_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *bvec) 2629static int pkt_merge_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *bvec)
2630{ 2630{
2631 struct pktcdvd_device *pd = q->queuedata; 2631 struct pktcdvd_device *pd = q->queuedata;
2632 sector_t zone = ZONE(bio->bi_sector, pd); 2632 sector_t zone = ZONE(bio->bi_sector, pd);
@@ -2647,7 +2647,7 @@ static int pkt_merge_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *b
2647 2647
2648static void pkt_init_queue(struct pktcdvd_device *pd) 2648static void pkt_init_queue(struct pktcdvd_device *pd)
2649{ 2649{
2650 request_queue_t *q = pd->disk->queue; 2650 struct request_queue *q = pd->disk->queue;
2651 2651
2652 blk_queue_make_request(q, pkt_make_request); 2652 blk_queue_make_request(q, pkt_make_request);
2653 blk_queue_hardsect_size(q, CD_FRAMESIZE); 2653 blk_queue_hardsect_size(q, CD_FRAMESIZE);
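
pktcdvd is one of the bio-based drivers in this patch: it owns the queue via blk_queue_make_request() instead of providing a request_fn. The general shape, sketched with illustrative my_* names (note the three-argument bio_endio() of this era, visible in the pkt_end_io_read_cloned hunk above):

static int my_make_request(struct request_queue *q, struct bio *bio)
{
	/* queue, remap or complete the bio; here we just complete it */
	bio_endio(bio, bio->bi_size, 0);
	return 0;		/* 0 = consumed, do not resubmit */
}

static void my_init_queue(struct request_queue *q)
{
	blk_queue_make_request(q, my_make_request);
	blk_queue_hardsect_size(q, 512);
}
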
diff --git a/drivers/block/ps2esdi.c b/drivers/block/ps2esdi.c
index 688a4fb0dc9..3c796e23625 100644
--- a/drivers/block/ps2esdi.c
+++ b/drivers/block/ps2esdi.c
@@ -64,7 +64,7 @@ static void reset_ctrl(void);
64 64
65static int ps2esdi_geninit(void); 65static int ps2esdi_geninit(void);
66 66
67static void do_ps2esdi_request(request_queue_t * q); 67static void do_ps2esdi_request(struct request_queue * q);
68 68
69static void ps2esdi_readwrite(int cmd, struct request *req); 69static void ps2esdi_readwrite(int cmd, struct request *req);
70 70
@@ -473,7 +473,7 @@ static void __init ps2esdi_get_device_cfg(void)
473} 473}
474 474
475/* strategy routine that handles most of the IO requests */ 475/* strategy routine that handles most of the IO requests */
476static void do_ps2esdi_request(request_queue_t * q) 476static void do_ps2esdi_request(struct request_queue * q)
477{ 477{
478 struct request *req; 478 struct request *req;
479 /* since, this routine is called with interrupts cleared - they 479 /* since, this routine is called with interrupts cleared - they
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 170fb33dba9..aa8b890c80d 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -190,7 +190,7 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
190} 190}
191 191
192static void ps3disk_do_request(struct ps3_storage_device *dev, 192static void ps3disk_do_request(struct ps3_storage_device *dev,
193 request_queue_t *q) 193 struct request_queue *q)
194{ 194{
195 struct request *req; 195 struct request *req;
196 196
@@ -211,7 +211,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
211 } 211 }
212} 212}
213 213
214static void ps3disk_request(request_queue_t *q) 214static void ps3disk_request(struct request_queue *q)
215{ 215{
216 struct ps3_storage_device *dev = q->queuedata; 216 struct ps3_storage_device *dev = q->queuedata;
217 struct ps3disk_private *priv = dev->sbd.core.driver_data; 217 struct ps3disk_private *priv = dev->sbd.core.driver_data;
@@ -404,7 +404,7 @@ static int ps3disk_identify(struct ps3_storage_device *dev)
404 return 0; 404 return 0;
405} 405}
406 406
407static void ps3disk_prepare_flush(request_queue_t *q, struct request *req) 407static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)
408{ 408{
409 struct ps3_storage_device *dev = q->queuedata; 409 struct ps3_storage_device *dev = q->queuedata;
410 410
@@ -414,7 +414,7 @@ static void ps3disk_prepare_flush(request_queue_t *q, struct request *req)
414 req->cmd_type = REQ_TYPE_FLUSH; 414 req->cmd_type = REQ_TYPE_FLUSH;
415} 415}
416 416
417static int ps3disk_issue_flush(request_queue_t *q, struct gendisk *gendisk, 417static int ps3disk_issue_flush(struct request_queue *q, struct gendisk *gendisk,
418 sector_t *sector) 418 sector_t *sector)
419{ 419{
420 struct ps3_storage_device *dev = q->queuedata; 420 struct ps3_storage_device *dev = q->queuedata;
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index a1512da3241..65150b548f3 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -264,7 +264,7 @@ static int rd_blkdev_pagecache_IO(int rw, struct bio_vec *vec, sector_t sector,
264 * 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Added devfs support 264 * 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Added devfs support
265 * 265 *
266 */ 266 */
267static int rd_make_request(request_queue_t *q, struct bio *bio) 267static int rd_make_request(struct request_queue *q, struct bio *bio)
268{ 268{
269 struct block_device *bdev = bio->bi_bdev; 269 struct block_device *bdev = bio->bi_bdev;
270 struct address_space * mapping = bdev->bd_inode->i_mapping; 270 struct address_space * mapping = bdev->bd_inode->i_mapping;
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index d50b8238115..4dff49256ac 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -444,7 +444,7 @@ out:
444 return err; 444 return err;
445} 445}
446 446
447static void do_vdc_request(request_queue_t *q) 447static void do_vdc_request(struct request_queue *q)
448{ 448{
449 while (1) { 449 while (1) {
450 struct request *req = elv_next_request(q); 450 struct request *req = elv_next_request(q);
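
do_vdc_request above is the canonical request_fn loop of this era; spelled out as a sketch (my_transfer is an assumed driver-specific helper, everything else is the real API):

static int my_transfer(struct request *req);	/* assumed helper: 0 on success */

static void my_request_fn(struct request_queue *q)	/* queue lock held */
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		if (!blk_fs_request(req)) {	/* pass on non-fs requests */
			end_request(req, 0);
			continue;
		}
		/* move req->current_nr_sectors starting at req->sector */
		end_request(req, my_transfer(req) == 0);
	}
}
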
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 1a65979f1f0..b4e462f154e 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -225,7 +225,7 @@ static unsigned short write_postamble[] = {
225static void swim3_select(struct floppy_state *fs, int sel); 225static void swim3_select(struct floppy_state *fs, int sel);
226static void swim3_action(struct floppy_state *fs, int action); 226static void swim3_action(struct floppy_state *fs, int action);
227static int swim3_readbit(struct floppy_state *fs, int bit); 227static int swim3_readbit(struct floppy_state *fs, int bit);
228static void do_fd_request(request_queue_t * q); 228static void do_fd_request(struct request_queue * q);
229static void start_request(struct floppy_state *fs); 229static void start_request(struct floppy_state *fs);
230static void set_timeout(struct floppy_state *fs, int nticks, 230static void set_timeout(struct floppy_state *fs, int nticks,
231 void (*proc)(unsigned long)); 231 void (*proc)(unsigned long));
@@ -290,7 +290,7 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
290 return (stat & DATA) == 0; 290 return (stat & DATA) == 0;
291} 291}
292 292
293static void do_fd_request(request_queue_t * q) 293static void do_fd_request(struct request_queue * q)
294{ 294{
295 int i; 295 int i;
296 for(i=0;i<floppy_count;i++) 296 for(i=0;i<floppy_count;i++)
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 949ae93499e..402209fec59 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -278,7 +278,7 @@ struct carm_host {
278 unsigned int state; 278 unsigned int state;
279 u32 fw_ver; 279 u32 fw_ver;
280 280
281 request_queue_t *oob_q; 281 struct request_queue *oob_q;
282 unsigned int n_oob; 282 unsigned int n_oob;
283 283
284 unsigned int hw_sg_used; 284 unsigned int hw_sg_used;
@@ -287,7 +287,7 @@ struct carm_host {
287 287
288 unsigned int wait_q_prod; 288 unsigned int wait_q_prod;
289 unsigned int wait_q_cons; 289 unsigned int wait_q_cons;
290 request_queue_t *wait_q[CARM_MAX_WAIT_Q]; 290 struct request_queue *wait_q[CARM_MAX_WAIT_Q];
291 291
292 unsigned int n_msgs; 292 unsigned int n_msgs;
293 u64 msg_alloc; 293 u64 msg_alloc;
@@ -756,7 +756,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
756 assert(rc == 0); 756 assert(rc == 0);
757} 757}
758 758
759static inline void carm_push_q (struct carm_host *host, request_queue_t *q) 759static inline void carm_push_q (struct carm_host *host, struct request_queue *q)
760{ 760{
761 unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q; 761 unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q;
762 762
@@ -768,7 +768,7 @@ static inline void carm_push_q (struct carm_host *host, request_queue_t *q)
768 BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */ 768 BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */
769} 769}
770 770
771static inline request_queue_t *carm_pop_q(struct carm_host *host) 771static inline struct request_queue *carm_pop_q(struct carm_host *host)
772{ 772{
773 unsigned int idx; 773 unsigned int idx;
774 774
@@ -783,7 +783,7 @@ static inline request_queue_t *carm_pop_q(struct carm_host *host)
783 783
784static inline void carm_round_robin(struct carm_host *host) 784static inline void carm_round_robin(struct carm_host *host)
785{ 785{
786 request_queue_t *q = carm_pop_q(host); 786 struct request_queue *q = carm_pop_q(host);
787 if (q) { 787 if (q) {
788 blk_start_queue(q); 788 blk_start_queue(q);
789 VPRINTK("STARTED QUEUE %p\n", q); 789 VPRINTK("STARTED QUEUE %p\n", q);
@@ -802,7 +802,7 @@ static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq,
802 } 802 }
803} 803}
804 804
805static void carm_oob_rq_fn(request_queue_t *q) 805static void carm_oob_rq_fn(struct request_queue *q)
806{ 806{
807 struct carm_host *host = q->queuedata; 807 struct carm_host *host = q->queuedata;
808 struct carm_request *crq; 808 struct carm_request *crq;
@@ -833,7 +833,7 @@ static void carm_oob_rq_fn(request_queue_t *q)
833 } 833 }
834} 834}
835 835
836static void carm_rq_fn(request_queue_t *q) 836static void carm_rq_fn(struct request_queue *q)
837{ 837{
838 struct carm_port *port = q->queuedata; 838 struct carm_port *port = q->queuedata;
839 struct carm_host *host = port->host; 839 struct carm_host *host = port->host;
@@ -1494,7 +1494,7 @@ static int carm_init_disks(struct carm_host *host)
1494 1494
1495 for (i = 0; i < CARM_MAX_PORTS; i++) { 1495 for (i = 0; i < CARM_MAX_PORTS; i++) {
1496 struct gendisk *disk; 1496 struct gendisk *disk;
1497 request_queue_t *q; 1497 struct request_queue *q;
1498 struct carm_port *port; 1498 struct carm_port *port;
1499 1499
1500 port = &host->port[i]; 1500 port = &host->port[i];
@@ -1538,7 +1538,7 @@ static void carm_free_disks(struct carm_host *host)
1538 for (i = 0; i < CARM_MAX_PORTS; i++) { 1538 for (i = 0; i < CARM_MAX_PORTS; i++) {
1539 struct gendisk *disk = host->port[i].disk; 1539 struct gendisk *disk = host->port[i].disk;
1540 if (disk) { 1540 if (disk) {
1541 request_queue_t *q = disk->queue; 1541 struct request_queue *q = disk->queue;
1542 1542
1543 if (disk->flags & GENHD_FL_UP) 1543 if (disk->flags & GENHD_FL_UP)
1544 del_gendisk(disk); 1544 del_gendisk(disk);
@@ -1571,7 +1571,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1571 struct carm_host *host; 1571 struct carm_host *host;
1572 unsigned int pci_dac; 1572 unsigned int pci_dac;
1573 int rc; 1573 int rc;
1574 request_queue_t *q; 1574 struct request_queue *q;
1575 unsigned int i; 1575 unsigned int i;
1576 1576
1577 if (!printed_version++) 1577 if (!printed_version++)
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 8b13d7d2cb6..c57dd2b3a0c 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -503,7 +503,7 @@ static void ub_cleanup(struct ub_dev *sc)
503{ 503{
504 struct list_head *p; 504 struct list_head *p;
505 struct ub_lun *lun; 505 struct ub_lun *lun;
506 request_queue_t *q; 506 struct request_queue *q;
507 507
508 while (!list_empty(&sc->luns)) { 508 while (!list_empty(&sc->luns)) {
509 p = sc->luns.next; 509 p = sc->luns.next;
@@ -619,7 +619,7 @@ static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
619 * The request function is our main entry point 619 * The request function is our main entry point
620 */ 620 */
621 621
622static void ub_request_fn(request_queue_t *q) 622static void ub_request_fn(struct request_queue *q)
623{ 623{
624 struct ub_lun *lun = q->queuedata; 624 struct ub_lun *lun = q->queuedata;
625 struct request *rq; 625 struct request *rq;
@@ -2273,7 +2273,7 @@ err_core:
2273static int ub_probe_lun(struct ub_dev *sc, int lnum) 2273static int ub_probe_lun(struct ub_dev *sc, int lnum)
2274{ 2274{
2275 struct ub_lun *lun; 2275 struct ub_lun *lun;
2276 request_queue_t *q; 2276 struct request_queue *q;
2277 struct gendisk *disk; 2277 struct gendisk *disk;
2278 int rc; 2278 int rc;
2279 2279
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index dec74bd2349..6b7c02d6360 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -114,7 +114,7 @@ struct cardinfo {
114 */ 114 */
115 struct bio *bio, *currentbio, **biotail; 115 struct bio *bio, *currentbio, **biotail;
116 116
117 request_queue_t *queue; 117 struct request_queue *queue;
118 118
119 struct mm_page { 119 struct mm_page {
120 dma_addr_t page_dma; 120 dma_addr_t page_dma;
@@ -357,7 +357,7 @@ static inline void reset_page(struct mm_page *page)
357 page->biotail = & page->bio; 357 page->biotail = & page->bio;
358} 358}
359 359
360static void mm_unplug_device(request_queue_t *q) 360static void mm_unplug_device(struct request_queue *q)
361{ 361{
362 struct cardinfo *card = q->queuedata; 362 struct cardinfo *card = q->queuedata;
363 unsigned long flags; 363 unsigned long flags;
@@ -541,7 +541,7 @@ static void process_page(unsigned long data)
541-- mm_make_request 541-- mm_make_request
542----------------------------------------------------------------------------------- 542-----------------------------------------------------------------------------------
543*/ 543*/
544static int mm_make_request(request_queue_t *q, struct bio *bio) 544static int mm_make_request(struct request_queue *q, struct bio *bio)
545{ 545{
546 struct cardinfo *card = q->queuedata; 546 struct cardinfo *card = q->queuedata;
547 pr_debug("mm_make_request %llu %u\n", 547 pr_debug("mm_make_request %llu %u\n",
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index dae39911a11..85916e2665d 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -400,7 +400,7 @@ error_ret:
400/* 400/*
401 * This is the external request processing routine 401 * This is the external request processing routine
402 */ 402 */
403static void do_viodasd_request(request_queue_t *q) 403static void do_viodasd_request(struct request_queue *q)
404{ 404{
405 struct request *req; 405 struct request *req;
406 406
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 0d97b7eb818..624d30f7da3 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -298,7 +298,7 @@ static u_char __init xd_detect (u_char *controller, unsigned int *address)
298} 298}
299 299
300/* do_xd_request: handle an incoming request */ 300/* do_xd_request: handle an incoming request */
301static void do_xd_request (request_queue_t * q) 301static void do_xd_request (struct request_queue * q)
302{ 302{
303 struct request *req; 303 struct request *req;
304 304
diff --git a/drivers/block/xd.h b/drivers/block/xd.h
index 82e090fea95..cffd44a2038 100644
--- a/drivers/block/xd.h
+++ b/drivers/block/xd.h
@@ -104,7 +104,7 @@ static int xd_manual_geo_init (char *command);
104static u_char xd_detect (u_char *controller, unsigned int *address); 104static u_char xd_detect (u_char *controller, unsigned int *address);
105static u_char xd_initdrives (void (*init_drive)(u_char drive)); 105static u_char xd_initdrives (void (*init_drive)(u_char drive));
106 106
107static void do_xd_request (request_queue_t * q); 107static void do_xd_request (struct request_queue * q);
108static int xd_ioctl (struct inode *inode,struct file *file,unsigned int cmd,unsigned long arg); 108static int xd_ioctl (struct inode *inode,struct file *file,unsigned int cmd,unsigned long arg);
109static int xd_readwrite (u_char operation,XD_INFO *disk,char *buffer,u_int block,u_int count); 109static int xd_readwrite (u_char operation,XD_INFO *disk,char *buffer,u_int block,u_int count);
110static void xd_recalibrate (u_char drive); 110static void xd_recalibrate (u_char drive);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 6746c29181f..964e51634f2 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -241,7 +241,7 @@ static inline void flush_requests(struct blkfront_info *info)
241 * do_blkif_request 241 * do_blkif_request
242 * read a block; request is in a request queue 242 * read a block; request is in a request queue
243 */ 243 */
244static void do_blkif_request(request_queue_t *rq) 244static void do_blkif_request(struct request_queue *rq)
245{ 245{
246 struct blkfront_info *info = NULL; 246 struct blkfront_info *info = NULL;
247 struct request *req; 247 struct request *req;
@@ -287,7 +287,7 @@ wait:
287 287
288static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) 288static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
289{ 289{
290 request_queue_t *rq; 290 struct request_queue *rq;
291 291
292 rq = blk_init_queue(do_blkif_request, &blkif_io_lock); 292 rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
293 if (rq == NULL) 293 if (rq == NULL)
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 732ec63b6e9..cb27e8863d7 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -458,7 +458,7 @@ static inline void ace_fsm_yieldirq(struct ace_device *ace)
458} 458}
459 459
460/* Get the next read/write request; ending requests that we don't handle */ 460/* Get the next read/write request; ending requests that we don't handle */
461struct request *ace_get_next_request(request_queue_t * q) 461struct request *ace_get_next_request(struct request_queue * q)
462{ 462{
463 struct request *req; 463 struct request *req;
464 464
@@ -825,7 +825,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
825/* --------------------------------------------------------------------- 825/* ---------------------------------------------------------------------
826 * Block ops 826 * Block ops
827 */ 827 */
828static void ace_request(request_queue_t * q) 828static void ace_request(struct request_queue * q)
829{ 829{
830 struct request *req; 830 struct request *req;
831 struct ace_device *ace; 831 struct ace_device *ace;
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index e40fa98842e..2d5853cbd4b 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -67,7 +67,7 @@ static DEFINE_SPINLOCK(z2ram_lock);
67static struct block_device_operations z2_fops; 67static struct block_device_operations z2_fops;
68static struct gendisk *z2ram_gendisk; 68static struct gendisk *z2ram_gendisk;
69 69
70static void do_z2_request(request_queue_t *q) 70static void do_z2_request(struct request_queue *q)
71{ 71{
72 struct request *req; 72 struct request *req;
73 while ((req = elv_next_request(q)) != NULL) { 73 while ((req = elv_next_request(q)) != NULL) {
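do_z2_request above shows the canonical request_fn loop shared by the simpler drivers in this patch: pull the next request from the elevator and complete it in place. A minimal sketch against the same era API (my_transfer is a hypothetical helper returning nonzero on success):

    static void my_request_fn(struct request_queue *q)
    {
            struct request *req;

            while ((req = elv_next_request(q)) != NULL) {
                    /* transfer req->sector .. + req->current_nr_sectors */
                    int uptodate = my_transfer(req);

                    end_request(req, uptodate); /* 1 = success, 0 = I/O error */
            }
    }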
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 499019bf8f4..67ee3d4b287 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2094,7 +2094,7 @@ out:
2094static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf, 2094static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
2095 int lba, int nframes) 2095 int lba, int nframes)
2096{ 2096{
2097 request_queue_t *q = cdi->disk->queue; 2097 struct request_queue *q = cdi->disk->queue;
2098 struct request *rq; 2098 struct request *rq;
2099 struct bio *bio; 2099 struct bio *bio;
2100 unsigned int len; 2100 unsigned int len;
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 44cd7b2ddf0..e51550db157 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -398,7 +398,7 @@ static void viocd_end_request(struct request *req, int uptodate)
398 398
399static int rwreq; 399static int rwreq;
400 400
401static void do_viocd_request(request_queue_t *q) 401static void do_viocd_request(struct request_queue *q)
402{ 402{
403 struct request *req; 403 struct request *req;
404 404
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index c8dfd18bea4..acdbcdc3e45 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -726,7 +726,7 @@ config NVRAM
726 726
727config RTC 727config RTC
728 tristate "Enhanced Real Time Clock Support" 728 tristate "Enhanced Real Time Clock Support"
729 depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC64 && (!SPARC32 || PCI) && !FRV && !ARM && !SUPERH && !S390 729 depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC && !FRV && !ARM && !SUPERH && !S390
730 ---help--- 730 ---help---
731 If you say Y here and create a character special file /dev/rtc with 731 If you say Y here and create a character special file /dev/rtc with
732 major number 10 and minor number 135 using mknod ("man mknod"), you 732 major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -750,6 +750,28 @@ config RTC
750 To compile this driver as a module, choose M here: the 750 To compile this driver as a module, choose M here: the
751 module will be called rtc. 751 module will be called rtc.
752 752
753config JS_RTC
754 tristate "Enhanced Real Time Clock Support"
755 depends on SPARC32 && PCI
756 ---help---
757 If you say Y here and create a character special file /dev/rtc with
758 major number 10 and minor number 135 using mknod ("man mknod"), you
759 will get access to the real time clock (or hardware clock) built
760 into your computer.
761
762 Every PC has such a clock built in. It can be used to generate
763 signals from as low as 1Hz up to 8192Hz, and can also be used
764 as a 24 hour alarm. It reports status information via the file
765 /proc/driver/rtc and its behaviour is set by various ioctls on
766 /dev/rtc.
767
768 If you think you have a use for such a device (such as periodic data
769 sampling), then say Y here, and read <file:Documentation/rtc.txt>
770 for details.
771
772 To compile this driver as a module, choose M here: the
773 module will be called js-rtc.
774
753config SGI_DS1286 775config SGI_DS1286
754 tristate "SGI DS1286 RTC support" 776 tristate "SGI DS1286 RTC support"
755 depends on SGI_IP22 777 depends on SGI_IP22
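The help text above says /dev/rtc behaviour is driven by ioctls; for reference, a minimal userspace sketch reading the clock through the standard RTC_RD_TIME ioctl from <linux/rtc.h>:

    #include <stdio.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/rtc.h>

    int main(void)
    {
            struct rtc_time tm;
            int fd = open("/dev/rtc", O_RDONLY);

            if (fd < 0 || ioctl(fd, RTC_RD_TIME, &tm) < 0)
                    return 1;
            printf("%04d-%02d-%02d %02d:%02d:%02d\n",
                   tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                   tm.tm_hour, tm.tm_min, tm.tm_sec);
            return 0;
    }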
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 8fecaf4010b..23b26b87cc3 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -109,6 +109,9 @@ obj-$(CONFIG_TCG_TPM) += tpm/
109 109
110obj-$(CONFIG_PS3_FLASH) += ps3flash.o 110obj-$(CONFIG_PS3_FLASH) += ps3flash.o
111 111
112obj-$(CONFIG_JS_RTC) += js-rtc.o
113js-rtc-y = rtc.o
114
112# Files generated that shall be removed upon make clean 115# Files generated that shall be removed upon make clean
113clean-files := consolemap_deftbl.c defkeymap.c 116clean-files := consolemap_deftbl.c defkeymap.c
114 117
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 1486eb212cc..ca843522f91 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -3071,7 +3071,7 @@ static inline void ide_cdrom_add_settings(ide_drive_t *drive) { ; }
3071/* 3071/*
3072 * standard prep_rq_fn that builds 10 byte cmds 3072 * standard prep_rq_fn that builds 10 byte cmds
3073 */ 3073 */
3074static int ide_cdrom_prep_fs(request_queue_t *q, struct request *rq) 3074static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
3075{ 3075{
3076 int hard_sect = queue_hardsect_size(q); 3076 int hard_sect = queue_hardsect_size(q);
3077 long block = (long)rq->hard_sector / (hard_sect >> 9); 3077 long block = (long)rq->hard_sector / (hard_sect >> 9);
@@ -3137,7 +3137,7 @@ static int ide_cdrom_prep_pc(struct request *rq)
3137 return BLKPREP_OK; 3137 return BLKPREP_OK;
3138} 3138}
3139 3139
3140static int ide_cdrom_prep_fn(request_queue_t *q, struct request *rq) 3140static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq)
3141{ 3141{
3142 if (blk_fs_request(rq)) 3142 if (blk_fs_request(rq))
3143 return ide_cdrom_prep_fs(q, rq); 3143 return ide_cdrom_prep_fs(q, rq);
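ide_cdrom_prep_fn is a prep_rq_fn, the callback a driver registers with blk_queue_prep_rq() so it can translate a request (here, build a 10-byte packet command) before it reaches the hardware; its contract is the set of BLKPREP_* return codes. A minimal sketch, assuming the same era API (my_build_cmd is hypothetical):

    static int my_prep_fn(struct request_queue *q, struct request *rq)
    {
            if (blk_fs_request(rq))
                    return my_build_cmd(q, rq); /* hypothetical: fill rq->cmd */
            if (blk_pc_request(rq))
                    return BLKPREP_OK;          /* packet command: usable as-is */
            return BLKPREP_KILL;                /* anything else fails outright */
    }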
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index b1304a7f3e0..5ce4216f72a 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -679,7 +679,7 @@ static ide_proc_entry_t idedisk_proc[] = {
679}; 679};
680#endif /* CONFIG_IDE_PROC_FS */ 680#endif /* CONFIG_IDE_PROC_FS */
681 681
682static void idedisk_prepare_flush(request_queue_t *q, struct request *rq) 682static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
683{ 683{
684 ide_drive_t *drive = q->queuedata; 684 ide_drive_t *drive = q->queuedata;
685 685
@@ -697,7 +697,7 @@ static void idedisk_prepare_flush(request_queue_t *q, struct request *rq)
697 rq->buffer = rq->cmd; 697 rq->buffer = rq->cmd;
698} 698}
699 699
700static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk, 700static int idedisk_issue_flush(struct request_queue *q, struct gendisk *disk,
701 sector_t *error_sector) 701 sector_t *error_sector)
702{ 702{
703 ide_drive_t *drive = q->queuedata; 703 ide_drive_t *drive = q->queuedata;
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 484c50e7144..aa9f5f0b1e6 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -1327,7 +1327,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
1327/* 1327/*
1328 * Passes the stuff to ide_do_request 1328 * Passes the stuff to ide_do_request
1329 */ 1329 */
1330void do_ide_request(request_queue_t *q) 1330void do_ide_request(struct request_queue *q)
1331{ 1331{
1332 ide_drive_t *drive = q->queuedata; 1332 ide_drive_t *drive = q->queuedata;
1333 1333
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 5a4c5ea12f8..3a2a9a338fd 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -945,7 +945,7 @@ static void save_match(ide_hwif_t *hwif, ide_hwif_t *new, ide_hwif_t **match)
945 */ 945 */
946static int ide_init_queue(ide_drive_t *drive) 946static int ide_init_queue(ide_drive_t *drive)
947{ 947{
948 request_queue_t *q; 948 struct request_queue *q;
949 ide_hwif_t *hwif = HWIF(drive); 949 ide_hwif_t *hwif = HWIF(drive);
950 int max_sectors = 256; 950 int max_sectors = 256;
951 int max_sg_entries = PRD_ENTRIES; 951 int max_sg_entries = PRD_ENTRIES;
diff --git a/drivers/ide/legacy/hd.c b/drivers/ide/legacy/hd.c
index 8f2db8dd35f..8e05d88e81b 100644
--- a/drivers/ide/legacy/hd.c
+++ b/drivers/ide/legacy/hd.c
@@ -652,7 +652,7 @@ repeat:
652 } 652 }
653} 653}
654 654
655static void do_hd_request (request_queue_t * q) 655static void do_hd_request (struct request_queue * q)
656{ 656{
657 disable_irq(HD_IRQ); 657 disable_irq(HD_IRQ);
658 hd_request(); 658 hd_request();
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 2fc199b0016..2bcde5798b5 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -526,7 +526,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
526 526
527void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev) 527void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
528{ 528{
529 request_queue_t *q = bdev_get_queue(bdev); 529 struct request_queue *q = bdev_get_queue(bdev);
530 struct io_restrictions *rs = &ti->limits; 530 struct io_restrictions *rs = &ti->limits;
531 531
532 /* 532 /*
@@ -979,7 +979,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
979 devices = dm_table_get_devices(t); 979 devices = dm_table_get_devices(t);
980 for (d = devices->next; d != devices; d = d->next) { 980 for (d = devices->next; d != devices; d = d->next) {
981 struct dm_dev *dd = list_entry(d, struct dm_dev, list); 981 struct dm_dev *dd = list_entry(d, struct dm_dev, list);
982 request_queue_t *q = bdev_get_queue(dd->bdev); 982 struct request_queue *q = bdev_get_queue(dd->bdev);
983 r |= bdi_congested(&q->backing_dev_info, bdi_bits); 983 r |= bdi_congested(&q->backing_dev_info, bdi_bits);
984 } 984 }
985 985
@@ -992,7 +992,7 @@ void dm_table_unplug_all(struct dm_table *t)
992 992
993 for (d = devices->next; d != devices; d = d->next) { 993 for (d = devices->next; d != devices; d = d->next) {
994 struct dm_dev *dd = list_entry(d, struct dm_dev, list); 994 struct dm_dev *dd = list_entry(d, struct dm_dev, list);
995 request_queue_t *q = bdev_get_queue(dd->bdev); 995 struct request_queue *q = bdev_get_queue(dd->bdev);
996 996
997 if (q->unplug_fn) 997 if (q->unplug_fn)
998 q->unplug_fn(q); 998 q->unplug_fn(q);
@@ -1011,7 +1011,7 @@ int dm_table_flush_all(struct dm_table *t)
1011 1011
1012 for (d = devices->next; d != devices; d = d->next) { 1012 for (d = devices->next; d != devices; d = d->next) {
1013 struct dm_dev *dd = list_entry(d, struct dm_dev, list); 1013 struct dm_dev *dd = list_entry(d, struct dm_dev, list);
1014 request_queue_t *q = bdev_get_queue(dd->bdev); 1014 struct request_queue *q = bdev_get_queue(dd->bdev);
1015 int err; 1015 int err;
1016 1016
1017 if (!q->issue_flush_fn) 1017 if (!q->issue_flush_fn)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 846614e676c..141ff9fa296 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -80,7 +80,7 @@ struct mapped_device {
80 80
81 unsigned long flags; 81 unsigned long flags;
82 82
83 request_queue_t *queue; 83 struct request_queue *queue;
84 struct gendisk *disk; 84 struct gendisk *disk;
85 char name[16]; 85 char name[16];
86 86
@@ -792,7 +792,7 @@ static void __split_bio(struct mapped_device *md, struct bio *bio)
792 * The request function that just remaps the bio built up by 792 * The request function that just remaps the bio built up by
793 * dm_merge_bvec. 793 * dm_merge_bvec.
794 */ 794 */
795static int dm_request(request_queue_t *q, struct bio *bio) 795static int dm_request(struct request_queue *q, struct bio *bio)
796{ 796{
797 int r; 797 int r;
798 int rw = bio_data_dir(bio); 798 int rw = bio_data_dir(bio);
@@ -844,7 +844,7 @@ static int dm_request(request_queue_t *q, struct bio *bio)
844 return 0; 844 return 0;
845} 845}
846 846
847static int dm_flush_all(request_queue_t *q, struct gendisk *disk, 847static int dm_flush_all(struct request_queue *q, struct gendisk *disk,
848 sector_t *error_sector) 848 sector_t *error_sector)
849{ 849{
850 struct mapped_device *md = q->queuedata; 850 struct mapped_device *md = q->queuedata;
@@ -859,7 +859,7 @@ static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
859 return ret; 859 return ret;
860} 860}
861 861
862static void dm_unplug_all(request_queue_t *q) 862static void dm_unplug_all(struct request_queue *q)
863{ 863{
864 struct mapped_device *md = q->queuedata; 864 struct mapped_device *md = q->queuedata;
865 struct dm_table *map = dm_get_table(md); 865 struct dm_table *map = dm_get_table(md);
@@ -1110,7 +1110,7 @@ static void __set_size(struct mapped_device *md, sector_t size)
1110 1110
1111static int __bind(struct mapped_device *md, struct dm_table *t) 1111static int __bind(struct mapped_device *md, struct dm_table *t)
1112{ 1112{
1113 request_queue_t *q = md->queue; 1113 struct request_queue *q = md->queue;
1114 sector_t size; 1114 sector_t size;
1115 1115
1116 size = dm_table_get_size(t); 1116 size = dm_table_get_size(t);
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 4ebd0f2a75e..cb059cf14c2 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -167,7 +167,7 @@ static void add_sector(conf_t *conf, sector_t start, int mode)
167 conf->nfaults = n+1; 167 conf->nfaults = n+1;
168} 168}
169 169
170static int make_request(request_queue_t *q, struct bio *bio) 170static int make_request(struct request_queue *q, struct bio *bio)
171{ 171{
172 mddev_t *mddev = q->queuedata; 172 mddev_t *mddev = q->queuedata;
173 conf_t *conf = (conf_t*)mddev->private; 173 conf_t *conf = (conf_t*)mddev->private;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 19274108319..17f795c3e0a 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -55,7 +55,7 @@ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
55 * 55 *
56 * Return amount of bytes we can take at this offset 56 * Return amount of bytes we can take at this offset
57 */ 57 */
58static int linear_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec) 58static int linear_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
59{ 59{
60 mddev_t *mddev = q->queuedata; 60 mddev_t *mddev = q->queuedata;
61 dev_info_t *dev0; 61 dev_info_t *dev0;
@@ -79,20 +79,20 @@ static int linear_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio
79 return maxsectors << 9; 79 return maxsectors << 9;
80} 80}
81 81
82static void linear_unplug(request_queue_t *q) 82static void linear_unplug(struct request_queue *q)
83{ 83{
84 mddev_t *mddev = q->queuedata; 84 mddev_t *mddev = q->queuedata;
85 linear_conf_t *conf = mddev_to_conf(mddev); 85 linear_conf_t *conf = mddev_to_conf(mddev);
86 int i; 86 int i;
87 87
88 for (i=0; i < mddev->raid_disks; i++) { 88 for (i=0; i < mddev->raid_disks; i++) {
89 request_queue_t *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev); 89 struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
90 if (r_queue->unplug_fn) 90 if (r_queue->unplug_fn)
91 r_queue->unplug_fn(r_queue); 91 r_queue->unplug_fn(r_queue);
92 } 92 }
93} 93}
94 94
95static int linear_issue_flush(request_queue_t *q, struct gendisk *disk, 95static int linear_issue_flush(struct request_queue *q, struct gendisk *disk,
96 sector_t *error_sector) 96 sector_t *error_sector)
97{ 97{
98 mddev_t *mddev = q->queuedata; 98 mddev_t *mddev = q->queuedata;
@@ -101,7 +101,7 @@ static int linear_issue_flush(request_queue_t *q, struct gendisk *disk,
101 101
102 for (i=0; i < mddev->raid_disks && ret == 0; i++) { 102 for (i=0; i < mddev->raid_disks && ret == 0; i++) {
103 struct block_device *bdev = conf->disks[i].rdev->bdev; 103 struct block_device *bdev = conf->disks[i].rdev->bdev;
104 request_queue_t *r_queue = bdev_get_queue(bdev); 104 struct request_queue *r_queue = bdev_get_queue(bdev);
105 105
106 if (!r_queue->issue_flush_fn) 106 if (!r_queue->issue_flush_fn)
107 ret = -EOPNOTSUPP; 107 ret = -EOPNOTSUPP;
@@ -118,7 +118,7 @@ static int linear_congested(void *data, int bits)
118 int i, ret = 0; 118 int i, ret = 0;
119 119
120 for (i = 0; i < mddev->raid_disks && !ret ; i++) { 120 for (i = 0; i < mddev->raid_disks && !ret ; i++) {
121 request_queue_t *q = bdev_get_queue(conf->disks[i].rdev->bdev); 121 struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
122 ret |= bdi_congested(&q->backing_dev_info, bits); 122 ret |= bdi_congested(&q->backing_dev_info, bits);
123 } 123 }
124 return ret; 124 return ret;
@@ -330,7 +330,7 @@ static int linear_stop (mddev_t *mddev)
330 return 0; 330 return 0;
331} 331}
332 332
333static int linear_make_request (request_queue_t *q, struct bio *bio) 333static int linear_make_request (struct request_queue *q, struct bio *bio)
334{ 334{
335 const int rw = bio_data_dir(bio); 335 const int rw = bio_data_dir(bio);
336 mddev_t *mddev = q->queuedata; 336 mddev_t *mddev = q->queuedata;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 65ddc887dfd..f883b7e37f3 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -211,7 +211,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
211 ) 211 )
212 212
213 213
214static int md_fail_request (request_queue_t *q, struct bio *bio) 214static int md_fail_request (struct request_queue *q, struct bio *bio)
215{ 215{
216 bio_io_error(bio, bio->bi_size); 216 bio_io_error(bio, bio->bi_size);
217 return 0; 217 return 0;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 14da37fee37..1e2af43a73b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -125,7 +125,7 @@ static void unplug_slaves(mddev_t *mddev)
125 mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); 125 mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
126 if (rdev && !test_bit(Faulty, &rdev->flags) 126 if (rdev && !test_bit(Faulty, &rdev->flags)
127 && atomic_read(&rdev->nr_pending)) { 127 && atomic_read(&rdev->nr_pending)) {
128 request_queue_t *r_queue = bdev_get_queue(rdev->bdev); 128 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
129 129
130 atomic_inc(&rdev->nr_pending); 130 atomic_inc(&rdev->nr_pending);
131 rcu_read_unlock(); 131 rcu_read_unlock();
@@ -140,13 +140,13 @@ static void unplug_slaves(mddev_t *mddev)
140 rcu_read_unlock(); 140 rcu_read_unlock();
141} 141}
142 142
143static void multipath_unplug(request_queue_t *q) 143static void multipath_unplug(struct request_queue *q)
144{ 144{
145 unplug_slaves(q->queuedata); 145 unplug_slaves(q->queuedata);
146} 146}
147 147
148 148
149static int multipath_make_request (request_queue_t *q, struct bio * bio) 149static int multipath_make_request (struct request_queue *q, struct bio * bio)
150{ 150{
151 mddev_t *mddev = q->queuedata; 151 mddev_t *mddev = q->queuedata;
152 multipath_conf_t *conf = mddev_to_conf(mddev); 152 multipath_conf_t *conf = mddev_to_conf(mddev);
@@ -199,7 +199,7 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
199 seq_printf (seq, "]"); 199 seq_printf (seq, "]");
200} 200}
201 201
202static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk, 202static int multipath_issue_flush(struct request_queue *q, struct gendisk *disk,
203 sector_t *error_sector) 203 sector_t *error_sector)
204{ 204{
205 mddev_t *mddev = q->queuedata; 205 mddev_t *mddev = q->queuedata;
@@ -211,7 +211,7 @@ static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
211 mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); 211 mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
212 if (rdev && !test_bit(Faulty, &rdev->flags)) { 212 if (rdev && !test_bit(Faulty, &rdev->flags)) {
213 struct block_device *bdev = rdev->bdev; 213 struct block_device *bdev = rdev->bdev;
214 request_queue_t *r_queue = bdev_get_queue(bdev); 214 struct request_queue *r_queue = bdev_get_queue(bdev);
215 215
216 if (!r_queue->issue_flush_fn) 216 if (!r_queue->issue_flush_fn)
217 ret = -EOPNOTSUPP; 217 ret = -EOPNOTSUPP;
@@ -238,7 +238,7 @@ static int multipath_congested(void *data, int bits)
238 for (i = 0; i < mddev->raid_disks ; i++) { 238 for (i = 0; i < mddev->raid_disks ; i++) {
239 mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); 239 mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
240 if (rdev && !test_bit(Faulty, &rdev->flags)) { 240 if (rdev && !test_bit(Faulty, &rdev->flags)) {
241 request_queue_t *q = bdev_get_queue(rdev->bdev); 241 struct request_queue *q = bdev_get_queue(rdev->bdev);
242 242
243 ret |= bdi_congested(&q->backing_dev_info, bits); 243 ret |= bdi_congested(&q->backing_dev_info, bits);
244 /* Just like multipath_map, we just check the 244 /* Just like multipath_map, we just check the
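The unplug_slaves() hunks here and in raid1, raid10, and raid5 below all use the same lifetime discipline: component rdevs are looked up under rcu_read_lock(), pinned by raising nr_pending before the RCU lock is dropped, and released with rdev_dec_pending() once the slave queue has been unplugged, so the device cannot disappear mid-operation. The pattern, distilled from the hunks (conf->mirrors as in raid1):

    rcu_read_lock();
    for (i = 0; i < mddev->raid_disks; i++) {
            mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);

            if (rdev && !test_bit(Faulty, &rdev->flags) &&
                atomic_read(&rdev->nr_pending)) {
                    struct request_queue *r_queue = bdev_get_queue(rdev->bdev);

                    atomic_inc(&rdev->nr_pending);  /* pin across the unlock */
                    rcu_read_unlock();

                    if (r_queue->unplug_fn)
                            r_queue->unplug_fn(r_queue);

                    rdev_dec_pending(rdev, mddev);  /* drop the pin */
                    rcu_read_lock();
            }
    }
    rcu_read_unlock();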
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 2c404f73a37..b8216bc6db4 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -25,7 +25,7 @@
25#define MD_DRIVER 25#define MD_DRIVER
26#define MD_PERSONALITY 26#define MD_PERSONALITY
27 27
28static void raid0_unplug(request_queue_t *q) 28static void raid0_unplug(struct request_queue *q)
29{ 29{
30 mddev_t *mddev = q->queuedata; 30 mddev_t *mddev = q->queuedata;
31 raid0_conf_t *conf = mddev_to_conf(mddev); 31 raid0_conf_t *conf = mddev_to_conf(mddev);
@@ -33,14 +33,14 @@ static void raid0_unplug(request_queue_t *q)
33 int i; 33 int i;
34 34
35 for (i=0; i<mddev->raid_disks; i++) { 35 for (i=0; i<mddev->raid_disks; i++) {
36 request_queue_t *r_queue = bdev_get_queue(devlist[i]->bdev); 36 struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
37 37
38 if (r_queue->unplug_fn) 38 if (r_queue->unplug_fn)
39 r_queue->unplug_fn(r_queue); 39 r_queue->unplug_fn(r_queue);
40 } 40 }
41} 41}
42 42
43static int raid0_issue_flush(request_queue_t *q, struct gendisk *disk, 43static int raid0_issue_flush(struct request_queue *q, struct gendisk *disk,
44 sector_t *error_sector) 44 sector_t *error_sector)
45{ 45{
46 mddev_t *mddev = q->queuedata; 46 mddev_t *mddev = q->queuedata;
@@ -50,7 +50,7 @@ static int raid0_issue_flush(request_queue_t *q, struct gendisk *disk,
50 50
51 for (i=0; i<mddev->raid_disks && ret == 0; i++) { 51 for (i=0; i<mddev->raid_disks && ret == 0; i++) {
52 struct block_device *bdev = devlist[i]->bdev; 52 struct block_device *bdev = devlist[i]->bdev;
53 request_queue_t *r_queue = bdev_get_queue(bdev); 53 struct request_queue *r_queue = bdev_get_queue(bdev);
54 54
55 if (!r_queue->issue_flush_fn) 55 if (!r_queue->issue_flush_fn)
56 ret = -EOPNOTSUPP; 56 ret = -EOPNOTSUPP;
@@ -68,7 +68,7 @@ static int raid0_congested(void *data, int bits)
68 int i, ret = 0; 68 int i, ret = 0;
69 69
70 for (i = 0; i < mddev->raid_disks && !ret ; i++) { 70 for (i = 0; i < mddev->raid_disks && !ret ; i++) {
71 request_queue_t *q = bdev_get_queue(devlist[i]->bdev); 71 struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
72 72
73 ret |= bdi_congested(&q->backing_dev_info, bits); 73 ret |= bdi_congested(&q->backing_dev_info, bits);
74 } 74 }
@@ -268,7 +268,7 @@ static int create_strip_zones (mddev_t *mddev)
268 * 268 *
269 * Return amount of bytes we can accept at this offset 269 * Return amount of bytes we can accept at this offset
270 */ 270 */
271static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec) 271static int raid0_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
272{ 272{
273 mddev_t *mddev = q->queuedata; 273 mddev_t *mddev = q->queuedata;
274 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 274 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
@@ -408,7 +408,7 @@ static int raid0_stop (mddev_t *mddev)
408 return 0; 408 return 0;
409} 409}
410 410
411static int raid0_make_request (request_queue_t *q, struct bio *bio) 411static int raid0_make_request (struct request_queue *q, struct bio *bio)
412{ 412{
413 mddev_t *mddev = q->queuedata; 413 mddev_t *mddev = q->queuedata;
414 unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects; 414 unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 00c78b77b13..650991bddd8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -552,7 +552,7 @@ static void unplug_slaves(mddev_t *mddev)
552 for (i=0; i<mddev->raid_disks; i++) { 552 for (i=0; i<mddev->raid_disks; i++) {
553 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 553 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
554 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 554 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
555 request_queue_t *r_queue = bdev_get_queue(rdev->bdev); 555 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
556 556
557 atomic_inc(&rdev->nr_pending); 557 atomic_inc(&rdev->nr_pending);
558 rcu_read_unlock(); 558 rcu_read_unlock();
@@ -567,7 +567,7 @@ static void unplug_slaves(mddev_t *mddev)
567 rcu_read_unlock(); 567 rcu_read_unlock();
568} 568}
569 569
570static void raid1_unplug(request_queue_t *q) 570static void raid1_unplug(struct request_queue *q)
571{ 571{
572 mddev_t *mddev = q->queuedata; 572 mddev_t *mddev = q->queuedata;
573 573
@@ -575,7 +575,7 @@ static void raid1_unplug(request_queue_t *q)
575 md_wakeup_thread(mddev->thread); 575 md_wakeup_thread(mddev->thread);
576} 576}
577 577
578static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk, 578static int raid1_issue_flush(struct request_queue *q, struct gendisk *disk,
579 sector_t *error_sector) 579 sector_t *error_sector)
580{ 580{
581 mddev_t *mddev = q->queuedata; 581 mddev_t *mddev = q->queuedata;
@@ -587,7 +587,7 @@ static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
587 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 587 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
588 if (rdev && !test_bit(Faulty, &rdev->flags)) { 588 if (rdev && !test_bit(Faulty, &rdev->flags)) {
589 struct block_device *bdev = rdev->bdev; 589 struct block_device *bdev = rdev->bdev;
590 request_queue_t *r_queue = bdev_get_queue(bdev); 590 struct request_queue *r_queue = bdev_get_queue(bdev);
591 591
592 if (!r_queue->issue_flush_fn) 592 if (!r_queue->issue_flush_fn)
593 ret = -EOPNOTSUPP; 593 ret = -EOPNOTSUPP;
@@ -615,7 +615,7 @@ static int raid1_congested(void *data, int bits)
615 for (i = 0; i < mddev->raid_disks; i++) { 615 for (i = 0; i < mddev->raid_disks; i++) {
616 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 616 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
617 if (rdev && !test_bit(Faulty, &rdev->flags)) { 617 if (rdev && !test_bit(Faulty, &rdev->flags)) {
618 request_queue_t *q = bdev_get_queue(rdev->bdev); 618 struct request_queue *q = bdev_get_queue(rdev->bdev);
619 619
620 /* Note the '|| 1' - when read_balance prefers 620 /* Note the '|| 1' - when read_balance prefers
621 * non-congested targets, it can be removed 621 * non-congested targets, it can be removed
@@ -765,7 +765,7 @@ do_sync_io:
765 return NULL; 765 return NULL;
766} 766}
767 767
768static int make_request(request_queue_t *q, struct bio * bio) 768static int make_request(struct request_queue *q, struct bio * bio)
769{ 769{
770 mddev_t *mddev = q->queuedata; 770 mddev_t *mddev = q->queuedata;
771 conf_t *conf = mddev_to_conf(mddev); 771 conf_t *conf = mddev_to_conf(mddev);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a95ada1cfac..f730a144baf 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -453,7 +453,7 @@ static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
453 * If near_copies == raid_disk, there are no striping issues, 453 * If near_copies == raid_disk, there are no striping issues,
454 * but in that case, the function isn't called at all. 454 * but in that case, the function isn't called at all.
455 */ 455 */
456static int raid10_mergeable_bvec(request_queue_t *q, struct bio *bio, 456static int raid10_mergeable_bvec(struct request_queue *q, struct bio *bio,
457 struct bio_vec *bio_vec) 457 struct bio_vec *bio_vec)
458{ 458{
459 mddev_t *mddev = q->queuedata; 459 mddev_t *mddev = q->queuedata;
@@ -595,7 +595,7 @@ static void unplug_slaves(mddev_t *mddev)
595 for (i=0; i<mddev->raid_disks; i++) { 595 for (i=0; i<mddev->raid_disks; i++) {
596 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 596 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
597 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 597 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
598 request_queue_t *r_queue = bdev_get_queue(rdev->bdev); 598 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
599 599
600 atomic_inc(&rdev->nr_pending); 600 atomic_inc(&rdev->nr_pending);
601 rcu_read_unlock(); 601 rcu_read_unlock();
@@ -610,7 +610,7 @@ static void unplug_slaves(mddev_t *mddev)
610 rcu_read_unlock(); 610 rcu_read_unlock();
611} 611}
612 612
613static void raid10_unplug(request_queue_t *q) 613static void raid10_unplug(struct request_queue *q)
614{ 614{
615 mddev_t *mddev = q->queuedata; 615 mddev_t *mddev = q->queuedata;
616 616
@@ -618,7 +618,7 @@ static void raid10_unplug(request_queue_t *q)
618 md_wakeup_thread(mddev->thread); 618 md_wakeup_thread(mddev->thread);
619} 619}
620 620
621static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk, 621static int raid10_issue_flush(struct request_queue *q, struct gendisk *disk,
622 sector_t *error_sector) 622 sector_t *error_sector)
623{ 623{
624 mddev_t *mddev = q->queuedata; 624 mddev_t *mddev = q->queuedata;
@@ -630,7 +630,7 @@ static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
630 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 630 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
631 if (rdev && !test_bit(Faulty, &rdev->flags)) { 631 if (rdev && !test_bit(Faulty, &rdev->flags)) {
632 struct block_device *bdev = rdev->bdev; 632 struct block_device *bdev = rdev->bdev;
633 request_queue_t *r_queue = bdev_get_queue(bdev); 633 struct request_queue *r_queue = bdev_get_queue(bdev);
634 634
635 if (!r_queue->issue_flush_fn) 635 if (!r_queue->issue_flush_fn)
636 ret = -EOPNOTSUPP; 636 ret = -EOPNOTSUPP;
@@ -658,7 +658,7 @@ static int raid10_congested(void *data, int bits)
658 for (i = 0; i < mddev->raid_disks && ret == 0; i++) { 658 for (i = 0; i < mddev->raid_disks && ret == 0; i++) {
659 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 659 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
660 if (rdev && !test_bit(Faulty, &rdev->flags)) { 660 if (rdev && !test_bit(Faulty, &rdev->flags)) {
661 request_queue_t *q = bdev_get_queue(rdev->bdev); 661 struct request_queue *q = bdev_get_queue(rdev->bdev);
662 662
663 ret |= bdi_congested(&q->backing_dev_info, bits); 663 ret |= bdi_congested(&q->backing_dev_info, bits);
664 } 664 }
@@ -772,7 +772,7 @@ static void unfreeze_array(conf_t *conf)
772 spin_unlock_irq(&conf->resync_lock); 772 spin_unlock_irq(&conf->resync_lock);
773} 773}
774 774
775static int make_request(request_queue_t *q, struct bio * bio) 775static int make_request(struct request_queue *q, struct bio * bio)
776{ 776{
777 mddev_t *mddev = q->queuedata; 777 mddev_t *mddev = q->queuedata;
778 conf_t *conf = mddev_to_conf(mddev); 778 conf_t *conf = mddev_to_conf(mddev);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d90ee145eff..2aff4be35dc 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -289,7 +289,7 @@ static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, in
289} 289}
290 290
291static void unplug_slaves(mddev_t *mddev); 291static void unplug_slaves(mddev_t *mddev);
292static void raid5_unplug_device(request_queue_t *q); 292static void raid5_unplug_device(struct request_queue *q);
293 293
294static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks, 294static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
295 int pd_idx, int noblock) 295 int pd_idx, int noblock)
@@ -3182,7 +3182,7 @@ static void unplug_slaves(mddev_t *mddev)
3182 for (i=0; i<mddev->raid_disks; i++) { 3182 for (i=0; i<mddev->raid_disks; i++) {
3183 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 3183 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
3184 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 3184 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
3185 request_queue_t *r_queue = bdev_get_queue(rdev->bdev); 3185 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
3186 3186
3187 atomic_inc(&rdev->nr_pending); 3187 atomic_inc(&rdev->nr_pending);
3188 rcu_read_unlock(); 3188 rcu_read_unlock();
@@ -3197,7 +3197,7 @@ static void unplug_slaves(mddev_t *mddev)
3197 rcu_read_unlock(); 3197 rcu_read_unlock();
3198} 3198}
3199 3199
3200static void raid5_unplug_device(request_queue_t *q) 3200static void raid5_unplug_device(struct request_queue *q)
3201{ 3201{
3202 mddev_t *mddev = q->queuedata; 3202 mddev_t *mddev = q->queuedata;
3203 raid5_conf_t *conf = mddev_to_conf(mddev); 3203 raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3216,7 +3216,7 @@ static void raid5_unplug_device(request_queue_t *q)
3216 unplug_slaves(mddev); 3216 unplug_slaves(mddev);
3217} 3217}
3218 3218
3219static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk, 3219static int raid5_issue_flush(struct request_queue *q, struct gendisk *disk,
3220 sector_t *error_sector) 3220 sector_t *error_sector)
3221{ 3221{
3222 mddev_t *mddev = q->queuedata; 3222 mddev_t *mddev = q->queuedata;
@@ -3228,7 +3228,7 @@ static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
3228 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 3228 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
3229 if (rdev && !test_bit(Faulty, &rdev->flags)) { 3229 if (rdev && !test_bit(Faulty, &rdev->flags)) {
3230 struct block_device *bdev = rdev->bdev; 3230 struct block_device *bdev = rdev->bdev;
3231 request_queue_t *r_queue = bdev_get_queue(bdev); 3231 struct request_queue *r_queue = bdev_get_queue(bdev);
3232 3232
3233 if (!r_queue->issue_flush_fn) 3233 if (!r_queue->issue_flush_fn)
3234 ret = -EOPNOTSUPP; 3234 ret = -EOPNOTSUPP;
@@ -3267,7 +3267,7 @@ static int raid5_congested(void *data, int bits)
3267/* We want read requests to align with chunks where possible, 3267/* We want read requests to align with chunks where possible,
3268 * but write requests don't need to. 3268 * but write requests don't need to.
3269 */ 3269 */
3270static int raid5_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec) 3270static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
3271{ 3271{
3272 mddev_t *mddev = q->queuedata; 3272 mddev_t *mddev = q->queuedata;
3273 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 3273 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
@@ -3377,7 +3377,7 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
3377 3377
3378static int bio_fits_rdev(struct bio *bi) 3378static int bio_fits_rdev(struct bio *bi)
3379{ 3379{
3380 request_queue_t *q = bdev_get_queue(bi->bi_bdev); 3380 struct request_queue *q = bdev_get_queue(bi->bi_bdev);
3381 3381
3382 if ((bi->bi_size>>9) > q->max_sectors) 3382 if ((bi->bi_size>>9) > q->max_sectors)
3383 return 0; 3383 return 0;
@@ -3396,7 +3396,7 @@ static int bio_fits_rdev(struct bio *bi)
3396} 3396}
3397 3397
3398 3398
3399static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio) 3399static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
3400{ 3400{
3401 mddev_t *mddev = q->queuedata; 3401 mddev_t *mddev = q->queuedata;
3402 raid5_conf_t *conf = mddev_to_conf(mddev); 3402 raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3466,7 +3466,7 @@ static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
3466} 3466}
3467 3467
3468 3468
3469static int make_request(request_queue_t *q, struct bio * bi) 3469static int make_request(struct request_queue *q, struct bio * bi)
3470{ 3470{
3471 mddev_t *mddev = q->queuedata; 3471 mddev_t *mddev = q->queuedata;
3472 raid5_conf_t *conf = mddev_to_conf(mddev); 3472 raid5_conf_t *conf = mddev_to_conf(mddev);
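The *_mergeable_bvec callbacks converted in linear, raid0, raid10, and raid5 share one contract: given a bio and a candidate bio_vec, return how many bytes may still be added at that offset, converting sectors to bytes with << 9, and never refuse the first page outright. A minimal sketch with a hypothetical power-of-two boundary every my_chunk_sects sectors:

    static int my_mergeable_bvec(struct request_queue *q, struct bio *bio,
                                 struct bio_vec *biovec)
    {
            sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
            unsigned int bio_sectors = bio->bi_size >> 9;
            int max;

            /* bytes left before the next my_chunk_sects boundary */
            max = (my_chunk_sects - ((sector & (my_chunk_sects - 1)) +
                                     bio_sectors)) << 9;
            if (max < 0)
                    max = 0;                   /* bio already spans the boundary */
            if (max <= biovec->bv_len && bio_sectors == 0)
                    return biovec->bv_len;     /* always accept the first bvec */
            return max;
    }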
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 988c8ce47f5..5e1c99f83ab 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -159,7 +159,7 @@ static int i2o_block_device_flush(struct i2o_device *dev)
159 * Returns 0 on success or negative error code on failure. 159 * Returns 0 on success or negative error code on failure.
160 */ 160 */
161 161
162static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk, 162static int i2o_block_issue_flush(struct request_queue * queue, struct gendisk *disk,
163 sector_t * error_sector) 163 sector_t * error_sector)
164{ 164{
165 struct i2o_block_device *i2o_blk_dev = queue->queuedata; 165 struct i2o_block_device *i2o_blk_dev = queue->queuedata;
@@ -445,7 +445,7 @@ static void i2o_block_end_request(struct request *req, int uptodate,
445{ 445{
446 struct i2o_block_request *ireq = req->special; 446 struct i2o_block_request *ireq = req->special;
447 struct i2o_block_device *dev = ireq->i2o_blk_dev; 447 struct i2o_block_device *dev = ireq->i2o_blk_dev;
448 request_queue_t *q = req->q; 448 struct request_queue *q = req->q;
449 unsigned long flags; 449 unsigned long flags;
450 450
451 if (end_that_request_chunk(req, uptodate, nr_bytes)) { 451 if (end_that_request_chunk(req, uptodate, nr_bytes)) {
diff --git a/drivers/misc/asus-laptop.c b/drivers/misc/asus-laptop.c
index f7530605997..6b89854bd3f 100644
--- a/drivers/misc/asus-laptop.c
+++ b/drivers/misc/asus-laptop.c
@@ -1067,19 +1067,16 @@ static void asus_backlight_exit(void)
1067} 1067}
1068 1068
1069#define ASUS_LED_UNREGISTER(object) \ 1069#define ASUS_LED_UNREGISTER(object) \
1070 if(object##_led.class_dev \ 1070 led_classdev_unregister(&object##_led)
1071 && !IS_ERR(object##_led.class_dev)) \
1072 led_classdev_unregister(&object##_led)
1073 1071
1074static void asus_led_exit(void) 1072static void asus_led_exit(void)
1075{ 1073{
1074 destroy_workqueue(led_workqueue);
1076 ASUS_LED_UNREGISTER(mled); 1075 ASUS_LED_UNREGISTER(mled);
1077 ASUS_LED_UNREGISTER(tled); 1076 ASUS_LED_UNREGISTER(tled);
1078 ASUS_LED_UNREGISTER(pled); 1077 ASUS_LED_UNREGISTER(pled);
1079 ASUS_LED_UNREGISTER(rled); 1078 ASUS_LED_UNREGISTER(rled);
1080 ASUS_LED_UNREGISTER(gled); 1079 ASUS_LED_UNREGISTER(gled);
1081
1082 destroy_workqueue(led_workqueue);
1083} 1080}
1084 1081
1085static void __exit asus_laptop_exit(void) 1082static void __exit asus_laptop_exit(void)
@@ -1135,29 +1132,42 @@ static int asus_led_init(struct device *dev)
1135 1132
1136 rv = ASUS_LED_REGISTER(mled, dev); 1133 rv = ASUS_LED_REGISTER(mled, dev);
1137 if (rv) 1134 if (rv)
1138 return rv; 1135 goto out;
1139 1136
1140 rv = ASUS_LED_REGISTER(tled, dev); 1137 rv = ASUS_LED_REGISTER(tled, dev);
1141 if (rv) 1138 if (rv)
1142 return rv; 1139 goto out1;
1143 1140
1144 rv = ASUS_LED_REGISTER(rled, dev); 1141 rv = ASUS_LED_REGISTER(rled, dev);
1145 if (rv) 1142 if (rv)
1146 return rv; 1143 goto out2;
1147 1144
1148 rv = ASUS_LED_REGISTER(pled, dev); 1145 rv = ASUS_LED_REGISTER(pled, dev);
1149 if (rv) 1146 if (rv)
1150 return rv; 1147 goto out3;
1151 1148
1152 rv = ASUS_LED_REGISTER(gled, dev); 1149 rv = ASUS_LED_REGISTER(gled, dev);
1153 if (rv) 1150 if (rv)
1154 return rv; 1151 goto out4;
1155 1152
1156 led_workqueue = create_singlethread_workqueue("led_workqueue"); 1153 led_workqueue = create_singlethread_workqueue("led_workqueue");
1157 if (!led_workqueue) 1154 if (!led_workqueue)
1158 return -ENOMEM; 1155 goto out5;
1159 1156
1160 return 0; 1157 return 0;
1158out5:
1159 rv = -ENOMEM;
1160 ASUS_LED_UNREGISTER(gled);
1161out4:
1162 ASUS_LED_UNREGISTER(pled);
1163out3:
1164 ASUS_LED_UNREGISTER(rled);
1165out2:
1166 ASUS_LED_UNREGISTER(tled);
1167out1:
1168 ASUS_LED_UNREGISTER(mled);
1169out:
1170 return rv;
1161} 1171}
1162 1172
1163static int __init asus_laptop_init(void) 1173static int __init asus_laptop_init(void)
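The asus_led_init() rework above replaces bare early returns, which leaked whatever had already been registered, with the kernel's standard goto-unwind ladder: each failure jumps to a label that releases everything acquired so far, in reverse order. The idiom in isolation (my_* names hypothetical):

    int my_init(void)
    {
            int rv;

            rv = my_register_a();
            if (rv)
                    goto out;
            rv = my_register_b();
            if (rv)
                    goto undo_a;
            rv = my_register_c();
            if (rv)
                    goto undo_b;
            return 0;

    undo_b:
            my_unregister_b();      /* unwind in reverse acquisition order */
    undo_a:
            my_unregister_a();
    out:
            return rv;
    }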
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index b53dac8d1b6..e02eac87636 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -83,7 +83,7 @@ static int mmc_queue_thread(void *d)
83 * on any queue on this host, and attempt to issue it. This may 83 * on any queue on this host, and attempt to issue it. This may
84 * not be the queue we were asked to process. 84 * not be the queue we were asked to process.
85 */ 85 */
86static void mmc_request(request_queue_t *q) 86static void mmc_request(struct request_queue *q)
87{ 87{
88 struct mmc_queue *mq = q->queuedata; 88 struct mmc_queue *mq = q->queuedata;
89 struct request *req; 89 struct request *req;
@@ -211,7 +211,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
211 211
212void mmc_cleanup_queue(struct mmc_queue *mq) 212void mmc_cleanup_queue(struct mmc_queue *mq)
213{ 213{
214 request_queue_t *q = mq->queue; 214 struct request_queue *q = mq->queue;
215 unsigned long flags; 215 unsigned long flags;
216 216
217 /* Mark that we should start throwing out stragglers */ 217 /* Mark that we should start throwing out stragglers */
@@ -252,7 +252,7 @@ EXPORT_SYMBOL(mmc_cleanup_queue);
252 */ 252 */
253void mmc_queue_suspend(struct mmc_queue *mq) 253void mmc_queue_suspend(struct mmc_queue *mq)
254{ 254{
255 request_queue_t *q = mq->queue; 255 struct request_queue *q = mq->queue;
256 unsigned long flags; 256 unsigned long flags;
257 257
258 if (!(mq->flags & MMC_QUEUE_SUSPENDED)) { 258 if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
@@ -272,7 +272,7 @@ void mmc_queue_suspend(struct mmc_queue *mq)
272 */ 272 */
273void mmc_queue_resume(struct mmc_queue *mq) 273void mmc_queue_resume(struct mmc_queue *mq)
274{ 274{
275 request_queue_t *q = mq->queue; 275 struct request_queue *q = mq->queue;
276 unsigned long flags; 276 unsigned long flags;
277 277
278 if (mq->flags & MMC_QUEUE_SUSPENDED) { 278 if (mq->flags & MMC_QUEUE_SUSPENDED) {
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 3ff1155459a..d915837193c 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -57,6 +57,7 @@
57#include <asm/io.h> 57#include <asm/io.h>
58#include <asm/dma.h> 58#include <asm/dma.h>
59#include <asm/pgtable.h> 59#include <asm/pgtable.h>
60#include <asm/cacheflush.h>
60 61
61static char version[] __initdata = 62static char version[] __initdata =
62 "82596.c $Revision: 1.5 $\n"; 63 "82596.c $Revision: 1.5 $\n";
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 336af0635df..94b78cc5fe8 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -18,7 +18,7 @@ gianfar_driver-objs := gianfar.o \
18 gianfar_sysfs.o 18 gianfar_sysfs.o
19 19
20obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o 20obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
21ucc_geth_driver-objs := ucc_geth.o ucc_geth_mii.o 21ucc_geth_driver-objs := ucc_geth.o ucc_geth_mii.o ucc_geth_ethtool.o
22 22
23# 23#
24# link order important here 24# link order important here
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index b78a4e5ceeb..62e660a7938 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -3128,12 +3128,6 @@ static int __devinit read_eeprom_byte(struct net_device *dev,
3128 int result = 0; 3128 int result = 0;
3129 short i; 3129 short i;
3130 3130
3131 if (!dev) {
3132 printk(KERN_ERR "No device!\n");
3133 result = -ENODEV;
3134 goto out;
3135 }
3136
3137 /* 3131 /*
 3138 * Don't take interrupts on this CPU while bit banging 3132 * Don't take interrupts on this CPU while bit banging
3139 * the %#%#@$ I2C device 3133 * the %#%#@$ I2C device
diff --git a/drivers/net/atl1/atl1_hw.h b/drivers/net/atl1/atl1_hw.h
index 100c09c66e6..939aa0f53f6 100644
--- a/drivers/net/atl1/atl1_hw.h
+++ b/drivers/net/atl1/atl1_hw.h
@@ -680,11 +680,6 @@ void atl1_check_options(struct atl1_adapter *adapter);
680#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */ 680#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */
681#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */ 681#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */
682 682
683/* The size (in bytes) of a ethernet packet */
684#define ENET_HEADER_SIZE 14
685#define MAXIMUM_ETHERNET_FRAME_SIZE 1518 /* with FCS */
686#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* with FCS */
687#define ETHERNET_FCS_SIZE 4
688#define MAX_JUMBO_FRAME_SIZE 0x2800 683#define MAX_JUMBO_FRAME_SIZE 0x2800
689 684
690#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */ 685#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */
@@ -929,8 +924,8 @@ enum atl1_dma_req_block {
929 atl1_dma_req_128 = 0, 924 atl1_dma_req_128 = 0,
930 atl1_dma_req_256 = 1, 925 atl1_dma_req_256 = 1,
931 atl1_dma_req_512 = 2, 926 atl1_dma_req_512 = 2,
932 atl1_dam_req_1024 = 3, 927 atl1_dma_req_1024 = 3,
933 atl1_dam_req_2048 = 4, 928 atl1_dma_req_2048 = 4,
934 atl1_dma_req_4096 = 5 929 atl1_dma_req_4096 = 5
935}; 930};
936 931
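The atl1_hw.h hunk corrects the misspelled atl1_dam_req_* enumerators; judging by the names, each value encodes a DMA burst length of 128 bytes shifted left by the enumerator, which a hypothetical helper can make explicit:

    /* assumed encoding: 0 -> 128 bytes, 1 -> 256, ..., 5 -> 4096 */
    #define ATL1_BURST_BYTES(v)     (128U << (v))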
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
index fd1e156f174..56f6389a300 100644
--- a/drivers/net/atl1/atl1_main.c
+++ b/drivers/net/atl1/atl1_main.c
@@ -59,6 +59,7 @@
59#include <linux/skbuff.h> 59#include <linux/skbuff.h>
60#include <linux/etherdevice.h> 60#include <linux/etherdevice.h>
61#include <linux/if_vlan.h> 61#include <linux/if_vlan.h>
62#include <linux/if_ether.h>
62#include <linux/irqreturn.h> 63#include <linux/irqreturn.h>
63#include <linux/workqueue.h> 64#include <linux/workqueue.h>
64#include <linux/timer.h> 65#include <linux/timer.h>
@@ -120,8 +121,8 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
120 struct atl1_hw *hw = &adapter->hw; 121 struct atl1_hw *hw = &adapter->hw;
121 struct net_device *netdev = adapter->netdev; 122 struct net_device *netdev = adapter->netdev;
122 123
123 hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 124 hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
124 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; 125 hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
125 126
126 adapter->wol = 0; 127 adapter->wol = 0;
127 adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7; 128 adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
@@ -314,7 +315,7 @@ err_nomem:
314 return -ENOMEM; 315 return -ENOMEM;
315} 316}
316 317
317void atl1_init_ring_ptrs(struct atl1_adapter *adapter) 318static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
318{ 319{
319 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; 320 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
320 struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; 321 struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
@@ -688,9 +689,9 @@ static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
688{ 689{
689 struct atl1_adapter *adapter = netdev_priv(netdev); 690 struct atl1_adapter *adapter = netdev_priv(netdev);
690 int old_mtu = netdev->mtu; 691 int old_mtu = netdev->mtu;
691 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 692 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
692 693
693 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 694 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
694 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 695 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
695 dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); 696 dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
696 return -EINVAL; 697 return -EINVAL;
@@ -908,8 +909,8 @@ static u32 atl1_configure(struct atl1_adapter *adapter)
908 /* config DMA Engine */ 909 /* config DMA Engine */
909 value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) 910 value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
910 << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | 911 << DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
911 ((((u32) hw->dmaw_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) 912 ((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
912 << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN | 913 << DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN |
913 DMA_CTRL_DMAW_EN; 914 DMA_CTRL_DMAW_EN;
914 value |= (u32) hw->dma_ord; 915 value |= (u32) hw->dma_ord;
915 if (atl1_rcb_128 == hw->rcb_value) 916 if (atl1_rcb_128 == hw->rcb_value)
@@ -917,7 +918,10 @@ static u32 atl1_configure(struct atl1_adapter *adapter)
917 iowrite32(value, hw->hw_addr + REG_DMA_CTRL); 918 iowrite32(value, hw->hw_addr + REG_DMA_CTRL);
918 919
919 /* config CMB / SMB */ 920 /* config CMB / SMB */
920 value = hw->cmb_rrd | ((u32) hw->cmb_tpd << 16); 921 value = (hw->cmb_tpd > adapter->tpd_ring.count) ?
922 hw->cmb_tpd : adapter->tpd_ring.count;
923 value <<= 16;
924 value |= hw->cmb_rrd;
921 iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH); 925 iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
922 value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16); 926 value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
923 iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER); 927 iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
@@ -1334,7 +1338,7 @@ rrd_ok:
1334 skb = buffer_info->skb; 1338 skb = buffer_info->skb;
1335 length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size); 1339 length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);
1336 1340
1337 skb_put(skb, length - ETHERNET_FCS_SIZE); 1341 skb_put(skb, length - ETH_FCS_LEN);
1338 1342
1339 /* Receive Checksum Offload */ 1343 /* Receive Checksum Offload */
1340 atl1_rx_checksum(adapter, rrd, skb); 1344 atl1_rx_checksum(adapter, rrd, skb);
@@ -1422,7 +1426,7 @@ static void atl1_intr_tx(struct atl1_adapter *adapter)
1422 netif_wake_queue(adapter->netdev); 1426 netif_wake_queue(adapter->netdev);
1423} 1427}
1424 1428
1425static u16 tpd_avail(struct atl1_tpd_ring *tpd_ring) 1429static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
1426{ 1430{
1427 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); 1431 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
1428 u16 next_to_use = atomic_read(&tpd_ring->next_to_use); 1432 u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
@@ -1453,7 +1457,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
1453 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 1457 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1454 iph->daddr, 0, IPPROTO_TCP, 0); 1458 iph->daddr, 0, IPPROTO_TCP, 0);
1455 ipofst = skb_network_offset(skb); 1459 ipofst = skb_network_offset(skb);
1456 if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */ 1460 if (ipofst != ETH_HLEN) /* 802.3 frame */
1457 tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT; 1461 tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT;
1458 1462
1459 tso->tsopl |= (iph->ihl & 1463 tso->tsopl |= (iph->ihl &
@@ -1708,7 +1712,7 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1708 return NETDEV_TX_LOCKED; 1712 return NETDEV_TX_LOCKED;
1709 } 1713 }
1710 1714
1711 if (tpd_avail(&adapter->tpd_ring) < count) { 1715 if (atl1_tpd_avail(&adapter->tpd_ring) < count) {
1712 /* not enough descriptors */ 1716 /* not enough descriptors */
1713 netif_stop_queue(netdev); 1717 netif_stop_queue(netdev);
1714 spin_unlock_irqrestore(&adapter->lock, flags); 1718 spin_unlock_irqrestore(&adapter->lock, flags);
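The atl1_main.c changes drop the driver-private ENET_* macros in favor of the standard constants from <linux/if_ether.h>: a frame on the wire is the MTU plus the 14-byte header (ETH_HLEN) plus the 4-byte FCS (ETH_FCS_LEN), and the minimum frame is ETH_ZLEN (60) plus the FCS, i.e. 64 bytes. A minimal sketch of the resulting MTU check (my_check_mtu is hypothetical; MAX_JUMBO_FRAME_SIZE is the driver limit, 0x2800 here):

    #include <linux/if_ether.h>

    static int my_check_mtu(int new_mtu)
    {
            int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; /* 1500 -> 1518 */

            if (max_frame < ETH_ZLEN + ETH_FCS_LEN ||   /* below 60 + 4 = 64 */
                max_frame > MAX_JUMBO_FRAME_SIZE)
                    return -EINVAL;
            return 0;
    }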
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index 7df23dc2819..9c8e3f9f5e5 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -200,6 +200,7 @@
200 200
201/* Include files */ 201/* Include files */
202#include <linux/bitops.h> 202#include <linux/bitops.h>
203#include <linux/compiler.h>
203#include <linux/delay.h> 204#include <linux/delay.h>
204#include <linux/dma-mapping.h> 205#include <linux/dma-mapping.h>
205#include <linux/eisa.h> 206#include <linux/eisa.h>
@@ -240,8 +241,6 @@ static char version[] __devinitdata =
  */
 #define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)
 
-#define __unused __attribute__ ((unused))
-
 #ifdef CONFIG_PCI
 #define DFX_BUS_PCI(dev) (dev->bus == &pci_bus_type)
 #else
@@ -375,7 +374,7 @@ static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data)
 
 static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data)
 {
-	struct device __unused *bdev = bp->bus_dev;
+	struct device __maybe_unused *bdev = bp->bus_dev;
 	int dfx_bus_tc = DFX_BUS_TC(bdev);
 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
 
@@ -399,7 +398,7 @@ static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data)
 
 static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
 {
-	struct device __unused *bdev = bp->bus_dev;
+	struct device __maybe_unused *bdev = bp->bus_dev;
 	int dfx_bus_tc = DFX_BUS_TC(bdev);
 	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
 
@@ -866,7 +865,7 @@ static void __devinit dfx_bus_uninit(struct net_device *dev)
 
 static void __devinit dfx_bus_config_check(DFX_board_t *bp)
 {
-	struct device __unused *bdev = bp->bus_dev;
+	struct device __maybe_unused *bdev = bp->bus_dev;
 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
 	int status;		/* return code from adapter port control call */
 	u32 host_data;		/* LW data returned from port control call */
@@ -3624,8 +3623,8 @@ static void __devexit dfx_unregister(struct device *bdev)
 }
 
 
-static int __devinit __unused dfx_dev_register(struct device *);
-static int __devexit __unused dfx_dev_unregister(struct device *);
+static int __devinit __maybe_unused dfx_dev_register(struct device *);
+static int __devexit __maybe_unused dfx_dev_unregister(struct device *);
 
 #ifdef CONFIG_PCI
 static int __devinit dfx_pci_register(struct pci_dev *,
@@ -3699,7 +3698,7 @@ static struct tc_driver dfx_tc_driver = {
 };
 #endif /* CONFIG_TC */
 
-static int __devinit __unused dfx_dev_register(struct device *dev)
+static int __devinit __maybe_unused dfx_dev_register(struct device *dev)
 {
 	int status;
 
@@ -3709,7 +3708,7 @@ static int __devinit __unused dfx_dev_register(struct device *dev)
 	return status;
 }
 
-static int __devexit __unused dfx_dev_unregister(struct device *dev)
+static int __devexit __maybe_unused dfx_dev_unregister(struct device *dev)
 {
 	put_device(dev);
 	dfx_unregister(dev);
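All of the defxx hunks make the same substitution: the driver's private __unused macro gives way to __maybe_unused from <linux/compiler.h> (hence the include added above). The attribute matters because whether bdev is actually read depends on which of CONFIG_PCI/CONFIG_EISA/CONFIG_TC are configured in. A compressed illustration under that assumption; do_tc_io() is a hypothetical helper, and the macro merely mirrors the kernel's definition:

/* sketch only: mirrors <linux/compiler.h> */
#define __maybe_unused __attribute__((unused))

void port_io_example(void *bus_dev)
{
	/* read only when the bus test does not compile away */
	void __maybe_unused *bdev = bus_dev;
#ifdef CONFIG_TC
	do_tc_io(bdev);		/* hypothetical helper */
#endif
}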
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 489c8b260dd..8ee2c2c86b4 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -39,7 +39,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0071"
+#define DRV_VERSION	"EHEA_0072"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 4c70a9301c1..58702f54c3f 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -589,6 +589,23 @@ static int ehea_poll(struct net_device *dev, int *budget)
 	return 1;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void ehea_netpoll(struct net_device *dev)
+{
+	struct ehea_port *port = netdev_priv(dev);
+
+	netif_rx_schedule(port->port_res[0].d_netdev);
+}
+#endif
+
+static int ehea_poll_firstqueue(struct net_device *dev, int *budget)
+{
+	struct ehea_port *port = netdev_priv(dev);
+	struct net_device *d_dev = port->port_res[0].d_netdev;
+
+	return ehea_poll(d_dev, budget);
+}
+
 static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
 {
 	struct ehea_port_res *pr = param;
@@ -2626,7 +2643,10 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
 
 	dev->open = ehea_open;
-	dev->poll = ehea_poll;
+	dev->poll = ehea_poll_firstqueue;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = ehea_netpoll;
+#endif
 	dev->weight = 64;
 	dev->stop = ehea_stop;
 	dev->hard_start_xmit = ehea_start_xmit;
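The two ehea hunks work together: dev->poll now always drives queue 0's dummy netdev, and the CONFIG_NET_POLL_CONTROLLER hook schedules that same path so netconsole can harvest packets with interrupts off. A reduced sketch of this pre-struct-napi netpoll pattern, with ehea's indirection through port_res[0] omitted:

#ifdef CONFIG_NET_POLL_CONTROLLER
static void example_netpoll(struct net_device *dev)
{
	/* schedule the device's ->poll() exactly as an RX IRQ would */
	netif_rx_schedule(dev);
}
#endif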
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 6d1d50a1978..661c747389e 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5546,6 +5546,22 @@ static struct pci_device_id pci_tbl[] = {
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
 	},
+	{	/* MCP73 Ethernet Controller */
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+	},
+	{	/* MCP73 Ethernet Controller */
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+	},
+	{	/* MCP73 Ethernet Controller */
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+	},
+	{	/* MCP73 Ethernet Controller */
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+	},
 	{0,},
 };
 
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 325269d8ae3..d4c92cc879d 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -1179,8 +1179,7 @@ dma_watchdog_shutdown_poll_result(struct netxen_adapter *adapter)
 			NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), &ctrl, 4))
 		printk(KERN_ERR "failed to read dma watchdog status\n");
 
-	return ((netxen_get_dma_watchdog_enabled(ctrl) == 0) &&
-		(netxen_get_dma_watchdog_disabled(ctrl) == 0));
+	return (netxen_get_dma_watchdog_enabled(ctrl) == 0);
 }
 
 static inline int
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index b703ccfe040..19e2fa940ac 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -46,7 +46,7 @@ MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
 
-char netxen_nic_driver_name[] = "netxen-nic";
+char netxen_nic_driver_name[] = "netxen_nic";
 static char netxen_nic_driver_string[] = "NetXen Network Driver version "
     NETXEN_NIC_LINUX_VERSIONID;
 
52 52
@@ -640,6 +640,10 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			   NETXEN_CRB_NORMALIZE(adapter,
 				NETXEN_ROMUSB_GLB_PEGTUNE_DONE));
 		/* Handshake with the card before we register the devices. */
+		writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
+		netxen_pinit_from_rom(adapter, 0);
+		msleep(1);
+		netxen_load_firmware(adapter);
 		netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
 	}
 
645 649
@@ -782,19 +786,18 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
 
 	if (adapter->portnum == 0) {
 		if (init_firmware_done) {
-			dma_watchdog_shutdown_request(adapter);
-			msleep(100);
 			i = 100;
-			while ((dma_watchdog_shutdown_poll_result(adapter) != 1) && i) {
-				printk(KERN_INFO "dma_watchdog_shutdown_poll still in progress\n");
+			do {
+				if (dma_watchdog_shutdown_request(adapter) == 1)
+					break;
 				msleep(100);
-				i--;
-			}
+				if (dma_watchdog_shutdown_poll_result(adapter) == 1)
+					break;
+			} while (--i);
 
-			if (i == 0) {
-				printk(KERN_ERR "dma_watchdog_shutdown_request failed\n");
-				return;
-			}
+			if (i == 0)
+				printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n",
+				       netdev->name);
 
 			/* clear the register for future unloads/loads */
 			writel(0, NETXEN_CRB_NORMALIZE(adapter, NETXEN_CAM_RAM(0x1fc)));
@@ -803,11 +806,9 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
 
 		/* leave the hw in the same state as reboot */
 		writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
-		if (netxen_pinit_from_rom(adapter, 0))
-			return;
+		netxen_pinit_from_rom(adapter, 0);
 		msleep(1);
-		if (netxen_load_firmware(adapter))
-			return;
+		netxen_load_firmware(adapter);
 		netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
 	}
 
@@ -816,22 +817,21 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
 		printk(KERN_INFO "State: 0x%0x\n",
 			readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE)));
 
-		dma_watchdog_shutdown_request(adapter);
-		msleep(100);
 		i = 100;
-		while ((dma_watchdog_shutdown_poll_result(adapter) != 1) && i) {
-			printk(KERN_INFO "dma_watchdog_shutdown_poll still in progress\n");
+		do {
+			if (dma_watchdog_shutdown_request(adapter) == 1)
+				break;
 			msleep(100);
-			i--;
-		}
+			if (dma_watchdog_shutdown_poll_result(adapter) == 1)
+				break;
+		} while (--i);
 
 		if (i) {
 			netxen_free_adapter_offload(adapter);
 		} else {
-			printk(KERN_ERR "failed to dma shutdown\n");
-			return;
+			printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n",
+			       netdev->name);
 		}
-
 	}
 
 	iounmap(adapter->ahw.db_base);
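Both netxen_nic_remove() hunks replace a request-then-poll sequence with a single bounded do/while that retries the request itself and no longer aborts the teardown on failure. The generic shape, with function pointers standing in for dma_watchdog_shutdown_request()/dma_watchdog_shutdown_poll_result() (a sketch, not the driver's code):

static int shutdown_with_retry(int (*request)(void), int (*poll)(void),
			       void (*sleep_ms)(unsigned int), int attempts)
{
	int i = attempts;

	do {
		if (request() == 1)
			return 0;	/* watchdog already shut down */
		sleep_ms(100);
		if (poll() == 1)
			return 0;	/* shutdown completed */
	} while (--i);

	return -1;	/* caller logs the failure but keeps unloading */
}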
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 6a538564791..8874497b6bb 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -109,7 +109,7 @@ static int vsc824x_config_intr(struct phy_device *phydev)
 	 */
 	err = phy_read(phydev, MII_VSC8244_ISTAT);
 
-	if (err)
+	if (err < 0)
 		return err;
 
 	err = phy_write(phydev, MII_VSC8244_IMASK, 0);
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 08d25066f05..13d1c0a2a25 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -290,7 +290,8 @@ static void gelic_net_release_rx_chain(struct gelic_net_card *card)
 			descr->buf_addr = 0;
 			dev_kfree_skb_any(descr->skb);
 			descr->skb = NULL;
-			descr->dmac_cmd_status = GELIC_NET_DESCR_NOT_IN_USE;
+			gelic_net_set_descr_status(descr,
+						   GELIC_NET_DESCR_NOT_IN_USE);
 		}
 		descr = descr->next;
 	} while (descr != card->rx_chain.head);
@@ -374,7 +375,7 @@ static void gelic_net_release_tx_descr(struct gelic_net_card *card,
 	descr->skb = NULL;
 
 	/* set descr status */
-	descr->dmac_cmd_status = GELIC_NET_DMAC_CMDSTAT_NOT_IN_USE;
+	gelic_net_set_descr_status(descr, GELIC_NET_DESCR_NOT_IN_USE);
 }
 
 /**
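gelic_net_set_descr_status() itself is outside this diff, but judging from the shift/mask constants kept in ps3_gelic_net.h further down, its effect is to rewrite only the 4-bit status field of dmac_cmd_status — which a direct assignment of a bare enum value, as in the removed lines, would not do. A sketch of that masking under this assumption:

static void set_descr_status_sketch(u32 *dmac_cmd_status,
				    enum gelic_net_descr_status status)
{
	u32 cmd_status = *dmac_cmd_status;

	cmd_status &= GELIC_NET_DESCR_IND_PROC_MASKO;	/* keep bits 27..0 */
	cmd_status |= (u32)status << GELIC_NET_DESCR_IND_PROC_SHIFT;
	*dmac_cmd_status = cmd_status;
}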
@@ -403,26 +404,29 @@ static void gelic_net_release_tx_chain(struct gelic_net_card *card, int stop)
403 "%s: forcing end of tx descriptor " \ 404 "%s: forcing end of tx descriptor " \
404 "with status %x\n", 405 "with status %x\n",
405 __func__, status); 406 __func__, status);
406 card->netdev_stats.tx_dropped++; 407 card->netdev->stats.tx_dropped++;
407 break; 408 break;
408 409
409 case GELIC_NET_DESCR_COMPLETE: 410 case GELIC_NET_DESCR_COMPLETE:
410 card->netdev_stats.tx_packets++; 411 if (tx_chain->tail->skb) {
411 card->netdev_stats.tx_bytes += 412 card->netdev->stats.tx_packets++;
412 tx_chain->tail->skb->len; 413 card->netdev->stats.tx_bytes +=
414 tx_chain->tail->skb->len;
415 }
413 break; 416 break;
414 417
415 case GELIC_NET_DESCR_CARDOWNED: 418 case GELIC_NET_DESCR_CARDOWNED:
416 /* pending tx request */ 419 /* pending tx request */
417 default: 420 default:
418 /* any other value (== GELIC_NET_DESCR_NOT_IN_USE) */ 421 /* any other value (== GELIC_NET_DESCR_NOT_IN_USE) */
419 goto out; 422 if (!stop)
423 goto out;
420 } 424 }
421 gelic_net_release_tx_descr(card, tx_chain->tail); 425 gelic_net_release_tx_descr(card, tx_chain->tail);
422 release = 1; 426 release ++;
423 } 427 }
424out: 428out:
425 if (!stop && release) 429 if (!stop && (2 < release))
426 netif_wake_queue(card->netdev); 430 netif_wake_queue(card->netdev);
427} 431}
428 432
@@ -659,19 +663,21 @@ static int gelic_net_prepare_tx_descr_v(struct gelic_net_card *card,
 {
 	dma_addr_t buf[2];
 	unsigned int vlan_len;
+	struct gelic_net_descr *sec_descr = descr->next;
 
 	if (skb->len < GELIC_NET_VLAN_POS)
 		return -EINVAL;
 
-	memcpy(&descr->vlan, skb->data, GELIC_NET_VLAN_POS);
+	vlan_len = GELIC_NET_VLAN_POS;
+	memcpy(&descr->vlan, skb->data, vlan_len);
 	if (card->vlan_index != -1) {
+		/* internal vlan tag used */
 		descr->vlan.h_vlan_proto = htons(ETH_P_8021Q); /* vlan 0x8100*/
 		descr->vlan.h_vlan_TCI = htons(card->vlan_id[card->vlan_index]);
-		vlan_len = GELIC_NET_VLAN_POS + VLAN_HLEN; /* VLAN_HLEN=4 */
-	} else
-		vlan_len = GELIC_NET_VLAN_POS; /* no vlan tag */
+		vlan_len += VLAN_HLEN; /* added for above two lines */
+	}
 
-	/* first descr */
+	/* map data area */
 	buf[0] = dma_map_single(ctodev(card), &descr->vlan,
 			     vlan_len, DMA_TO_DEVICE);
 
@@ -682,20 +688,6 @@ static int gelic_net_prepare_tx_descr_v(struct gelic_net_card *card,
 		return -ENOMEM;
 	}
 
-	descr->buf_addr = buf[0];
-	descr->buf_size = vlan_len;
-	descr->skb = skb;	/* not used */
-	descr->data_status = 0;
-	gelic_net_set_txdescr_cmdstat(descr, skb, 1); /* not the frame end */
-
-	/* second descr */
-	card->tx_chain.head = card->tx_chain.head->next;
-	descr->next_descr_addr = descr->next->bus_addr;
-	descr = descr->next;
-	if (gelic_net_get_descr_status(descr) != GELIC_NET_DESCR_NOT_IN_USE)
-		/* XXX will be removed */
-		dev_err(ctodev(card), "descr is not free!\n");
-
 	buf[1] = dma_map_single(ctodev(card), skb->data + GELIC_NET_VLAN_POS,
 			     skb->len - GELIC_NET_VLAN_POS,
 			     DMA_TO_DEVICE);
@@ -710,13 +702,24 @@ static int gelic_net_prepare_tx_descr_v(struct gelic_net_card *card,
 		return -ENOMEM;
 	}
 
-	descr->buf_addr = buf[1];
-	descr->buf_size = skb->len - GELIC_NET_VLAN_POS;
-	descr->skb = skb;
+	/* first descr */
+	descr->buf_addr = buf[0];
+	descr->buf_size = vlan_len;
+	descr->skb = NULL; /* not used */
 	descr->data_status = 0;
-	descr->next_descr_addr = 0; /* terminate hw descr */
-	gelic_net_set_txdescr_cmdstat(descr, skb, 0);
+	descr->next_descr_addr = descr->next->bus_addr;
+	gelic_net_set_txdescr_cmdstat(descr, skb, 1); /* not the frame end */
 
+	/* second descr */
+	sec_descr->buf_addr = buf[1];
+	sec_descr->buf_size = skb->len - GELIC_NET_VLAN_POS;
+	sec_descr->skb = skb;
+	sec_descr->data_status = 0;
+	sec_descr->next_descr_addr = 0; /* terminate hw descr */
+	gelic_net_set_txdescr_cmdstat(sec_descr, skb, 0);
+
+	/* bump free descriptor pointer */
+	card->tx_chain.head = sec_descr->next;
 	return 0;
 }
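Besides switching to the sec_descr pointer, the rewrite reorders the work: both dma_map_single() calls now complete before any descriptor field is written, so a failure of the second mapping leaves the ring untouched. The shape of that resource-first pattern, as a sketch with map() standing in for dma_map_single():

int prepare_two_part_tx(unsigned long long buf[2],
			int (*map)(int part, unsigned long long *addr))
{
	if (map(0, &buf[0]))
		return -ENOMEM;		/* nothing to roll back */
	if (map(1, &buf[1]))
		return -ENOMEM;		/* part 0 still needs unmapping here */
	/* only now commit buf[0]/buf[1] into the two descriptors */
	return 0;
}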
722 725
@@ -729,7 +732,7 @@ static int gelic_net_prepare_tx_descr_v(struct gelic_net_card *card,
 static int gelic_net_kick_txdma(struct gelic_net_card *card,
 				struct gelic_net_descr *descr)
 {
-	int status = -ENXIO;
+	int status = 0;
 	int count = 10;
 
 	if (card->tx_dma_progress)
@@ -763,47 +766,62 @@ static int gelic_net_kick_txdma(struct gelic_net_card *card,
 static int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct gelic_net_card *card = netdev_priv(netdev);
-	struct gelic_net_descr *descr = NULL;
+	struct gelic_net_descr *descr;
 	int result;
 	unsigned long flags;
 
 	spin_lock_irqsave(&card->tx_dma_lock, flags);
 
 	gelic_net_release_tx_chain(card, 0);
-	if (!skb)
-		goto kick;
+
 	descr = gelic_net_get_next_tx_descr(card);
 	if (!descr) {
+		/*
+		 * no more descriptors free
+		 */
 		netif_stop_queue(netdev);
 		spin_unlock_irqrestore(&card->tx_dma_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
-	result = gelic_net_prepare_tx_descr_v(card, descr, skb);
-
-	if (result)
-		goto error;
 
-	card->tx_chain.head = card->tx_chain.head->next;
-
-	if (descr->prev)
-		descr->prev->next_descr_addr = descr->bus_addr;
-kick:
+	result = gelic_net_prepare_tx_descr_v(card, descr, skb);
+	if (result) {
+		/*
+		 * DMA map failed. As chanses are that failure
+		 * would continue, just release skb and return
+		 */
+		card->netdev->stats.tx_dropped++;
+		dev_kfree_skb_any(skb);
+		spin_unlock_irqrestore(&card->tx_dma_lock, flags);
+		return NETDEV_TX_OK;
+	}
+	/*
+	 * link this prepared descriptor to previous one
+	 * to achieve high performance
+	 */
+	descr->prev->next_descr_addr = descr->bus_addr;
 	/*
 	 * as hardware descriptor is modified in the above lines,
 	 * ensure that the hardware sees it
 	 */
 	wmb();
-	if (gelic_net_kick_txdma(card, card->tx_chain.tail))
-		goto error;
+	if (gelic_net_kick_txdma(card, descr)) {
+		/*
+		 * kick failed.
+		 * release descriptors which were just prepared
+		 */
+		card->netdev->stats.tx_dropped++;
+		gelic_net_release_tx_descr(card, descr);
+		gelic_net_release_tx_descr(card, descr->next);
+		card->tx_chain.tail = descr->next->next;
+		dev_info(ctodev(card), "%s: kick failure\n", __func__);
+	} else {
+		/* OK, DMA started/reserved */
+		netdev->trans_start = jiffies;
+	}
 
-	netdev->trans_start = jiffies;
 	spin_unlock_irqrestore(&card->tx_dma_lock, flags);
 	return NETDEV_TX_OK;
-
-error:
-	card->netdev_stats.tx_dropped++;
-	spin_unlock_irqrestore(&card->tx_dma_lock, flags);
-	return NETDEV_TX_LOCKED;
 }
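Worth noting is the return-code policy the rewritten gelic_net_xmit() settles on: NETDEV_TX_BUSY only when no descriptor is free (the stack will requeue the skb), and NETDEV_TX_OK with the skb freed when the DMA mapping fails, since such a failure would likely repeat on retry; the old NETDEV_TX_LOCKED path disappears entirely. Condensed into a sketch:

static int xmit_policy_sketch(struct sk_buff *skb, int no_descr, int map_err)
{
	if (no_descr)
		return NETDEV_TX_BUSY;	/* stack will retry this skb */
	if (map_err) {
		dev_kfree_skb_any(skb);	/* drop it ... */
		return NETDEV_TX_OK;	/* ... and tell the stack not to retry */
	}
	/* normal path: descriptor chained and DMA kicked */
	return NETDEV_TX_OK;
}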
808 826
809/** 827/**
@@ -854,8 +872,8 @@ static void gelic_net_pass_skb_up(struct gelic_net_descr *descr,
 	skb->ip_summed = CHECKSUM_NONE;
 
 	/* update netdevice statistics */
-	card->netdev_stats.rx_packets++;
-	card->netdev_stats.rx_bytes += skb->len;
+	card->netdev->stats.rx_packets++;
+	card->netdev->stats.rx_bytes += skb->len;
 
 	/* pass skb up to stack */
 	netif_receive_skb(skb);
@@ -895,38 +913,67 @@ static int gelic_net_decode_one_descr(struct gelic_net_card *card)
 	    (status == GELIC_NET_DESCR_FORCE_END)) {
 		dev_info(ctodev(card), "dropping RX descriptor with state %x\n",
 			 status);
-		card->netdev_stats.rx_dropped++;
+		card->netdev->stats.rx_dropped++;
 		goto refill;
 	}
 
-	if ((status != GELIC_NET_DESCR_COMPLETE) &&
-	    (status != GELIC_NET_DESCR_FRAME_END)) {
+	if (status == GELIC_NET_DESCR_BUFFER_FULL) {
+		/*
+		 * Buffer full would occur if and only if
+		 * the frame length was longer than the size of this
+		 * descriptor's buffer. If the frame length was equal
+		 * to or shorter than buffer'size, FRAME_END condition
+		 * would occur.
+		 * Anyway this frame was longer than the MTU,
+		 * just drop it.
+		 */
+		dev_info(ctodev(card), "overlength frame\n");
+		goto refill;
+	}
+	/*
+	 * descriptoers any other than FRAME_END here should
+	 * be treated as error.
+	 */
+	if (status != GELIC_NET_DESCR_FRAME_END) {
 		dev_dbg(ctodev(card), "RX descriptor with state %x\n",
 			status);
 		goto refill;
 	}
 
 	/* ok, we've got a packet in descr */
-	gelic_net_pass_skb_up(descr, card); /* 1: skb_up sccess */
-
+	gelic_net_pass_skb_up(descr, card);
 refill:
-	descr->next_descr_addr = 0; /* unlink the descr */
+	/*
+	 * So that always DMAC can see the end
+	 * of the descriptor chain to avoid
+	 * from unwanted DMAC overrun.
+	 */
+	descr->next_descr_addr = 0;
 
 	/* change the descriptor state: */
 	gelic_net_set_descr_status(descr, GELIC_NET_DESCR_NOT_IN_USE);
 
-	/* refill one desc
-	 * FIXME: this can fail, but for now, just leave this
-	 * descriptor without skb
+	/*
+	 * this call can fail, but for now, just leave this
+	 * decriptor without skb
 	 */
 	gelic_net_prepare_rx_descr(card, descr);
+
 	chain->head = descr;
 	chain->tail = descr->next;
+
+	/*
+	 * Set this descriptor the end of the chain.
+	 */
 	descr->prev->next_descr_addr = descr->bus_addr;
 
+	/*
+	 * If dmac chain was met, DMAC stopped.
+	 * thus re-enable it
+	 */
 	if (dmac_chain_ended) {
-		gelic_net_enable_rxdmac(card);
-		dev_dbg(ctodev(card), "reenable rx dma\n");
+		card->rx_dma_restart_required = 1;
+		dev_dbg(ctodev(card), "reenable rx dma scheduled\n");
 	}
 
 	return 1;
@@ -968,20 +1015,6 @@ static int gelic_net_poll(struct net_device *netdev, int *budget)
 	} else
 		return 1;
 }
-
-/**
- * gelic_net_get_stats - get interface statistics
- * @netdev: interface device structure
- *
- * returns the interface statistics residing in the gelic_net_card struct
- */
-static struct net_device_stats *gelic_net_get_stats(struct net_device *netdev)
-{
-	struct gelic_net_card *card = netdev_priv(netdev);
-
-	return &card->netdev_stats;
-}
-
 /**
  * gelic_net_change_mtu - changes the MTU of an interface
  * @netdev: interface device structure
986 * gelic_net_change_mtu - changes the MTU of an interface 1019 * gelic_net_change_mtu - changes the MTU of an interface
987 * @netdev: interface device structure 1020 * @netdev: interface device structure
@@ -1016,6 +1049,11 @@ static irqreturn_t gelic_net_interrupt(int irq, void *ptr)
 	if (!status)
 		return IRQ_NONE;
 
+	if (card->rx_dma_restart_required) {
+		card->rx_dma_restart_required = 0;
+		gelic_net_enable_rxdmac(card);
+	}
+
 	if (status & GELIC_NET_RXINT) {
 		gelic_net_rx_irq_off(card);
 		netif_rx_schedule(netdev);
@@ -1024,9 +1062,10 @@ static irqreturn_t gelic_net_interrupt(int irq, void *ptr)
 	if (status & GELIC_NET_TXINT) {
 		spin_lock_irqsave(&card->tx_dma_lock, flags);
 		card->tx_dma_progress = 0;
+		gelic_net_release_tx_chain(card, 0);
+		/* kick outstanding tx descriptor if any */
+		gelic_net_kick_txdma(card, card->tx_chain.tail);
 		spin_unlock_irqrestore(&card->tx_dma_lock, flags);
-		/* start pending DMA */
-		gelic_net_xmit(NULL, netdev);
 	}
 	return IRQ_HANDLED;
 }
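Together with the RX-decode hunk earlier, this forms a small two-stage handshake: the poll path only records that the RX DMAC ran off the end of its chain, and the next interrupt re-enables it via the new rx_dma_restart_required flag. Reduced to its skeleton (a sketch of the two sites, not new code):

/* softirq / poll context: */
if (dmac_chain_ended)
	card->rx_dma_restart_required = 1;

/* hard interrupt context, before normal status handling: */
if (card->rx_dma_restart_required) {
	card->rx_dma_restart_required = 0;
	gelic_net_enable_rxdmac(card);
}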
@@ -1068,7 +1107,7 @@ static int gelic_net_open_device(struct gelic_net_card *card)
 	}
 
 	result = request_irq(card->netdev->irq, gelic_net_interrupt,
-			     IRQF_DISABLED, "gelic network", card->netdev);
+			     IRQF_DISABLED, card->netdev->name, card->netdev);
 
 	if (result) {
 		dev_info(ctodev(card), "%s:%d: request_irq failed (%d)\n",
@@ -1107,7 +1146,7 @@ static int gelic_net_open(struct net_device *netdev)
 				 card->descr, GELIC_NET_TX_DESCRIPTORS))
 		goto alloc_tx_failed;
 	if (gelic_net_init_chain(card, &card->rx_chain,
-				 card->descr + GELIC_NET_RX_DESCRIPTORS,
+				 card->descr + GELIC_NET_TX_DESCRIPTORS,
 				 GELIC_NET_RX_DESCRIPTORS))
 		goto alloc_rx_failed;
 
@@ -1129,7 +1168,6 @@ static int gelic_net_open(struct net_device *netdev)
 
 	netif_start_queue(netdev);
 	netif_carrier_on(netdev);
-	netif_poll_enable(netdev);
 
 	return 0;
 
@@ -1141,7 +1179,6 @@ alloc_tx_failed:
 	return -ENOMEM;
 }
 
-#ifdef GELIC_NET_ETHTOOL
 static void gelic_net_get_drvinfo (struct net_device *netdev,
 				   struct ethtool_drvinfo *info)
 {
@@ -1261,7 +1298,6 @@ static struct ethtool_ops gelic_net_ethtool_ops = {
 	.get_rx_csum = gelic_net_get_rx_csum,
 	.set_rx_csum = gelic_net_set_rx_csum,
 };
-#endif
 
 /**
  * gelic_net_tx_timeout_task - task scheduled by the watchdog timeout
@@ -1320,7 +1356,6 @@ static void gelic_net_setup_netdev_ops(struct net_device *netdev)
 	netdev->open = &gelic_net_open;
 	netdev->stop = &gelic_net_stop;
 	netdev->hard_start_xmit = &gelic_net_xmit;
-	netdev->get_stats = &gelic_net_get_stats;
 	netdev->set_multicast_list = &gelic_net_set_multi;
 	netdev->change_mtu = &gelic_net_change_mtu;
 	/* tx watchdog */
@@ -1329,9 +1364,7 @@ static void gelic_net_setup_netdev_ops(struct net_device *netdev)
 	/* NAPI */
 	netdev->poll = &gelic_net_poll;
 	netdev->weight = GELIC_NET_NAPI_WEIGHT;
-#ifdef GELIC_NET_ETHTOOL
 	netdev->ethtool_ops = &gelic_net_ethtool_ops;
-#endif
 }
 
 /**
diff --git a/drivers/net/ps3_gelic_net.h b/drivers/net/ps3_gelic_net.h
index 5e1c28654e1..a9c4c4fc254 100644
--- a/drivers/net/ps3_gelic_net.h
+++ b/drivers/net/ps3_gelic_net.h
@@ -28,21 +28,12 @@
 #ifndef _GELIC_NET_H
 #define _GELIC_NET_H
 
-#define GELIC_NET_DRV_NAME "Gelic Network Driver"
-#define GELIC_NET_DRV_VERSION "1.0"
-
-#define GELIC_NET_ETHTOOL        /* use ethtool */
-
-/* ioctl */
-#define GELIC_NET_GET_MODE              (SIOCDEVPRIVATE + 0)
-#define GELIC_NET_SET_MODE              (SIOCDEVPRIVATE + 1)
-
 /* descriptors */
 #define GELIC_NET_RX_DESCRIPTORS        128 /* num of descriptors */
 #define GELIC_NET_TX_DESCRIPTORS        128 /* num of descriptors */
 
-#define GELIC_NET_MAX_MTU               2308
-#define GELIC_NET_MIN_MTU               64
+#define GELIC_NET_MAX_MTU               VLAN_ETH_FRAME_LEN
+#define GELIC_NET_MIN_MTU               VLAN_ETH_ZLEN
 #define GELIC_NET_RXBUF_ALIGN           128
 #define GELIC_NET_RX_CSUM_DEFAULT       1 /* hw chksum */
 #define GELIC_NET_WATCHDOG_TIMEOUT      5*HZ
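The new MTU bounds come from <linux/if_vlan.h> rather than magic numbers; for reference, the values behind those symbols:

#define VLAN_ETH_ZLEN		64	/* min octets in a VLAN-tagged frame */
#define VLAN_ETH_FRAME_LEN	1522	/* max octets in a VLAN-tagged frame */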
@@ -90,7 +81,8 @@ enum gelic_net_int1_status {
  */
 #define GELIC_NET_RXVLNPKT	0x00200000 /* VLAN packet */
 /* bit 20..16 reserved */
-#define GELIC_NET_RXRECNUM	0x0000ff00 /* reception receipt number */
+#define GELIC_NET_RXRRECNUM	0x0000ff00 /* reception receipt number */
+#define GELIC_NET_RXRRECNUM_SHIFT	8
 /* bit 7..0 reserved */
 
 #define GELIC_NET_TXDESC_TAIL		0
@@ -133,19 +125,19 @@ enum gelic_net_int1_status {
 					 * interrupt status */
 
 #define GELIC_NET_DMAC_CMDSTAT_CHAIN_END	0x00000002 /* RXDCEIS:DMA stopped */
-#define GELIC_NET_DMAC_CMDSTAT_NOT_IN_USE	0xb0000000
 #define GELIC_NET_DESCR_IND_PROC_SHIFT	28
 #define GELIC_NET_DESCR_IND_PROC_MASKO	0x0fffffff
 
 
 enum gelic_net_descr_status {
-	GELIC_NET_DESCR_COMPLETE            = 0x00, /* used in rx and tx */
+	GELIC_NET_DESCR_COMPLETE            = 0x00, /* used in tx */
+	GELIC_NET_DESCR_BUFFER_FULL         = 0x00, /* used in rx */
 	GELIC_NET_DESCR_RESPONSE_ERROR      = 0x01, /* used in rx and tx */
 	GELIC_NET_DESCR_PROTECTION_ERROR    = 0x02, /* used in rx and tx */
 	GELIC_NET_DESCR_FRAME_END           = 0x04, /* used in rx */
 	GELIC_NET_DESCR_FORCE_END           = 0x05, /* used in rx and tx */
 	GELIC_NET_DESCR_CARDOWNED           = 0x0a, /* used in rx and tx */
-	GELIC_NET_DESCR_NOT_IN_USE /* any other value */
+	GELIC_NET_DESCR_NOT_IN_USE          = 0x0b /* any other value */
 };
150/* for lv1_net_control */ 142/* for lv1_net_control */
151#define GELIC_NET_GET_MAC_ADDRESS 0x0000000000000001 143#define GELIC_NET_GET_MAC_ADDRESS 0x0000000000000001
@@ -216,10 +208,10 @@ struct gelic_net_card {
 
 	struct gelic_net_descr_chain tx_chain;
 	struct gelic_net_descr_chain rx_chain;
+	int rx_dma_restart_required;
 	/* gurad dmac descriptor chain*/
 	spinlock_t chain_lock;
 
-	struct net_device_stats netdev_stats;
 	int rx_csum;
 	/* guard tx_dma_progress */
 	spinlock_t tx_dma_lock;
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index e4736a3b1b7..12e01b24105 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -43,10 +43,6 @@
 
 #undef DEBUG
 
-#define DRV_DESC "QE UCC Gigabit Ethernet Controller"
-#define DRV_NAME "ucc_geth"
-#define DRV_VERSION "1.1"
-
 #define ugeth_printk(level, format, arg...)  \
         printk(level format "\n", ## arg)
 
@@ -64,9 +60,19 @@
 #else
 #define ugeth_vdbg(fmt, args...) do { } while (0)
 #endif				/* UGETH_VERBOSE_DEBUG */
+#define UGETH_MSG_DEFAULT	(NETIF_MSG_IFUP << 1 ) - 1
 
+void uec_set_ethtool_ops(struct net_device *netdev);
+
 static DEFINE_SPINLOCK(ugeth_lock);
 
+static struct {
+	u32 msg_enable;
+} debug = { -1 };
+
+module_param_named(debug, debug.msg_enable, int, 0);
+MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)");
+
 static struct ucc_geth_info ugeth_primary_info = {
 	.uf_info = {
 		    .bd_mem_part = MEM_PART_SYSTEM,
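The module parameter above feeds the standard netif_msg_* machinery that the remaining ucc_geth hunks switch to: each ugeth_err() becomes conditional on a bit in the device's msg_enable mask. Typical consumption of such a mask, assuming the private struct carries msg_enable as ucc_geth's does (a sketch):

/* at probe time: clamp the raw parameter into a bitmask */
ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);

/* at a log site: */
if (netif_msg_ifup(ugeth))
	ugeth_err("%s: Can not get SNUM.", __FUNCTION__);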
@@ -104,6 +110,7 @@ static struct ucc_geth_info ugeth_primary_info = {
 	.maxRetransmission = 0xf,
 	.collisionWindow = 0x37,
 	.receiveFlowControl = 1,
+	.transmitFlowControl = 1,
 	.maxGroupAddrInHash = 4,
 	.maxIndAddrInHash = 4,
 	.prel = 7,
@@ -139,7 +146,9 @@ static struct ucc_geth_info ugeth_primary_info = {
 	.numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
 	.largestexternallookupkeysize =
 	    QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
-	.statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_NONE,
+	.statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE |
+		UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX |
+		UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX,
 	.vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
 	.vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
 	.rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
@@ -281,7 +290,8 @@ static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
 
 	for (i = 0; i < num_entries; i++) {
 		if ((snum = qe_get_snum()) < 0) {
-			ugeth_err("fill_init_enet_entries: Can not get SNUM.");
+			if (netif_msg_ifup(ugeth))
+				ugeth_err("fill_init_enet_entries: Can not get SNUM.");
 			return snum;
 		}
 		if ((i == 0) && skip_page_for_first_entry)
@@ -291,8 +301,8 @@ static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
 			init_enet_offset =
 			    qe_muram_alloc(thread_size, thread_alignment);
 			if (IS_ERR_VALUE(init_enet_offset)) {
-				ugeth_err
-				("fill_init_enet_entries: Can not allocate DPRAM memory.");
+				if (netif_msg_ifup(ugeth))
+					ugeth_err("fill_init_enet_entries: Can not allocate DPRAM memory.");
 				qe_put_snum((u8) snum);
 				return -ENOMEM;
 			}
@@ -1200,7 +1210,7 @@ static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
 	return 0;
 }
 
-static int init_flow_control_params(u32 automatic_flow_control_mode,
+int init_flow_control_params(u32 automatic_flow_control_mode,
 				    int rx_flow_control_enable,
 				    int tx_flow_control_enable,
 				    u16 pause_period,
@@ -1486,9 +1496,9 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
 
 	ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
 	if (ret_val != 0) {
-		ugeth_err
-		    ("%s: Preamble length must be between 3 and 7 inclusive.",
+		if (netif_msg_probe(ugeth))
+			ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.",
 		     __FUNCTION__);
 		return ret_val;
 	}
 
@@ -1726,7 +1736,8 @@ static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
 
 	/* check if the UCC number is in range. */
 	if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
-		ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
+		if (netif_msg_probe(ugeth))
+			ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
 		return -EINVAL;
 	}
 
@@ -1754,7 +1765,8 @@ static int ugeth_disable(struct ucc_geth_private * ugeth, enum comm_dir mode)
 
 	/* check if the UCC number is in range. */
 	if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
-		ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
+		if (netif_msg_probe(ugeth))
+			ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
 		return -EINVAL;
 	}
 
@@ -2306,7 +2318,9 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
 
 	if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
 	      (uf_info->bd_mem_part == MEM_PART_MURAM))) {
-		ugeth_err("%s: Bad memory partition value.", __FUNCTION__);
+		if (netif_msg_probe(ugeth))
+			ugeth_err("%s: Bad memory partition value.",
+				  __FUNCTION__);
 		return -EINVAL;
 	}
 
@@ -2315,9 +2329,10 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
 		if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
 		    (ug_info->bdRingLenRx[i] %
 		     UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
-			ugeth_err
-			    ("%s: Rx BD ring length must be multiple of 4,"
-			     " no smaller than 8.", __FUNCTION__);
+			if (netif_msg_probe(ugeth))
+				ugeth_err
+				    ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.",
+				     __FUNCTION__);
 			return -EINVAL;
 		}
 	}
@@ -2325,9 +2340,10 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
 	/* Tx BD lengths */
 	for (i = 0; i < ug_info->numQueuesTx; i++) {
 		if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
-			ugeth_err
-			    ("%s: Tx BD ring length must be no smaller than 2.",
-			     __FUNCTION__);
+			if (netif_msg_probe(ugeth))
+				ugeth_err
+				    ("%s: Tx BD ring length must be no smaller than 2.",
+				     __FUNCTION__);
 			return -EINVAL;
 		}
 	}
@@ -2335,31 +2351,35 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
 	/* mrblr */
 	if ((uf_info->max_rx_buf_length == 0) ||
 	    (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
-		ugeth_err
-		    ("%s: max_rx_buf_length must be non-zero multiple of 128.",
-		     __FUNCTION__);
+		if (netif_msg_probe(ugeth))
+			ugeth_err
+			    ("%s: max_rx_buf_length must be non-zero multiple of 128.",
+			     __FUNCTION__);
 		return -EINVAL;
 	}
 
 	/* num Tx queues */
 	if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
-		ugeth_err("%s: number of tx queues too large.", __FUNCTION__);
+		if (netif_msg_probe(ugeth))
+			ugeth_err("%s: number of tx queues too large.", __FUNCTION__);
 		return -EINVAL;
 	}
 
 	/* num Rx queues */
 	if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
-		ugeth_err("%s: number of rx queues too large.", __FUNCTION__);
+		if (netif_msg_probe(ugeth))
+			ugeth_err("%s: number of rx queues too large.", __FUNCTION__);
 		return -EINVAL;
 	}
 
 	/* l2qt */
 	for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
 		if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
-			ugeth_err
-			    ("%s: VLAN priority table entry must not be"
-			     " larger than number of Rx queues.",
-			     __FUNCTION__);
+			if (netif_msg_probe(ugeth))
+				ugeth_err
+				    ("%s: VLAN priority table entry must not be"
+				     " larger than number of Rx queues.",
+				     __FUNCTION__);
 			return -EINVAL;
 		}
 	}
@@ -2367,26 +2387,29 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
 	/* l3qt */
 	for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
 		if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
-			ugeth_err
-			    ("%s: IP priority table entry must not be"
-			     " larger than number of Rx queues.",
-			     __FUNCTION__);
+			if (netif_msg_probe(ugeth))
+				ugeth_err
+				    ("%s: IP priority table entry must not be"
+				     " larger than number of Rx queues.",
+				     __FUNCTION__);
 			return -EINVAL;
 		}
 	}
 
 	if (ug_info->cam && !ug_info->ecamptr) {
-		ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
-			  __FUNCTION__);
+		if (netif_msg_probe(ugeth))
+			ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
+				  __FUNCTION__);
 		return -EINVAL;
 	}
 
 	if ((ug_info->numStationAddresses !=
 	     UCC_GETH_NUM_OF_STATION_ADDRESSES_1)
 	    && ug_info->rxExtendedFiltering) {
-		ugeth_err("%s: Number of station addresses greater than 1 "
-			  "not allowed in extended parsing mode.",
-			  __FUNCTION__);
+		if (netif_msg_probe(ugeth))
+			ugeth_err("%s: Number of station addresses greater than 1 "
+				  "not allowed in extended parsing mode.",
+				  __FUNCTION__);
 		return -EINVAL;
 	}
 
@@ -2399,7 +2422,8 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
 		uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i);
 	/* Initialize the general fast UCC block. */
 	if (ucc_fast_init(uf_info, &ugeth->uccf)) {
-		ugeth_err("%s: Failed to init uccf.", __FUNCTION__);
+		if (netif_msg_probe(ugeth))
+			ugeth_err("%s: Failed to init uccf.", __FUNCTION__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -2452,7 +2476,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 		numThreadsRxNumerical = 8;
 		break;
 	default:
-		ugeth_err("%s: Bad number of Rx threads value.", __FUNCTION__);
+		if (netif_msg_ifup(ugeth))
+			ugeth_err("%s: Bad number of Rx threads value.",
+				  __FUNCTION__);
 		ucc_geth_memclean(ugeth);
 		return -EINVAL;
 		break;
@@ -2475,7 +2501,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 		numThreadsTxNumerical = 8;
 		break;
 	default:
-		ugeth_err("%s: Bad number of Tx threads value.", __FUNCTION__);
+		if (netif_msg_ifup(ugeth))
+			ugeth_err("%s: Bad number of Tx threads value.",
+				  __FUNCTION__);
 		ucc_geth_memclean(ugeth);
 		return -EINVAL;
 		break;
@@ -2507,7 +2535,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 	/* For more details see the hardware spec. */
 	init_flow_control_params(ug_info->aufc,
 				 ug_info->receiveFlowControl,
-				 1,
+				 ug_info->transmitFlowControl,
 				 ug_info->pausePeriod,
 				 ug_info->extensionField,
 				 &uf_regs->upsmr,
@@ -2527,8 +2555,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 					      ug_info->backToBackInterFrameGap,
 					      &ug_regs->ipgifg);
 	if (ret_val != 0) {
-		ugeth_err("%s: IPGIFG initialization parameter too large.",
-			  __FUNCTION__);
+		if (netif_msg_ifup(ugeth))
+			ugeth_err("%s: IPGIFG initialization parameter too large.",
+				  __FUNCTION__);
 		ucc_geth_memclean(ugeth);
 		return ret_val;
 	}
@@ -2544,7 +2573,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 					  ug_info->collisionWindow,
 					  &ug_regs->hafdup);
 	if (ret_val != 0) {
-		ugeth_err("%s: Half Duplex initialization parameter too large.",
+		if (netif_msg_ifup(ugeth))
+			ugeth_err("%s: Half Duplex initialization parameter too large.",
 			  __FUNCTION__);
 		ucc_geth_memclean(ugeth);
 		return ret_val;
@@ -2597,9 +2627,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 							   tx_bd_ring_offset[j]);
 		}
 		if (!ugeth->p_tx_bd_ring[j]) {
-			ugeth_err
-			    ("%s: Can not allocate memory for Tx bd rings.",
-			     __FUNCTION__);
+			if (netif_msg_ifup(ugeth))
+				ugeth_err
+				    ("%s: Can not allocate memory for Tx bd rings.",
+				     __FUNCTION__);
 			ucc_geth_memclean(ugeth);
 			return -ENOMEM;
 		}
@@ -2632,9 +2663,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 							   rx_bd_ring_offset[j]);
 		}
 		if (!ugeth->p_rx_bd_ring[j]) {
-			ugeth_err
-			    ("%s: Can not allocate memory for Rx bd rings.",
-			     __FUNCTION__);
+			if (netif_msg_ifup(ugeth))
+				ugeth_err
+				    ("%s: Can not allocate memory for Rx bd rings.",
+				     __FUNCTION__);
 			ucc_geth_memclean(ugeth);
 			return -ENOMEM;
 		}
@@ -2648,8 +2680,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 			    GFP_KERNEL);
 
 		if (ugeth->tx_skbuff[j] == NULL) {
-			ugeth_err("%s: Could not allocate tx_skbuff",
-				  __FUNCTION__);
+			if (netif_msg_ifup(ugeth))
+				ugeth_err("%s: Could not allocate tx_skbuff",
+					  __FUNCTION__);
 			ucc_geth_memclean(ugeth);
 			return -ENOMEM;
 		}
@@ -2679,8 +2712,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 			    GFP_KERNEL);
 
 		if (ugeth->rx_skbuff[j] == NULL) {
-			ugeth_err("%s: Could not allocate rx_skbuff",
-				  __FUNCTION__);
+			if (netif_msg_ifup(ugeth))
+				ugeth_err("%s: Could not allocate rx_skbuff",
+					  __FUNCTION__);
 			ucc_geth_memclean(ugeth);
 			return -ENOMEM;
 		}
@@ -2711,9 +2745,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 	    qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
 			   UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
 	if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) {
-		ugeth_err
-		    ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
-		     __FUNCTION__);
+		if (netif_msg_ifup(ugeth))
+			ugeth_err
+			    ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
+			     __FUNCTION__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -2733,9 +2768,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 			   32 * (numThreadsTxNumerical == 1),
 			   UCC_GETH_THREAD_DATA_ALIGNMENT);
 	if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) {
-		ugeth_err
-		    ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
-		     __FUNCTION__);
+		if (netif_msg_ifup(ugeth))
+			ugeth_err
+			    ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
+			     __FUNCTION__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -2761,9 +2797,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 			   sizeof(struct ucc_geth_send_queue_qd),
 			   UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
 	if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) {
-		ugeth_err
-		    ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
-		     __FUNCTION__);
+		if (netif_msg_ifup(ugeth))
+			ugeth_err
+			    ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
+			     __FUNCTION__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -2804,9 +2841,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 		    qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
 				   UCC_GETH_SCHEDULER_ALIGNMENT);
 		if (IS_ERR_VALUE(ugeth->scheduler_offset)) {
-			ugeth_err
-			    ("%s: Can not allocate DPRAM memory for p_scheduler.",
-			     __FUNCTION__);
+			if (netif_msg_ifup(ugeth))
+				ugeth_err
+				    ("%s: Can not allocate DPRAM memory for p_scheduler.",
+				     __FUNCTION__);
 			ucc_geth_memclean(ugeth);
 			return -ENOMEM;
 		}
@@ -2852,9 +2890,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 		     (struct ucc_geth_tx_firmware_statistics_pram),
 		     UCC_GETH_TX_STATISTICS_ALIGNMENT);
 		if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) {
-			ugeth_err
-			    ("%s: Can not allocate DPRAM memory for"
-			     " p_tx_fw_statistics_pram.", __FUNCTION__);
+			if (netif_msg_ifup(ugeth))
+				ugeth_err
+				    ("%s: Can not allocate DPRAM memory for"
+				     " p_tx_fw_statistics_pram.",
+				     __FUNCTION__);
 			ucc_geth_memclean(ugeth);
 			return -ENOMEM;
 		}
@@ -2891,9 +2931,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 	    qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
 			   UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
 	if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) {
-		ugeth_err
-		    ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
-		     __FUNCTION__);
+		if (netif_msg_ifup(ugeth))
+			ugeth_err
+			    ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
+			     __FUNCTION__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -2912,9 +2953,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 			   sizeof(struct ucc_geth_thread_data_rx),
 			   UCC_GETH_THREAD_DATA_ALIGNMENT);
 	if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) {
-		ugeth_err
-		    ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
-		     __FUNCTION__);
+		if (netif_msg_ifup(ugeth))
+			ugeth_err
+			    ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
+			     __FUNCTION__);
 		ucc_geth_memclean(ugeth);
 		return -ENOMEM;
 	}
@@ -2935,9 +2977,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2935 (struct ucc_geth_rx_firmware_statistics_pram), 2977 (struct ucc_geth_rx_firmware_statistics_pram),
2936 UCC_GETH_RX_STATISTICS_ALIGNMENT); 2978 UCC_GETH_RX_STATISTICS_ALIGNMENT);
2937 if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) { 2979 if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) {
2938 ugeth_err 2980 if (netif_msg_ifup(ugeth))
2939 ("%s: Can not allocate DPRAM memory for" 2981 ugeth_err
2940 " p_rx_fw_statistics_pram.", __FUNCTION__); 2982 ("%s: Can not allocate DPRAM memory for"
2983 " p_rx_fw_statistics_pram.", __FUNCTION__);
2941 ucc_geth_memclean(ugeth); 2984 ucc_geth_memclean(ugeth);
2942 return -ENOMEM; 2985 return -ENOMEM;
2943 } 2986 }
@@ -2957,9 +3000,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2957 sizeof(struct ucc_geth_rx_interrupt_coalescing_entry) 3000 sizeof(struct ucc_geth_rx_interrupt_coalescing_entry)
2958 + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT); 3001 + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
2959 if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) { 3002 if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) {
2960 ugeth_err 3003 if (netif_msg_ifup(ugeth))
2961 ("%s: Can not allocate DPRAM memory for" 3004 ugeth_err
2962 " p_rx_irq_coalescing_tbl.", __FUNCTION__); 3005 ("%s: Can not allocate DPRAM memory for"
3006 " p_rx_irq_coalescing_tbl.", __FUNCTION__);
2963 ucc_geth_memclean(ugeth); 3007 ucc_geth_memclean(ugeth);
2964 return -ENOMEM; 3008 return -ENOMEM;
2965 } 3009 }
@@ -3025,9 +3069,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3025 sizeof(struct ucc_geth_rx_prefetched_bds)), 3069 sizeof(struct ucc_geth_rx_prefetched_bds)),
3026 UCC_GETH_RX_BD_QUEUES_ALIGNMENT); 3070 UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
3027 if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) { 3071 if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) {
3028 ugeth_err 3072 if (netif_msg_ifup(ugeth))
3029 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.", 3073 ugeth_err
3030 __FUNCTION__); 3074 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
3075 __FUNCTION__);
3031 ucc_geth_memclean(ugeth); 3076 ucc_geth_memclean(ugeth);
3032 return -ENOMEM; 3077 return -ENOMEM;
3033 } 3078 }
@@ -3102,8 +3147,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3102 /* initialize extended filtering */ 3147 /* initialize extended filtering */
3103 if (ug_info->rxExtendedFiltering) { 3148 if (ug_info->rxExtendedFiltering) {
3104 if (!ug_info->extendedFilteringChainPointer) { 3149 if (!ug_info->extendedFilteringChainPointer) {
3105 ugeth_err("%s: Null Extended Filtering Chain Pointer.", 3150 if (netif_msg_ifup(ugeth))
3106 __FUNCTION__); 3151 ugeth_err("%s: Null Extended Filtering Chain Pointer.",
3152 __FUNCTION__);
3107 ucc_geth_memclean(ugeth); 3153 ucc_geth_memclean(ugeth);
3108 return -EINVAL; 3154 return -EINVAL;
3109 } 3155 }
@@ -3114,9 +3160,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3114 qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram), 3160 qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram),
3115 UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT); 3161 UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
3116 if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) { 3162 if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) {
3117 ugeth_err 3163 if (netif_msg_ifup(ugeth))
3118 ("%s: Can not allocate DPRAM memory for" 3164 ugeth_err
3119 " p_exf_glbl_param.", __FUNCTION__); 3165 ("%s: Can not allocate DPRAM memory for"
3166 " p_exf_glbl_param.", __FUNCTION__);
3120 ucc_geth_memclean(ugeth); 3167 ucc_geth_memclean(ugeth);
3121 return -ENOMEM; 3168 return -ENOMEM;
3122 } 3169 }
@@ -3161,9 +3208,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3161 */ 3208 */
3162 if (!(ugeth->p_init_enet_param_shadow = 3209 if (!(ugeth->p_init_enet_param_shadow =
3163 kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) { 3210 kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
3164 ugeth_err 3211 if (netif_msg_ifup(ugeth))
3165 ("%s: Can not allocate memory for" 3212 ugeth_err
3166 " p_UccInitEnetParamShadows.", __FUNCTION__); 3213 ("%s: Can not allocate memory for"
3214 " p_UccInitEnetParamShadows.", __FUNCTION__);
3167 ucc_geth_memclean(ugeth); 3215 ucc_geth_memclean(ugeth);
3168 return -ENOMEM; 3216 return -ENOMEM;
3169 } 3217 }
@@ -3196,8 +3244,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3196 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) 3244 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
3197 && (ug_info->largestexternallookupkeysize != 3245 && (ug_info->largestexternallookupkeysize !=
3198 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) { 3246 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
3199 ugeth_err("%s: Invalid largest External Lookup Key Size.", 3247 if (netif_msg_ifup(ugeth))
3200 __FUNCTION__); 3248 ugeth_err("%s: Invalid largest External Lookup Key Size.",
3249 __FUNCTION__);
3201 ucc_geth_memclean(ugeth); 3250 ucc_geth_memclean(ugeth);
3202 return -EINVAL; 3251 return -EINVAL;
3203 } 3252 }
@@ -3222,8 +3271,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3222 /* Rx needs one extra for terminator */ 3271 /* Rx needs one extra for terminator */
3223 , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT, 3272 , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
3224 ug_info->riscRx, 1)) != 0) { 3273 ug_info->riscRx, 1)) != 0) {
3225 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", 3274 if (netif_msg_ifup(ugeth))
3226 __FUNCTION__); 3275 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3276 __FUNCTION__);
3227 ucc_geth_memclean(ugeth); 3277 ucc_geth_memclean(ugeth);
3228 return ret_val; 3278 return ret_val;
3229 } 3279 }
@@ -3237,8 +3287,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3237 sizeof(struct ucc_geth_thread_tx_pram), 3287 sizeof(struct ucc_geth_thread_tx_pram),
3238 UCC_GETH_THREAD_TX_PRAM_ALIGNMENT, 3288 UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
3239 ug_info->riscTx, 0)) != 0) { 3289 ug_info->riscTx, 0)) != 0) {
3240 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", 3290 if (netif_msg_ifup(ugeth))
3241 __FUNCTION__); 3291 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3292 __FUNCTION__);
3242 ucc_geth_memclean(ugeth); 3293 ucc_geth_memclean(ugeth);
3243 return ret_val; 3294 return ret_val;
3244 } 3295 }
@@ -3246,8 +3297,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3246 /* Load Rx bds with buffers */ 3297 /* Load Rx bds with buffers */
3247 for (i = 0; i < ug_info->numQueuesRx; i++) { 3298 for (i = 0; i < ug_info->numQueuesRx; i++) {
3248 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) { 3299 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
3249 ugeth_err("%s: Can not fill Rx bds with buffers.", 3300 if (netif_msg_ifup(ugeth))
3250 __FUNCTION__); 3301 ugeth_err("%s: Can not fill Rx bds with buffers.",
3302 __FUNCTION__);
3251 ucc_geth_memclean(ugeth); 3303 ucc_geth_memclean(ugeth);
3252 return ret_val; 3304 return ret_val;
3253 } 3305 }
@@ -3256,9 +3308,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3256 /* Allocate InitEnet command parameter structure */ 3308 /* Allocate InitEnet command parameter structure */
3257 init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4); 3309 init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4);
3258 if (IS_ERR_VALUE(init_enet_pram_offset)) { 3310 if (IS_ERR_VALUE(init_enet_pram_offset)) {
3259 ugeth_err 3311 if (netif_msg_ifup(ugeth))
3260 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.", 3312 ugeth_err
3261 __FUNCTION__); 3313 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
3314 __FUNCTION__);
3262 ucc_geth_memclean(ugeth); 3315 ucc_geth_memclean(ugeth);
3263 return -ENOMEM; 3316 return -ENOMEM;
3264 } 3317 }
@@ -3428,8 +3481,9 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
3428 if (!skb || 3481 if (!skb ||
3429 (!(bd_status & (R_F | R_L))) || 3482 (!(bd_status & (R_F | R_L))) ||
3430 (bd_status & R_ERRORS_FATAL)) { 3483 (bd_status & R_ERRORS_FATAL)) {
3431 ugeth_vdbg("%s, %d: ERROR!!! skb - 0x%08x", 3484 if (netif_msg_rx_err(ugeth))
3432 __FUNCTION__, __LINE__, (u32) skb); 3485 ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
3486 __FUNCTION__, __LINE__, (u32) skb);
3433 if (skb) 3487 if (skb)
3434 dev_kfree_skb_any(skb); 3488 dev_kfree_skb_any(skb);
3435 3489
@@ -3458,7 +3512,8 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
3458 3512
3459 skb = get_new_skb(ugeth, bd); 3513 skb = get_new_skb(ugeth, bd);
3460 if (!skb) { 3514 if (!skb) {
3461 ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__); 3515 if (netif_msg_rx_err(ugeth))
3516 ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__);
3462 ugeth->stats.rx_dropped++; 3517 ugeth->stats.rx_dropped++;
3463 break; 3518 break;
3464 } 3519 }
@@ -3649,28 +3704,32 @@ static int ucc_geth_open(struct net_device *dev)
3649 3704
3650 /* Test station address */ 3705 /* Test station address */
3651 if (dev->dev_addr[0] & ENET_GROUP_ADDR) { 3706 if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
3652 ugeth_err("%s: Multicast address used for station address" 3707 if (netif_msg_ifup(ugeth))
3653 " - is this what you wanted?", __FUNCTION__); 3708 ugeth_err("%s: Multicast address used for station address"
3709 " - is this what you wanted?", __FUNCTION__);
3654 return -EINVAL; 3710 return -EINVAL;
3655 } 3711 }
3656 3712
3657 err = ucc_struct_init(ugeth); 3713 err = ucc_struct_init(ugeth);
3658 if (err) { 3714 if (err) {
3659 ugeth_err("%s: Cannot configure internal struct, aborting.", dev->name); 3715 if (netif_msg_ifup(ugeth))
3716 ugeth_err("%s: Cannot configure internal struct, aborting.", dev->name);
3660 return err; 3717 return err;
3661 } 3718 }
3662 3719
3663 err = ucc_geth_startup(ugeth); 3720 err = ucc_geth_startup(ugeth);
3664 if (err) { 3721 if (err) {
3665 ugeth_err("%s: Cannot configure net device, aborting.", 3722 if (netif_msg_ifup(ugeth))
3666 dev->name); 3723 ugeth_err("%s: Cannot configure net device, aborting.",
3724 dev->name);
3667 return err; 3725 return err;
3668 } 3726 }
3669 3727
3670 err = adjust_enet_interface(ugeth); 3728 err = adjust_enet_interface(ugeth);
3671 if (err) { 3729 if (err) {
3672 ugeth_err("%s: Cannot configure net device, aborting.", 3730 if (netif_msg_ifup(ugeth))
3673 dev->name); 3731 ugeth_err("%s: Cannot configure net device, aborting.",
3732 dev->name);
3674 return err; 3733 return err;
3675 } 3734 }
3676 3735
@@ -3687,7 +3746,8 @@ static int ucc_geth_open(struct net_device *dev)
3687 3746
3688 err = init_phy(dev); 3747 err = init_phy(dev);
3689 if (err) { 3748 if (err) {
3690 ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name); 3749 if (netif_msg_ifup(ugeth))
3750 ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name);
3691 return err; 3751 return err;
3692 } 3752 }
3693 3753
@@ -3697,15 +3757,17 @@ static int ucc_geth_open(struct net_device *dev)
3697 request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0, 3757 request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0,
3698 "UCC Geth", dev); 3758 "UCC Geth", dev);
3699 if (err) { 3759 if (err) {
3700 ugeth_err("%s: Cannot get IRQ for net device, aborting.", 3760 if (netif_msg_ifup(ugeth))
3701 dev->name); 3761 ugeth_err("%s: Cannot get IRQ for net device, aborting.",
3762 dev->name);
3702 ucc_geth_stop(ugeth); 3763 ucc_geth_stop(ugeth);
3703 return err; 3764 return err;
3704 } 3765 }
3705 3766
3706 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); 3767 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
3707 if (err) { 3768 if (err) {
3708 ugeth_err("%s: Cannot enable net device, aborting.", dev->name); 3769 if (netif_msg_ifup(ugeth))
3770 ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
3709 ucc_geth_stop(ugeth); 3771 ucc_geth_stop(ugeth);
3710 return err; 3772 return err;
3711 } 3773 }
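
Every ucc_geth.c hunk above applies the same transformation: an unconditional ugeth_err()/ugeth_warn() becomes one gated on the driver's msg_enable bitmap through the standard netif_msg_ifup()/netif_msg_rx_err()/netif_msg_probe() helpers from <linux/netdevice.h>. A minimal user-space mirror of that mechanism; the struct and function names are illustrative, only the NETIF_MSG_* values are the kernel's:

    #include <stdio.h>

    /* Bit values as in <linux/netdevice.h>. */
    enum {
            NETIF_MSG_DRV    = 0x0001,
            NETIF_MSG_PROBE  = 0x0002,
            NETIF_MSG_IFUP   = 0x0020,
            NETIF_MSG_RX_ERR = 0x0040,
    };

    /* The helpers only require a msg_enable field in the private struct. */
    struct fake_priv {
            unsigned int msg_enable;
    };

    #define netif_msg_ifup(p)   ((p)->msg_enable & NETIF_MSG_IFUP)
    #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)

    int main(void)
    {
            /* The old initializer, (NETIF_MSG_IFUP << 1) - 1, enables every
             * class up to and including IFUP, i.e. 0x3f. */
            struct fake_priv priv = { .msg_enable = (NETIF_MSG_IFUP << 1) - 1 };

            if (netif_msg_ifup(&priv))
                    printf("ifup errors are printed\n");    /* taken */
            if (netif_msg_rx_err(&priv))
                    printf("rx errors are printed\n");      /* not taken: 0x40 & 0x3f == 0 */
            return 0;
    }
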
@@ -3732,8 +3794,6 @@ static int ucc_geth_close(struct net_device *dev)
3732 return 0; 3794 return 0;
3733} 3795}
3734 3796
3735const struct ethtool_ops ucc_geth_ethtool_ops = { };
3736
3737static phy_interface_t to_phy_interface(const char *phy_connection_type) 3797static phy_interface_t to_phy_interface(const char *phy_connection_type)
3738{ 3798{
3739 if (strcasecmp(phy_connection_type, "mii") == 0) 3799 if (strcasecmp(phy_connection_type, "mii") == 0)
@@ -3790,6 +3850,13 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3790 return -ENODEV; 3850 return -ENODEV;
3791 3851
3792 ug_info = &ugeth_info[ucc_num]; 3852 ug_info = &ugeth_info[ucc_num];
3853 if (ug_info == NULL) {
3854 if (netif_msg_probe(&debug))
3855 ugeth_err("%s: [%d] Missing additional data!",
3856 __FUNCTION__, ucc_num);
3857 return -ENODEV;
3858 }
3859
3793 ug_info->uf_info.ucc_num = ucc_num; 3860 ug_info->uf_info.ucc_num = ucc_num;
3794 3861
3795 prop = of_get_property(np, "rx-clock", NULL); 3862 prop = of_get_property(np, "rx-clock", NULL);
@@ -3868,15 +3935,10 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3868 3935
3869 ug_info->mdio_bus = res.start; 3936 ug_info->mdio_bus = res.start;
3870 3937
3871 printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n", 3938 if (netif_msg_probe(&debug))
3872 ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs, 3939 printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n",
3873 ug_info->uf_info.irq); 3940 ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
3874 3941 ug_info->uf_info.irq);
3875 if (ug_info == NULL) {
3876 ugeth_err("%s: [%d] Missing additional data!", __FUNCTION__,
3877 ucc_num);
3878 return -ENODEV;
3879 }
3880 3942
3881 /* Create an ethernet device instance */ 3943 /* Create an ethernet device instance */
3882 dev = alloc_etherdev(sizeof(*ugeth)); 3944 dev = alloc_etherdev(sizeof(*ugeth));
@@ -3896,6 +3958,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3896 SET_NETDEV_DEV(dev, device); 3958 SET_NETDEV_DEV(dev, device);
3897 3959
3898 /* Fill in the dev structure */ 3960 /* Fill in the dev structure */
3961 uec_set_ethtool_ops(dev);
3899 dev->open = ucc_geth_open; 3962 dev->open = ucc_geth_open;
3900 dev->hard_start_xmit = ucc_geth_start_xmit; 3963 dev->hard_start_xmit = ucc_geth_start_xmit;
3901 dev->tx_timeout = ucc_geth_timeout; 3964 dev->tx_timeout = ucc_geth_timeout;
@@ -3909,16 +3972,16 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3909// dev->change_mtu = ucc_geth_change_mtu; 3972// dev->change_mtu = ucc_geth_change_mtu;
3910 dev->mtu = 1500; 3973 dev->mtu = 1500;
3911 dev->set_multicast_list = ucc_geth_set_multi; 3974 dev->set_multicast_list = ucc_geth_set_multi;
3912 dev->ethtool_ops = &ucc_geth_ethtool_ops;
3913 3975
3914 ugeth->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 3976 ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
3915 ugeth->phy_interface = phy_interface; 3977 ugeth->phy_interface = phy_interface;
3916 ugeth->max_speed = max_speed; 3978 ugeth->max_speed = max_speed;
3917 3979
3918 err = register_netdev(dev); 3980 err = register_netdev(dev);
3919 if (err) { 3981 if (err) {
3920 ugeth_err("%s: Cannot register net device, aborting.", 3982 if (netif_msg_probe(ugeth))
3921 dev->name); 3983 ugeth_err("%s: Cannot register net device, aborting.",
3984 dev->name);
3922 free_netdev(dev); 3985 free_netdev(dev);
3923 return err; 3986 return err;
3924 } 3987 }
@@ -3972,7 +4035,8 @@ static int __init ucc_geth_init(void)
3972 if (ret) 4035 if (ret)
3973 return ret; 4036 return ret;
3974 4037
3975 printk(KERN_INFO "ucc_geth: " DRV_DESC "\n"); 4038 if (netif_msg_drv(&debug))
4039 printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
3976 for (i = 0; i < 8; i++) 4040 for (i = 0; i < 8; i++)
3977 memcpy(&(ugeth_info[i]), &ugeth_primary_info, 4041 memcpy(&(ugeth_info[i]), &ugeth_primary_info,
3978 sizeof(ugeth_primary_info)); 4042 sizeof(ugeth_primary_info));
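
The replacement initializer, netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT), lets a module parameter override the default verbosity. A sketch mirroring the helper's logic as found in <linux/netdevice.h>; the _mirror suffix is mine:

    #include <stdio.h>

    /* Mirror of netif_msg_init(): a negative or oversized debug value means
     * "not set on the command line", so keep the driver default; 0 means
     * explicitly quiet; a value N enables the N lowest message classes. */
    static unsigned int netif_msg_init_mirror(int debug_value,
                                              int default_msg_enable_bits)
    {
            if (debug_value < 0 || debug_value >= (int)(sizeof(int) * 8))
                    return default_msg_enable_bits;
            if (debug_value == 0)
                    return 0;
            return (1U << debug_value) - 1;
    }

    int main(void)
    {
            printf("%#x\n", netif_msg_init_mirror(-1, 0x3f)); /* 0x3f: default kept */
            printf("%#x\n", netif_msg_init_mirror(3, 0x3f));  /* 0x7: from parameter */
            return 0;
    }
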
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index a29e1c3ca4b..bb4dac8c0c6 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -30,6 +30,10 @@
30 30
31#include "ucc_geth_mii.h" 31#include "ucc_geth_mii.h"
32 32
33#define DRV_DESC "QE UCC Gigabit Ethernet Controller"
34#define DRV_NAME "ucc_geth"
35#define DRV_VERSION "1.1"
36
33#define NUM_TX_QUEUES 8 37#define NUM_TX_QUEUES 8
34#define NUM_RX_QUEUES 8 38#define NUM_RX_QUEUES 8
35#define NUM_BDS_IN_PREFETCHED_BDS 4 39#define NUM_BDS_IN_PREFETCHED_BDS 4
@@ -896,6 +900,7 @@ struct ucc_geth_hardware_statistics {
896#define UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX 8 900#define UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX 8
897#define UCC_GETH_RX_BD_RING_SIZE_MIN 8 901#define UCC_GETH_RX_BD_RING_SIZE_MIN 8
898#define UCC_GETH_TX_BD_RING_SIZE_MIN 2 902#define UCC_GETH_TX_BD_RING_SIZE_MIN 2
903#define UCC_GETH_BD_RING_SIZE_MAX 0xffff
899 904
900#define UCC_GETH_SIZE_OF_BD QE_SIZEOF_BD 905#define UCC_GETH_SIZE_OF_BD QE_SIZEOF_BD
901 906
@@ -1135,6 +1140,7 @@ struct ucc_geth_info {
1135 int bro; 1140 int bro;
1136 int ecm; 1141 int ecm;
1137 int receiveFlowControl; 1142 int receiveFlowControl;
1143 int transmitFlowControl;
1138 u8 maxGroupAddrInHash; 1144 u8 maxGroupAddrInHash;
1139 u8 maxIndAddrInHash; 1145 u8 maxIndAddrInHash;
1140 u8 prel; 1146 u8 prel;
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c
new file mode 100644
index 00000000000..a8994c7b858
--- /dev/null
+++ b/drivers/net/ucc_geth_ethtool.c
@@ -0,0 +1,388 @@
1/*
2 * Copyright (c) 2007 Freescale Semiconductor, Inc. All rights reserved.
3 *
4 * Description: QE UCC Gigabit Ethernet Ethtool API Set
5 *
6 * Author: Li Yang <leoli@freescale.com>
7 *
8 * Limitation:
 9 * Can only get/set settings of the first queue.
 10 * Need to re-open the interface manually after changing some parameters.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/errno.h>
21#include <linux/slab.h>
22#include <linux/stddef.h>
23#include <linux/interrupt.h>
24#include <linux/netdevice.h>
25#include <linux/etherdevice.h>
26#include <linux/skbuff.h>
27#include <linux/spinlock.h>
28#include <linux/mm.h>
29#include <linux/delay.h>
30#include <linux/dma-mapping.h>
31#include <linux/fsl_devices.h>
32#include <linux/ethtool.h>
33#include <linux/mii.h>
34#include <linux/phy.h>
35
36#include <asm/io.h>
37#include <asm/irq.h>
38#include <asm/uaccess.h>
39#include <asm/types.h>
40#include <asm/uaccess.h>
41
42#include "ucc_geth.h"
43#include "ucc_geth_mii.h"
44
45static char hw_stat_gstrings[][ETH_GSTRING_LEN] = {
46 "tx-64-frames",
47 "tx-65-127-frames",
48 "tx-128-255-frames",
49 "rx-64-frames",
50 "rx-65-127-frames",
51 "rx-128-255-frames",
52 "tx-bytes-ok",
53 "tx-pause-frames",
54 "tx-multicast-frames",
55 "tx-broadcast-frames",
56 "rx-frames",
57 "rx-bytes-ok",
58 "rx-bytes-all",
59 "rx-multicast-frames",
60 "rx-broadcast-frames",
61 "stats-counter-carry",
62 "stats-counter-mask",
63 "rx-dropped-frames",
64};
65
66static char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
67 "tx-single-collision",
68 "tx-multiple-collision",
69 "tx-late-collsion",
70 "tx-aborted-frames",
71 "tx-lost-frames",
72 "tx-carrier-sense-errors",
73 "tx-frames-ok",
74 "tx-excessive-differ-frames",
75 "tx-256-511-frames",
76 "tx-1024-1518-frames",
77 "tx-jumbo-frames",
78};
79
80static char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
81 "rx-crc-errors",
82 "rx-alignment-errors",
83 "rx-in-range-length-errors",
84 "rx-out-of-range-length-errors",
85 "rx-too-long-frames",
86 "rx-runt",
87 "rx-very-long-event",
88 "rx-symbol-errors",
89 "rx-busy-drop-frames",
90 "reserved",
91 "reserved",
92 "rx-mismatch-drop-frames",
93 "rx-small-than-64",
94 "rx-256-511-frames",
95 "rx-512-1023-frames",
96 "rx-1024-1518-frames",
97 "rx-jumbo-frames",
98 "rx-mac-error-loss",
99 "rx-pause-frames",
100 "reserved",
101 "rx-vlan-removed",
102 "rx-vlan-replaced",
103 "rx-vlan-inserted",
104 "rx-ip-checksum-errors",
105};
106
107#define UEC_HW_STATS_LEN ARRAY_SIZE(hw_stat_gstrings)
108#define UEC_TX_FW_STATS_LEN ARRAY_SIZE(tx_fw_stat_gstrings)
109#define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings)
110
111extern int init_flow_control_params(u32 automatic_flow_control_mode,
112 int rx_flow_control_enable,
113 int tx_flow_control_enable, u16 pause_period,
114 u16 extension_field, volatile u32 *upsmr_register,
115 volatile u32 *uempr_register, volatile u32 *maccfg1_register);
116
117static int
118uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
119{
120 struct ucc_geth_private *ugeth = netdev_priv(netdev);
121 struct phy_device *phydev = ugeth->phydev;
122 struct ucc_geth_info *ug_info = ugeth->ug_info;
123
124 if (!phydev)
125 return -ENODEV;
126
127 ecmd->maxtxpkt = 1;
128 ecmd->maxrxpkt = ug_info->interruptcoalescingmaxvalue[0];
129
130 return phy_ethtool_gset(phydev, ecmd);
131}
132
133static int
134uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
135{
136 struct ucc_geth_private *ugeth = netdev_priv(netdev);
137 struct phy_device *phydev = ugeth->phydev;
138
139 if (!phydev)
140 return -ENODEV;
141
142 return phy_ethtool_sset(phydev, ecmd);
143}
144
145static void
146uec_get_pauseparam(struct net_device *netdev,
147 struct ethtool_pauseparam *pause)
148{
149 struct ucc_geth_private *ugeth = netdev_priv(netdev);
150
151 pause->autoneg = ugeth->phydev->autoneg;
152
153 if (ugeth->ug_info->receiveFlowControl)
154 pause->rx_pause = 1;
155 if (ugeth->ug_info->transmitFlowControl)
156 pause->tx_pause = 1;
157}
158
159static int
160uec_set_pauseparam(struct net_device *netdev,
161 struct ethtool_pauseparam *pause)
162{
163 struct ucc_geth_private *ugeth = netdev_priv(netdev);
164 int ret = 0;
165
166 ugeth->ug_info->receiveFlowControl = pause->rx_pause;
167 ugeth->ug_info->transmitFlowControl = pause->tx_pause;
168
169 if (ugeth->phydev->autoneg) {
170 if (netif_running(netdev)) {
171 /* FIXME: automatically restart */
172 printk(KERN_INFO
173 "Please re-open the interface.\n");
174 }
175 } else {
176 struct ucc_geth_info *ug_info = ugeth->ug_info;
177
178 ret = init_flow_control_params(ug_info->aufc,
179 ug_info->receiveFlowControl,
180 ug_info->transmitFlowControl,
181 ug_info->pausePeriod,
182 ug_info->extensionField,
183 &ugeth->uccf->uf_regs->upsmr,
184 &ugeth->ug_regs->uempr,
185 &ugeth->ug_regs->maccfg1);
186 }
187
188 return ret;
189}
190
191static uint32_t
192uec_get_msglevel(struct net_device *netdev)
193{
194 struct ucc_geth_private *ugeth = netdev_priv(netdev);
195 return ugeth->msg_enable;
196}
197
198static void
199uec_set_msglevel(struct net_device *netdev, uint32_t data)
200{
201 struct ucc_geth_private *ugeth = netdev_priv(netdev);
202 ugeth->msg_enable = data;
203}
204
205static int
206uec_get_regs_len(struct net_device *netdev)
207{
208 return sizeof(struct ucc_geth);
209}
210
211static void
212uec_get_regs(struct net_device *netdev,
213 struct ethtool_regs *regs, void *p)
214{
215 int i;
216 struct ucc_geth_private *ugeth = netdev_priv(netdev);
217 u32 __iomem *ug_regs = (u32 __iomem *)ugeth->ug_regs;
218 u32 *buff = p;
219
220 for (i = 0; i < sizeof(struct ucc_geth) / sizeof(u32); i++)
221 buff[i] = in_be32(&ug_regs[i]);
222}
223
224static void
225uec_get_ringparam(struct net_device *netdev,
226 struct ethtool_ringparam *ring)
227{
228 struct ucc_geth_private *ugeth = netdev_priv(netdev);
229 struct ucc_geth_info *ug_info = ugeth->ug_info;
230 int queue = 0;
231
232 ring->rx_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
233 ring->rx_mini_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
234 ring->rx_jumbo_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
235 ring->tx_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
236
237 ring->rx_pending = ug_info->bdRingLenRx[queue];
238 ring->rx_mini_pending = ug_info->bdRingLenRx[queue];
239 ring->rx_jumbo_pending = ug_info->bdRingLenRx[queue];
240 ring->tx_pending = ug_info->bdRingLenTx[queue];
241}
242
243static int
244uec_set_ringparam(struct net_device *netdev,
245 struct ethtool_ringparam *ring)
246{
247 struct ucc_geth_private *ugeth = netdev_priv(netdev);
248 struct ucc_geth_info *ug_info = ugeth->ug_info;
249 int queue = 0, ret = 0;
250
251 if (ring->rx_pending < UCC_GETH_RX_BD_RING_SIZE_MIN) {
252 printk("%s: RxBD ring size must be no smaller than %d.\n",
253 netdev->name, UCC_GETH_RX_BD_RING_SIZE_MIN);
254 return -EINVAL;
255 }
256 if (ring->rx_pending % UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT) {
257 printk("%s: RxBD ring size must be multiple of %d.\n",
258 netdev->name, UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT);
259 return -EINVAL;
260 }
261 if (ring->tx_pending < UCC_GETH_TX_BD_RING_SIZE_MIN) {
262 printk("%s: TxBD ring size must be no smaller than %d.\n",
263 netdev->name, UCC_GETH_TX_BD_RING_SIZE_MIN);
264 return -EINVAL;
265 }
266
267 ug_info->bdRingLenRx[queue] = ring->rx_pending;
268 ug_info->bdRingLenTx[queue] = ring->tx_pending;
269
270 if (netif_running(netdev)) {
271 /* FIXME: restart automatically */
272 printk(KERN_INFO
273 "Please re-open the interface.\n");
274 }
275
276 return ret;
277}
278
279static int uec_get_stats_count(struct net_device *netdev)
280{
281 struct ucc_geth_private *ugeth = netdev_priv(netdev);
282 u32 stats_mode = ugeth->ug_info->statisticsMode;
283 int len = 0;
284
285 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE)
286 len += UEC_HW_STATS_LEN;
287 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX)
288 len += UEC_TX_FW_STATS_LEN;
289 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX)
290 len += UEC_RX_FW_STATS_LEN;
291
292 return len;
293}
294
295static void uec_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
296{
297 struct ucc_geth_private *ugeth = netdev_priv(netdev);
298 u32 stats_mode = ugeth->ug_info->statisticsMode;
299
300 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) {
301 memcpy(buf, hw_stat_gstrings, UEC_HW_STATS_LEN *
302 ETH_GSTRING_LEN);
303 buf += UEC_HW_STATS_LEN * ETH_GSTRING_LEN;
304 }
305 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
306 memcpy(buf, tx_fw_stat_gstrings, UEC_TX_FW_STATS_LEN *
307 ETH_GSTRING_LEN);
308 buf += UEC_TX_FW_STATS_LEN * ETH_GSTRING_LEN;
309 }
310 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX)
 311 memcpy(buf, rx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN *
312 ETH_GSTRING_LEN);
313}
314
315static void uec_get_ethtool_stats(struct net_device *netdev,
316 struct ethtool_stats *stats, uint64_t *data)
317{
318 struct ucc_geth_private *ugeth = netdev_priv(netdev);
319 u32 stats_mode = ugeth->ug_info->statisticsMode;
320 u32 __iomem *base;
321 int i, j = 0;
322
323 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) {
324 base = (u32 __iomem *)&ugeth->ug_regs->tx64;
325 for (i = 0; i < UEC_HW_STATS_LEN; i++)
326 data[j++] = (u64)in_be32(&base[i]);
327 }
328 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
329 base = (u32 __iomem *)ugeth->p_tx_fw_statistics_pram;
330 for (i = 0; i < UEC_TX_FW_STATS_LEN; i++)
331 data[j++] = (u64)in_be32(&base[i]);
332 }
333 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
334 base = (u32 __iomem *)ugeth->p_rx_fw_statistics_pram;
335 for (i = 0; i < UEC_RX_FW_STATS_LEN; i++)
336 data[j++] = (u64)in_be32(&base[i]);
337 }
338}
339
340static int uec_nway_reset(struct net_device *netdev)
341{
342 struct ucc_geth_private *ugeth = netdev_priv(netdev);
343
344 return phy_start_aneg(ugeth->phydev);
345}
346
347/* Report driver information */
348static void
349uec_get_drvinfo(struct net_device *netdev,
350 struct ethtool_drvinfo *drvinfo)
351{
352 strncpy(drvinfo->driver, DRV_NAME, 32);
353 strncpy(drvinfo->version, DRV_VERSION, 32);
354 strncpy(drvinfo->fw_version, "N/A", 32);
355 strncpy(drvinfo->bus_info, "QUICC ENGINE", 32);
356 drvinfo->n_stats = uec_get_stats_count(netdev);
357 drvinfo->testinfo_len = 0;
358 drvinfo->eedump_len = 0;
359 drvinfo->regdump_len = uec_get_regs_len(netdev);
360}
361
362static const struct ethtool_ops uec_ethtool_ops = {
363 .get_settings = uec_get_settings,
364 .set_settings = uec_set_settings,
365 .get_drvinfo = uec_get_drvinfo,
366 .get_regs_len = uec_get_regs_len,
367 .get_regs = uec_get_regs,
368 .get_msglevel = uec_get_msglevel,
369 .set_msglevel = uec_set_msglevel,
370 .nway_reset = uec_nway_reset,
371 .get_link = ethtool_op_get_link,
372 .get_ringparam = uec_get_ringparam,
373 .set_ringparam = uec_set_ringparam,
374 .get_pauseparam = uec_get_pauseparam,
375 .set_pauseparam = uec_set_pauseparam,
376 .get_sg = ethtool_op_get_sg,
377 .set_sg = ethtool_op_set_sg,
378 .get_tso = ethtool_op_get_tso,
379 .get_stats_count = uec_get_stats_count,
380 .get_strings = uec_get_strings,
381 .get_ethtool_stats = uec_get_ethtool_stats,
382 .get_perm_addr = ethtool_op_get_perm_addr,
383};
384
385void uec_set_ethtool_ops(struct net_device *netdev)
386{
387 SET_ETHTOOL_OPS(netdev, &uec_ethtool_ops);
388}
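
The three stats callbacks above form one contract: get_stats_count() sizes both of the buffers that get_strings() and get_ethtool_stats() later fill, and `ethtool -S` pairs entry i of one with entry i of the other. A self-contained sketch of that pairing, with made-up stat names and values:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_GSTRING_LEN 32      /* as in <linux/ethtool.h> */

    static const char *names[] = { "rx-crc-errors", "tx-frames-ok" };

    static int get_stats_count(void) { return 2; }

    static void get_strings(uint8_t *buf)
    {
            int i;

            for (i = 0; i < get_stats_count(); i++)
                    strncpy((char *)buf + i * ETH_GSTRING_LEN, names[i],
                            ETH_GSTRING_LEN);
    }

    static void get_ethtool_stats(uint64_t *data)
    {
            data[0] = 3;    /* the driver would read these with in_be32() */
            data[1] = 42;
    }

    int main(void)
    {
            uint8_t strings[2 * ETH_GSTRING_LEN];
            uint64_t data[2];
            int i;

            get_strings(strings);
            get_ethtool_stats(data);
            for (i = 0; i < get_stats_count(); i++) /* ethtool -S style dump */
                    printf("%s: %llu\n", (char *)strings + i * ETH_GSTRING_LEN,
                           (unsigned long long)data[i]);
            return 0;
    }
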
diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c
index 7bcb82f50cf..5f8c2d30a32 100644
--- a/drivers/net/ucc_geth_mii.c
+++ b/drivers/net/ucc_geth_mii.c
@@ -54,8 +54,8 @@
54#define vdbg(format, arg...) do {} while(0) 54#define vdbg(format, arg...) do {} while(0)
55#endif 55#endif
56 56
57#define DRV_DESC "QE UCC Ethernet Controller MII Bus" 57#define MII_DRV_DESC "QE UCC Ethernet Controller MII Bus"
58#define DRV_NAME "fsl-uec_mdio" 58#define MII_DRV_NAME "fsl-uec_mdio"
59 59
60/* Write value to the PHY for this device to the register at regnum, */ 60/* Write value to the PHY for this device to the register at regnum, */
61/* waiting until the write is done before it returns. All PHY */ 61/* waiting until the write is done before it returns. All PHY */
@@ -261,7 +261,7 @@ static struct of_device_id uec_mdio_match[] = {
261}; 261};
262 262
263static struct of_platform_driver uec_mdio_driver = { 263static struct of_platform_driver uec_mdio_driver = {
264 .name = DRV_NAME, 264 .name = MII_DRV_NAME,
265 .probe = uec_mdio_probe, 265 .probe = uec_mdio_probe,
266 .remove = uec_mdio_remove, 266 .remove = uec_mdio_remove,
267 .match_table = uec_mdio_match, 267 .match_table = uec_mdio_match,
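
The MII_ prefix exists because ucc_geth.h (above) now defines DRV_DESC and DRV_NAME of its own; keeping the old names in the MDIO driver would risk a preprocessor clash wherever both definitions meet. A hypothetical single-file reconstruction of that clash; the macro values are real, the layout is not:

    #define DRV_NAME "ucc_geth"     /* as now defined in ucc_geth.h */
    #define DRV_NAME "fsl-uec_mdio" /* warning: "DRV_NAME" redefined */

    int main(void) { return 0; }
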
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index bfeca57098f..e6bfce690ca 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1187,7 +1187,7 @@ dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
1187static void 1187static void
1188__dasd_process_blk_queue(struct dasd_device * device) 1188__dasd_process_blk_queue(struct dasd_device * device)
1189{ 1189{
1190 request_queue_t *queue; 1190 struct request_queue *queue;
1191 struct request *req; 1191 struct request *req;
1192 struct dasd_ccw_req *cqr; 1192 struct dasd_ccw_req *cqr;
1193 int nr_queued; 1193 int nr_queued;
@@ -1740,7 +1740,7 @@ dasd_cancel_req(struct dasd_ccw_req *cqr)
1740 * Dasd request queue function. Called from ll_rw_blk.c 1740 * Dasd request queue function. Called from ll_rw_blk.c
1741 */ 1741 */
1742static void 1742static void
1743do_dasd_request(request_queue_t * queue) 1743do_dasd_request(struct request_queue * queue)
1744{ 1744{
1745 struct dasd_device *device; 1745 struct dasd_device *device;
1746 1746
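
This hunk and the s390, sbus, and SCSI hunks that follow are one mechanical change: the request_queue_t typedef is spelled out as struct request_queue (see the blkdev.h hunk near the end, which deprecates the typedef). A compile-only sketch of the resulting shape, with stub types standing in for the kernel's:

    struct request_queue { int stub; };     /* opaque in real drivers */

    /* before: static void do_dasd_request(request_queue_t *queue) */
    static void do_dasd_request(struct request_queue *queue)
    {
            (void)queue;    /* fetch, start and requeue requests here */
    }

    int main(void)
    {
            struct request_queue q = { 0 };

            do_dasd_request(&q);
            return 0;
    }
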
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 241294cba41..aeda5268244 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -293,7 +293,7 @@ struct dasd_uid {
293struct dasd_device { 293struct dasd_device {
294 /* Block device stuff. */ 294 /* Block device stuff. */
295 struct gendisk *gdp; 295 struct gendisk *gdp;
296 request_queue_t *request_queue; 296 struct request_queue *request_queue;
297 spinlock_t request_queue_lock; 297 spinlock_t request_queue_lock;
298 struct block_device *bdev; 298 struct block_device *bdev;
299 unsigned int devindex; 299 unsigned int devindex;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 35765f6a86e..4d8798bacf9 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -621,7 +621,7 @@ out:
621} 621}
622 622
623static int 623static int
624dcssblk_make_request(request_queue_t *q, struct bio *bio) 624dcssblk_make_request(struct request_queue *q, struct bio *bio)
625{ 625{
626 struct dcssblk_dev_info *dev_info; 626 struct dcssblk_dev_info *dev_info;
627 struct bio_vec *bvec; 627 struct bio_vec *bvec;
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index a04d9120cef..354a060e5be 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -191,7 +191,7 @@ static unsigned long __init xpram_highest_page_index(void)
191/* 191/*
192 * Block device make request function. 192 * Block device make request function.
193 */ 193 */
194static int xpram_make_request(request_queue_t *q, struct bio *bio) 194static int xpram_make_request(struct request_queue *q, struct bio *bio)
195{ 195{
196 xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; 196 xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
197 struct bio_vec *bvec; 197 struct bio_vec *bvec;
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 3b52f5c1dbe..dddf8d62c15 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -188,7 +188,7 @@ struct tape_blk_data
188{ 188{
189 struct tape_device * device; 189 struct tape_device * device;
190 /* Block device request queue. */ 190 /* Block device request queue. */
191 request_queue_t * request_queue; 191 struct request_queue * request_queue;
192 spinlock_t request_queue_lock; 192 spinlock_t request_queue_lock;
193 193
194 /* Task to move entries from block request to CCS request queue. */ 194 /* Task to move entries from block request to CCS request queue. */
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index dd0ecaed592..eeb92e2ed0c 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -147,7 +147,7 @@ static void
147tapeblock_requeue(struct work_struct *work) { 147tapeblock_requeue(struct work_struct *work) {
148 struct tape_blk_data * blkdat; 148 struct tape_blk_data * blkdat;
149 struct tape_device * device; 149 struct tape_device * device;
150 request_queue_t * queue; 150 struct request_queue * queue;
151 int nr_queued; 151 int nr_queued;
152 struct request * req; 152 struct request * req;
153 struct list_head * l; 153 struct list_head * l;
@@ -194,7 +194,7 @@ tapeblock_requeue(struct work_struct *work) {
194 * Tape request queue function. Called from ll_rw_blk.c 194 * Tape request queue function. Called from ll_rw_blk.c
195 */ 195 */
196static void 196static void
197tapeblock_request_fn(request_queue_t *queue) 197tapeblock_request_fn(struct request_queue *queue)
198{ 198{
199 struct tape_device *device; 199 struct tape_device *device;
200 200
diff --git a/drivers/sbus/char/Kconfig b/drivers/sbus/char/Kconfig
index 35a73168333..400c65bfb8c 100644
--- a/drivers/sbus/char/Kconfig
+++ b/drivers/sbus/char/Kconfig
@@ -15,6 +15,7 @@ config SUN_OPENPROMIO
15 15
16config SUN_MOSTEK_RTC 16config SUN_MOSTEK_RTC
17 tristate "Mostek real time clock support" 17 tristate "Mostek real time clock support"
18 depends on SPARC32
18 help 19 help
19 The Mostek RTC chip is used on all known Sun computers except 20 The Mostek RTC chip is used on all known Sun computers except
20 some JavaStations. For a JavaStation you need to say Y both here 21 some JavaStations. For a JavaStation you need to say Y both here
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 5157a2abc58..4b7079fdc10 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -185,7 +185,7 @@ static void jsfd_read(char *buf, unsigned long p, size_t togo) {
185 } 185 }
186} 186}
187 187
188static void jsfd_do_request(request_queue_t *q) 188static void jsfd_do_request(struct request_queue *q)
189{ 189{
190 struct request *req; 190 struct request *req;
191 191
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index d76e1a8cb93..c709dc8ad99 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -636,6 +636,8 @@ static int aac_cfg_open(struct inode *inode, struct file *file)
636static int aac_cfg_ioctl(struct inode *inode, struct file *file, 636static int aac_cfg_ioctl(struct inode *inode, struct file *file,
637 unsigned int cmd, unsigned long arg) 637 unsigned int cmd, unsigned long arg)
638{ 638{
639 if (!capable(CAP_SYS_ADMIN))
640 return -EPERM;
639 return aac_do_ioctl(file->private_data, cmd, (void __user *)arg); 641 return aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
640} 642}
641 643
@@ -689,6 +691,8 @@ static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
689 691
690static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg) 692static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg)
691{ 693{
694 if (!capable(CAP_SYS_ADMIN))
695 return -EPERM;
692 return aac_compat_do_ioctl((struct aac_dev *)file->private_data, cmd, arg); 696 return aac_compat_do_ioctl((struct aac_dev *)file->private_data, cmd, arg);
693} 697}
694#endif 698#endif
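
Both aacraid entry points now refuse the ioctl before parsing anything unless the caller holds CAP_SYS_ADMIN. A user-space sketch of that fail-closed pattern; capable() is stubbed here, and only the CAP_SYS_ADMIN value matches <linux/capability.h>:

    #include <errno.h>
    #include <stdio.h>

    #define CAP_SYS_ADMIN 21        /* same value as <linux/capability.h> */

    static int capable(int cap) { (void)cap; return 0; }    /* stub: unprivileged */

    static long fake_cfg_ioctl(unsigned int cmd, unsigned long arg)
    {
            if (!capable(CAP_SYS_ADMIN))
                    return -EPERM;  /* fail closed before touching cmd/arg */
            (void)cmd; (void)arg;
            return 0;
    }

    int main(void)
    {
            printf("ioctl -> %ld\n", fake_cfg_ioctl(0, 0)); /* -1, i.e. -EPERM */
            return 0;
    }
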
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index da63c544919..21c075d44db 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -654,7 +654,7 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
654static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate, 654static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
655 int bytes, int requeue) 655 int bytes, int requeue)
656{ 656{
657 request_queue_t *q = cmd->device->request_queue; 657 struct request_queue *q = cmd->device->request_queue;
658 struct request *req = cmd->request; 658 struct request *req = cmd->request;
659 unsigned long flags; 659 unsigned long flags;
660 660
@@ -818,7 +818,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
818{ 818{
819 int result = cmd->result; 819 int result = cmd->result;
820 int this_count = cmd->request_bufflen; 820 int this_count = cmd->request_bufflen;
821 request_queue_t *q = cmd->device->request_queue; 821 struct request_queue *q = cmd->device->request_queue;
822 struct request *req = cmd->request; 822 struct request *req = cmd->request;
823 int clear_errors = 1; 823 int clear_errors = 1;
824 struct scsi_sense_hdr sshdr; 824 struct scsi_sense_hdr sshdr;
@@ -1038,7 +1038,7 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
1038 return BLKPREP_KILL; 1038 return BLKPREP_KILL;
1039} 1039}
1040 1040
1041static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk, 1041static int scsi_issue_flush_fn(struct request_queue *q, struct gendisk *disk,
1042 sector_t *error_sector) 1042 sector_t *error_sector)
1043{ 1043{
1044 struct scsi_device *sdev = q->queuedata; 1044 struct scsi_device *sdev = q->queuedata;
@@ -1340,7 +1340,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
1340/* 1340/*
1341 * Kill a request for a dead device 1341 * Kill a request for a dead device
1342 */ 1342 */
1343static void scsi_kill_request(struct request *req, request_queue_t *q) 1343static void scsi_kill_request(struct request *req, struct request_queue *q)
1344{ 1344{
1345 struct scsi_cmnd *cmd = req->special; 1345 struct scsi_cmnd *cmd = req->special;
1346 struct scsi_device *sdev = cmd->device; 1346 struct scsi_device *sdev = cmd->device;
@@ -2119,7 +2119,7 @@ EXPORT_SYMBOL(scsi_target_resume);
2119int 2119int
2120scsi_internal_device_block(struct scsi_device *sdev) 2120scsi_internal_device_block(struct scsi_device *sdev)
2121{ 2121{
2122 request_queue_t *q = sdev->request_queue; 2122 struct request_queue *q = sdev->request_queue;
2123 unsigned long flags; 2123 unsigned long flags;
2124 int err = 0; 2124 int err = 0;
2125 2125
@@ -2159,7 +2159,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2159int 2159int
2160scsi_internal_device_unblock(struct scsi_device *sdev) 2160scsi_internal_device_unblock(struct scsi_device *sdev)
2161{ 2161{
2162 request_queue_t *q = sdev->request_queue; 2162 struct request_queue *q = sdev->request_queue;
2163 int err; 2163 int err;
2164 unsigned long flags; 2164 unsigned long flags;
2165 2165
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 424d557284a..e21c7142a3e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -814,7 +814,7 @@ static int sd_issue_flush(struct device *dev, sector_t *error_sector)
814 return ret; 814 return ret;
815} 815}
816 816
817static void sd_prepare_flush(request_queue_t *q, struct request *rq) 817static void sd_prepare_flush(struct request_queue *q, struct request *rq)
818{ 818{
819 memset(rq->cmd, 0, sizeof(rq->cmd)); 819 memset(rq->cmd, 0, sizeof(rq->cmd));
820 rq->cmd_type = REQ_TYPE_BLOCK_PC; 820 rq->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -1285,7 +1285,7 @@ got_data:
1285 */ 1285 */
1286 int hard_sector = sector_size; 1286 int hard_sector = sector_size;
1287 sector_t sz = (sdkp->capacity/2) * (hard_sector/256); 1287 sector_t sz = (sdkp->capacity/2) * (hard_sector/256);
1288 request_queue_t *queue = sdp->request_queue; 1288 struct request_queue *queue = sdp->request_queue;
1289 sector_t mb = sz; 1289 sector_t mb = sz;
1290 1290
1291 blk_queue_hardsect_size(queue, hard_sector); 1291 blk_queue_hardsect_size(queue, hard_sector);
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index e7b6a7fde1c..902eb11ffe8 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -624,7 +624,7 @@ static void get_sectorsize(struct scsi_cd *cd)
624 unsigned char *buffer; 624 unsigned char *buffer;
625 int the_result, retries = 3; 625 int the_result, retries = 3;
626 int sector_size; 626 int sector_size;
627 request_queue_t *queue; 627 struct request_queue *queue;
628 628
629 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); 629 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
630 if (!buffer) 630 if (!buffer)
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 0b3ec38ae61..2f5a5ac1b27 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2650,8 +2650,9 @@ static int __devinit serial8250_probe(struct platform_device *dev)
2650 ret = serial8250_register_port(&port); 2650 ret = serial8250_register_port(&port);
2651 if (ret < 0) { 2651 if (ret < 0) {
2652 dev_err(&dev->dev, "unable to register port at index %d " 2652 dev_err(&dev->dev, "unable to register port at index %d "
2653 "(IO%lx MEM%lx IRQ%d): %d\n", i, 2653 "(IO%lx MEM%llx IRQ%d): %d\n", i,
2654 p->iobase, p->mapbase, p->irq, ret); 2654 p->iobase, (unsigned long long)p->mapbase,
2655 p->irq, ret);
2655 } 2656 }
2656 } 2657 }
2657 return 0; 2658 return 0;
diff --git a/drivers/serial/8250_early.c b/drivers/serial/8250_early.c
index 947c20507e1..150cad5c2eb 100644
--- a/drivers/serial/8250_early.c
+++ b/drivers/serial/8250_early.c
@@ -151,8 +151,9 @@ static int __init parse_options(struct early_serial8250_device *device, char *op
151#else 151#else
152 port->membase = ioremap(port->mapbase, 64); 152 port->membase = ioremap(port->mapbase, 64);
153 if (!port->membase) { 153 if (!port->membase) {
154 printk(KERN_ERR "%s: Couldn't ioremap 0x%lx\n", 154 printk(KERN_ERR "%s: Couldn't ioremap 0x%llx\n",
155 __FUNCTION__, port->mapbase); 155 __FUNCTION__,
156 (unsigned long long)port->mapbase);
156 return -ENOMEM; 157 return -ENOMEM;
157 } 158 }
158#endif 159#endif
@@ -175,9 +176,10 @@ static int __init parse_options(struct early_serial8250_device *device, char *op
175 device->baud); 176 device->baud);
176 } 177 }
177 178
178 printk(KERN_INFO "Early serial console at %s 0x%lx (options '%s')\n", 179 printk(KERN_INFO "Early serial console at %s 0x%llx (options '%s')\n",
179 mmio ? "MMIO" : "I/O port", 180 mmio ? "MMIO" : "I/O port",
180 mmio ? port->mapbase : (unsigned long) port->iobase, 181 mmio ? (unsigned long long) port->mapbase
182 : (unsigned long long) port->iobase,
181 device->options); 183 device->options);
182 return 0; 184 return 0;
183} 185}
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index 9c57486c2e7..030a6063541 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -626,7 +626,7 @@ static int uart_get_info(struct uart_state *state,
626 tmp.hub6 = port->hub6; 626 tmp.hub6 = port->hub6;
627 tmp.io_type = port->iotype; 627 tmp.io_type = port->iotype;
628 tmp.iomem_reg_shift = port->regshift; 628 tmp.iomem_reg_shift = port->regshift;
629 tmp.iomem_base = (void *)port->mapbase; 629 tmp.iomem_base = (void *)(unsigned long)port->mapbase;
630 630
631 if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) 631 if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
632 return -EFAULT; 632 return -EFAULT;
@@ -1666,10 +1666,11 @@ static int uart_line_info(char *buf, struct uart_driver *drv, int i)
1666 return 0; 1666 return 0;
1667 1667
1668 mmio = port->iotype >= UPIO_MEM; 1668 mmio = port->iotype >= UPIO_MEM;
1669 ret = sprintf(buf, "%d: uart:%s %s%08lX irq:%d", 1669 ret = sprintf(buf, "%d: uart:%s %s%08llX irq:%d",
1670 port->line, uart_type(port), 1670 port->line, uart_type(port),
1671 mmio ? "mmio:0x" : "port:", 1671 mmio ? "mmio:0x" : "port:",
1672 mmio ? port->mapbase : (unsigned long) port->iobase, 1672 mmio ? (unsigned long long)port->mapbase
1673 : (unsigned long long) port->iobase,
1673 port->irq); 1674 port->irq);
1674 1675
1675 if (port->type == PORT_UNKNOWN) { 1676 if (port->type == PORT_UNKNOWN) {
@@ -2069,7 +2070,7 @@ uart_report_port(struct uart_driver *drv, struct uart_port *port)
2069 case UPIO_TSI: 2070 case UPIO_TSI:
2070 case UPIO_DWAPB: 2071 case UPIO_DWAPB:
2071 snprintf(address, sizeof(address), 2072 snprintf(address, sizeof(address),
2072 "MMIO 0x%lx", port->mapbase); 2073 "MMIO 0x%llx", (unsigned long long)port->mapbase);
2073 break; 2074 break;
2074 default: 2075 default:
2075 strlcpy(address, "*unknown*", sizeof(address)); 2076 strlcpy(address, "*unknown*", sizeof(address));
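
All three serial hunks fix the same format-string problem: port->mapbase is resource_size_t, which is 32 or 64 bits wide depending on configuration (CONFIG_RESOURCES_64BIT in this era), so the one portable recipe is a cast to unsigned long long printed with %llx. A small sketch:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t resource_size_t;       /* could equally be uint32_t */

    int main(void)
    {
            resource_size_t mapbase = 0xffe04500;

            /* Works whichever width resource_size_t has. */
            printf("MMIO 0x%llx\n", (unsigned long long)mapbase);
            return 0;
    }
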
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 564cc9b5182..a7231d171bd 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1571,7 +1571,14 @@ config FB_PM3
1571 1571
1572config FB_AU1100 1572config FB_AU1100
1573 bool "Au1100 LCD Driver" 1573 bool "Au1100 LCD Driver"
1574 depends on (FB = y) && EXPERIMENTAL && PCI && MIPS && MIPS_PB1100=y 1574 depends on (FB = y) && MIPS && SOC_AU1100
1575 select FB_CFB_FILLRECT
1576 select FB_CFB_COPYAREA
1577 select FB_CFB_IMAGEBLIT
1578 help
1579 This is the framebuffer driver for the AMD Au1100 SOC. It can drive
1580 various panels and CRTs by passing in kernel cmd line option
1581 au1100fb:panel=<name>.
1575 1582
1576config FB_AU1200 1583config FB_AU1200
1577 bool "Au1200 LCD Driver" 1584 bool "Au1200 LCD Driver"
diff --git a/fs/bio.c b/fs/bio.c
index 0d2c2d38b7b..29a44c1b64c 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -230,7 +230,7 @@ void bio_put(struct bio *bio)
230 } 230 }
231} 231}
232 232
233inline int bio_phys_segments(request_queue_t *q, struct bio *bio) 233inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
234{ 234{
235 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) 235 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
236 blk_recount_segments(q, bio); 236 blk_recount_segments(q, bio);
@@ -238,7 +238,7 @@ inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
238 return bio->bi_phys_segments; 238 return bio->bi_phys_segments;
239} 239}
240 240
241inline int bio_hw_segments(request_queue_t *q, struct bio *bio) 241inline int bio_hw_segments(struct request_queue *q, struct bio *bio)
242{ 242{
243 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) 243 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
244 blk_recount_segments(q, bio); 244 blk_recount_segments(q, bio);
@@ -257,7 +257,7 @@ inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
257 */ 257 */
258void __bio_clone(struct bio *bio, struct bio *bio_src) 258void __bio_clone(struct bio *bio, struct bio *bio_src)
259{ 259{
260 request_queue_t *q = bdev_get_queue(bio_src->bi_bdev); 260 struct request_queue *q = bdev_get_queue(bio_src->bi_bdev);
261 261
262 memcpy(bio->bi_io_vec, bio_src->bi_io_vec, 262 memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
263 bio_src->bi_max_vecs * sizeof(struct bio_vec)); 263 bio_src->bi_max_vecs * sizeof(struct bio_vec));
@@ -303,7 +303,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
303 */ 303 */
304int bio_get_nr_vecs(struct block_device *bdev) 304int bio_get_nr_vecs(struct block_device *bdev)
305{ 305{
306 request_queue_t *q = bdev_get_queue(bdev); 306 struct request_queue *q = bdev_get_queue(bdev);
307 int nr_pages; 307 int nr_pages;
308 308
309 nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT; 309 nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -315,7 +315,7 @@ int bio_get_nr_vecs(struct block_device *bdev)
315 return nr_pages; 315 return nr_pages;
316} 316}
317 317
318static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page 318static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
319 *page, unsigned int len, unsigned int offset, 319 *page, unsigned int len, unsigned int offset,
320 unsigned short max_sectors) 320 unsigned short max_sectors)
321{ 321{
@@ -425,7 +425,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
425 * smaller than PAGE_SIZE, so it is always possible to add a single 425 * smaller than PAGE_SIZE, so it is always possible to add a single
426 * page to an empty bio. This should only be used by REQ_PC bios. 426 * page to an empty bio. This should only be used by REQ_PC bios.
427 */ 427 */
428int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page, 428int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
429 unsigned int len, unsigned int offset) 429 unsigned int len, unsigned int offset)
430{ 430{
431 return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors); 431 return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
@@ -523,7 +523,7 @@ int bio_uncopy_user(struct bio *bio)
523 * to/from kernel pages as necessary. Must be paired with 523 * to/from kernel pages as necessary. Must be paired with
524 * call bio_uncopy_user() on io completion. 524 * call bio_uncopy_user() on io completion.
525 */ 525 */
526struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr, 526struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
527 unsigned int len, int write_to_vm) 527 unsigned int len, int write_to_vm)
528{ 528{
529 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 529 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -600,7 +600,7 @@ out_bmd:
600 return ERR_PTR(ret); 600 return ERR_PTR(ret);
601} 601}
602 602
603static struct bio *__bio_map_user_iov(request_queue_t *q, 603static struct bio *__bio_map_user_iov(struct request_queue *q,
604 struct block_device *bdev, 604 struct block_device *bdev,
605 struct sg_iovec *iov, int iov_count, 605 struct sg_iovec *iov, int iov_count,
606 int write_to_vm) 606 int write_to_vm)
@@ -712,7 +712,7 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
712 712
713/** 713/**
714 * bio_map_user - map user address into bio 714 * bio_map_user - map user address into bio
715 * @q: the request_queue_t for the bio 715 * @q: the struct request_queue for the bio
716 * @bdev: destination block device 716 * @bdev: destination block device
717 * @uaddr: start of user address 717 * @uaddr: start of user address
718 * @len: length in bytes 718 * @len: length in bytes
@@ -721,7 +721,7 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
721 * Map the user space address into a bio suitable for io to a block 721 * Map the user space address into a bio suitable for io to a block
722 * device. Returns an error pointer in case of error. 722 * device. Returns an error pointer in case of error.
723 */ 723 */
724struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev, 724struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
725 unsigned long uaddr, unsigned int len, int write_to_vm) 725 unsigned long uaddr, unsigned int len, int write_to_vm)
726{ 726{
727 struct sg_iovec iov; 727 struct sg_iovec iov;
@@ -734,7 +734,7 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
734 734
735/** 735/**
736 * bio_map_user_iov - map user sg_iovec table into bio 736 * bio_map_user_iov - map user sg_iovec table into bio
737 * @q: the request_queue_t for the bio 737 * @q: the struct request_queue for the bio
738 * @bdev: destination block device 738 * @bdev: destination block device
739 * @iov: the iovec. 739 * @iov: the iovec.
740 * @iov_count: number of elements in the iovec 740 * @iov_count: number of elements in the iovec
@@ -743,7 +743,7 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
743 * Map the user space address into a bio suitable for io to a block 743 * Map the user space address into a bio suitable for io to a block
744 * device. Returns an error pointer in case of error. 744 * device. Returns an error pointer in case of error.
745 */ 745 */
746struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev, 746struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
747 struct sg_iovec *iov, int iov_count, 747 struct sg_iovec *iov, int iov_count,
748 int write_to_vm) 748 int write_to_vm)
749{ 749{
@@ -808,7 +808,7 @@ static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
808} 808}
809 809
810 810
811static struct bio *__bio_map_kern(request_queue_t *q, void *data, 811static struct bio *__bio_map_kern(struct request_queue *q, void *data,
812 unsigned int len, gfp_t gfp_mask) 812 unsigned int len, gfp_t gfp_mask)
813{ 813{
814 unsigned long kaddr = (unsigned long)data; 814 unsigned long kaddr = (unsigned long)data;
@@ -847,7 +847,7 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
847 847
848/** 848/**
849 * bio_map_kern - map kernel address into bio 849 * bio_map_kern - map kernel address into bio
850 * @q: the request_queue_t for the bio 850 * @q: the struct request_queue for the bio
851 * @data: pointer to buffer to map 851 * @data: pointer to buffer to map
852 * @len: length in bytes 852 * @len: length in bytes
853 * @gfp_mask: allocation flags for bio allocation 853 * @gfp_mask: allocation flags for bio allocation
@@ -855,7 +855,7 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
855 * Map the kernel address into a bio suitable for io to a block 855 * Map the kernel address into a bio suitable for io to a block
856 * device. Returns an error pointer in case of error. 856 * device. Returns an error pointer in case of error.
857 */ 857 */
858struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len, 858struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
859 gfp_t gfp_mask) 859 gfp_t gfp_mask)
860{ 860{
861 struct bio *bio; 861 struct bio *bio;
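
bio_map_kern() "returns an error pointer in case of error": the same ERR_PTR convention the ucc_geth hunks test with IS_ERR_VALUE(). A user-space mirror of the helpers from <linux/err.h>:

    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO 4095
    #define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); }

    int main(void)
    {
            void *bio = ERR_PTR(-ENOMEM);   /* what a failed mapping returns */

            if (IS_ERR(bio))
                    printf("mapping failed: %ld\n", PTR_ERR(bio));  /* -12 */
            return 0;
    }
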
diff --git a/fs/open.c b/fs/open.c
index a6b054edacb..e27c205364d 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -403,7 +403,7 @@ asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len)
403 if (inode->i_op && inode->i_op->fallocate) 403 if (inode->i_op && inode->i_op->fallocate)
404 ret = inode->i_op->fallocate(inode, mode, offset, len); 404 ret = inode->i_op->fallocate(inode, mode, offset, len);
405 else 405 else
406 ret = -ENOSYS; 406 ret = -EOPNOTSUPP;
407 407
408out_fput: 408out_fput:
409 fput(file); 409 fput(file);
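
The errno swap is visible to callers: -ENOSYS conventionally means the syscall does not exist at all, while -EOPNOTSUPP means this particular filesystem cannot perform the operation. A hypothetical user-space fallback that relies on the distinction:

    #include <errno.h>
    #include <stdio.h>

    static int do_fallocate_stub(void) { return -EOPNOTSUPP; }      /* stand-in */

    int main(void)
    {
            int ret = do_fallocate_stub();

            if (ret == -ENOSYS)
                    printf("kernel too old: never retry fallocate\n");
            else if (ret == -EOPNOTSUPP)
                    printf("fs lacks fallocate: fall back to writing zeroes\n");
            return 0;
    }
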
diff --git a/include/asm-arm/arch-omap/mailbox.h b/include/asm-arm/arch-omap/mailbox.h
index 4bf0909461f..7cbed9332e1 100644
--- a/include/asm-arm/arch-omap/mailbox.h
+++ b/include/asm-arm/arch-omap/mailbox.h
@@ -37,7 +37,7 @@ struct omap_mbox_ops {
37 37
38struct omap_mbox_queue { 38struct omap_mbox_queue {
39 spinlock_t lock; 39 spinlock_t lock;
40 request_queue_t *queue; 40 struct request_queue *queue;
41 struct work_struct work; 41 struct work_struct work;
42 int (*callback)(void *); 42 int (*callback)(void *);
43 struct omap_mbox *mbox; 43 struct omap_mbox *mbox;
diff --git a/include/asm-xtensa/io.h b/include/asm-xtensa/io.h
index 31ffc3f119c..0faa614d969 100644
--- a/include/asm-xtensa/io.h
+++ b/include/asm-xtensa/io.h
@@ -13,6 +13,7 @@
 
 #ifdef __KERNEL__
 #include <asm/byteorder.h>
+#include <asm/page.h>
 
 #include <linux/types.h>
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 695e34964cb..a1c96d9ee72 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -37,7 +37,7 @@
 struct scsi_ioctl_command;
 
 struct request_queue;
-typedef struct request_queue request_queue_t;
+typedef struct request_queue request_queue_t __deprecated;
 struct elevator_queue;
 typedef struct elevator_queue elevator_t;
 struct request_pm_state;
@@ -233,7 +233,7 @@ struct request {
 	struct list_head queuelist;
 	struct list_head donelist;
 
-	request_queue_t *q;
+	struct request_queue *q;
 
 	unsigned int cmd_flags;
 	enum rq_cmd_type_bits cmd_type;
@@ -337,15 +337,15 @@ struct request_pm_state
 
 #include <linux/elevator.h>
 
-typedef void (request_fn_proc) (request_queue_t *q);
-typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
-typedef int (prep_rq_fn) (request_queue_t *, struct request *);
-typedef void (unplug_fn) (request_queue_t *);
+typedef void (request_fn_proc) (struct request_queue *q);
+typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
+typedef int (prep_rq_fn) (struct request_queue *, struct request *);
+typedef void (unplug_fn) (struct request_queue *);
 
 struct bio_vec;
-typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
-typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
-typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
+typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
+typedef int (issue_flush_fn) (struct request_queue *, struct gendisk *, sector_t *);
+typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
 
 enum blk_queue_state {
@@ -626,13 +626,13 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
-extern void blk_queue_bounce(request_queue_t *q, struct bio **bio);
+extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
 #else
 static inline int init_emergency_isa_pool(void)
 {
 	return 0;
 }
-static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
+static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 {
 }
 #endif /* CONFIG_MMU */
@@ -646,14 +646,14 @@ extern void blk_unregister_queue(struct gendisk *disk);
 extern void register_disk(struct gendisk *dev);
 extern void generic_make_request(struct bio *bio);
 extern void blk_put_request(struct request *);
-extern void __blk_put_request(request_queue_t *, struct request *);
+extern void __blk_put_request(struct request_queue *, struct request *);
 extern void blk_end_sync_rq(struct request *rq, int error);
-extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
-extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
-extern void blk_requeue_request(request_queue_t *, struct request *);
-extern void blk_plug_device(request_queue_t *);
-extern int blk_remove_plug(request_queue_t *);
-extern void blk_recount_segments(request_queue_t *, struct bio *);
+extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
+extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
+extern void blk_requeue_request(struct request_queue *, struct request *);
+extern void blk_plug_device(struct request_queue *);
+extern int blk_remove_plug(struct request_queue *);
+extern void blk_recount_segments(struct request_queue *, struct bio *);
 extern int scsi_cmd_ioctl(struct file *, struct request_queue *,
 			  struct gendisk *, unsigned int, void __user *);
 extern int sg_scsi_ioctl(struct file *, struct request_queue *,
@@ -662,14 +662,15 @@ extern int sg_scsi_ioctl(struct file *, struct request_queue *,
 /*
  * Temporary export, until SCSI gets fixed up.
  */
-extern int ll_back_merge_fn(request_queue_t *, struct request *, struct bio *);
+extern int ll_back_merge_fn(struct request_queue *, struct request *,
+			    struct bio *);
 
 /*
  * A queue has just exitted congestion. Note this in the global counter of
  * congested queues, and wake up anyone who was waiting for requests to be
  * put back.
  */
-static inline void blk_clear_queue_congested(request_queue_t *q, int rw)
+static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
 {
 	clear_bdi_congested(&q->backing_dev_info, rw);
 }
@@ -678,29 +679,29 @@ static inline void blk_clear_queue_congested(request_queue_t *q, int rw)
  * A queue has just entered congestion. Flag that in the queue's VM-visible
  * state flags and increment the global gounter of congested queues.
  */
-static inline void blk_set_queue_congested(request_queue_t *q, int rw)
+static inline void blk_set_queue_congested(struct request_queue *q, int rw)
 {
 	set_bdi_congested(&q->backing_dev_info, rw);
 }
 
-extern void blk_start_queue(request_queue_t *q);
-extern void blk_stop_queue(request_queue_t *q);
+extern void blk_start_queue(struct request_queue *q);
+extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
-extern void __blk_stop_queue(request_queue_t *q);
-extern void blk_run_queue(request_queue_t *);
-extern void blk_start_queueing(request_queue_t *);
-extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long);
+extern void __blk_stop_queue(struct request_queue *q);
+extern void blk_run_queue(struct request_queue *);
+extern void blk_start_queueing(struct request_queue *);
+extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
 extern int blk_rq_unmap_user(struct bio *);
-extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
-extern int blk_rq_map_user_iov(request_queue_t *, struct request *,
+extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
+extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
 			       struct sg_iovec *, int, unsigned int);
-extern int blk_execute_rq(request_queue_t *, struct gendisk *,
+extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
-extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
+extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 				  struct request *, int, rq_end_io_fn *);
 extern int blk_verify_command(unsigned char *, int);
 
-static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
+static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
 	return bdev->bd_disk->queue;
 }
@@ -749,41 +750,41 @@ static inline void blkdev_dequeue_request(struct request *req)
 /*
  * Access functions for manipulating queue properties
  */
-extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
+extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
 					spinlock_t *lock, int node_id);
-extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
-extern void blk_cleanup_queue(request_queue_t *);
-extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
-extern void blk_queue_bounce_limit(request_queue_t *, u64);
-extern void blk_queue_max_sectors(request_queue_t *, unsigned int);
-extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short);
-extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
-extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
-extern void blk_queue_hardsect_size(request_queue_t *, unsigned short);
-extern void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b);
-extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
-extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
-extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
-extern void blk_queue_dma_alignment(request_queue_t *, int);
-extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *);
+extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+extern void blk_cleanup_queue(struct request_queue *);
+extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
+extern void blk_queue_bounce_limit(struct request_queue *, u64);
+extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
+extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
+extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
+extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
+extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
+extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
+extern void blk_queue_dma_alignment(struct request_queue *, int);
+extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
-extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
-extern int blk_do_ordered(request_queue_t *, struct request **);
-extern unsigned blk_ordered_cur_seq(request_queue_t *);
+extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
+extern void blk_queue_issue_flush_fn(struct request_queue *, issue_flush_fn *);
+extern int blk_do_ordered(struct request_queue *, struct request **);
+extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
-extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
+extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
 
-extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
+extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
-extern void generic_unplug_device(request_queue_t *);
-extern void __generic_unplug_device(request_queue_t *);
+extern void generic_unplug_device(struct request_queue *);
+extern void __generic_unplug_device(struct request_queue *);
 extern long nr_blockdev_pages(void);
 
-int blk_get_queue(request_queue_t *);
-request_queue_t *blk_alloc_queue(gfp_t);
-request_queue_t *blk_alloc_queue_node(gfp_t, int);
-extern void blk_put_queue(request_queue_t *);
+int blk_get_queue(struct request_queue *);
+struct request_queue *blk_alloc_queue(gfp_t);
+struct request_queue *blk_alloc_queue_node(gfp_t, int);
+extern void blk_put_queue(struct request_queue *);
 
 /*
  * tag stuff
@@ -791,13 +792,13 @@ extern void blk_put_queue(request_queue_t *);
 #define blk_queue_tag_depth(q)	((q)->queue_tags->busy)
 #define blk_queue_tag_queue(q)	((q)->queue_tags->busy < (q)->queue_tags->max_depth)
 #define blk_rq_tagged(rq)	((rq)->cmd_flags & REQ_QUEUED)
-extern int blk_queue_start_tag(request_queue_t *, struct request *);
-extern struct request *blk_queue_find_tag(request_queue_t *, int);
-extern void blk_queue_end_tag(request_queue_t *, struct request *);
-extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *);
-extern void blk_queue_free_tags(request_queue_t *);
-extern int blk_queue_resize_tags(request_queue_t *, int);
-extern void blk_queue_invalidate_tags(request_queue_t *);
+extern int blk_queue_start_tag(struct request_queue *, struct request *);
+extern struct request *blk_queue_find_tag(struct request_queue *, int);
+extern void blk_queue_end_tag(struct request_queue *, struct request *);
+extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
+extern void blk_queue_free_tags(struct request_queue *);
+extern int blk_queue_resize_tags(struct request_queue *, int);
+extern void blk_queue_invalidate_tags(struct request_queue *);
 extern struct blk_queue_tag *blk_init_tags(int);
 extern void blk_free_tags(struct blk_queue_tag *);
 
@@ -809,7 +810,7 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 	return bqt->tag_index[tag];
 }
 
-extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
+extern void blk_rq_bio_prep(struct request_queue *, struct request *, struct bio *);
 extern int blkdev_issue_flush(struct block_device *, sector_t *);
 
 #define MAX_PHYS_SEGMENTS 128
@@ -821,7 +822,7 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *);
 
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
-static inline int queue_hardsect_size(request_queue_t *q)
+static inline int queue_hardsect_size(struct request_queue *q)
 {
 	int retval = 512;
 
@@ -836,7 +837,7 @@ static inline int bdev_hardsect_size(struct block_device *bdev)
 	return queue_hardsect_size(bdev_get_queue(bdev));
 }
 
-static inline int queue_dma_alignment(request_queue_t *q)
+static inline int queue_dma_alignment(struct request_queue *q)
 {
 	int retval = 511;
 
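
Reviewer note: annotating the typedef instead of deleting it lets the tree convert to struct request_queue piecemeal — every remaining request_queue_t user keeps compiling but now emits a warning. A sketch of the effect, assuming the usual kernel mapping of __deprecated onto the GCC attribute (the driver function names are hypothetical):

    #define __deprecated __attribute__((deprecated))

    typedef struct request_queue request_queue_t __deprecated;

    static void old_style(request_queue_t *q);
    /* gcc: warning: 'request_queue_t' is deprecated */

    static void new_style(struct request_queue *q);    /* warning-free */
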
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 3680ff9a30e..90874a5d7d7 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -144,7 +144,7 @@ struct blk_user_trace_setup {
 
 #if defined(CONFIG_BLK_DEV_IO_TRACE)
 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
-extern void blk_trace_shutdown(request_queue_t *);
+extern void blk_trace_shutdown(struct request_queue *);
 extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
 
 /**
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index e88fcbc77f8..e8f42133a61 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -5,29 +5,29 @@
 
 #ifdef CONFIG_BLOCK
 
-typedef int (elevator_merge_fn) (request_queue_t *, struct request **,
+typedef int (elevator_merge_fn) (struct request_queue *, struct request **,
 				 struct bio *);
 
-typedef void (elevator_merge_req_fn) (request_queue_t *, struct request *, struct request *);
+typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *);
 
-typedef void (elevator_merged_fn) (request_queue_t *, struct request *, int);
+typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int);
 
-typedef int (elevator_allow_merge_fn) (request_queue_t *, struct request *, struct bio *);
+typedef int (elevator_allow_merge_fn) (struct request_queue *, struct request *, struct bio *);
 
-typedef int (elevator_dispatch_fn) (request_queue_t *, int);
+typedef int (elevator_dispatch_fn) (struct request_queue *, int);
 
-typedef void (elevator_add_req_fn) (request_queue_t *, struct request *);
-typedef int (elevator_queue_empty_fn) (request_queue_t *);
-typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *);
-typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *);
-typedef int (elevator_may_queue_fn) (request_queue_t *, int);
+typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
+typedef int (elevator_queue_empty_fn) (struct request_queue *);
+typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
+typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
+typedef int (elevator_may_queue_fn) (struct request_queue *, int);
 
-typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, gfp_t);
+typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, gfp_t);
 typedef void (elevator_put_req_fn) (struct request *);
-typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *);
-typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *);
+typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
+typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
 
-typedef void *(elevator_init_fn) (request_queue_t *);
+typedef void *(elevator_init_fn) (struct request_queue *);
 typedef void (elevator_exit_fn) (elevator_t *);
 
 struct elevator_ops
@@ -94,27 +94,27 @@ struct elevator_queue
 /*
  * block elevator interface
  */
-extern void elv_dispatch_sort(request_queue_t *, struct request *);
-extern void elv_dispatch_add_tail(request_queue_t *, struct request *);
-extern void elv_add_request(request_queue_t *, struct request *, int, int);
-extern void __elv_add_request(request_queue_t *, struct request *, int, int);
-extern void elv_insert(request_queue_t *, struct request *, int);
-extern int elv_merge(request_queue_t *, struct request **, struct bio *);
-extern void elv_merge_requests(request_queue_t *, struct request *,
+extern void elv_dispatch_sort(struct request_queue *, struct request *);
+extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
+extern void elv_add_request(struct request_queue *, struct request *, int, int);
+extern void __elv_add_request(struct request_queue *, struct request *, int, int);
+extern void elv_insert(struct request_queue *, struct request *, int);
+extern int elv_merge(struct request_queue *, struct request **, struct bio *);
+extern void elv_merge_requests(struct request_queue *, struct request *,
 			       struct request *);
-extern void elv_merged_request(request_queue_t *, struct request *, int);
-extern void elv_dequeue_request(request_queue_t *, struct request *);
-extern void elv_requeue_request(request_queue_t *, struct request *);
-extern int elv_queue_empty(request_queue_t *);
+extern void elv_merged_request(struct request_queue *, struct request *, int);
+extern void elv_dequeue_request(struct request_queue *, struct request *);
+extern void elv_requeue_request(struct request_queue *, struct request *);
+extern int elv_queue_empty(struct request_queue *);
 extern struct request *elv_next_request(struct request_queue *q);
-extern struct request *elv_former_request(request_queue_t *, struct request *);
-extern struct request *elv_latter_request(request_queue_t *, struct request *);
-extern int elv_register_queue(request_queue_t *q);
-extern void elv_unregister_queue(request_queue_t *q);
-extern int elv_may_queue(request_queue_t *, int);
-extern void elv_completed_request(request_queue_t *, struct request *);
-extern int elv_set_request(request_queue_t *, struct request *, gfp_t);
-extern void elv_put_request(request_queue_t *, struct request *);
+extern struct request *elv_former_request(struct request_queue *, struct request *);
+extern struct request *elv_latter_request(struct request_queue *, struct request *);
+extern int elv_register_queue(struct request_queue *q);
+extern void elv_unregister_queue(struct request_queue *q);
+extern int elv_may_queue(struct request_queue *, int);
+extern void elv_completed_request(struct request_queue *, struct request *);
+extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
+extern void elv_put_request(struct request_queue *, struct request *);
 
 /*
  * io scheduler registration
@@ -125,18 +125,18 @@ extern void elv_unregister(struct elevator_type *);
 /*
  * io scheduler sysfs switching
  */
-extern ssize_t elv_iosched_show(request_queue_t *, char *);
-extern ssize_t elv_iosched_store(request_queue_t *, const char *, size_t);
+extern ssize_t elv_iosched_show(struct request_queue *, char *);
+extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
 
-extern int elevator_init(request_queue_t *, char *);
+extern int elevator_init(struct request_queue *, char *);
 extern void elevator_exit(elevator_t *);
 extern int elv_rq_merge_ok(struct request *, struct bio *);
 
 /*
  * Helper functions.
  */
-extern struct request *elv_rb_former_request(request_queue_t *, struct request *);
-extern struct request *elv_rb_latter_request(request_queue_t *, struct request *);
+extern struct request *elv_rb_former_request(struct request_queue *, struct request *);
+extern struct request *elv_rb_latter_request(struct request_queue *, struct request *);
 
 /*
  * rb support functions.
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 5f5daad8bc5..d71d0121b7f 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -555,7 +555,7 @@ typedef struct ide_drive_s {
 	char		name[4];	/* drive name, such as "hda" */
 	char		driver_req[10];	/* requests specific driver */
 
-	request_queue_t		*queue;	/* request queue */
+	struct request_queue	*queue;	/* request queue */
 
 	struct request		*rq;	/* current request */
 	struct ide_drive_s	*next;	/* circular list of hwgroup drives */
@@ -1206,7 +1206,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
 extern int ide_spin_wait_hwgroup(ide_drive_t *);
 extern void ide_timer_expiry(unsigned long);
 extern irqreturn_t ide_intr(int irq, void *dev_id);
-extern void do_ide_request(request_queue_t *);
+extern void do_ide_request(struct request_queue *);
 
 void ide_init_disk(struct gendisk *, ide_drive_t *);
 
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 0b99b31f017..26a0a103898 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -63,7 +63,7 @@ struct loop_device {
 	struct task_struct	*lo_thread;
 	wait_queue_head_t	lo_event;
 
-	request_queue_t		*lo_queue;
+	struct request_queue	*lo_queue;
 	struct gendisk		*lo_disk;
 	struct list_head	lo_list;
 };
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index cbabb9c675c..42d3278c6b5 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1225,6 +1225,10 @@
 #define PCI_DEVICE_ID_NVIDIA_NVENET_25		0x054D
 #define PCI_DEVICE_ID_NVIDIA_NVENET_26		0x054E
 #define PCI_DEVICE_ID_NVIDIA_NVENET_27		0x054F
+#define PCI_DEVICE_ID_NVIDIA_NVENET_28		0x07DC
+#define PCI_DEVICE_ID_NVIDIA_NVENET_29		0x07DD
+#define PCI_DEVICE_ID_NVIDIA_NVENET_30		0x07DE
+#define PCI_DEVICE_ID_NVIDIA_NVENET_31		0x07DF
 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE	0x0560
 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE	0x056C
 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE	0x0759
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index 28ac632b42d..dcb729244f4 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -227,7 +227,7 @@ struct mddev_s
 	unsigned int			safemode_delay;
 	struct timer_list		safemode_timer;
 	atomic_t			writes_pending;
-	request_queue_t			*queue;	/* for plugging ... */
+	struct request_queue		*queue;	/* for plugging ... */
 
 	atomic_t			write_behind; /* outstanding async IO */
 	unsigned int			max_write_behind; /* 0 = sync */
@@ -265,7 +265,7 @@ struct mdk_personality
 	int level;
 	struct list_head list;
 	struct module *owner;
-	int (*make_request)(request_queue_t *q, struct bio *bio);
+	int (*make_request)(struct request_queue *q, struct bio *bio);
 	int (*run)(mddev_t *mddev);
 	int (*stop)(mddev_t *mddev);
 	void (*status)(struct seq_file *seq, mddev_t *mddev);
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 8518fa2a6f8..afe0f6d9b9b 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -20,7 +20,7 @@
 struct plat_serial8250_port {
 	unsigned long	iobase;		/* io base address */
 	void __iomem	*membase;	/* ioremap cookie or NULL */
-	unsigned long	mapbase;	/* resource base */
+	resource_size_t	mapbase;	/* resource base */
 	unsigned int	irq;		/* interrupt number */
 	unsigned int	uartclk;	/* UART clock rate */
 	unsigned char	regshift;	/* register shift */
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 773d8d8828a..09d17b06bf0 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -288,7 +288,7 @@ struct uart_port {
 	const struct uart_ops	*ops;
 	unsigned int		custom_divisor;
 	unsigned int		line;		/* port index */
-	unsigned long		mapbase;	/* for ioremap */
+	resource_size_t		mapbase;	/* for ioremap */
 	struct device		*dev;		/* parent device */
 	unsigned char		hub6;		/* this should be in the 8250 driver */
 	unsigned char		unused[3];
diff --git a/include/scsi/sd.h b/include/scsi/sd.h
index 5261488e110..78583fee0ab 100644
--- a/include/scsi/sd.h
+++ b/include/scsi/sd.h
@@ -57,7 +57,7 @@ static int sd_resume(struct device *dev);
 static void sd_rescan(struct device *);
 static int sd_init_command(struct scsi_cmnd *);
 static int sd_issue_flush(struct device *, sector_t *);
-static void sd_prepare_flush(request_queue_t *, struct request *);
+static void sd_prepare_flush(struct request_queue *, struct request *);
 static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
 static void scsi_disk_release(struct class_device *cdev);
 static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index b18fc2ff9ff..23985a278bb 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -139,12 +139,14 @@ static void debugfs_ul_set(void *data, u64 val)
 	*(unsigned long *)data = val;
 }
 
+#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
 static void debugfs_ul_set_MAX_STACK_TRACE_DEPTH(void *data, u64 val)
 {
 	*(unsigned long *)data =
 		val < MAX_STACK_TRACE_DEPTH ?
 		val : MAX_STACK_TRACE_DEPTH;
 }
+#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
 
 static u64 debugfs_ul_get(void *data)
 {
@@ -159,6 +161,7 @@ static struct dentry *debugfs_create_ul(const char *name, mode_t mode,
 	return debugfs_create_file(name, mode, parent, value, &fops_ul);
 }
 
+#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
 DEFINE_SIMPLE_ATTRIBUTE(fops_ul_MAX_STACK_TRACE_DEPTH, debugfs_ul_get,
 			debugfs_ul_set_MAX_STACK_TRACE_DEPTH, "%llu\n");
 
@@ -169,6 +172,7 @@ static struct dentry *debugfs_create_ul_MAX_STACK_TRACE_DEPTH(
 	return debugfs_create_file(name, mode, parent, value,
 				   &fops_ul_MAX_STACK_TRACE_DEPTH);
 }
+#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
 
 static void debugfs_atomic_t_set(void *data, u64 val)
 {
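
Reviewer note: the MAX_STACK_TRACE_DEPTH helpers are only called from code behind CONFIG_FAULT_INJECTION_STACKTRACE_FILTER, so building without that option previously produced "defined but not used" warnings and referenced a constant that may not be defined. Guarding definitions with the same Kconfig symbol as their callers is the standard cure; a generic sketch (the symbol and function names are hypothetical):

    #ifdef CONFIG_FEATURE_X
    static void feature_x_helper(void)
    {
            /* compiled only when its callers are */
    }
    #endif /* CONFIG_FEATURE_X */

    void common_path(void)
    {
    #ifdef CONFIG_FEATURE_X
            feature_x_helper();
    #endif
    }
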
diff --git a/mm/bounce.c b/mm/bounce.c
index ad401fc5744..179fe38a241 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -190,7 +190,7 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
 	return 0;
 }
 
-static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
+static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 			       mempool_t *pool)
 {
 	struct page *page;
@@ -275,7 +275,7 @@ static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
 	*bio_orig = bio;
 }
 
-void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
+void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 {
 	mempool_t *pool;
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f127940ec24..d7ca59d66c5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -84,6 +84,7 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
 			list_del(&page->lru);
 			free_huge_pages--;
 			free_huge_pages_node[nid]--;
+			break;
 		}
 	}
 	return page;
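
Reviewer note: without the break, the walk continues after a page has already been dequeued, so a second page can be unlinked as well — leaking the first and decrementing the free counters twice. The intended shape is a plain first-match loop; a generic sketch (the types and predicate are hypothetical):

    static struct page *dequeue_first(struct list_head *freelist)
    {
            struct page *page, *found = NULL;

            list_for_each_entry(page, freelist, lru) {
                    if (page_is_usable(page)) {
                            list_del(&page->lru);
                            found = page;
                            break;  /* claim exactly one page */
                    }
            }
            return found;
    }
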
diff --git a/mm/slab.c b/mm/slab.c
index bde271c001b..a684778b2b4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2776,7 +2776,7 @@ static int cache_grow(struct kmem_cache *cachep,
 	 * 'nodeid'.
 	 */
 	if (!objp)
-		objp = kmem_getpages(cachep, flags, nodeid);
+		objp = kmem_getpages(cachep, local_flags, nodeid);
 	if (!objp)
 		goto failed;
 
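
Reviewer note: cache_grow() computes local_flags from the caller's gfp mask earlier in the function, presumably stripping slab-internal bits the page allocator must not see; this second kmem_getpages() call was still passing the raw flags. Hedged sketch of the assumed relationship (the mask name reflects this era's code and is an assumption):

    /* assumed: earlier in cache_grow() */
    gfp_t local_flags = flags & GFP_LEVEL_MASK;  /* allocator-visible bits only */

    /* both page-allocation sites must then use local_flags */
    objp = kmem_getpages(cachep, local_flags, nodeid);
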
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index cda936b77d2..1583c5ef963 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -810,6 +810,7 @@ static int vlan_ioctl_handler(void __user *arg)
 		err = -EINVAL;
 		break;
 	case GET_VLAN_REALDEV_NAME_CMD:
+		err = 0;
 		vlan_dev_get_realdev_name(dev, args.u.device2);
 		if (copy_to_user(arg, &args,
 				 sizeof(struct vlan_ioctl_args))) {
@@ -818,6 +819,7 @@ static int vlan_ioctl_handler(void __user *arg)
 		break;
 
 	case GET_VLAN_VID_CMD:
+		err = 0;
 		vlan_dev_get_vid(dev, &vid);
 		args.u.VID = vid;
 		if (copy_to_user(arg, &args,
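
Reviewer note: both GET cases previously inherited err from whatever ran before them (such as an earlier -EINVAL), so a successful lookup could still report failure to userspace. Setting err = 0 at the top of each case makes the copyout the only failure. Generic shape of the fix (the fill helper is hypothetical):

    case QUERY_CMD:
            err = 0;                /* success unless the copyout fails */
            fill_reply(&args);
            if (copy_to_user(arg, &args, sizeof(args)))
                    err = -EFAULT;
            break;
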
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index 031bfa4a51f..457815fb558 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -9,7 +9,6 @@
  *
  */
 
-#include <linux/in.h>
 #include <linux/netfilter_bridge/ebtables.h>
 #include <linux/netfilter_bridge/ebt_log.h>
 #include <linux/netfilter.h>
@@ -196,10 +195,8 @@ static int __init ebt_log_init(void)
 	ret = ebt_register_watcher(&log);
 	if (ret < 0)
 		return ret;
-	ret = nf_log_register(PF_BRIDGE, &ebt_log_logger);
-	if (ret < 0 && ret != -EEXIST)
-		ebt_unregister_watcher(&log);
-	return ret;
+	nf_log_register(PF_BRIDGE, &ebt_log_logger);
+	return 0;
 }
 
 static void __exit ebt_log_fini(void)
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index 9411db62591..204c968fa86 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -36,7 +36,6 @@
 #include <linux/timer.h>
 #include <linux/netlink.h>
 #include <linux/netdevice.h>
-#include <linux/module.h>
 #include <linux/netfilter_bridge/ebtables.h>
 #include <linux/netfilter_bridge/ebt_ulog.h>
 #include <net/sock.h>
@@ -308,12 +307,8 @@ static int __init ebt_ulog_init(void)
 	else if ((ret = ebt_register_watcher(&ulog)))
 		sock_release(ebtulognl->sk_socket);
 
-	if (nf_log_register(PF_BRIDGE, &ebt_ulog_logger) < 0) {
-		printk(KERN_WARNING "ebt_ulog: not logging via ulog "
-		       "since somebody else already registered for PF_BRIDGE\n");
-		/* we cannot make module load fail here, since otherwise
-		 * ebtables userspace would abort */
-	}
+	if (ret == 0)
+		nf_log_register(PF_BRIDGE, &ebt_ulog_logger);
 
 	return ret;
 }
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index 5937ad150b9..127a5e89bf1 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -479,10 +479,8 @@ static int __init ipt_log_init(void)
 	ret = xt_register_target(&ipt_log_reg);
 	if (ret < 0)
 		return ret;
-	ret = nf_log_register(PF_INET, &ipt_log_logger);
-	if (ret < 0 && ret != -EEXIST)
-		xt_unregister_target(&ipt_log_reg);
-	return ret;
+	nf_log_register(PF_INET, &ipt_log_logger);
+	return 0;
 }
 
 static void __exit ipt_log_fini(void)
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index b05327ebd33..6ab99001dcc 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -493,10 +493,8 @@ static int __init ip6t_log_init(void)
 	ret = xt_register_target(&ip6t_log_reg);
 	if (ret < 0)
 		return ret;
-	ret = nf_log_register(PF_INET6, &ip6t_logger);
-	if (ret < 0 && ret != -EEXIST)
-		xt_unregister_target(&ip6t_log_reg);
-	return ret;
+	nf_log_register(PF_INET6, &ip6t_logger);
+	return 0;
 }
 
 static void __exit ip6t_log_fini(void)
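
Reviewer note: ebt_log, ipt_LOG and ip6t_LOG all converge on the same init shape: register the (x)tables target first, then treat nf_log_register() as best-effort, on the assumption — implied by the patch, not stated in it — that another logger already bound to the family is no longer a reason to fail or unwind module load. Sketch of the shared pattern (the registration objects are hypothetical):

    static int __init example_log_init(void)
    {
            int ret;

            ret = xt_register_target(&example_log_reg);
            if (ret < 0)
                    return ret;     /* the target is the hard requirement */
            nf_log_register(PF_INET, &example_logger);  /* best-effort */
            return 0;
    }
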
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d67fb1ef751..f10f3689d67 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -633,6 +633,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
 	if (tp->md5sig_info->entries6 == 0) {
 		kfree(tp->md5sig_info->keys6);
 		tp->md5sig_info->keys6 = NULL;
+		tp->md5sig_info->alloced6 = 0;
 
 		tcp_free_md5sig_pool();
 
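
Reviewer note: keys6 was freed and NULLed while the recorded capacity (alloced6) stayed stale, so a later key add could believe it still had room in a buffer that no longer exists. A pointer and its capacity counter must be reset as a unit; generic sketch (the struct is hypothetical):

    struct grow_buf {
            void *items;
            unsigned int used;
            unsigned int allocated;
    };

    static void grow_buf_release(struct grow_buf *b)
    {
            kfree(b->items);
            b->items = NULL;
            b->used = 0;
            b->allocated = 0;   /* keep capacity in sync with the pointer */
    }
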
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 87ad3ccf8af..eb3fe740146 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -8,7 +8,6 @@
 
 #include <linux/types.h>
 #include <linux/timer.h>
-#include <linux/netfilter.h>
 #include <linux/module.h>
 #include <linux/in.h>
 #include <linux/tcp.h>
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 13d94a02572..2a2fd1a764e 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -9,7 +9,6 @@
 #include <linux/types.h>
 #include <linux/timer.h>
 #include <linux/module.h>
-#include <linux/netfilter.h>
 #include <linux/udp.h>
 #include <linux/seq_file.h>
 #include <linux/skbuff.h>
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 93e747b5396..b906b413997 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -10,7 +10,6 @@
 #include <linux/types.h>
 #include <linux/timer.h>
 #include <linux/module.h>
-#include <linux/netfilter.h>
 #include <linux/udp.h>
 #include <linux/seq_file.h>
 #include <linux/skbuff.h>
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index f47cab7a696..a4bab043a6d 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -13,7 +13,6 @@
 #include <linux/netfilter_bridge.h>
 #include <linux/netfilter/xt_physdev.h>
 #include <linux/netfilter/x_tables.h>
-#include <linux/netfilter_bridge.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index e146531faf1..8c11ca4a212 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -184,7 +184,7 @@ int genl_register_mc_group(struct genl_family *family,
 	}
 
 	err = netlink_change_ngroups(genl_sock,
-				     sizeof(unsigned long) * NETLINK_GENERIC);
+				     mc_groups_longs * BITS_PER_LONG);
 	if (err)
 		goto out;
 
@@ -196,10 +196,22 @@ int genl_register_mc_group(struct genl_family *family,
 	genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, grp);
  out:
 	genl_unlock();
-	return 0;
+	return err;
 }
 EXPORT_SYMBOL(genl_register_mc_group);
 
+static void __genl_unregister_mc_group(struct genl_family *family,
+				       struct genl_multicast_group *grp)
+{
+	BUG_ON(grp->family != family);
+	netlink_clear_multicast_users(genl_sock, grp->id);
+	clear_bit(grp->id, mc_groups);
+	list_del(&grp->list);
+	genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, grp);
+	grp->id = 0;
+	grp->family = NULL;
+}
+
 /**
  * genl_unregister_mc_group - unregister a multicast group
  *
@@ -217,14 +229,8 @@ EXPORT_SYMBOL(genl_register_mc_group);
 void genl_unregister_mc_group(struct genl_family *family,
 			      struct genl_multicast_group *grp)
 {
-	BUG_ON(grp->family != family);
 	genl_lock();
-	netlink_clear_multicast_users(genl_sock, grp->id);
-	clear_bit(grp->id, mc_groups);
-	list_del(&grp->list);
-	genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, grp);
-	grp->id = 0;
-	grp->family = NULL;
+	__genl_unregister_mc_group(family, grp);
 	genl_unlock();
 }
 
@@ -232,8 +238,10 @@ static void genl_unregister_mc_groups(struct genl_family *family)
 {
 	struct genl_multicast_group *grp, *tmp;
 
+	genl_lock();
 	list_for_each_entry_safe(grp, tmp, &family->mcast_groups, list)
-		genl_unregister_mc_group(family, grp);
+		__genl_unregister_mc_group(family, grp);
+	genl_unlock();
 }
 
 /**
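
Reviewer note: a textbook locked/unlocked split. The double-underscore helper carries the "caller holds genl_lock" contract, the public function takes the lock for single unregistrations, and the bulk path now iterates under one lock acquisition instead of re-locking per group — which also means the whole teardown is atomic with respect to concurrent registrations. Generic sketch of the pattern (the registry names are hypothetical):

    static DEFINE_MUTEX(reg_lock);

    static void __unregister_one(struct entry *e)
    {
            /* caller must hold reg_lock */
            list_del(&e->list);
    }

    void unregister_one(struct entry *e)
    {
            mutex_lock(&reg_lock);
            __unregister_one(e);
            mutex_unlock(&reg_lock);
    }

    void unregister_all(struct list_head *head)
    {
            struct entry *e, *tmp;

            mutex_lock(&reg_lock);
            list_for_each_entry_safe(e, tmp, head, list)
                    __unregister_one(e);
            mutex_unlock(&reg_lock);
    }
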
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 490697542fc..dc2f41e9f57 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -769,11 +769,12 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
 	new->h.flavour = &svcauthops_gss;
 	new->pseudoflavor = pseudoflavor;
 
+	stat = 0;
 	test = auth_domain_lookup(name, &new->h);
-	if (test != &new->h) { /* XXX Duplicate registration? */
-		auth_domain_put(&new->h);
-		/* dangling ref-count... */
-		goto out;
+	if (test != &new->h) { /* Duplicate registration */
+		auth_domain_put(test);
+		kfree(new->h.name);
+		goto out_free_dom;
 	}
 	return 0;
 
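
Reviewer note: when auth_domain_lookup() hands back an existing domain, the new one was never published, so the old code dropped a reference on the wrong object (the unpublished new domain) and leaked the separately allocated name; the fix puts the reference the lookup returned, frees the name, and exits through a label that releases the unpublished allocation, with stat set explicitly so that path returns a well-defined value. Generic lookup-or-insert sketch (all names hypothetical):

    int register_domain(struct registry *reg, const char *name)
    {
            struct domain *new, *found;

            new = alloc_domain(name);
            if (!new)
                    return -ENOMEM;

            found = lookup_or_insert(reg, name, new);
            if (found != new) {
                    /* duplicate: new was never published, so undo both
                     * halves — the reference the lookup handed back and
                     * the allocation nobody else can see */
                    put_domain(found);
                    free_domain(new);
                    return 0;       /* or -EEXIST, per policy */
            }
            return 0;
    }
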
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index b5f017f07a7..0ae032f3876 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -2417,8 +2417,10 @@ static void security_netlbl_cache_add(struct netlbl_lsm_secattr *secattr,
 
 	cache->type = NETLBL_CACHE_T_MLS;
 	if (ebitmap_cpy(&cache->data.mls_label.level[0].cat,
-			&ctx->range.level[0].cat) != 0)
+			&ctx->range.level[0].cat) != 0) {
+		kfree(cache);
 		return;
+	}
 	cache->data.mls_label.level[1].cat.highbit =
 		cache->data.mls_label.level[0].cat.highbit;
 	cache->data.mls_label.level[1].cat.node =
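
Reviewer note: on ebitmap_cpy() failure, cache has been allocated but not yet linked anywhere, so the bare return leaked it. The rule of thumb: every exit between allocation and publication must free. Generic sketch (the helper names are hypothetical):

    static void cache_add(const struct input *in)
    {
            struct cache_entry *ce;

            ce = kzalloc(sizeof(*ce), GFP_ATOMIC);
            if (!ce)
                    return;

            if (fill_entry(ce, in) != 0) {
                    kfree(ce);      /* unpublished: we still own it */
                    return;
            }

            publish_entry(ce);      /* ownership handed off */
    }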