385 files changed, 5695 insertions, 3263 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 727cc08f0f3b..33121d6c827c 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1439,7 +1439,8 @@ and is between 256 and 4096 characters. It is defined in the file | |||
1439 | Param: "schedule" - profile schedule points. | 1439 | Param: "schedule" - profile schedule points. |
1440 | Param: <number> - step/bucket size as a power of 2 for | 1440 | Param: <number> - step/bucket size as a power of 2 for |
1441 | statistical time based profiling. | 1441 | statistical time based profiling. |
1442 | Param: "sleep" - profile D-state sleeping (millisecs) | 1442 | Param: "sleep" - profile D-state sleeping (millisecs). |
1443 | Requires CONFIG_SCHEDSTATS | ||
1443 | Param: "kvm" - profile VM exits. | 1444 | Param: "kvm" - profile VM exits. |
1444 | 1445 | ||
1445 | processor.max_cstate= [HW,ACPI] | 1446 | processor.max_cstate= [HW,ACPI] |
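The profile= parameter documented above is given on the kernel command line; an illustrative example (values chosen arbitrarily, and "sleep" only works on a kernel built with CONFIG_SCHEDSTATS=y):

    profile=schedule,2      # profile schedule points with a 2^2 step size
    profile=sleep           # profile D-state sleeping, per the hunk above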
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index 5bdc37f81842..f2668390e8f7 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -34,25 +34,24 @@ | |||
34 | #include <zlib.h> | 34 | #include <zlib.h> |
35 | #include <assert.h> | 35 | #include <assert.h> |
36 | #include <sched.h> | 36 | #include <sched.h> |
37 | /*L:110 We can ignore the 30 include files we need for this program, but I do | ||
38 | * want to draw attention to the use of kernel-style types. | ||
39 | * | ||
40 | * As Linus said, "C is a Spartan language, and so should your naming be." I | ||
41 | * like these abbreviations and the header we need uses them, so we define them | ||
42 | * here. | ||
43 | */ | ||
44 | typedef unsigned long long u64; | ||
45 | typedef uint32_t u32; | ||
46 | typedef uint16_t u16; | ||
47 | typedef uint8_t u8; | ||
48 | #include "linux/lguest_launcher.h" | 37 | #include "linux/lguest_launcher.h" |
49 | #include "linux/pci_ids.h" | ||
50 | #include "linux/virtio_config.h" | 38 | #include "linux/virtio_config.h" |
51 | #include "linux/virtio_net.h" | 39 | #include "linux/virtio_net.h" |
52 | #include "linux/virtio_blk.h" | 40 | #include "linux/virtio_blk.h" |
53 | #include "linux/virtio_console.h" | 41 | #include "linux/virtio_console.h" |
54 | #include "linux/virtio_ring.h" | 42 | #include "linux/virtio_ring.h" |
55 | #include "asm-x86/bootparam.h" | 43 | #include "asm-x86/bootparam.h" |
44 | /*L:110 We can ignore the 38 include files we need for this program, but I do | ||
45 | * want to draw attention to the use of kernel-style types. | ||
46 | * | ||
47 | * As Linus said, "C is a Spartan language, and so should your naming be." I | ||
48 | * like these abbreviations, so we define them here. Note that u64 is always | ||
49 | * unsigned long long, which works on all Linux systems: this means that we can | ||
50 | * use %llu in printf for any u64. */ | ||
51 | typedef unsigned long long u64; | ||
52 | typedef uint32_t u32; | ||
53 | typedef uint16_t u16; | ||
54 | typedef uint8_t u8; | ||
56 | /*:*/ | 55 | /*:*/ |
57 | 56 | ||
58 | #define PAGE_PRESENT 0x7 /* Present, RW, Execute */ | 57 | #define PAGE_PRESENT 0x7 /* Present, RW, Execute */ |
@@ -361,8 +360,8 @@ static unsigned long load_bzimage(int fd) | |||
361 | } | 360 | } |
362 | 361 | ||
363 | /*L:140 Loading the kernel is easy when it's a "vmlinux", but most kernels | 362 | /*L:140 Loading the kernel is easy when it's a "vmlinux", but most kernels |
364 | * come wrapped up in the self-decompressing "bzImage" format. With some funky | 363 | * come wrapped up in the self-decompressing "bzImage" format. With a little |
365 | * coding, we can load those, too. */ | 364 | * work, we can load those, too. */ |
366 | static unsigned long load_kernel(int fd) | 365 | static unsigned long load_kernel(int fd) |
367 | { | 366 | { |
368 | Elf32_Ehdr hdr; | 367 | Elf32_Ehdr hdr; |
@@ -465,6 +464,7 @@ static unsigned long setup_pagetables(unsigned long mem, | |||
465 | * to know where it is. */ | 464 | * to know where it is. */ |
466 | return to_guest_phys(pgdir); | 465 | return to_guest_phys(pgdir); |
467 | } | 466 | } |
467 | /*:*/ | ||
468 | 468 | ||
469 | /* Simple routine to roll all the commandline arguments together with spaces | 469 | /* Simple routine to roll all the commandline arguments together with spaces |
470 | * between them. */ | 470 | * between them. */ |
@@ -481,9 +481,9 @@ static void concat(char *dst, char *args[]) | |||
481 | dst[len] = '\0'; | 481 | dst[len] = '\0'; |
482 | } | 482 | } |
483 | 483 | ||
484 | /* This is where we actually tell the kernel to initialize the Guest. We saw | 484 | /*L:185 This is where we actually tell the kernel to initialize the Guest. We |
485 | * the arguments it expects when we looked at initialize() in lguest_user.c: | 485 | * saw the arguments it expects when we looked at initialize() in lguest_user.c: |
486 | * the base of guest "physical" memory, the top physical page to allow, the | 486 | * the base of Guest "physical" memory, the top physical page to allow, the |
487 | * top level pagetable and the entry point for the Guest. */ | 487 | * top level pagetable and the entry point for the Guest. */ |
488 | static int tell_kernel(unsigned long pgdir, unsigned long start) | 488 | static int tell_kernel(unsigned long pgdir, unsigned long start) |
489 | { | 489 | { |
@@ -513,13 +513,14 @@ static void add_device_fd(int fd) | |||
513 | /*L:200 | 513 | /*L:200 |
514 | * The Waker. | 514 | * The Waker. |
515 | * | 515 | * |
516 | * With a console and network devices, we can have lots of input which we need | 516 | * With console, block and network devices, we can have lots of input which we |
517 | * to process. We could try to tell the kernel what file descriptors to watch, | 517 | * need to process. We could try to tell the kernel what file descriptors to |
518 | * but handing a file descriptor mask through to the kernel is fairly icky. | 518 | * watch, but handing a file descriptor mask through to the kernel is fairly |
519 | * icky. | ||
519 | * | 520 | * |
520 | * Instead, we fork off a process which watches the file descriptors and writes | 521 | * Instead, we fork off a process which watches the file descriptors and writes |
521 | * the LHREQ_BREAK command to the /dev/lguest filedescriptor to tell the Host | 522 | * the LHREQ_BREAK command to the /dev/lguest file descriptor to tell the Host |
522 | loop to stop running the Guest. This causes it to return from the | 523 | to stop running the Guest. This causes the Launcher to return from the |
523 | * /dev/lguest read with -EAGAIN, where it will write to /dev/lguest to reset | 524 | * /dev/lguest read with -EAGAIN, where it will write to /dev/lguest to reset |
524 | * the LHREQ_BREAK and wake us up again. | 525 | * the LHREQ_BREAK and wake us up again. |
525 | * | 526 | * |
@@ -545,7 +546,9 @@ static void wake_parent(int pipefd, int lguest_fd) | |||
545 | if (read(pipefd, &fd, sizeof(fd)) == 0) | 546 | if (read(pipefd, &fd, sizeof(fd)) == 0) |
546 | exit(0); | 547 | exit(0); |
547 | /* Otherwise it's telling us to change what file | 548 | /* Otherwise it's telling us to change what file |
548 | * descriptors we're to listen to. */ | 549 | * descriptors we're to listen to. Positive means |
550 | * listen to a new one, negative means stop | ||
551 | * listening. */ | ||
549 | if (fd >= 0) | 552 | if (fd >= 0) |
550 | FD_SET(fd, &devices.infds); | 553 | FD_SET(fd, &devices.infds); |
551 | else | 554 | else |
@@ -560,7 +563,7 @@ static int setup_waker(int lguest_fd) | |||
560 | { | 563 | { |
561 | int pipefd[2], child; | 564 | int pipefd[2], child; |
562 | 565 | ||
563 | /* We create a pipe to talk to the waker, and also so it knows when the | 566 | /* We create a pipe to talk to the Waker, and also so it knows when the |
564 | * Launcher dies (and closes pipe). */ | 567 | * Launcher dies (and closes pipe). */ |
565 | pipe(pipefd); | 568 | pipe(pipefd); |
566 | child = fork(); | 569 | child = fork(); |
@@ -568,7 +571,8 @@ static int setup_waker(int lguest_fd) | |||
568 | err(1, "forking"); | 571 | err(1, "forking"); |
569 | 572 | ||
570 | if (child == 0) { | 573 | if (child == 0) { |
571 | /* Close the "writing" end of our copy of the pipe */ | 574 | /* We are the Waker: close the "writing" end of our copy of the |
575 | * pipe and start waiting for input. */ | ||
572 | close(pipefd[1]); | 576 | close(pipefd[1]); |
573 | wake_parent(pipefd[0], lguest_fd); | 577 | wake_parent(pipefd[0], lguest_fd); |
574 | } | 578 | } |
@@ -579,12 +583,12 @@ static int setup_waker(int lguest_fd) | |||
579 | return pipefd[1]; | 583 | return pipefd[1]; |
580 | } | 584 | } |
581 | 585 | ||
582 | /*L:210 | 586 | /* |
583 | * Device Handling. | 587 | * Device Handling. |
584 | * | 588 | * |
585 | * When the Guest sends DMA to us, it sends us an array of addresses and sizes. | 589 | * When the Guest gives us a buffer, it sends an array of addresses and sizes. |
586 | * We need to make sure it's not trying to reach into the Launcher itself, so | 590 | * We need to make sure it's not trying to reach into the Launcher itself, so |
587 | * we have a convenient routine which check it and exits with an error message | 591 | * we have a convenient routine which checks it and exits with an error message |
588 | * if something funny is going on: | 592 | * if something funny is going on: |
589 | */ | 593 | */ |
590 | static void *_check_pointer(unsigned long addr, unsigned int size, | 594 | static void *_check_pointer(unsigned long addr, unsigned int size, |
@@ -601,7 +605,9 @@ static void *_check_pointer(unsigned long addr, unsigned int size, | |||
601 | /* A macro which transparently hands the line number to the real function. */ | 605 | /* A macro which transparently hands the line number to the real function. */ |
602 | #define check_pointer(addr,size) _check_pointer(addr, size, __LINE__) | 606 | #define check_pointer(addr,size) _check_pointer(addr, size, __LINE__) |
603 | 607 | ||
604 | /* This function returns the next descriptor in the chain, or vq->vring.num. */ | 608 | /* Each buffer in the virtqueues is actually a chain of descriptors. This |
609 | * function returns the next descriptor in the chain, or vq->vring.num if we're | ||
610 | * at the end. */ | ||
605 | static unsigned next_desc(struct virtqueue *vq, unsigned int i) | 611 | static unsigned next_desc(struct virtqueue *vq, unsigned int i) |
606 | { | 612 | { |
607 | unsigned int next; | 613 | unsigned int next; |
@@ -680,13 +686,14 @@ static unsigned get_vq_desc(struct virtqueue *vq, | |||
680 | return head; | 686 | return head; |
681 | } | 687 | } |
682 | 688 | ||
683 | /* Once we've used one of their buffers, we tell them about it. We'll then | 689 | /* After we've used one of their buffers, we tell them about it. We'll then |
684 | * want to send them an interrupt, using trigger_irq(). */ | 690 | * want to send them an interrupt, using trigger_irq(). */ |
685 | static void add_used(struct virtqueue *vq, unsigned int head, int len) | 691 | static void add_used(struct virtqueue *vq, unsigned int head, int len) |
686 | { | 692 | { |
687 | struct vring_used_elem *used; | 693 | struct vring_used_elem *used; |
688 | 694 | ||
689 | /* Get a pointer to the next entry in the used ring. */ | 695 | /* The virtqueue contains a ring of used buffers. Get a pointer to the |
696 | * next entry in that used ring. */ | ||
690 | used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num]; | 697 | used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num]; |
691 | used->id = head; | 698 | used->id = head; |
692 | used->len = len; | 699 | used->len = len; |
@@ -700,6 +707,7 @@ static void trigger_irq(int fd, struct virtqueue *vq) | |||
700 | { | 707 | { |
701 | unsigned long buf[] = { LHREQ_IRQ, vq->config.irq }; | 708 | unsigned long buf[] = { LHREQ_IRQ, vq->config.irq }; |
702 | 709 | ||
710 | /* If they don't want an interrupt, don't send one. */ | ||
703 | if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) | 711 | if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) |
704 | return; | 712 | return; |
705 | 713 | ||
@@ -716,8 +724,11 @@ static void add_used_and_trigger(int fd, struct virtqueue *vq, | |||
716 | trigger_irq(fd, vq); | 724 | trigger_irq(fd, vq); |
717 | } | 725 | } |
718 | 726 | ||
719 | /* Here is the input terminal setting we save, and the routine to restore them | 727 | /* |
720 | * on exit so the user can see what they type next. */ | 728 | * The Console |
729 | * | ||
730 | * Here is the input terminal setting we save, and the routine to restore them | ||
731 | * on exit so the user gets their terminal back. */ | ||
721 | static struct termios orig_term; | 732 | static struct termios orig_term; |
722 | static void restore_term(void) | 733 | static void restore_term(void) |
723 | { | 734 | { |
@@ -818,7 +829,10 @@ static void handle_console_output(int fd, struct virtqueue *vq) | |||
818 | } | 829 | } |
819 | } | 830 | } |
820 | 831 | ||
821 | /* Handling output for network is also simple: we get all the output buffers | 832 | /* |
833 | * The Network | ||
834 | * | ||
835 | * Handling output for network is also simple: we get all the output buffers | ||
822 | * and write them (ignoring the first element) to this device's file descriptor | 836 | * and write them (ignoring the first element) to this device's file descriptor |
823 | * (stdout). */ | 837 | * (stdout). */ |
824 | static void handle_net_output(int fd, struct virtqueue *vq) | 838 | static void handle_net_output(int fd, struct virtqueue *vq) |
@@ -831,8 +845,9 @@ static void handle_net_output(int fd, struct virtqueue *vq) | |||
831 | while ((head = get_vq_desc(vq, iov, &out, &in)) != vq->vring.num) { | 845 | while ((head = get_vq_desc(vq, iov, &out, &in)) != vq->vring.num) { |
832 | if (in) | 846 | if (in) |
833 | errx(1, "Input buffers in output queue?"); | 847 | errx(1, "Input buffers in output queue?"); |
834 | /* Check header, but otherwise ignore it (we said we supported | 848 | /* Check header, but otherwise ignore it (we told the Guest we |
835 | * no features). */ | 849 | * supported no features, so it shouldn't have anything |
850 | * interesting). */ | ||
836 | (void)convert(&iov[0], struct virtio_net_hdr); | 851 | (void)convert(&iov[0], struct virtio_net_hdr); |
837 | len = writev(vq->dev->fd, iov+1, out-1); | 852 | len = writev(vq->dev->fd, iov+1, out-1); |
838 | add_used_and_trigger(fd, vq, head, len); | 853 | add_used_and_trigger(fd, vq, head, len); |
@@ -883,7 +898,8 @@ static bool handle_tun_input(int fd, struct device *dev) | |||
883 | return true; | 898 | return true; |
884 | } | 899 | } |
885 | 900 | ||
886 | /* This callback ensures we try again, in case we stopped console or net | 901 | /*L:215 This is the callback attached to the network and console input |
902 | * virtqueues: it ensures we try again, in case we stopped console or net | ||
887 | * delivery because Guest didn't have any buffers. */ | 903 | * delivery because Guest didn't have any buffers. */ |
888 | static void enable_fd(int fd, struct virtqueue *vq) | 904 | static void enable_fd(int fd, struct virtqueue *vq) |
889 | { | 905 | { |
@@ -919,7 +935,7 @@ static void handle_output(int fd, unsigned long addr) | |||
919 | strnlen(from_guest_phys(addr), guest_limit - addr)); | 935 | strnlen(from_guest_phys(addr), guest_limit - addr)); |
920 | } | 936 | } |
921 | 937 | ||
922 | /* This is called when the waker wakes us up: check for incoming file | 938 | /* This is called when the Waker wakes us up: check for incoming file |
923 | * descriptors. */ | 939 | * descriptors. */ |
924 | static void handle_input(int fd) | 940 | static void handle_input(int fd) |
925 | { | 941 | { |
@@ -986,8 +1002,7 @@ static struct lguest_device_desc *new_dev_desc(u16 type) | |||
986 | } | 1002 | } |
987 | 1003 | ||
988 | /* Each device descriptor is followed by some configuration information. | 1004 | /* Each device descriptor is followed by some configuration information. |
989 | * The first byte is a "status" byte for the Guest to report what's happening. | 1005 | * Each configuration field looks like: u8 type, u8 len, [... len bytes...]. |
990 | * After that are fields: u8 type, u8 len, [... len bytes...]. | ||
991 | * | 1006 | * |
992 | * This routine adds a new field to an existing device's descriptor. It only | 1007 | * This routine adds a new field to an existing device's descriptor. It only |
993 | * works for the last device, but that's OK because that's how we use it. */ | 1008 | * works for the last device, but that's OK because that's how we use it. */ |
@@ -1044,14 +1059,17 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs, | |||
1044 | /* Link virtqueue back to device. */ | 1059 | /* Link virtqueue back to device. */ |
1045 | vq->dev = dev; | 1060 | vq->dev = dev; |
1046 | 1061 | ||
1047 | /* Set up handler. */ | 1062 | /* Set the routine to call when the Guest does something to this |
1063 | * virtqueue. */ | ||
1048 | vq->handle_output = handle_output; | 1064 | vq->handle_output = handle_output; |
1065 | |||
1066 | /* Set the "Don't Notify Me" flag if we don't have a handler */ | ||
1049 | if (!handle_output) | 1067 | if (!handle_output) |
1050 | vq->vring.used->flags = VRING_USED_F_NO_NOTIFY; | 1068 | vq->vring.used->flags = VRING_USED_F_NO_NOTIFY; |
1051 | } | 1069 | } |
1052 | 1070 | ||
1053 | /* This routine does all the creation and setup of a new device, including | 1071 | /* This routine does all the creation and setup of a new device, including |
1054 | * caling new_dev_desc() to allocate the descriptor and device memory. */ | 1072 | * calling new_dev_desc() to allocate the descriptor and device memory. */ |
1055 | static struct device *new_device(const char *name, u16 type, int fd, | 1073 | static struct device *new_device(const char *name, u16 type, int fd, |
1056 | bool (*handle_input)(int, struct device *)) | 1074 | bool (*handle_input)(int, struct device *)) |
1057 | { | 1075 | { |
@@ -1060,7 +1078,7 @@ static struct device *new_device(const char *name, u16 type, int fd, | |||
1060 | /* Append to device list. Prepending to a single-linked list is | 1078 | /* Append to device list. Prepending to a single-linked list is |
1061 | * easier, but the user expects the devices to be arranged on the bus | 1079 | * easier, but the user expects the devices to be arranged on the bus |
1062 | * in command-line order. The first network device on the command line | 1080 | * in command-line order. The first network device on the command line |
1063 | * is eth0, the first block device /dev/lgba, etc. */ | 1081 | * is eth0, the first block device /dev/vda, etc. */ |
1064 | *devices.lastdev = dev; | 1082 | *devices.lastdev = dev; |
1065 | dev->next = NULL; | 1083 | dev->next = NULL; |
1066 | devices.lastdev = &dev->next; | 1084 | devices.lastdev = &dev->next; |
@@ -1104,7 +1122,7 @@ static void setup_console(void) | |||
1104 | /* The console needs two virtqueues: the input then the output. When | 1122 | /* The console needs two virtqueues: the input then the output. When |
1105 | * they put something the input queue, we make sure we're listening to | 1123 | * they put something the input queue, we make sure we're listening to |
1106 | * stdin. When they put something in the output queue, we write it to | 1124 | * stdin. When they put something in the output queue, we write it to |
1107 | * stdout. */ | 1125 | * stdout. */ |
1108 | add_virtqueue(dev, VIRTQUEUE_NUM, enable_fd); | 1126 | add_virtqueue(dev, VIRTQUEUE_NUM, enable_fd); |
1109 | add_virtqueue(dev, VIRTQUEUE_NUM, handle_console_output); | 1127 | add_virtqueue(dev, VIRTQUEUE_NUM, handle_console_output); |
1110 | 1128 | ||
@@ -1252,21 +1270,17 @@ static void setup_tun_net(const char *arg) | |||
1252 | verbose("attached to bridge: %s\n", br_name); | 1270 | verbose("attached to bridge: %s\n", br_name); |
1253 | } | 1271 | } |
1254 | 1272 | ||
1255 | 1273 | /* Our block (disk) device should be really simple: the Guest asks for a block | |
1256 | /* | 1274 | * number and we read or write that position in the file. Unfortunately, that |
1257 | * Block device. | 1275 | * was amazingly slow: the Guest waits until the read is finished before |
1276 | * running anything else, even if it could have been doing useful work. | ||
1258 | * | 1277 | * |
1259 | * Serving a block device is really easy: the Guest asks for a block number and | 1278 | * We could use async I/O, except it's reputed to suck so hard that characters |
1260 | * we read or write that position in the file. | 1279 | * actually go missing from your code when you try to use it. |
1261 | * | ||
1262 | * Unfortunately, this is amazingly slow: the Guest waits until the read is | ||
1263 | * finished before running anything else, even if it could be doing useful | ||
1264 | * work. We could use async I/O, except it's reputed to suck so hard that | ||
1265 | * characters actually go missing from your code when you try to use it. | ||
1266 | * | 1280 | * |
1267 | * So we farm the I/O out to thread, and communicate with it via a pipe. */ | 1281 | * So we farm the I/O out to thread, and communicate with it via a pipe. */ |
1268 | 1282 | ||
1269 | /* This hangs off device->priv, with the data. */ | 1283 | /* This hangs off device->priv. */ |
1270 | struct vblk_info | 1284 | struct vblk_info |
1271 | { | 1285 | { |
1272 | /* The size of the file. */ | 1286 | /* The size of the file. */ |
@@ -1282,8 +1296,14 @@ struct vblk_info | |||
1282 | * Launcher triggers interrupt to Guest. */ | 1296 | * Launcher triggers interrupt to Guest. */ |
1283 | int done_fd; | 1297 | int done_fd; |
1284 | }; | 1298 | }; |
1299 | /*:*/ | ||
1285 | 1300 | ||
1286 | /* This is the core of the I/O thread. It returns true if it did something. */ | 1301 | /*L:210 |
1302 | * The Disk | ||
1303 | * | ||
1304 | * Remember that the block device is handled by a separate I/O thread. We head | ||
1305 | * straight into the core of that thread here: | ||
1306 | */ | ||
1287 | static bool service_io(struct device *dev) | 1307 | static bool service_io(struct device *dev) |
1288 | { | 1308 | { |
1289 | struct vblk_info *vblk = dev->priv; | 1309 | struct vblk_info *vblk = dev->priv; |
@@ -1294,10 +1314,14 @@ static bool service_io(struct device *dev) | |||
1294 | struct iovec iov[dev->vq->vring.num]; | 1314 | struct iovec iov[dev->vq->vring.num]; |
1295 | off64_t off; | 1315 | off64_t off; |
1296 | 1316 | ||
1317 | /* See if there's a request waiting. If not, nothing to do. */ | ||
1297 | head = get_vq_desc(dev->vq, iov, &out_num, &in_num); | 1318 | head = get_vq_desc(dev->vq, iov, &out_num, &in_num); |
1298 | if (head == dev->vq->vring.num) | 1319 | if (head == dev->vq->vring.num) |
1299 | return false; | 1320 | return false; |
1300 | 1321 | ||
1322 | /* Every block request should contain at least one output buffer | ||
1323 | * (detailing the location on disk and the type of request) and one | ||
1324 | * input buffer (to hold the result). */ | ||
1301 | if (out_num == 0 || in_num == 0) | 1325 | if (out_num == 0 || in_num == 0) |
1302 | errx(1, "Bad virtblk cmd %u out=%u in=%u", | 1326 | errx(1, "Bad virtblk cmd %u out=%u in=%u", |
1303 | head, out_num, in_num); | 1327 | head, out_num, in_num); |
@@ -1306,10 +1330,15 @@ static bool service_io(struct device *dev) | |||
1306 | in = convert(&iov[out_num+in_num-1], struct virtio_blk_inhdr); | 1330 | in = convert(&iov[out_num+in_num-1], struct virtio_blk_inhdr); |
1307 | off = out->sector * 512; | 1331 | off = out->sector * 512; |
1308 | 1332 | ||
1309 | /* This is how we implement barriers. Pretty poor, no? */ | 1333 | /* The block device implements "barriers", where the Guest indicates |
1334 | * that it wants all previous writes to occur before this write. We | ||
1335 | * don't have a way of asking our kernel to do a barrier, so we just | ||
1336 | * synchronize all the data in the file. Pretty poor, no? */ | ||
1310 | if (out->type & VIRTIO_BLK_T_BARRIER) | 1337 | if (out->type & VIRTIO_BLK_T_BARRIER) |
1311 | fdatasync(vblk->fd); | 1338 | fdatasync(vblk->fd); |
1312 | 1339 | ||
1340 | /* In general the virtio block driver is allowed to try SCSI commands. | ||
1341 | * It'd be nice if we supported eject, for example, but we don't. */ | ||
1313 | if (out->type & VIRTIO_BLK_T_SCSI_CMD) { | 1342 | if (out->type & VIRTIO_BLK_T_SCSI_CMD) { |
1314 | fprintf(stderr, "Scsi commands unsupported\n"); | 1343 | fprintf(stderr, "Scsi commands unsupported\n"); |
1315 | in->status = VIRTIO_BLK_S_UNSUPP; | 1344 | in->status = VIRTIO_BLK_S_UNSUPP; |
@@ -1375,7 +1404,7 @@ static int io_thread(void *_dev) | |||
1375 | 1404 | ||
1376 | /* When this read fails, it means Launcher died, so we follow. */ | 1405 | /* When this read fails, it means Launcher died, so we follow. */ |
1377 | while (read(vblk->workpipe[0], &c, 1) == 1) { | 1406 | while (read(vblk->workpipe[0], &c, 1) == 1) { |
1378 | /* We acknowledge each request immediately, to reduce latency, | 1407 | /* We acknowledge each request immediately to reduce latency, |
1379 | * rather than waiting until we've done them all. I haven't | 1408 | * rather than waiting until we've done them all. I haven't |
1380 | * measured to see if it makes any difference. */ | 1409 | * measured to see if it makes any difference. */ |
1381 | while (service_io(dev)) | 1410 | while (service_io(dev)) |
@@ -1384,12 +1413,14 @@ static int io_thread(void *_dev) | |||
1384 | return 0; | 1413 | return 0; |
1385 | } | 1414 | } |
1386 | 1415 | ||
1387 | /* When the thread says some I/O is done, we interrupt the Guest. */ | 1416 | /* Now we've seen the I/O thread, we return to the Launcher to see what happens |
1417 | * when the thread tells us it's completed some I/O. */ | ||
1388 | static bool handle_io_finish(int fd, struct device *dev) | 1418 | static bool handle_io_finish(int fd, struct device *dev) |
1389 | { | 1419 | { |
1390 | char c; | 1420 | char c; |
1391 | 1421 | ||
1392 | /* If child died, presumably it printed message. */ | 1422 | /* If the I/O thread died, presumably it printed the error, so we |
1423 | * simply exit. */ | ||
1393 | if (read(dev->fd, &c, 1) != 1) | 1424 | if (read(dev->fd, &c, 1) != 1) |
1394 | exit(1); | 1425 | exit(1); |
1395 | 1426 | ||
@@ -1398,7 +1429,7 @@ static bool handle_io_finish(int fd, struct device *dev) | |||
1398 | return true; | 1429 | return true; |
1399 | } | 1430 | } |
1400 | 1431 | ||
1401 | /* When the Guest submits some I/O, we wake the I/O thread. */ | 1432 | /* When the Guest submits some I/O, we just need to wake the I/O thread. */ |
1402 | static void handle_virtblk_output(int fd, struct virtqueue *vq) | 1433 | static void handle_virtblk_output(int fd, struct virtqueue *vq) |
1403 | { | 1434 | { |
1404 | struct vblk_info *vblk = vq->dev->priv; | 1435 | struct vblk_info *vblk = vq->dev->priv; |
@@ -1410,7 +1441,7 @@ static void handle_virtblk_output(int fd, struct virtqueue *vq) | |||
1410 | exit(1); | 1441 | exit(1); |
1411 | } | 1442 | } |
1412 | 1443 | ||
1413 | /* This creates a virtual block device. */ | 1444 | /*L:198 This actually sets up a virtual block device. */ |
1414 | static void setup_block_file(const char *filename) | 1445 | static void setup_block_file(const char *filename) |
1415 | { | 1446 | { |
1416 | int p[2]; | 1447 | int p[2]; |
@@ -1426,7 +1457,7 @@ static void setup_block_file(const char *filename) | |||
1426 | /* The device responds to return from I/O thread. */ | 1457 | /* The device responds to return from I/O thread. */ |
1427 | dev = new_device("block", VIRTIO_ID_BLOCK, p[0], handle_io_finish); | 1458 | dev = new_device("block", VIRTIO_ID_BLOCK, p[0], handle_io_finish); |
1428 | 1459 | ||
1429 | /* The device has a virtqueue. */ | 1460 | /* The device has one virtqueue, where the Guest places requests. */ |
1430 | add_virtqueue(dev, VIRTQUEUE_NUM, handle_virtblk_output); | 1461 | add_virtqueue(dev, VIRTQUEUE_NUM, handle_virtblk_output); |
1431 | 1462 | ||
1432 | /* Allocate the room for our own bookkeeping */ | 1463 | /* Allocate the room for our own bookkeeping */ |
@@ -1448,7 +1479,8 @@ static void setup_block_file(const char *filename) | |||
1448 | /* The I/O thread writes to this end of the pipe when done. */ | 1479 | /* The I/O thread writes to this end of the pipe when done. */ |
1449 | vblk->done_fd = p[1]; | 1480 | vblk->done_fd = p[1]; |
1450 | 1481 | ||
1451 | /* This is how we tell the I/O thread about more work. */ | 1482 | /* This is the second pipe, which is how we tell the I/O thread about |
1483 | * more work. */ | ||
1452 | pipe(vblk->workpipe); | 1484 | pipe(vblk->workpipe); |
1453 | 1485 | ||
1454 | /* Create stack for thread and run it */ | 1486 | /* Create stack for thread and run it */ |
@@ -1487,24 +1519,25 @@ static void __attribute__((noreturn)) run_guest(int lguest_fd) | |||
1487 | char reason[1024] = { 0 }; | 1519 | char reason[1024] = { 0 }; |
1488 | read(lguest_fd, reason, sizeof(reason)-1); | 1520 | read(lguest_fd, reason, sizeof(reason)-1); |
1489 | errx(1, "%s", reason); | 1521 | errx(1, "%s", reason); |
1490 | /* EAGAIN means the waker wanted us to look at some input. | 1522 | /* EAGAIN means the Waker wanted us to look at some input. |
1491 | * Anything else means a bug or incompatible change. */ | 1523 | * Anything else means a bug or incompatible change. */ |
1492 | } else if (errno != EAGAIN) | 1524 | } else if (errno != EAGAIN) |
1493 | err(1, "Running guest failed"); | 1525 | err(1, "Running guest failed"); |
1494 | 1526 | ||
1495 | /* Service input, then unset the BREAK which releases | 1527 | /* Service input, then unset the BREAK to release the Waker. */ |
1496 | * the Waker. */ | ||
1497 | handle_input(lguest_fd); | 1528 | handle_input(lguest_fd); |
1498 | if (write(lguest_fd, args, sizeof(args)) < 0) | 1529 | if (write(lguest_fd, args, sizeof(args)) < 0) |
1499 | err(1, "Resetting break"); | 1530 | err(1, "Resetting break"); |
1500 | } | 1531 | } |
1501 | } | 1532 | } |
1502 | /* | 1533 | /* |
1503 | * This is the end of the Launcher. | 1534 | * This is the end of the Launcher. The good news: we are over halfway |
1535 | * through! The bad news: the most fiendish part of the code still lies ahead | ||
1536 | * of us. | ||
1504 | * | 1537 | * |
1505 | * But wait! We've seen I/O from the Launcher, and we've seen I/O from the | 1538 | * Are you ready? Take a deep breath and join me in the core of the Host, in |
1506 | * Drivers. If we were to see the Host kernel I/O code, our understanding | 1539 | * "make Host". |
1507 | * would be complete... :*/ | 1540 | :*/ |
1508 | 1541 | ||
1509 | static struct option opts[] = { | 1542 | static struct option opts[] = { |
1510 | { "verbose", 0, NULL, 'v' }, | 1543 | { "verbose", 0, NULL, 'v' }, |
@@ -1527,7 +1560,7 @@ int main(int argc, char *argv[]) | |||
1527 | /* Memory, top-level pagetable, code startpoint and size of the | 1560 | /* Memory, top-level pagetable, code startpoint and size of the |
1528 | * (optional) initrd. */ | 1561 | * (optional) initrd. */ |
1529 | unsigned long mem = 0, pgdir, start, initrd_size = 0; | 1562 | unsigned long mem = 0, pgdir, start, initrd_size = 0; |
1530 | /* A temporary and the /dev/lguest file descriptor. */ | 1563 | /* Two temporaries and the /dev/lguest file descriptor. */ |
1531 | int i, c, lguest_fd; | 1564 | int i, c, lguest_fd; |
1532 | /* The boot information for the Guest. */ | 1565 | /* The boot information for the Guest. */ |
1533 | struct boot_params *boot; | 1566 | struct boot_params *boot; |
@@ -1622,6 +1655,7 @@ int main(int argc, char *argv[]) | |||
1622 | /* The boot header contains a command line pointer: we put the command | 1655 | /* The boot header contains a command line pointer: we put the command |
1623 | * line after the boot header. */ | 1656 | * line after the boot header. */ |
1624 | boot->hdr.cmd_line_ptr = to_guest_phys(boot + 1); | 1657 | boot->hdr.cmd_line_ptr = to_guest_phys(boot + 1); |
1658 | /* We use a simple helper to copy the arguments separated by spaces. */ | ||
1625 | concat((char *)(boot + 1), argv+optind+2); | 1659 | concat((char *)(boot + 1), argv+optind+2); |
1626 | 1660 | ||
1627 | /* Boot protocol version: 2.07 supports the fields for lguest. */ | 1661 | /* Boot protocol version: 2.07 supports the fields for lguest. */ |
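The Waker is only described in the comments above; its select() loop is not part of these hunks. Below is a minimal, hypothetical sketch of what such a loop looks like, assuming the LHREQ_BREAK write convention the comments describe (names like infds and max_fd are placeholders, not the literal lguest.c code):

    /* Hypothetical Waker core: wait for any watched fd to be readable,
     * then ask the Host to stop running the Guest so the Launcher can
     * service the input.  lguest_fd is the open /dev/lguest descriptor. */
    for (;;) {
            fd_set rfds = infds;
            unsigned long args[] = { LHREQ_BREAK, 1 };

            if (select(max_fd + 1, &rfds, NULL, NULL, NULL) < 0)
                    err(1, "Waker select()");

            /* The Launcher's read() of /dev/lguest now returns -EAGAIN;
             * it handles the input and then resets the break. */
            if (write(lguest_fd, args, sizeof(args)) < 0)
                    err(1, "Sending break");
    }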
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index 153d84d281e6..f5a5e6d3d541 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -80,8 +80,6 @@ multicast.txt | |||
80 | - Behaviour of cards under Multicast | 80 | - Behaviour of cards under Multicast |
81 | ncsa-telnet | 81 | ncsa-telnet |
82 | - notes on how NCSA telnet (DOS) breaks with MTU discovery enabled. | 82 | - notes on how NCSA telnet (DOS) breaks with MTU discovery enabled. |
83 | net-modules.txt | ||
84 | - info and "insmod" parameters for all network driver modules. | ||
85 | netdevices.txt | 83 | netdevices.txt |
86 | - info on network device driver functions exported to the kernel. | 84 | - info on network device driver functions exported to the kernel. |
87 | olympic.txt | 85 | olympic.txt |
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 747a5d15d529..6f7872ba1def 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -184,14 +184,14 @@ tcp_frto - INTEGER | |||
184 | F-RTO is an enhanced recovery algorithm for TCP retransmission | 184 | F-RTO is an enhanced recovery algorithm for TCP retransmission |
185 | timeouts. It is particularly beneficial in wireless environments | 185 | timeouts. It is particularly beneficial in wireless environments |
186 | where packet loss is typically due to random radio interference | 186 | where packet loss is typically due to random radio interference |
187 | rather than intermediate router congestion. FRTO is sender-side | 187 | rather than intermediate router congestion. F-RTO is sender-side |
188 | only modification. Therefore it does not require any support from | 188 | only modification. Therefore it does not require any support from |
189 | the peer, but in a typical case, however, where wireless link is | 189 | the peer, but in a typical case, however, where wireless link is |
190 | the local access link and most of the data flows downlink, the | 190 | the local access link and most of the data flows downlink, the |
191 | faraway servers should have FRTO enabled to take advantage of it. | 191 | faraway servers should have F-RTO enabled to take advantage of it. |
192 | If set to 1, basic version is enabled. 2 enables SACK enhanced | 192 | If set to 1, basic version is enabled. 2 enables SACK enhanced |
193 | F-RTO if flow uses SACK. The basic version can be used also when | 193 | F-RTO if flow uses SACK. The basic version can be used also when |
194 | SACK is in use though scenario(s) with it exists where FRTO | 194 | SACK is in use though scenario(s) with it exists where F-RTO |
195 | interacts badly with the packet counting of the SACK enabled TCP | 195 | interacts badly with the packet counting of the SACK enabled TCP |
196 | flow. | 196 | flow. |
197 | 197 | ||
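For reference, tcp_frto is adjusted through the normal sysctl interface; for example, to enable the SACK-enhanced variant on a sending host:

    sysctl -w net.ipv4.tcp_frto=2
    # equivalently:
    echo 2 > /proc/sys/net/ipv4/tcp_frto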
diff --git a/Documentation/networking/net-modules.txt b/Documentation/networking/net-modules.txt
deleted file mode 100644
index 98c4392dd0fd..000000000000
--- a/Documentation/networking/net-modules.txt
+++ /dev/null
@@ -1,315 +0,0 @@ | |||
1 | Wed 2-Aug-95 <matti.aarnio@utu.fi> | ||
2 | |||
3 | Linux network driver modules | ||
4 | |||
5 | Do not mistake this for "README.modules" at the top-level | ||
6 | directory! That document tells about modules in general, while | ||
7 | this one tells only about network device driver modules. | ||
8 | |||
9 | This is a potpourri of INSMOD-time(*) configuration options | ||
10 | (if such exists) and their default values of various modules | ||
11 | in the Linux network drivers collection. | ||
12 | |||
13 | Some modules have also hidden (= non-documented) tunable values. | ||
14 | The choice of not documenting them is based on general belief, that | ||
15 | the less the user needs to know, the better. (There are things that | ||
16 | driver developers can use, others should not confuse themselves.) | ||
17 | |||
18 | In many cases it is highly preferred that insmod:ing is done | ||
19 | ONLY with defining an explicit address for the card, AND BY | ||
20 | NOT USING AUTO-PROBING! | ||
21 | |||
22 | Now most cards have some explicitly defined base address that they | ||
23 | are compiled with (to avoid auto-probing, among other things). | ||
24 | If that compiled value does not match your actual configuration, | ||
25 | do use the "io=0xXXX" -parameter for the insmod, and give there | ||
26 | a value matching your environment. | ||
27 | |||
28 | If you are adventurous, you can ask the driver to autoprobe | ||
29 | by using the "io=0" parameter, however it is a potentially dangerous | ||
30 | thing to do in a live system. (If you don't know where the | ||
31 | card is located, you can try autoprobing, and after possible | ||
32 | crash recovery, insmod with proper IO-address..) | ||
33 | |||
34 | -------------------------- | ||
35 | (*) "INSMOD-time" means when you load module with | ||
36 | /sbin/insmod you can feed it optional parameters. | ||
37 | See "man insmod". | ||
38 | -------------------------- | ||
39 | |||
40 | |||
41 | 8390 based Network Modules (Paul Gortmaker, Nov 12, 1995) | ||
42 | -------------------------- | ||
43 | |||
44 | (Includes: smc-ultra, ne, wd, 3c503, hp, hp-plus, e2100 and ac3200) | ||
45 | |||
46 | The 8390 series of network drivers now support multiple card systems without | ||
47 | reloading the same module multiple times (memory efficient!) This is done by | ||
48 | specifying multiple comma separated values, such as: | ||
49 | |||
50 | insmod 3c503.o io=0x280,0x300,0x330,0x350 xcvr=0,1,0,1 | ||
51 | |||
52 | The above would have the one module controlling four 3c503 cards, with card 2 | ||
53 | and 4 using external transceivers. The "insmod" manual describes the usage | ||
54 | of comma separated value lists. | ||
55 | |||
56 | It is *STRONGLY RECOMMENDED* that you supply "io=" instead of autoprobing. | ||
57 | If an "io=" argument is not supplied, then the ISA drivers will complain | ||
58 | about autoprobing being not recommended, and begrudgingly autoprobe for | ||
59 | a *SINGLE CARD ONLY* -- if you want to use multiple cards you *have* to | ||
60 | supply an "io=0xNNN,0xQQQ,..." argument. | ||
61 | |||
62 | The ne module is an exception to the above. A NE2000 is essentially an | ||
63 | 8390 chip, some bus glue and some RAM. Because of this, the ne probe is | ||
64 | more invasive than the rest, and so at boot we make sure the ne probe is | ||
65 | done last of all the 8390 cards (so that it won't trip over other 8390 based | ||
66 | cards) With modules we can't ensure that all other non-ne 8390 cards have | ||
67 | already been found. Because of this, the ne module REQUIRES an "io=0xNNN" | ||
68 | argument passed in via insmod. It will refuse to autoprobe. | ||
69 | |||
70 | It is also worth noting that auto-IRQ probably isn't as reliable during | ||
71 | the flurry of interrupt activity on a running machine. Cards such as the | ||
72 | ne2000 that can't get the IRQ setting from an EEPROM or configuration | ||
73 | register are probably best supplied with an "irq=M" argument as well. | ||
74 | |||
75 | |||
76 | ---------------------------------------------------------------------- | ||
77 | Card/Module List - Configurable Parameters and Default Values | ||
78 | ---------------------------------------------------------------------- | ||
79 | |||
80 | 3c501.c: | ||
81 | io = 0x280 IO base address | ||
82 | irq = 5 IRQ | ||
83 | (Probes ports: 0x280, 0x300) | ||
84 | |||
85 | 3c503.c: | ||
86 | io = 0 (It will complain if you don't supply an "io=0xNNN") | ||
87 | irq = 0 (IRQ software selected by driver using autoIRQ) | ||
88 | xcvr = 0 (Use xcvr=1 to select external transceiver.) | ||
89 | (Probes ports: 0x300, 0x310, 0x330, 0x350, 0x250, 0x280, 0x2A0, 0x2E0) | ||
90 | |||
91 | 3c505.c: | ||
92 | io = 0 | ||
93 | irq = 0 | ||
94 | dma = 6 (not autoprobed) | ||
95 | (Probes ports: 0x300, 0x280, 0x310) | ||
96 | |||
97 | 3c507.c: | ||
98 | io = 0x300 | ||
99 | irq = 0 | ||
100 | (Probes ports: 0x300, 0x320, 0x340, 0x280) | ||
101 | |||
102 | 3c509.c: | ||
103 | io = 0 | ||
104 | irq = 0 | ||
105 | ( Module load-time probing Works reliably only on EISA, ISA ID-PROBE | ||
106 | IS NOT RELIABLE! Compile this driver statically into kernel for | ||
107 | now, if you need it auto-probing on an ISA-bus machine. ) | ||
108 | |||
109 | 8390.c: | ||
110 | (No public options, several other modules need this one) | ||
111 | |||
112 | a2065.c: | ||
113 | Since this is a Zorro board, it supports full autoprobing, even for | ||
114 | multiple boards. (m68k/Amiga) | ||
115 | |||
116 | ac3200.c: | ||
117 | io = 0 (Checks 0x1000 to 0x8fff in 0x1000 intervals) | ||
118 | irq = 0 (Read from config register) | ||
119 | (EISA probing..) | ||
120 | |||
121 | apricot.c: | ||
122 | io = 0x300 (Can't be altered!) | ||
123 | irq = 10 | ||
124 | |||
125 | arcnet.c: | ||
126 | io = 0 | ||
127 | irqnum = 0 | ||
128 | shmem = 0 | ||
129 | num = 0 | ||
130 | DO SET THESE MANUALLY AT INSMOD! | ||
131 | (When probing, looks at the following possible addresses: | ||
132 | Suggested ones: | ||
133 | 0x300, 0x2E0, 0x2F0, 0x2D0 | ||
134 | Other ones: | ||
135 | 0x200, 0x210, 0x220, 0x230, 0x240, 0x250, 0x260, 0x270, | ||
136 | 0x280, 0x290, 0x2A0, 0x2B0, 0x2C0, | ||
137 | 0x310, 0x320, 0x330, 0x340, 0x350, 0x360, 0x370, | ||
138 | 0x380, 0x390, 0x3A0, 0x3E0, 0x3F0 ) | ||
139 | |||
140 | ariadne.c: | ||
141 | Since this is a Zorro board, it supports full autoprobing, even for | ||
142 | multiple boards. (m68k/Amiga) | ||
143 | |||
144 | at1700.c: | ||
145 | io = 0x260 | ||
146 | irq = 0 | ||
147 | (Probes ports: 0x260, 0x280, 0x2A0, 0x240, 0x340, 0x320, 0x380, 0x300) | ||
148 | |||
149 | atarilance.c: | ||
150 | Supports full autoprobing. (m68k/Atari) | ||
151 | |||
152 | atp.c: *Not modularized* | ||
153 | (Probes ports: 0x378, 0x278, 0x3BC; | ||
154 | fixed IRQs: 5 and 7 ) | ||
155 | |||
156 | cops.c: | ||
157 | io = 0x240 | ||
158 | irq = 5 | ||
159 | nodeid = 0 (AutoSelect = 0, NodeID 1-254 is hand selected.) | ||
160 | (Probes ports: 0x240, 0x340, 0x200, 0x210, 0x220, 0x230, 0x260, | ||
161 | 0x2A0, 0x300, 0x310, 0x320, 0x330, 0x350, 0x360) | ||
162 | |||
163 | de4x5.c: | ||
164 | io = 0x000b | ||
165 | irq = 10 | ||
166 | is_not_dec = 0 -- For non-DEC card using DEC 21040/21041/21140 chip, set this to 1 | ||
167 | (EISA, and PCI probing) | ||
168 | |||
169 | de600.c: | ||
170 | de600_debug = 0 | ||
171 | (On port 0x378, irq 7 -- lpt1; compile time configurable) | ||
172 | |||
173 | de620.c: | ||
174 | bnc = 0, utp = 0 <-- Force media by setting either. | ||
175 | io = 0x378 (also compile-time configurable) | ||
176 | irq = 7 | ||
177 | |||
178 | depca.c: | ||
179 | io = 0x200 | ||
180 | irq = 7 | ||
181 | (Probes ports: ISA: 0x300, 0x200; | ||
182 | EISA: 0x0c00 ) | ||
183 | |||
184 | dummy.c: | ||
185 | No options | ||
186 | |||
187 | e2100.c: | ||
188 | io = 0 (It will complain if you don't supply an "io=0xNNN") | ||
189 | irq = 0 (IRQ software selected by driver) | ||
190 | mem = 0 (Override default shared memory start of 0xd0000) | ||
191 | xcvr = 0 (Use xcvr=1 to select external transceiver.) | ||
192 | (Probes ports: 0x300, 0x280, 0x380, 0x220) | ||
193 | |||
194 | eepro.c: | ||
195 | io = 0x200 | ||
196 | irq = 0 | ||
197 | (Probes ports: 0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0x360) | ||
198 | |||
199 | eexpress.c: | ||
200 | io = 0x300 | ||
201 | irq = 0 (IRQ value read from EEPROM) | ||
202 | (Probes ports: 0x300, 0x270, 0x320, 0x340) | ||
203 | |||
204 | eql.c: | ||
205 | (No parameters) | ||
206 | |||
207 | ewrk3.c: | ||
208 | io = 0x300 | ||
209 | irq = 5 | ||
210 | (With module no autoprobing! | ||
211 | On EISA-bus does EISA probing. | ||
212 | Static linkage probes ports on ISA bus: | ||
213 | 0x100, 0x120, 0x140, 0x160, 0x180, 0x1A0, 0x1C0, | ||
214 | 0x200, 0x220, 0x240, 0x260, 0x280, 0x2A0, 0x2C0, 0x2E0, | ||
215 | 0x300, 0x340, 0x360, 0x380, 0x3A0, 0x3C0) | ||
216 | |||
217 | hp-plus.c: | ||
218 | io = 0 (It will complain if you don't supply an "io=0xNNN") | ||
219 | irq = 0 (IRQ read from configuration register) | ||
220 | (Probes ports: 0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340) | ||
221 | |||
222 | hp.c: | ||
223 | io = 0 (It will complain if you don't supply an "io=0xNNN") | ||
224 | irq = 0 (IRQ software selected by driver using autoIRQ) | ||
225 | (Probes ports: 0x300, 0x320, 0x340, 0x280, 0x2C0, 0x200, 0x240) | ||
226 | |||
227 | hp100.c: | ||
228 | hp100_port = 0 (IO-base address) | ||
229 | (Does EISA-probing, if on EISA-slot; | ||
230 | On ISA-bus probes all ports from 0x100 thru to 0x3E0 | ||
231 | in increments of 0x020) | ||
232 | |||
233 | hydra.c: | ||
234 | Since this is a Zorro board, it supports full autoprobing, even for | ||
235 | multiple boards. (m68k/Amiga) | ||
236 | |||
237 | ibmtr.c: | ||
238 | io = 0xa20, 0xa24 (autoprobed by default) | ||
239 | irq = 0 (driver cannot select irq - read from hardware) | ||
240 | mem = 0 (shared memory base set at 0xd0000 and not yet | ||
241 | able to override thru mem= parameter.) | ||
242 | |||
243 | lance.c: *Not modularized* | ||
244 | (PCI, and ISA probing; "CONFIG_PCI" needed for PCI support) | ||
245 | (Probes ISA ports: 0x300, 0x320, 0x340, 0x360) | ||
246 | |||
247 | loopback.c: *Static kernel component* | ||
248 | |||
249 | ne.c: | ||
250 | io = 0 (Explicitly *requires* an "io=0xNNN" value) | ||
251 | irq = 0 (Tries to determine configured IRQ via autoIRQ) | ||
252 | (Probes ports: 0x300, 0x280, 0x320, 0x340, 0x360) | ||
253 | |||
254 | net_init.c: *Static kernel component* | ||
255 | |||
256 | ni52.c: *Not modularized* | ||
257 | (Probes ports: 0x300, 0x280, 0x360, 0x320, 0x340 | ||
258 | mems: 0xD0000, 0xD2000, 0xC8000, 0xCA000, | ||
259 | 0xD4000, 0xD6000, 0xD8000 ) | ||
260 | |||
261 | ni65.c: *Not modularized* **16MB MEMORY BARRIER BUG** | ||
262 | (Probes ports: 0x300, 0x320, 0x340, 0x360) | ||
263 | |||
264 | pi2.c: *Not modularized* (well, NON-STANDARD modularization!) | ||
265 | Only one card supported at this time. | ||
266 | (Probes ports: 0x380, 0x300, 0x320, 0x340, 0x360, 0x3A0) | ||
267 | |||
268 | plip.c: | ||
269 | io = 0 | ||
270 | irq = 0 (by default, uses IRQ 5 for port at 0x3bc, IRQ 7 | ||
271 | for port at 0x378, and IRQ 2 for port at 0x278) | ||
272 | (Probes ports: 0x278, 0x378, 0x3bc) | ||
273 | |||
274 | ppp.c: | ||
275 | No options (ppp-2.2+ has some, this is based on non-dynamic | ||
276 | version from ppp-2.1.2d) | ||
277 | |||
278 | seeq8005.c: *Not modularized* | ||
279 | (Probes ports: 0x300, 0x320, 0x340, 0x360) | ||
280 | |||
281 | skeleton.c: *Skeleton* | ||
282 | |||
283 | slhc.c: | ||
284 | No configuration parameters | ||
285 | |||
286 | slip.c: | ||
287 | slip_maxdev = 256 (default value from SL_NRUNIT on slip.h) | ||
288 | |||
289 | |||
290 | smc-ultra.c: | ||
291 | io = 0 (It will complain if you don't supply an "io=0xNNN") | ||
292 | irq = 0 (IRQ val. read from EEPROM) | ||
293 | (Probes ports: 0x200, 0x220, 0x240, 0x280, 0x300, 0x340, 0x380) | ||
294 | |||
295 | tulip.c: *Partial modularization* | ||
296 | (init-time memory allocation makes problems..) | ||
297 | |||
298 | tunnel.c: | ||
299 | No insmod parameters | ||
300 | |||
301 | wavelan.c: | ||
302 | io = 0x390 (Settable, but change not recommended) | ||
303 | irq = 0 (Not honoured, if changed..) | ||
304 | |||
305 | wd.c: | ||
306 | io = 0 (It will complain if you don't supply an "io=0xNNN") | ||
307 | irq = 0 (IRQ val. read from EEPROM, ancient cards use autoIRQ) | ||
308 | mem = 0 (Force shared-memory on address 0xC8000, or whatever..) | ||
309 | mem_end = 0 (Force non-std. mem. size via supplying mem_end val.) | ||
310 | (eg. for 32k WD8003EBT, use mem=0xd0000 mem_end=0xd8000) | ||
311 | (Probes ports: 0x300, 0x280, 0x380, 0x240) | ||
312 | |||
313 | znet.c: *Not modularized* | ||
314 | (Only one device on Zenith Z-Note (notebook?) systems, | ||
315 | configuration information from (EE)PROM) | ||
diff --git a/Documentation/networking/tc-actions-env-rules.txt b/Documentation/networking/tc-actions-env-rules.txt
new file mode 100644
index 000000000000..01e716d185f4
--- /dev/null
+++ b/Documentation/networking/tc-actions-env-rules.txt
@@ -0,0 +1,29 @@ | |||
1 | |||
2 | The "enviromental" rules for authors of any new tc actions are: | ||
3 | |||
4 | 1) If you stealeth or borroweth any packet thou shalt be branching | ||
5 | from the righteous path and thou shalt cloneth. | ||
6 | |||
7 | For example if your action queues a packet to be processed later | ||
8 | or intentionally branches by redirecting a packet then you need to | ||
9 | clone the packet. | ||
10 | There are certain fields in the skb tc_verd that need to be reset so we | ||
11 | avoid loops etc. A few are generic enough so much so that skb_act_clone() | ||
12 | resets them for you. So invoke skb_act_clone() rather than skb_clone() | ||
13 | |||
14 | 2) If you munge any packet thou shalt call pskb_expand_head in the case | ||
15 | someone else is referencing the skb. After that you "own" the skb. | ||
16 | You must also tell us if it is ok to munge the packet (TC_OK2MUNGE), | ||
17 | this way any action downstream can stomp on the packet. | ||
18 | |||
19 | 3) dropping packets you dont own is a nono. You simply return | ||
20 | TC_ACT_SHOT to the caller and they will drop it. | ||
21 | |||
22 | The "enviromental" rules for callers of actions (qdiscs etc) are: | ||
23 | |||
24 | *) thou art responsible for freeing anything returned as being | ||
25 | TC_ACT_SHOT/STOLEN/QUEUED. If none of TC_ACT_SHOT/STOLEN/QUEUED is | ||
26 | returned then all is great and you dont need to do anything. | ||
27 | |||
28 | Post on netdev if something is unclear. | ||
29 | |||
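A minimal sketch of how rules 1 and 3 above look from inside an action's packet hook; the function and helpers are invented for illustration, and skb_act_clone() is assumed to take the same (skb, gfp_mask) arguments as skb_clone():

    /* Hypothetical action: queues a copy of interesting packets. */
    static int example_act(struct sk_buff *skb, struct tc_action *a,
                           struct tcf_result *res)
    {
            struct sk_buff *copy;

            if (should_drop(skb))           /* invented policy helper */
                    return TC_ACT_SHOT;     /* rule 3: let the caller free it */

            /* Rule 1: we branch off the normal path, so clone with
             * skb_act_clone() (not skb_clone()) so the tc_verd fields
             * get reset and loops are avoided. */
            copy = skb_act_clone(skb, GFP_ATOMIC);
            if (copy == NULL)
                    return TC_ACT_SHOT;

            queue_for_later(copy);          /* invented: process the copy later */
            return TC_ACT_OK;               /* the original continues on its way */
    }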
diff --git a/Documentation/scsi/link_power_management_policy.txt b/Documentation/scsi/link_power_management_policy.txt
new file mode 100644
index 000000000000..d18993d01884
--- /dev/null
+++ b/Documentation/scsi/link_power_management_policy.txt
@@ -0,0 +1,19 @@ | |||
1 | This parameter allows the user to set the link (interface) power management. | ||
2 | There are 3 possible options: | ||
3 | |||
4 | Value Effect | ||
5 | ---------------------------------------------------------------------------- | ||
6 | min_power Tell the controller to try to make the link use the | ||
7 | least possible power when possible. This may | ||
8 | sacrifice some performance due to increased latency | ||
9 | when coming out of lower power states. | ||
10 | |||
11 | max_performance Generally, this means no power management. Tell | ||
12 | the controller to have performance be a priority | ||
13 | over power management. | ||
14 | |||
15 | medium_power Tell the controller to enter a lower power state | ||
16 | when possible, but do not enter the lowest power | ||
17 | state, thus improving latency over min_power setting. | ||
18 | |||
19 | |||
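The policy described above is normally chosen per SATA host through sysfs; an illustrative example (host0 is arbitrary):

    echo min_power       > /sys/class/scsi_host/host0/link_power_management_policy
    echo max_performance > /sys/class/scsi_host/host0/link_power_management_policy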
diff --git a/MAINTAINERS b/MAINTAINERS
index 76b857157866..231bda28c428 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2129,8 +2129,8 @@ S: Maintained | |||
2129 | JOURNALLING FLASH FILE SYSTEM V2 (JFFS2) | 2129 | JOURNALLING FLASH FILE SYSTEM V2 (JFFS2) |
2130 | P: David Woodhouse | 2130 | P: David Woodhouse |
2131 | M: dwmw2@infradead.org | 2131 | M: dwmw2@infradead.org |
2132 | L: jffs-dev@axis.com | 2132 | L: linux-mtd@lists.infradead.org |
2133 | W: http://sources.redhat.com/jffs2/ | 2133 | W: http://www.linux-mtd.infradead.org/doc/jffs2.html |
2134 | S: Maintained | 2134 | S: Maintained |
2135 | 2135 | ||
2136 | JFS FILESYSTEM | 2136 | JFS FILESYSTEM |
@@ -2259,6 +2259,13 @@ L: legousb-devel@lists.sourceforge.net | |||
2259 | W: http://legousb.sourceforge.net/ | 2259 | W: http://legousb.sourceforge.net/ |
2260 | S: Maintained | 2260 | S: Maintained |
2261 | 2261 | ||
2262 | LGUEST | ||
2263 | P: Rusty Russell | ||
2264 | M: rusty@rustcorp.com.au | ||
2265 | L: lguest@ozlabs.org | ||
2266 | W: http://lguest.ozlabs.org/ | ||
2267 | S: Maintained | ||
2268 | |||
2262 | LINUX FOR IBM pSERIES (RS/6000) | 2269 | LINUX FOR IBM pSERIES (RS/6000) |
2263 | P: Paul Mackerras | 2270 | P: Paul Mackerras |
2264 | M: paulus@au.ibm.com | 2271 | M: paulus@au.ibm.com |
@@ -2442,13 +2449,15 @@ W: http://www.tazenda.demon.co.uk/phil/linux-hp | |||
2442 | S: Maintained | 2449 | S: Maintained |
2443 | 2450 | ||
2444 | MAC80211 | 2451 | MAC80211 |
2445 | P: Jiri Benc | ||
2446 | M: jbenc@suse.cz | ||
2447 | P: Michael Wu | 2452 | P: Michael Wu |
2448 | M: flamingice@sourmilk.net | 2453 | M: flamingice@sourmilk.net |
2454 | P: Johannes Berg | ||
2455 | M: johannes@sipsolutions.net | ||
2456 | P: Jiri Benc | ||
2457 | M: jbenc@suse.cz | ||
2449 | L: linux-wireless@vger.kernel.org | 2458 | L: linux-wireless@vger.kernel.org |
2450 | W: http://linuxwireless.org/ | 2459 | W: http://linuxwireless.org/ |
2451 | T: git kernel.org:/pub/scm/linux/kernel/git/jbenc/mac80211.git | 2460 | T: git kernel.org:/pub/scm/linux/kernel/git/linville/wireless-2.6.git |
2452 | S: Maintained | 2461 | S: Maintained |
2453 | 2462 | ||
2454 | MACVLAN DRIVER | 2463 | MACVLAN DRIVER |
@@ -4141,6 +4150,12 @@ W: http://linuxtv.org | |||
4141 | T: git kernel.org:/pub/scm/linux/kernel/git/mchehab/v4l-dvb.git | 4150 | T: git kernel.org:/pub/scm/linux/kernel/git/mchehab/v4l-dvb.git |
4142 | S: Maintained | 4151 | S: Maintained |
4143 | 4152 | ||
4153 | VLAN (802.1Q) | ||
4154 | P: Patrick McHardy | ||
4155 | M: kaber@trash.net | ||
4156 | L: netdev@vger.kernel.org | ||
4157 | S: Maintained | ||
4158 | |||
4144 | VT1211 HARDWARE MONITOR DRIVER | 4159 | VT1211 HARDWARE MONITOR DRIVER |
4145 | P: Juerg Haefliger | 4160 | P: Juerg Haefliger |
4146 | M: juergh@gmail.com | 4161 | M: juergh@gmail.com |
diff --git a/Makefile b/Makefile
@@ -196,6 +196,9 @@ CROSS_COMPILE ?=
196 | UTS_MACHINE := $(ARCH) | 196 | UTS_MACHINE := $(ARCH) |
197 | SRCARCH := $(ARCH) | 197 | SRCARCH := $(ARCH) |
198 | 198 | ||
199 | # for i386 and x86_64 we use SRCARCH equal to x86 | ||
200 | SRCARCH := $(if $(filter x86_64 i386,$(SRCARCH)),x86,$(SRCARCH)) | ||
201 | |||
199 | KCONFIG_CONFIG ?= .config | 202 | KCONFIG_CONFIG ?= .config |
200 | 203 | ||
201 | # SHELL used by kbuild | 204 | # SHELL used by kbuild |
@@ -418,7 +421,7 @@ ifeq ($(config-targets),1) | |||
418 | # Read arch specific Makefile to set KBUILD_DEFCONFIG as needed. | 421 | # Read arch specific Makefile to set KBUILD_DEFCONFIG as needed. |
419 | # KBUILD_DEFCONFIG may point out an alternative default configuration | 422 | # KBUILD_DEFCONFIG may point out an alternative default configuration |
420 | # used for 'make defconfig' | 423 | # used for 'make defconfig' |
421 | include $(srctree)/arch/$(ARCH)/Makefile | 424 | include $(srctree)/arch/$(SRCARCH)/Makefile |
422 | export KBUILD_DEFCONFIG | 425 | export KBUILD_DEFCONFIG |
423 | 426 | ||
424 | config %config: scripts_basic outputmakefile FORCE | 427 | config %config: scripts_basic outputmakefile FORCE |
@@ -497,7 +500,7 @@ else | |||
497 | KBUILD_CFLAGS += -O2 | 500 | KBUILD_CFLAGS += -O2 |
498 | endif | 501 | endif |
499 | 502 | ||
500 | include $(srctree)/arch/$(ARCH)/Makefile | 503 | include $(srctree)/arch/$(SRCARCH)/Makefile |
501 | 504 | ||
502 | ifdef CONFIG_FRAME_POINTER | 505 | ifdef CONFIG_FRAME_POINTER |
503 | KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls | 506 | KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls |
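The two hunks above mean either legacy x86 ARCH value now pulls in arch/x86/Makefile through SRCARCH; for example:

    # both builds now include arch/x86/Makefile via SRCARCH=x86
    make ARCH=i386 defconfig
    make ARCH=x86_64 defconfig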
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
index a16cb03c5291..d6b61d56b656 100644
--- a/arch/blackfin/kernel/dma-mapping.c
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/device.h> | 35 | #include <linux/device.h> |
36 | #include <linux/dma-mapping.h> | 36 | #include <linux/dma-mapping.h> |
37 | #include <linux/io.h> | 37 | #include <linux/io.h> |
38 | #include <linux/scatterlist.h> | ||
38 | #include <asm/cacheflush.h> | 39 | #include <asm/cacheflush.h> |
39 | #include <asm/bfin-global.h> | 40 | #include <asm/bfin-global.h> |
40 | 41 | ||
diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c
index 671ce1e8434f..662f7b12d005 100644
--- a/arch/frv/mb93090-mb00/pci-dma.c
+++ b/arch/frv/mb93090-mb00/pci-dma.c
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/list.h> | 15 | #include <linux/list.h> |
16 | #include <linux/pci.h> | 16 | #include <linux/pci.h> |
17 | #include <linux/highmem.h> | 17 | #include <linux/highmem.h> |
18 | #include <linux/scatterlist.h> | ||
18 | #include <asm/io.h> | 19 | #include <asm/io.h> |
19 | 20 | ||
20 | void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) | 21 | void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) |
@@ -86,7 +87,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
86 | dampr2 = __get_DAMPR(2); | 87 | dampr2 = __get_DAMPR(2); |
87 | 88 | ||
88 | for (i = 0; i < nents; i++) { | 89 | for (i = 0; i < nents; i++) { |
89 | vaddr = kmap_atomic(sg[i].page, __KM_CACHE); | 90 | vaddr = kmap_atomic(sg_page(&sg[i]), __KM_CACHE); |
90 | 91 | ||
91 | frv_dcache_writeback((unsigned long) vaddr, | 92 | frv_dcache_writeback((unsigned long) vaddr, |
92 | (unsigned long) vaddr + PAGE_SIZE); | 93 | (unsigned long) vaddr + PAGE_SIZE); |
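Note: the frv change above is part of the tree-wide move to the sg_page() accessor; with chained scatterlists, code must no longer dereference sg->page directly. A rough, hedged sketch of the pattern (flush_one_page() is a made-up stand-in for the arch-specific flush, not part of this patch):

    #include <linux/scatterlist.h>
    #include <linux/highmem.h>

    extern void flush_one_page(void *vaddr, unsigned int len); /* hypothetical helper */

    /* Sketch: walk a scatterlist via sg_page() instead of sg->page. */
    static void flush_sg(struct scatterlist *sg, int nents)
    {
            int i;

            for (i = 0; i < nents; i++) {
                    void *vaddr = kmap_atomic(sg_page(&sg[i]), KM_USER0);

                    flush_one_page(vaddr, sg[i].length);
                    kunmap_atomic(vaddr, KM_USER0);
            }
    }
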
diff --git a/arch/i386/.gitignore b/arch/i386/.gitignore deleted file mode 100644 index 36ef4c374d25..000000000000 --- a/arch/i386/.gitignore +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | boot | ||
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c index ef490e1ce600..6f8c080dd9f9 100644 --- a/arch/m68k/kernel/dma.c +++ b/arch/m68k/kernel/dma.c | |||
@@ -9,10 +9,10 @@ | |||
9 | #include <linux/dma-mapping.h> | 9 | #include <linux/dma-mapping.h> |
10 | #include <linux/device.h> | 10 | #include <linux/device.h> |
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/scatterlist.h> | ||
12 | #include <linux/vmalloc.h> | 13 | #include <linux/vmalloc.h> |
13 | 14 | ||
14 | #include <asm/pgalloc.h> | 15 | #include <asm/pgalloc.h> |
15 | #include <asm/scatterlist.h> | ||
16 | 16 | ||
17 | void *dma_alloc_coherent(struct device *dev, size_t size, | 17 | void *dma_alloc_coherent(struct device *dev, size_t size, |
18 | dma_addr_t *handle, gfp_t flag) | 18 | dma_addr_t *handle, gfp_t flag) |
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c index f2eae457fc9a..f2d432edc92d 100644 --- a/arch/sparc/kernel/pcic.c +++ b/arch/sparc/kernel/pcic.c | |||
@@ -753,7 +753,7 @@ void __init pci_time_init(void) | |||
753 | local_irq_enable(); | 753 | local_irq_enable(); |
754 | } | 754 | } |
755 | 755 | ||
756 | static __inline__ unsigned long do_gettimeoffset(void) | 756 | static inline unsigned long do_gettimeoffset(void) |
757 | { | 757 | { |
758 | /* | 758 | /* |
759 | * We divide all by 100 | 759 | * We divide all by 100 |
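Note: the sparc files in this series also drop the GCC-specific __inline__ spelling in favour of plain inline; with the kernel's GNU C dialect the two are equivalent, so this is a cosmetic cleanup. A minimal illustration (the function is invented for the example):

    /* Standard C99 spelling preferred over the __inline__ extension. */
    static inline unsigned long usecs_to_ticks(unsigned long usecs)
    {
            return usecs / 100;     /* arbitrary scale, illustration only */
    }
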
diff --git a/arch/sparc/kernel/process.c b/arch/sparc/kernel/process.c index 33f7a3ddb104..77460e316a03 100644 --- a/arch/sparc/kernel/process.c +++ b/arch/sparc/kernel/process.c | |||
@@ -1,7 +1,6 @@ | |||
1 | /* $Id: process.c,v 1.161 2002/01/23 11:27:32 davem Exp $ | 1 | /* linux/arch/sparc/kernel/process.c |
2 | * linux/arch/sparc/kernel/process.c | ||
3 | * | 2 | * |
4 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | 3 | * Copyright (C) 1995 David S. Miller (davem@davemloft.net) |
5 | * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) | 4 | * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) |
6 | */ | 5 | */ |
7 | 6 | ||
@@ -397,7 +396,7 @@ void flush_thread(void) | |||
397 | } | 396 | } |
398 | } | 397 | } |
399 | 398 | ||
400 | static __inline__ struct sparc_stackf __user * | 399 | static inline struct sparc_stackf __user * |
401 | clone_stackframe(struct sparc_stackf __user *dst, | 400 | clone_stackframe(struct sparc_stackf __user *dst, |
402 | struct sparc_stackf __user *src) | 401 | struct sparc_stackf __user *src) |
403 | { | 402 | { |
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c index 4bf78a5e8e0f..45cb7c5286d7 100644 --- a/arch/sparc/kernel/time.c +++ b/arch/sparc/kernel/time.c | |||
@@ -1,7 +1,6 @@ | |||
1 | /* $Id: time.c,v 1.60 2002/01/23 14:33:55 davem Exp $ | 1 | /* linux/arch/sparc/kernel/time.c |
2 | * linux/arch/sparc/kernel/time.c | ||
3 | * | 2 | * |
4 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | 3 | * Copyright (C) 1995 David S. Miller (davem@davemloft.net) |
5 | * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu) | 4 | * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu) |
6 | * | 5 | * |
7 | * Chris Davis (cdavis@cois.on.ca) 03/27/1998 | 6 | * Chris Davis (cdavis@cois.on.ca) 03/27/1998 |
@@ -210,7 +209,7 @@ static void __devinit kick_start_clock(void) | |||
210 | } | 209 | } |
211 | 210 | ||
212 | /* Return nonzero if the clock chip battery is low. */ | 211 | /* Return nonzero if the clock chip battery is low. */ |
213 | static __inline__ int has_low_battery(void) | 212 | static inline int has_low_battery(void) |
214 | { | 213 | { |
215 | struct mostek48t02 *regs = (struct mostek48t02 *)mstk48t02_regs; | 214 | struct mostek48t02 *regs = (struct mostek48t02 *)mstk48t02_regs; |
216 | unsigned char data1, data2; | 215 | unsigned char data1, data2; |
@@ -252,7 +251,7 @@ static void __devinit mostek_set_system_time(void) | |||
252 | } | 251 | } |
253 | 252 | ||
254 | /* Probe for the real time clock chip on Sun4 */ | 253 | /* Probe for the real time clock chip on Sun4 */ |
255 | static __inline__ void sun4_clock_probe(void) | 254 | static inline void sun4_clock_probe(void) |
256 | { | 255 | { |
257 | #ifdef CONFIG_SUN4 | 256 | #ifdef CONFIG_SUN4 |
258 | int temp; | 257 | int temp; |
diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c index ec4231c2855a..a312d127d47a 100644 --- a/arch/sparc/mm/btfixup.c +++ b/arch/sparc/mm/btfixup.c | |||
@@ -1,5 +1,4 @@ | |||
1 | /* $Id: btfixup.c,v 1.10 2000/05/09 17:40:13 davem Exp $ | 1 | /* btfixup.c: Boot time code fixup and relocator, so that |
2 | * btfixup.c: Boot time code fixup and relocator, so that | ||
3 | * we can get rid of most indirect calls to achieve single | 2 | * we can get rid of most indirect calls to achieve single |
4 | * image sun4c and srmmu kernel. | 3 | * image sun4c and srmmu kernel. |
5 | * | 4 | * |
@@ -69,7 +68,7 @@ static void __init set_addr(unsigned int *addr, unsigned int q1, int fmangled, u | |||
69 | } | 68 | } |
70 | } | 69 | } |
71 | #else | 70 | #else |
72 | static __inline__ void set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value) | 71 | static inline void set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value) |
73 | { | 72 | { |
74 | *addr = value; | 73 | *addr = value; |
75 | } | 74 | } |
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c index 1666087c5b80..b86dfce8eee4 100644 --- a/arch/sparc/mm/io-unit.c +++ b/arch/sparc/mm/io-unit.c | |||
@@ -144,7 +144,7 @@ static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus | |||
144 | spin_lock_irqsave(&iounit->lock, flags); | 144 | spin_lock_irqsave(&iounit->lock, flags); |
145 | while (sz != 0) { | 145 | while (sz != 0) { |
146 | --sz; | 146 | --sz; |
147 | sg->dvma_address = iounit_get_area(iounit, sg_virt(sg), sg->length); | 147 | sg->dvma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length); |
148 | sg->dvma_length = sg->length; | 148 | sg->dvma_length = sg->length; |
149 | sg = sg_next(sg); | 149 | sg = sg_next(sg); |
150 | } | 150 | } |
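Note: sg_virt() returns a void * to the segment's kernel mapping, so the io-unit code now casts it explicitly before doing address arithmetic in iounit_get_area(). Sketch of the idiom (helper name invented):

    #include <linux/scatterlist.h>

    /* sg_virt() gives a void *; cast when an integer address is needed. */
    static unsigned long sg_start_address(struct scatterlist *sg)
    {
            return (unsigned long) sg_virt(sg);
    }
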
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c index a2cc141291c7..0729305f2f59 100644 --- a/arch/sparc/mm/sun4c.c +++ b/arch/sparc/mm/sun4c.c | |||
@@ -1,7 +1,6 @@ | |||
1 | /* $Id: sun4c.c,v 1.212 2001/12/21 04:56:15 davem Exp $ | 1 | /* sun4c.c: Doing in software what should be done in hardware. |
2 | * sun4c.c: Doing in software what should be done in hardware. | ||
3 | * | 2 | * |
4 | * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) | 3 | * Copyright (C) 1996 David S. Miller (davem@davemloft.net) |
5 | * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) | 4 | * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) |
6 | * Copyright (C) 1996 Andrew Tridgell (Andrew.Tridgell@anu.edu.au) | 5 | * Copyright (C) 1996 Andrew Tridgell (Andrew.Tridgell@anu.edu.au) |
7 | * Copyright (C) 1997-2000 Anton Blanchard (anton@samba.org) | 6 | * Copyright (C) 1997-2000 Anton Blanchard (anton@samba.org) |
@@ -719,7 +718,7 @@ static void add_ring(struct sun4c_mmu_ring *ring, | |||
719 | ring->num_entries++; | 718 | ring->num_entries++; |
720 | } | 719 | } |
721 | 720 | ||
722 | static __inline__ void add_lru(struct sun4c_mmu_entry *entry) | 721 | static inline void add_lru(struct sun4c_mmu_entry *entry) |
723 | { | 722 | { |
724 | struct sun4c_mmu_ring *ring = &sun4c_ulru_ring; | 723 | struct sun4c_mmu_ring *ring = &sun4c_ulru_ring; |
725 | struct sun4c_mmu_entry *head = &ring->ringhd; | 724 | struct sun4c_mmu_entry *head = &ring->ringhd; |
@@ -746,7 +745,7 @@ static void add_ring_ordered(struct sun4c_mmu_ring *ring, | |||
746 | add_lru(entry); | 745 | add_lru(entry); |
747 | } | 746 | } |
748 | 747 | ||
749 | static __inline__ void remove_ring(struct sun4c_mmu_ring *ring, | 748 | static inline void remove_ring(struct sun4c_mmu_ring *ring, |
750 | struct sun4c_mmu_entry *entry) | 749 | struct sun4c_mmu_entry *entry) |
751 | { | 750 | { |
752 | struct sun4c_mmu_entry *next = entry->next; | 751 | struct sun4c_mmu_entry *next = entry->next; |
@@ -1836,7 +1835,7 @@ static unsigned long sun4c_pte_to_pgoff(pte_t pte) | |||
1836 | } | 1835 | } |
1837 | 1836 | ||
1838 | 1837 | ||
1839 | static __inline__ unsigned long sun4c_pmd_page_v(pmd_t pmd) | 1838 | static inline unsigned long sun4c_pmd_page_v(pmd_t pmd) |
1840 | { | 1839 | { |
1841 | return (pmd_val(pmd) & PAGE_MASK); | 1840 | return (pmd_val(pmd) & PAGE_MASK); |
1842 | } | 1841 | } |
@@ -1922,7 +1921,7 @@ static void sun4c_free_pgd_fast(pgd_t *pgd) | |||
1922 | } | 1921 | } |
1923 | 1922 | ||
1924 | 1923 | ||
1925 | static __inline__ pte_t * | 1924 | static inline pte_t * |
1926 | sun4c_pte_alloc_one_fast(struct mm_struct *mm, unsigned long address) | 1925 | sun4c_pte_alloc_one_fast(struct mm_struct *mm, unsigned long address) |
1927 | { | 1926 | { |
1928 | unsigned long *ret; | 1927 | unsigned long *ret; |
@@ -1956,7 +1955,7 @@ static struct page *sun4c_pte_alloc_one(struct mm_struct *mm, unsigned long addr | |||
1956 | return virt_to_page(pte); | 1955 | return virt_to_page(pte); |
1957 | } | 1956 | } |
1958 | 1957 | ||
1959 | static __inline__ void sun4c_free_pte_fast(pte_t *pte) | 1958 | static inline void sun4c_free_pte_fast(pte_t *pte) |
1960 | { | 1959 | { |
1961 | *(unsigned long *)pte = (unsigned long) pte_quicklist; | 1960 | *(unsigned long *)pte = (unsigned long) pte_quicklist; |
1962 | pte_quicklist = (unsigned long *) pte; | 1961 | pte_quicklist = (unsigned long *) pte; |
diff --git a/arch/sparc64/kernel/binfmt_elf32.c b/arch/sparc64/kernel/binfmt_elf32.c index 9ad84ff10a17..1587a29a4b0e 100644 --- a/arch/sparc64/kernel/binfmt_elf32.c +++ b/arch/sparc64/kernel/binfmt_elf32.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * binfmt_elf32.c: Support 32-bit Sparc ELF binaries on Ultra. | 2 | * binfmt_elf32.c: Support 32-bit Sparc ELF binaries on Ultra. |
3 | * | 3 | * |
4 | * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com) | 4 | * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@davemloft.net) |
5 | * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz) | 5 | * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz) |
6 | */ | 6 | */ |
7 | 7 | ||
@@ -133,7 +133,7 @@ struct elf_prpsinfo32 | |||
133 | 133 | ||
134 | #undef cputime_to_timeval | 134 | #undef cputime_to_timeval |
135 | #define cputime_to_timeval cputime_to_compat_timeval | 135 | #define cputime_to_timeval cputime_to_compat_timeval |
136 | static __inline__ void | 136 | static inline void |
137 | cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) | 137 | cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) |
138 | { | 138 | { |
139 | unsigned long jiffies = cputime_to_jiffies(cputime); | 139 | unsigned long jiffies = cputime_to_jiffies(cputime); |
diff --git a/arch/sparc64/kernel/central.c b/arch/sparc64/kernel/central.c index 8230099f0d8a..b61b8dfb09cf 100644 --- a/arch/sparc64/kernel/central.c +++ b/arch/sparc64/kernel/central.c | |||
@@ -1,7 +1,6 @@ | |||
1 | /* $Id: central.c,v 1.15 2001/12/19 00:29:51 davem Exp $ | 1 | /* central.c: Central FHC driver for Sunfire/Starfire/Wildfire. |
2 | * central.c: Central FHC driver for Sunfire/Starfire/Wildfire. | ||
3 | * | 2 | * |
4 | * Copyright (C) 1997, 1999 David S. Miller (davem@redhat.com) | 3 | * Copyright (C) 1997, 1999 David S. Miller (davem@davemloft.net) |
5 | */ | 4 | */ |
6 | 5 | ||
7 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
@@ -385,7 +384,7 @@ void __init central_probe(void) | |||
385 | init_all_fhc_hw(); | 384 | init_all_fhc_hw(); |
386 | } | 385 | } |
387 | 386 | ||
388 | static __inline__ void fhc_ledblink(struct linux_fhc *fhc, int on) | 387 | static inline void fhc_ledblink(struct linux_fhc *fhc, int on) |
389 | { | 388 | { |
390 | u32 tmp; | 389 | u32 tmp; |
391 | 390 | ||
@@ -402,7 +401,7 @@ static __inline__ void fhc_ledblink(struct linux_fhc *fhc, int on) | |||
402 | upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL); | 401 | upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL); |
403 | } | 402 | } |
404 | 403 | ||
405 | static __inline__ void central_ledblink(struct linux_central *central, int on) | 404 | static inline void central_ledblink(struct linux_central *central, int on) |
406 | { | 405 | { |
407 | u8 tmp; | 406 | u8 tmp; |
408 | 407 | ||
diff --git a/arch/sparc64/kernel/iommu_common.c b/arch/sparc64/kernel/iommu_common.c index b70324e0d83d..efd5dff85f60 100644 --- a/arch/sparc64/kernel/iommu_common.c +++ b/arch/sparc64/kernel/iommu_common.c | |||
@@ -234,7 +234,7 @@ unsigned long prepare_sg(struct scatterlist *sg, int nents) | |||
234 | dma_sg->dma_length = dent_len; | 234 | dma_sg->dma_length = dent_len; |
235 | 235 | ||
236 | if (dma_sg != sg) { | 236 | if (dma_sg != sg) { |
237 | dma_sg = next_sg(dma_sg); | 237 | dma_sg = sg_next(dma_sg); |
238 | dma_sg->dma_length = 0; | 238 | dma_sg->dma_length = 0; |
239 | } | 239 | } |
240 | 240 | ||
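Note: next_sg() was a local helper that assumed scatterlist entries sit in a flat array; with chained scatterlists the generic sg_next() must be used, since the following entry may live in another chunk. A hedged sketch of a traversal written that way (the summing function is illustrative, not from this file):

    #include <linux/scatterlist.h>

    /* Walk a possibly-chained scatterlist with sg_next(). */
    static unsigned int sg_total_length(struct scatterlist *sg, int nents)
    {
            unsigned int len = 0;
            int i;

            for (i = 0; i < nents; i++, sg = sg_next(sg))
                    len += sg->length;

            return len;
    }
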
diff --git a/arch/sparc64/kernel/ldc.c b/arch/sparc64/kernel/ldc.c index c8313cb60f0a..217478a94128 100644 --- a/arch/sparc64/kernel/ldc.c +++ b/arch/sparc64/kernel/ldc.c | |||
@@ -2121,7 +2121,7 @@ int ldc_map_sg(struct ldc_channel *lp, | |||
2121 | state.nc = 0; | 2121 | state.nc = 0; |
2122 | 2122 | ||
2123 | for (i = 0; i < num_sg; i++) | 2123 | for (i = 0; i < num_sg; i++) |
2124 | fill_cookies(&state, page_to_pfn(sg[i].page) << PAGE_SHIFT, | 2124 | fill_cookies(&state, page_to_pfn(sg_page(&sg[i])) << PAGE_SHIFT, |
2125 | sg[i].offset, sg[i].length); | 2125 | sg[i].offset, sg[i].length); |
2126 | 2126 | ||
2127 | return state.nc; | 2127 | return state.nc; |
diff --git a/arch/sparc64/kernel/semaphore.c b/arch/sparc64/kernel/semaphore.c index a809e63f03ef..9974a6899551 100644 --- a/arch/sparc64/kernel/semaphore.c +++ b/arch/sparc64/kernel/semaphore.c | |||
@@ -1,5 +1,4 @@ | |||
1 | /* $Id: semaphore.c,v 1.9 2001/11/18 00:12:56 davem Exp $ | 1 | /* semaphore.c: Sparc64 semaphore implementation. |
2 | * semaphore.c: Sparc64 semaphore implementation. | ||
3 | * | 2 | * |
4 | * This is basically the PPC semaphore scheme ported to use | 3 | * This is basically the PPC semaphore scheme ported to use |
5 | * the sparc64 atomic instructions, so see the PPC code for | 4 | * the sparc64 atomic instructions, so see the PPC code for |
@@ -19,7 +18,7 @@ | |||
19 | * sem->count = tmp; | 18 | * sem->count = tmp; |
20 | * return old_count; | 19 | * return old_count; |
21 | */ | 20 | */ |
22 | static __inline__ int __sem_update_count(struct semaphore *sem, int incr) | 21 | static inline int __sem_update_count(struct semaphore *sem, int incr) |
23 | { | 22 | { |
24 | int old_count, tmp; | 23 | int old_count, tmp; |
25 | 24 | ||
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 407d74a8a542..7cd8d94df0dc 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c | |||
@@ -459,7 +459,7 @@ again: | |||
459 | } | 459 | } |
460 | } | 460 | } |
461 | 461 | ||
462 | static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) | 462 | static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) |
463 | { | 463 | { |
464 | u64 pstate; | 464 | u64 pstate; |
465 | int i; | 465 | int i; |
@@ -906,7 +906,7 @@ extern atomic_t dcpage_flushes; | |||
906 | extern atomic_t dcpage_flushes_xcall; | 906 | extern atomic_t dcpage_flushes_xcall; |
907 | #endif | 907 | #endif |
908 | 908 | ||
909 | static __inline__ void __local_flush_dcache_page(struct page *page) | 909 | static inline void __local_flush_dcache_page(struct page *page) |
910 | { | 910 | { |
911 | #ifdef DCACHE_ALIASING_POSSIBLE | 911 | #ifdef DCACHE_ALIASING_POSSIBLE |
912 | __flush_dcache_page(page_address(page), | 912 | __flush_dcache_page(page_address(page), |
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c index 560cb1edb1d0..c56573a10eee 100644 --- a/arch/sparc64/kernel/sys_sparc.c +++ b/arch/sparc64/kernel/sys_sparc.c | |||
@@ -318,7 +318,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u | |||
318 | 318 | ||
319 | if (flags & MAP_FIXED) { | 319 | if (flags & MAP_FIXED) { |
320 | /* Ok, don't mess with it. */ | 320 | /* Ok, don't mess with it. */ |
321 | return get_unmapped_area(NULL, addr, len, pgoff, flags); | 321 | return get_unmapped_area(NULL, orig_addr, len, pgoff, flags); |
322 | } | 322 | } |
323 | flags &= ~MAP_SHARED; | 323 | flags &= ~MAP_SHARED; |
324 | 324 | ||
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index e9c7e4f07abf..04998388259f 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c | |||
@@ -1,7 +1,6 @@ | |||
1 | /* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $ | 1 | /* arch/sparc64/kernel/traps.c |
2 | * arch/sparc64/kernel/traps.c | ||
3 | * | 2 | * |
4 | * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu) | 3 | * Copyright (C) 1995,1997 David S. Miller (davem@davemloft.net) |
5 | * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com) | 4 | * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com) |
6 | */ | 5 | */ |
7 | 6 | ||
@@ -765,7 +764,7 @@ static unsigned long cheetah_afsr_errors; | |||
765 | */ | 764 | */ |
766 | struct cheetah_err_info *cheetah_error_log; | 765 | struct cheetah_err_info *cheetah_error_log; |
767 | 766 | ||
768 | static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr) | 767 | static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr) |
769 | { | 768 | { |
770 | struct cheetah_err_info *p; | 769 | struct cheetah_err_info *p; |
771 | int cpu = smp_processor_id(); | 770 | int cpu = smp_processor_id(); |
@@ -1085,7 +1084,7 @@ static unsigned char cheetah_mtag_syntab[] = { | |||
1085 | }; | 1084 | }; |
1086 | 1085 | ||
1087 | /* Return the highest priority error condition mentioned. */ | 1086 | /* Return the highest priority error condition mentioned. */ |
1088 | static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr) | 1087 | static inline unsigned long cheetah_get_hipri(unsigned long afsr) |
1089 | { | 1088 | { |
1090 | unsigned long tmp = 0; | 1089 | unsigned long tmp = 0; |
1091 | int i; | 1090 | int i; |
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 100c4456ed1e..e18ccf85224f 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c | |||
@@ -201,7 +201,7 @@ inline void flush_dcache_page_impl(struct page *page) | |||
201 | #define dcache_dirty_cpu(page) \ | 201 | #define dcache_dirty_cpu(page) \ |
202 | (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask) | 202 | (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask) |
203 | 203 | ||
204 | static __inline__ void set_dcache_dirty(struct page *page, int this_cpu) | 204 | static inline void set_dcache_dirty(struct page *page, int this_cpu) |
205 | { | 205 | { |
206 | unsigned long mask = this_cpu; | 206 | unsigned long mask = this_cpu; |
207 | unsigned long non_cpu_bits; | 207 | unsigned long non_cpu_bits; |
@@ -223,7 +223,7 @@ static __inline__ void set_dcache_dirty(struct page *page, int this_cpu) | |||
223 | : "g1", "g7"); | 223 | : "g1", "g7"); |
224 | } | 224 | } |
225 | 225 | ||
226 | static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu) | 226 | static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu) |
227 | { | 227 | { |
228 | unsigned long mask = (1UL << PG_dcache_dirty); | 228 | unsigned long mask = (1UL << PG_dcache_dirty); |
229 | 229 | ||
diff --git a/arch/sparc64/prom/console.c b/arch/sparc64/prom/console.c index 3fafa9a8b50b..e1c3fc87484d 100644 --- a/arch/sparc64/prom/console.c +++ b/arch/sparc64/prom/console.c | |||
@@ -1,8 +1,7 @@ | |||
1 | /* $Id: console.c,v 1.9 1997/10/29 07:41:43 ecd Exp $ | 1 | /* console.c: Routines that deal with sending and receiving IO |
2 | * console.c: Routines that deal with sending and receiving IO | ||
3 | * to/from the current console device using the PROM. | 2 | * to/from the current console device using the PROM. |
4 | * | 3 | * |
5 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | 4 | * Copyright (C) 1995 David S. Miller (davem@davemloft.net) |
6 | * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | 5 | * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) |
7 | */ | 6 | */ |
8 | 7 | ||
@@ -19,7 +18,7 @@ extern int prom_stdin, prom_stdout; | |||
19 | /* Non blocking get character from console input device, returns -1 | 18 | /* Non blocking get character from console input device, returns -1 |
20 | * if no input was taken. This can be used for polling. | 19 | * if no input was taken. This can be used for polling. |
21 | */ | 20 | */ |
22 | __inline__ int | 21 | inline int |
23 | prom_nbgetchar(void) | 22 | prom_nbgetchar(void) |
24 | { | 23 | { |
25 | char inc; | 24 | char inc; |
@@ -35,7 +34,7 @@ prom_nbgetchar(void) | |||
35 | /* Non blocking put character to console device, returns -1 if | 34 | /* Non blocking put character to console device, returns -1 if |
36 | * unsuccessful. | 35 | * unsuccessful. |
37 | */ | 36 | */ |
38 | __inline__ int | 37 | inline int |
39 | prom_nbputchar(char c) | 38 | prom_nbputchar(char c) |
40 | { | 39 | { |
41 | char outc; | 40 | char outc; |
diff --git a/arch/sparc64/prom/tree.c b/arch/sparc64/prom/tree.c index b2c5b12c9818..a99ccd7fb1b0 100644 --- a/arch/sparc64/prom/tree.c +++ b/arch/sparc64/prom/tree.c | |||
@@ -18,14 +18,12 @@ | |||
18 | /* Return the child of node 'node' or zero if this node has no | 18 | /* Return the child of node 'node' or zero if this node has no |
19 | * direct descendent. | 19 | * direct descendent. |
20 | */ | 20 | */ |
21 | __inline__ int | 21 | inline int __prom_getchild(int node) |
22 | __prom_getchild(int node) | ||
23 | { | 22 | { |
24 | return p1275_cmd ("child", P1275_INOUT(1, 1), node); | 23 | return p1275_cmd ("child", P1275_INOUT(1, 1), node); |
25 | } | 24 | } |
26 | 25 | ||
27 | __inline__ int | 26 | inline int prom_getchild(int node) |
28 | prom_getchild(int node) | ||
29 | { | 27 | { |
30 | int cnode; | 28 | int cnode; |
31 | 29 | ||
@@ -35,8 +33,7 @@ prom_getchild(int node) | |||
35 | return (int)cnode; | 33 | return (int)cnode; |
36 | } | 34 | } |
37 | 35 | ||
38 | __inline__ int | 36 | inline int prom_getparent(int node) |
39 | prom_getparent(int node) | ||
40 | { | 37 | { |
41 | int cnode; | 38 | int cnode; |
42 | 39 | ||
@@ -49,14 +46,12 @@ prom_getparent(int node) | |||
49 | /* Return the next sibling of node 'node' or zero if no more siblings | 46 | /* Return the next sibling of node 'node' or zero if no more siblings |
50 | * at this level of depth in the tree. | 47 | * at this level of depth in the tree. |
51 | */ | 48 | */ |
52 | __inline__ int | 49 | inline int __prom_getsibling(int node) |
53 | __prom_getsibling(int node) | ||
54 | { | 50 | { |
55 | return p1275_cmd(prom_peer_name, P1275_INOUT(1, 1), node); | 51 | return p1275_cmd(prom_peer_name, P1275_INOUT(1, 1), node); |
56 | } | 52 | } |
57 | 53 | ||
58 | __inline__ int | 54 | inline int prom_getsibling(int node) |
59 | prom_getsibling(int node) | ||
60 | { | 55 | { |
61 | int sibnode; | 56 | int sibnode; |
62 | 57 | ||
@@ -72,8 +67,7 @@ prom_getsibling(int node) | |||
72 | /* Return the length in bytes of property 'prop' at node 'node'. | 67 | /* Return the length in bytes of property 'prop' at node 'node'. |
73 | * Return -1 on error. | 68 | * Return -1 on error. |
74 | */ | 69 | */ |
75 | __inline__ int | 70 | inline int prom_getproplen(int node, const char *prop) |
76 | prom_getproplen(int node, const char *prop) | ||
77 | { | 71 | { |
78 | if((!node) || (!prop)) return -1; | 72 | if((!node) || (!prop)) return -1; |
79 | return p1275_cmd ("getproplen", | 73 | return p1275_cmd ("getproplen", |
@@ -86,8 +80,8 @@ prom_getproplen(int node, const char *prop) | |||
86 | * 'buffer' which has a size of 'bufsize'. If the acquisition | 80 | * 'buffer' which has a size of 'bufsize'. If the acquisition |
87 | * was successful the length will be returned, else -1 is returned. | 81 | * was successful the length will be returned, else -1 is returned. |
88 | */ | 82 | */ |
89 | __inline__ int | 83 | inline int prom_getproperty(int node, const char *prop, |
90 | prom_getproperty(int node, const char *prop, char *buffer, int bufsize) | 84 | char *buffer, int bufsize) |
91 | { | 85 | { |
92 | int plen; | 86 | int plen; |
93 | 87 | ||
@@ -107,8 +101,7 @@ prom_getproperty(int node, const char *prop, char *buffer, int bufsize) | |||
107 | /* Acquire an integer property and return its value. Returns -1 | 101 | /* Acquire an integer property and return its value. Returns -1 |
108 | * on failure. | 102 | * on failure. |
109 | */ | 103 | */ |
110 | __inline__ int | 104 | inline int prom_getint(int node, const char *prop) |
111 | prom_getint(int node, const char *prop) | ||
112 | { | 105 | { |
113 | int intprop; | 106 | int intprop; |
114 | 107 | ||
@@ -122,8 +115,7 @@ prom_getint(int node, const char *prop) | |||
122 | * integer. | 115 | * integer. |
123 | */ | 116 | */ |
124 | 117 | ||
125 | int | 118 | int prom_getintdefault(int node, const char *property, int deflt) |
126 | prom_getintdefault(int node, const char *property, int deflt) | ||
127 | { | 119 | { |
128 | int retval; | 120 | int retval; |
129 | 121 | ||
@@ -134,8 +126,7 @@ prom_getintdefault(int node, const char *property, int deflt) | |||
134 | } | 126 | } |
135 | 127 | ||
136 | /* Acquire a boolean property, 1=TRUE 0=FALSE. */ | 128 | /* Acquire a boolean property, 1=TRUE 0=FALSE. */ |
137 | int | 129 | int prom_getbool(int node, const char *prop) |
138 | prom_getbool(int node, const char *prop) | ||
139 | { | 130 | { |
140 | int retval; | 131 | int retval; |
141 | 132 | ||
@@ -148,8 +139,7 @@ prom_getbool(int node, const char *prop) | |||
148 | * string on error. The char pointer is the user supplied string | 139 | * string on error. The char pointer is the user supplied string |
149 | * buffer. | 140 | * buffer. |
150 | */ | 141 | */ |
151 | void | 142 | void prom_getstring(int node, const char *prop, char *user_buf, int ubuf_size) |
152 | prom_getstring(int node, const char *prop, char *user_buf, int ubuf_size) | ||
153 | { | 143 | { |
154 | int len; | 144 | int len; |
155 | 145 | ||
@@ -163,8 +153,7 @@ prom_getstring(int node, const char *prop, char *user_buf, int ubuf_size) | |||
163 | /* Does the device at node 'node' have name 'name'? | 153 | /* Does the device at node 'node' have name 'name'? |
164 | * YES = 1 NO = 0 | 154 | * YES = 1 NO = 0 |
165 | */ | 155 | */ |
166 | int | 156 | int prom_nodematch(int node, const char *name) |
167 | prom_nodematch(int node, const char *name) | ||
168 | { | 157 | { |
169 | char namebuf[128]; | 158 | char namebuf[128]; |
170 | prom_getproperty(node, "name", namebuf, sizeof(namebuf)); | 159 | prom_getproperty(node, "name", namebuf, sizeof(namebuf)); |
@@ -175,8 +164,7 @@ prom_nodematch(int node, const char *name) | |||
175 | /* Search siblings at 'node_start' for a node with name | 164 | /* Search siblings at 'node_start' for a node with name |
176 | * 'nodename'. Return node if successful, zero if not. | 165 | * 'nodename'. Return node if successful, zero if not. |
177 | */ | 166 | */ |
178 | int | 167 | int prom_searchsiblings(int node_start, const char *nodename) |
179 | prom_searchsiblings(int node_start, const char *nodename) | ||
180 | { | 168 | { |
181 | 169 | ||
182 | int thisnode, error; | 170 | int thisnode, error; |
@@ -197,8 +185,7 @@ prom_searchsiblings(int node_start, const char *nodename) | |||
197 | /* Return the first property type for node 'node'. | 185 | /* Return the first property type for node 'node'. |
198 | * buffer should be at least 32B in length | 186 | * buffer should be at least 32B in length |
199 | */ | 187 | */ |
200 | __inline__ char * | 188 | inline char *prom_firstprop(int node, char *buffer) |
201 | prom_firstprop(int node, char *buffer) | ||
202 | { | 189 | { |
203 | *buffer = 0; | 190 | *buffer = 0; |
204 | if(node == -1) return buffer; | 191 | if(node == -1) return buffer; |
@@ -212,8 +199,7 @@ prom_firstprop(int node, char *buffer) | |||
212 | * at node 'node' . Returns NULL string if no more | 199 | * at node 'node' . Returns NULL string if no more |
213 | * property types for this node. | 200 | * property types for this node. |
214 | */ | 201 | */ |
215 | __inline__ char * | 202 | inline char *prom_nextprop(int node, const char *oprop, char *buffer) |
216 | prom_nextprop(int node, const char *oprop, char *buffer) | ||
217 | { | 203 | { |
218 | char buf[32]; | 204 | char buf[32]; |
219 | 205 | ||
@@ -279,8 +265,7 @@ prom_setprop(int node, const char *pname, char *value, int size) | |||
279 | node, pname, value, P1275_SIZE(size)); | 265 | node, pname, value, P1275_SIZE(size)); |
280 | } | 266 | } |
281 | 267 | ||
282 | __inline__ int | 268 | inline int prom_inst2pkg(int inst) |
283 | prom_inst2pkg(int inst) | ||
284 | { | 269 | { |
285 | int node; | 270 | int node; |
286 | 271 | ||
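Note: the prom/tree.c hunks above are formatting-only (the __inline__ keyword goes away and each return type joins its declaration); the OBP property interface is unchanged. For orientation, typical use of these helpers looks roughly like this (the function and the properties queried are chosen for illustration):

    #include <asm/oplib.h>

    /* Illustrative: read the name and "reg" property of the first child
     * of the PROM root node. */
    static void show_first_child(void)
    {
            char name[128];
            int node = prom_getchild(prom_root_node);

            prom_getstring(node, "name", name, sizeof(name));
            prom_printf("first child: %s reg=%d\n", name,
                        prom_getintdefault(node, "reg", -1));
    }
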
diff --git a/arch/um/Kconfig.i386 b/arch/um/Kconfig.i386 index 9876d80d85dd..e0ac74e5d4c4 100644 --- a/arch/um/Kconfig.i386 +++ b/arch/um/Kconfig.i386 | |||
@@ -1,6 +1,6 @@ | |||
1 | menu "Host processor type and features" | 1 | menu "Host processor type and features" |
2 | 2 | ||
3 | source "arch/i386/Kconfig.cpu" | 3 | source "arch/x86/Kconfig.cpu" |
4 | 4 | ||
5 | endmenu | 5 | endmenu |
6 | 6 | ||
diff --git a/arch/um/Makefile-i386 b/arch/um/Makefile-i386 index 0178df306939..b01dfb00e5f8 100644 --- a/arch/um/Makefile-i386 +++ b/arch/um/Makefile-i386 | |||
@@ -9,6 +9,7 @@ ELF_ARCH := $(SUBARCH) | |||
9 | ELF_FORMAT := elf32-$(SUBARCH) | 9 | ELF_FORMAT := elf32-$(SUBARCH) |
10 | OBJCOPYFLAGS := -O binary -R .note -R .comment -S | 10 | OBJCOPYFLAGS := -O binary -R .note -R .comment -S |
11 | HEADER_ARCH := x86 | 11 | HEADER_ARCH := x86 |
12 | CHECKFLAGS += -D__i386__ | ||
12 | 13 | ||
13 | ifeq ("$(origin SUBARCH)", "command line") | 14 | ifeq ("$(origin SUBARCH)", "command line") |
14 | ifneq ("$(shell uname -m | sed -e s/i.86/i386/)", "$(SUBARCH)") | 15 | ifneq ("$(shell uname -m | sed -e s/i.86/i386/)", "$(SUBARCH)") |
@@ -26,10 +27,8 @@ AFLAGS += -DCONFIG_X86_32 | |||
26 | CONFIG_X86_32 := y | 27 | CONFIG_X86_32 := y |
27 | export CONFIG_X86_32 | 28 | export CONFIG_X86_32 |
28 | 29 | ||
29 | ARCH_KERNEL_DEFINES += -U__$(SUBARCH)__ -U$(SUBARCH) | ||
30 | |||
31 | # First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y. | 30 | # First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y. |
32 | include $(srctree)/arch/i386/Makefile.cpu | 31 | include $(srctree)/arch/x86/Makefile_32.cpu |
33 | 32 | ||
34 | # prevent gcc from keeping the stack 16 byte aligned. Taken from i386. | 33 | # prevent gcc from keeping the stack 16 byte aligned. Taken from i386. |
35 | cflags-y += $(call cc-option,-mpreferred-stack-boundary=2) | 34 | cflags-y += $(call cc-option,-mpreferred-stack-boundary=2) |
diff --git a/arch/um/Makefile-x86_64 b/arch/um/Makefile-x86_64 index fe5316f0c6a5..8ed362f93582 100644 --- a/arch/um/Makefile-x86_64 +++ b/arch/um/Makefile-x86_64 | |||
@@ -6,12 +6,9 @@ START := 0x60000000 | |||
6 | 6 | ||
7 | _extra_flags_ = -fno-builtin -m64 | 7 | _extra_flags_ = -fno-builtin -m64 |
8 | 8 | ||
9 | #We #undef __x86_64__ for kernelspace, not for userspace where | ||
10 | #it's needed for headers to work! | ||
11 | ARCH_KERNEL_DEFINES = -U__$(SUBARCH)__ | ||
12 | KBUILD_CFLAGS += $(_extra_flags_) | 9 | KBUILD_CFLAGS += $(_extra_flags_) |
13 | 10 | ||
14 | CHECKFLAGS += -m64 | 11 | CHECKFLAGS += -m64 -D__x86_64__ |
15 | KBUILD_AFLAGS += -m64 | 12 | KBUILD_AFLAGS += -m64 |
16 | LDFLAGS += -m elf_x86_64 | 13 | LDFLAGS += -m elf_x86_64 |
17 | KBUILD_CPPFLAGS += -m64 | 14 | KBUILD_CPPFLAGS += -m64 |
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index 3a8cd3dfb51c..e184b44b1011 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include "linux/genhd.h" | 35 | #include "linux/genhd.h" |
36 | #include "linux/spinlock.h" | 36 | #include "linux/spinlock.h" |
37 | #include "linux/platform_device.h" | 37 | #include "linux/platform_device.h" |
38 | #include "linux/scatterlist.h" | ||
38 | #include "asm/segment.h" | 39 | #include "asm/segment.h" |
39 | #include "asm/uaccess.h" | 40 | #include "asm/uaccess.h" |
40 | #include "asm/irq.h" | 41 | #include "asm/irq.h" |
@@ -704,6 +705,7 @@ static int ubd_add(int n, char **error_out) | |||
704 | ubd_dev->size = ROUND_BLOCK(ubd_dev->size); | 705 | ubd_dev->size = ROUND_BLOCK(ubd_dev->size); |
705 | 706 | ||
706 | INIT_LIST_HEAD(&ubd_dev->restart); | 707 | INIT_LIST_HEAD(&ubd_dev->restart); |
708 | sg_init_table(&ubd_dev->sg, MAX_SG); | ||
707 | 709 | ||
708 | err = -ENOMEM; | 710 | err = -ENOMEM; |
709 | ubd_dev->queue = blk_init_queue(do_ubd_request, &ubd_dev->lock); | 711 | ubd_dev->queue = blk_init_queue(do_ubd_request, &ubd_dev->lock); |
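Note: sg_init_table() is mandatory with the new scatterlist code: it clears the array and marks the last entry so that sg_next()/sg_page() and the end-of-list checks behave. A minimal sketch of the pattern (the array size is invented here; the driver uses its own MAX_SG):

    #include <linux/scatterlist.h>

    #define EXAMPLE_MAX_SG 64               /* illustrative bound only */

    static struct scatterlist example_sg[EXAMPLE_MAX_SG];

    static void example_sg_setup(void)
    {
            /* Must run before the array is handed to any sg_* helper. */
            sg_init_table(example_sg, EXAMPLE_MAX_SG);
    }
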
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index 8456397f5f4d..59822dee438a 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c | |||
@@ -165,7 +165,7 @@ static void __init kmap_init(void) | |||
165 | kmap_prot = PAGE_KERNEL; | 165 | kmap_prot = PAGE_KERNEL; |
166 | } | 166 | } |
167 | 167 | ||
168 | static void init_highmem(void) | 168 | static void __init init_highmem(void) |
169 | { | 169 | { |
170 | pgd_t *pgd; | 170 | pgd_t *pgd; |
171 | pud_t *pud; | 171 | pud_t *pud; |
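Note: tagging init_highmem() with __init moves it into the init text section, which the kernel frees once boot completes; the function is only reachable from setup code, so there is no reason to keep it resident. Generic sketch (the function here is invented):

    #include <linux/init.h>

    /* Runs once at boot; its text is discarded after init finishes. */
    static void __init example_boot_setup(void)
    {
            /* one-time initialisation would go here */
    }
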
diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c index 9657c89fdf31..bd3da8a61f64 100644 --- a/arch/um/sys-i386/ptrace.c +++ b/arch/um/sys-i386/ptrace.c | |||
@@ -155,7 +155,7 @@ int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) | |||
155 | if (err) | 155 | if (err) |
156 | return err; | 156 | return err; |
157 | 157 | ||
158 | n = copy_to_user((void *) buf, fpregs, sizeof(fpregs)); | 158 | n = copy_to_user(buf, fpregs, sizeof(fpregs)); |
159 | if(n > 0) | 159 | if(n > 0) |
160 | return -EFAULT; | 160 | return -EFAULT; |
161 | 161 | ||
@@ -168,7 +168,7 @@ int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) | |||
168 | long fpregs[HOST_FP_SIZE]; | 168 | long fpregs[HOST_FP_SIZE]; |
169 | 169 | ||
170 | BUG_ON(sizeof(*buf) != sizeof(fpregs)); | 170 | BUG_ON(sizeof(*buf) != sizeof(fpregs)); |
171 | n = copy_from_user(fpregs, (void *) buf, sizeof(fpregs)); | 171 | n = copy_from_user(fpregs, buf, sizeof(fpregs)); |
172 | if (n > 0) | 172 | if (n > 0) |
173 | return -EFAULT; | 173 | return -EFAULT; |
174 | 174 | ||
@@ -185,7 +185,7 @@ int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) | |||
185 | if (err) | 185 | if (err) |
186 | return err; | 186 | return err; |
187 | 187 | ||
188 | n = copy_to_user((void *) buf, fpregs, sizeof(fpregs)); | 188 | n = copy_to_user(buf, fpregs, sizeof(fpregs)); |
189 | if(n > 0) | 189 | if(n > 0) |
190 | return -EFAULT; | 190 | return -EFAULT; |
191 | 191 | ||
@@ -198,7 +198,7 @@ int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) | |||
198 | long fpregs[HOST_XFP_SIZE]; | 198 | long fpregs[HOST_XFP_SIZE]; |
199 | 199 | ||
200 | BUG_ON(sizeof(*buf) != sizeof(fpregs)); | 200 | BUG_ON(sizeof(*buf) != sizeof(fpregs)); |
201 | n = copy_from_user(fpregs, (void *) buf, sizeof(fpregs)); | 201 | n = copy_from_user(fpregs, buf, sizeof(fpregs)); |
202 | if (n > 0) | 202 | if (n > 0) |
203 | return -EFAULT; | 203 | return -EFAULT; |
204 | 204 | ||
diff --git a/arch/um/sys-x86_64/ptrace.c b/arch/um/sys-x86_64/ptrace.c index a3cfeed17af4..b7631b0e9ddc 100644 --- a/arch/um/sys-x86_64/ptrace.c +++ b/arch/um/sys-x86_64/ptrace.c | |||
@@ -154,7 +154,7 @@ int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) | |||
154 | if (err) | 154 | if (err) |
155 | return err; | 155 | return err; |
156 | 156 | ||
157 | n = copy_to_user((void *) buf, fpregs, sizeof(fpregs)); | 157 | n = copy_to_user(buf, fpregs, sizeof(fpregs)); |
158 | if(n > 0) | 158 | if(n > 0) |
159 | return -EFAULT; | 159 | return -EFAULT; |
160 | 160 | ||
@@ -167,7 +167,7 @@ int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) | |||
167 | long fpregs[HOST_FP_SIZE]; | 167 | long fpregs[HOST_FP_SIZE]; |
168 | 168 | ||
169 | BUG_ON(sizeof(*buf) != sizeof(fpregs)); | 169 | BUG_ON(sizeof(*buf) != sizeof(fpregs)); |
170 | n = copy_from_user(fpregs, (void *) buf, sizeof(fpregs)); | 170 | n = copy_from_user(fpregs, buf, sizeof(fpregs)); |
171 | if (n > 0) | 171 | if (n > 0) |
172 | return -EFAULT; | 172 | return -EFAULT; |
173 | 173 | ||
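Note: in both UML ptrace files the (void *) casts around buf are dropped: buf is already declared __user, and casting it away hid the address-space annotation from sparse without changing the generated code. The preferred shape, as a hedged sketch (regs_to_user() is a made-up wrapper):

    #include <linux/uaccess.h>
    #include <asm/user.h>

    /* Pass the __user pointer straight through so sparse can check it. */
    static int regs_to_user(struct user_i387_struct __user *buf,
                            const long *fpregs, size_t size)
    {
            if (copy_to_user(buf, fpregs, size))
                    return -EFAULT;

            return 0;
    }
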
diff --git a/arch/i386/Kconfig.cpu b/arch/x86/Kconfig.cpu index 0e2adadf5905..0e2adadf5905 100644 --- a/arch/i386/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu | |||
diff --git a/arch/i386/Kconfig.debug b/arch/x86/Kconfig.debug index f03531eacdfb..970b2defe7df 100644 --- a/arch/i386/Kconfig.debug +++ b/arch/x86/Kconfig.debug | |||
@@ -1,14 +1,14 @@ | |||
1 | menu "Kernel hacking" | 1 | menu "Kernel hacking" |
2 | 2 | ||
3 | config TRACE_IRQFLAGS_SUPPORT | 3 | config TRACE_IRQFLAGS_SUPPORT |
4 | bool | 4 | def_bool y |
5 | default y | ||
6 | 5 | ||
7 | source "lib/Kconfig.debug" | 6 | source "lib/Kconfig.debug" |
8 | 7 | ||
9 | config EARLY_PRINTK | 8 | config EARLY_PRINTK |
10 | bool "Early printk" if EMBEDDED && DEBUG_KERNEL | 9 | bool "Early printk" if EMBEDDED && DEBUG_KERNEL |
11 | default y | 10 | default y |
11 | depends on X86_32 | ||
12 | help | 12 | help |
13 | Write kernel log output directly into the VGA buffer or to a serial | 13 | Write kernel log output directly into the VGA buffer or to a serial |
14 | port. | 14 | port. |
@@ -37,10 +37,12 @@ config DEBUG_STACK_USAGE | |||
37 | 37 | ||
38 | comment "Page alloc debug is incompatible with Software Suspend on i386" | 38 | comment "Page alloc debug is incompatible with Software Suspend on i386" |
39 | depends on DEBUG_KERNEL && HIBERNATION | 39 | depends on DEBUG_KERNEL && HIBERNATION |
40 | depends on X86_32 | ||
40 | 41 | ||
41 | config DEBUG_PAGEALLOC | 42 | config DEBUG_PAGEALLOC |
42 | bool "Debug page memory allocations" | 43 | bool "Debug page memory allocations" |
43 | depends on DEBUG_KERNEL && !HIBERNATION && !HUGETLBFS | 44 | depends on DEBUG_KERNEL && !HIBERNATION && !HUGETLBFS |
45 | depends on X86_32 | ||
44 | help | 46 | help |
45 | Unmap pages from the kernel linear mapping after free_pages(). | 47 | Unmap pages from the kernel linear mapping after free_pages(). |
46 | This results in a large slowdown, but helps to find certain types | 48 | This results in a large slowdown, but helps to find certain types |
@@ -59,6 +61,7 @@ config DEBUG_RODATA | |||
59 | config 4KSTACKS | 61 | config 4KSTACKS |
60 | bool "Use 4Kb for kernel stacks instead of 8Kb" | 62 | bool "Use 4Kb for kernel stacks instead of 8Kb" |
61 | depends on DEBUG_KERNEL | 63 | depends on DEBUG_KERNEL |
64 | depends on X86_32 | ||
62 | help | 65 | help |
63 | If you say Y here the kernel will use a 4Kb stacksize for the | 66 | If you say Y here the kernel will use a 4Kb stacksize for the |
64 | kernel stack attached to each process/thread. This facilitates | 67 | kernel stack attached to each process/thread. This facilitates |
@@ -67,22 +70,50 @@ config 4KSTACKS | |||
67 | will also use IRQ stacks to compensate for the reduced stackspace. | 70 | will also use IRQ stacks to compensate for the reduced stackspace. |
68 | 71 | ||
69 | config X86_FIND_SMP_CONFIG | 72 | config X86_FIND_SMP_CONFIG |
70 | bool | 73 | def_bool y |
71 | depends on X86_LOCAL_APIC || X86_VOYAGER | 74 | depends on X86_LOCAL_APIC || X86_VOYAGER |
72 | default y | 75 | depends on X86_32 |
73 | 76 | ||
74 | config X86_MPPARSE | 77 | config X86_MPPARSE |
75 | bool | 78 | def_bool y |
76 | depends on X86_LOCAL_APIC && !X86_VISWS | 79 | depends on X86_LOCAL_APIC && !X86_VISWS |
77 | default y | 80 | depends on X86_32 |
78 | 81 | ||
79 | config DOUBLEFAULT | 82 | config DOUBLEFAULT |
80 | default y | 83 | default y |
81 | bool "Enable doublefault exception handler" if EMBEDDED | 84 | bool "Enable doublefault exception handler" if EMBEDDED |
85 | depends on X86_32 | ||
86 | help | ||
87 | This option allows trapping of rare doublefault exceptions that | ||
88 | would otherwise cause a system to silently reboot. Disabling this | ||
89 | option saves about 4k and might cause you much additional grey | ||
90 | hair. | ||
91 | |||
92 | config IOMMU_DEBUG | ||
93 | bool "Enable IOMMU debugging" | ||
94 | depends on IOMMU && DEBUG_KERNEL | ||
95 | depends on X86_64 | ||
82 | help | 96 | help |
83 | This option allows trapping of rare doublefault exceptions that | 97 | Force the IOMMU to on even when you have less than 4GB of |
84 | would otherwise cause a system to silently reboot. Disabling this | 98 | memory and add debugging code. On overflow always panic. And |
85 | option saves about 4k and might cause you much additional grey | 99 | allow to enable IOMMU leak tracing. Can be disabled at boot |
86 | hair. | 100 | time with iommu=noforce. This will also enable scatter gather |
101 | list merging. Currently not recommended for production | ||
102 | code. When you use it make sure you have a big enough | ||
103 | IOMMU/AGP aperture. Most of the options enabled by this can | ||
104 | be set more finegrained using the iommu= command line | ||
105 | options. See Documentation/x86_64/boot-options.txt for more | ||
106 | details. | ||
107 | |||
108 | config IOMMU_LEAK | ||
109 | bool "IOMMU leak tracing" | ||
110 | depends on DEBUG_KERNEL | ||
111 | depends on IOMMU_DEBUG | ||
112 | help | ||
113 | Add a simple leak tracer to the IOMMU code. This is useful when you | ||
114 | are debugging a buggy device driver that leaks IOMMU mappings. | ||
115 | |||
116 | #config X86_REMOTE_DEBUG | ||
117 | # bool "kgdb debugging stub" | ||
87 | 118 | ||
88 | endmenu | 119 | endmenu |
diff --git a/arch/i386/Kconfig b/arch/x86/Kconfig.i386 index b4437ce0f973..7331efe891a7 100644 --- a/arch/i386/Kconfig +++ b/arch/x86/Kconfig.i386 | |||
@@ -287,7 +287,7 @@ config ES7000_CLUSTERED_APIC | |||
287 | default y | 287 | default y |
288 | depends on SMP && X86_ES7000 && MPENTIUMIII | 288 | depends on SMP && X86_ES7000 && MPENTIUMIII |
289 | 289 | ||
290 | source "arch/i386/Kconfig.cpu" | 290 | source "arch/x86/Kconfig.cpu" |
291 | 291 | ||
292 | config HPET_TIMER | 292 | config HPET_TIMER |
293 | bool "HPET Timer Support" | 293 | bool "HPET Timer Support" |
@@ -1272,7 +1272,7 @@ source "fs/Kconfig" | |||
1272 | 1272 | ||
1273 | source "kernel/Kconfig.instrumentation" | 1273 | source "kernel/Kconfig.instrumentation" |
1274 | 1274 | ||
1275 | source "arch/i386/Kconfig.debug" | 1275 | source "arch/x86/Kconfig.debug" |
1276 | 1276 | ||
1277 | source "security/Kconfig" | 1277 | source "security/Kconfig" |
1278 | 1278 | ||
diff --git a/arch/x86_64/Kconfig b/arch/x86/Kconfig.x86_64 index 308970aa5382..e2542e5b536c 100644 --- a/arch/x86_64/Kconfig +++ b/arch/x86/Kconfig.x86_64 | |||
@@ -835,7 +835,7 @@ source fs/Kconfig | |||
835 | 835 | ||
836 | source "kernel/Kconfig.instrumentation" | 836 | source "kernel/Kconfig.instrumentation" |
837 | 837 | ||
838 | source "arch/x86_64/Kconfig.debug" | 838 | source "arch/x86/Kconfig.debug" |
839 | 839 | ||
840 | source "security/Kconfig" | 840 | source "security/Kconfig" |
841 | 841 | ||
diff --git a/arch/x86/Makefile b/arch/x86/Makefile new file mode 100644 index 000000000000..309597386a77 --- /dev/null +++ b/arch/x86/Makefile | |||
@@ -0,0 +1,16 @@ | |||
1 | # Unified Makefile for i386 and x86_64 | ||
2 | |||
3 | # select defconfig based on actual architecture | ||
4 | KBUILD_DEFCONFIG := $(ARCH)_defconfig | ||
5 | |||
6 | # # No need to remake these files | ||
7 | $(srctree)/arch/x86/Makefile%: ; | ||
8 | |||
9 | ifeq ($(ARCH),i386) | ||
10 | include $(srctree)/arch/x86/Makefile_32 | ||
11 | else | ||
12 | include $(srctree)/arch/x86/Makefile_64 | ||
13 | endif | ||
14 | |||
15 | |||
16 | |||
diff --git a/arch/i386/Makefile b/arch/x86/Makefile_32 index f5b9a37def8b..346ac0766875 100644 --- a/arch/i386/Makefile +++ b/arch/x86/Makefile_32 | |||
@@ -1,5 +1,5 @@ | |||
1 | # | 1 | # |
2 | # i386/Makefile | 2 | # i386 Makefile |
3 | # | 3 | # |
4 | # This file is included by the global makefile so that you can add your own | 4 | # This file is included by the global makefile so that you can add your own |
5 | # architecture-specific flags and dependencies. Remember to do have actions | 5 | # architecture-specific flags and dependencies. Remember to do have actions |
@@ -17,9 +17,6 @@ | |||
17 | # 20050320 Kianusch Sayah Karadji <kianusch@sk-tech.net> | 17 | # 20050320 Kianusch Sayah Karadji <kianusch@sk-tech.net> |
18 | # Added support for GEODE CPU | 18 | # Added support for GEODE CPU |
19 | 19 | ||
20 | # Fill in SRCARCH | ||
21 | SRCARCH := x86 | ||
22 | |||
23 | # BITS is used as extension for files which are available in a 32 bit | 20 | # BITS is used as extension for files which are available in a 32 bit |
24 | # and a 64 bit version to simplify shared Makefiles. | 21 | # and a 64 bit version to simplify shared Makefiles. |
25 | # e.g.: obj-y += foo_$(BITS).o | 22 | # e.g.: obj-y += foo_$(BITS).o |
@@ -46,7 +43,7 @@ KBUILD_CFLAGS += -pipe -msoft-float -mregparm=3 -freg-struct-return | |||
46 | KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2) | 43 | KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2) |
47 | 44 | ||
48 | # CPU-specific tuning. Anything which can be shared with UML should go here. | 45 | # CPU-specific tuning. Anything which can be shared with UML should go here. |
49 | include $(srctree)/arch/i386/Makefile.cpu | 46 | include $(srctree)/arch/x86/Makefile_32.cpu |
50 | 47 | ||
51 | # temporary until string.h is fixed | 48 | # temporary until string.h is fixed |
52 | cflags-y += -ffreestanding | 49 | cflags-y += -ffreestanding |
diff --git a/arch/i386/Makefile.cpu b/arch/x86/Makefile_32.cpu index e372b584e919..e372b584e919 100644 --- a/arch/i386/Makefile.cpu +++ b/arch/x86/Makefile_32.cpu | |||
diff --git a/arch/x86_64/Makefile b/arch/x86/Makefile_64 index 20eb69bd5a6d..57e714a47af7 100644 --- a/arch/x86_64/Makefile +++ b/arch/x86/Makefile_64 | |||
@@ -1,5 +1,5 @@ | |||
1 | # | 1 | # |
2 | # x86_64/Makefile | 2 | # x86_64 Makefile |
3 | # | 3 | # |
4 | # This file is included by the global makefile so that you can add your own | 4 | # This file is included by the global makefile so that you can add your own |
5 | # architecture-specific flags and dependencies. Remember to do have actions | 5 | # architecture-specific flags and dependencies. Remember to do have actions |
@@ -21,9 +21,6 @@ | |||
21 | # | 21 | # |
22 | # $Id: Makefile,v 1.31 2002/03/22 15:56:07 ak Exp $ | 22 | # $Id: Makefile,v 1.31 2002/03/22 15:56:07 ak Exp $ |
23 | 23 | ||
24 | # Fill in SRCARCH | ||
25 | SRCARCH := x86 | ||
26 | |||
27 | # BITS is used as extension for files which are available in a 32 bit | 24 | # BITS is used as extension for files which are available in a 32 bit |
28 | # and a 64 bit version to simplify shared Makefiles. | 25 | # and a 64 bit version to simplify shared Makefiles. |
29 | # e.g.: obj-y += foo_$(BITS).o | 26 | # e.g.: obj-y += foo_$(BITS).o |
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h index 5f9a2e72a731..d2b5adf46512 100644 --- a/arch/x86/boot/boot.h +++ b/arch/x86/boot/boot.h | |||
@@ -17,6 +17,8 @@ | |||
17 | #ifndef BOOT_BOOT_H | 17 | #ifndef BOOT_BOOT_H |
18 | #define BOOT_BOOT_H | 18 | #define BOOT_BOOT_H |
19 | 19 | ||
20 | #define STACK_SIZE 512 /* Minimum number of bytes for stack */ | ||
21 | |||
20 | #ifndef __ASSEMBLY__ | 22 | #ifndef __ASSEMBLY__ |
21 | 23 | ||
22 | #include <stdarg.h> | 24 | #include <stdarg.h> |
@@ -198,8 +200,6 @@ static inline int isdigit(int ch) | |||
198 | } | 200 | } |
199 | 201 | ||
200 | /* Heap -- available for dynamic lists. */ | 202 | /* Heap -- available for dynamic lists. */ |
201 | #define STACK_SIZE 512 /* Minimum number of bytes for stack */ | ||
202 | |||
203 | extern char _end[]; | 203 | extern char _end[]; |
204 | extern char *HEAP; | 204 | extern char *HEAP; |
205 | extern char *heap_end; | 205 | extern char *heap_end; |
@@ -216,9 +216,9 @@ static inline char *__get_heap(size_t s, size_t a, size_t n) | |||
216 | #define GET_HEAP(type, n) \ | 216 | #define GET_HEAP(type, n) \ |
217 | ((type *)__get_heap(sizeof(type),__alignof__(type),(n))) | 217 | ((type *)__get_heap(sizeof(type),__alignof__(type),(n))) |
218 | 218 | ||
219 | static inline int heap_free(void) | 219 | static inline bool heap_free(size_t n) |
220 | { | 220 | { |
221 | return heap_end-HEAP; | 221 | return (int)(heap_end-HEAP) >= (int)n; |
222 | } | 222 | } |
223 | 223 | ||
224 | /* copy.S */ | 224 | /* copy.S */ |
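Note: heap_free() changes contract here: previously it returned the number of free heap bytes and each caller compared that against a size; now it takes the requested size and answers yes/no, doing the comparison as a signed int so a heap already overrun by the stack still reports no space. The call-site change, as made later in this patch (video-bios.c and friends):

    /* before */
    if (heap_free() < sizeof(struct mode_info))
            break;

    /* after */
    if (!heap_free(sizeof(struct mode_info)))
            break;
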
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index a0ae2e7f6cec..036e635f18a3 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S | |||
@@ -33,24 +33,20 @@ | |||
33 | .globl startup_32 | 33 | .globl startup_32 |
34 | 34 | ||
35 | startup_32: | 35 | startup_32: |
36 | /* check to see if KEEP_SEGMENTS flag is meaningful */ | 36 | cld |
37 | cmpw $0x207, BP_version(%esi) | ||
38 | jb 1f | ||
39 | |||
40 | /* test KEEP_SEGMENTS flag to see if the bootloader is asking | 37 | /* test KEEP_SEGMENTS flag to see if the bootloader is asking |
41 | * us to not reload segments */ | 38 | * us to not reload segments */ |
42 | testb $(1<<6), BP_loadflags(%esi) | 39 | testb $(1<<6), BP_loadflags(%esi) |
43 | jnz 2f | 40 | jnz 1f |
44 | 41 | ||
45 | 1: cli | 42 | cli |
46 | movl $(__BOOT_DS),%eax | 43 | movl $(__BOOT_DS),%eax |
47 | movl %eax,%ds | 44 | movl %eax,%ds |
48 | movl %eax,%es | 45 | movl %eax,%es |
49 | movl %eax,%fs | 46 | movl %eax,%fs |
50 | movl %eax,%gs | 47 | movl %eax,%gs |
51 | movl %eax,%ss | 48 | movl %eax,%ss |
52 | 49 | 1: | |
53 | 2: cld | ||
54 | 50 | ||
55 | /* Calculate the delta between where we were compiled to run | 51 | /* Calculate the delta between where we were compiled to run |
56 | * at and where we were actually loaded at. This can only be done | 52 | * at and where we were actually loaded at. This can only be done |
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 49467640751f..1ccb38a7f0d2 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <asm/pgtable.h> | 29 | #include <asm/pgtable.h> |
30 | #include <asm/page.h> | 30 | #include <asm/page.h> |
31 | #include <asm/msr.h> | 31 | #include <asm/msr.h> |
32 | #include <asm/asm-offsets.h> | ||
32 | 33 | ||
33 | .section ".text.head" | 34 | .section ".text.head" |
34 | .code32 | 35 | .code32 |
@@ -36,11 +37,17 @@ | |||
36 | 37 | ||
37 | startup_32: | 38 | startup_32: |
38 | cld | 39 | cld |
40 | /* test KEEP_SEGMENTS flag to see if the bootloader is asking | ||
41 | * us to not reload segments */ | ||
42 | testb $(1<<6), BP_loadflags(%esi) | ||
43 | jnz 1f | ||
44 | |||
39 | cli | 45 | cli |
40 | movl $(__KERNEL_DS), %eax | 46 | movl $(__KERNEL_DS), %eax |
41 | movl %eax, %ds | 47 | movl %eax, %ds |
42 | movl %eax, %es | 48 | movl %eax, %es |
43 | movl %eax, %ss | 49 | movl %eax, %ss |
50 | 1: | ||
44 | 51 | ||
45 | /* Calculate the delta between where we were compiled to run | 52 | /* Calculate the delta between where we were compiled to run |
46 | * at and where we were actually loaded at. This can only be done | 53 | * at and where we were actually loaded at. This can only be done |
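Note: with this change both compressed-kernel entry points test the KEEP_SEGMENTS flag (bit 6 of loadflags) unconditionally, rather than only for boot protocol 2.07+, and the 64-bit path gains the check for the first time. In rough C terms the assembly does the following (reload_segments() is a notional helper standing in for the cli plus segment-register loads):

    #include <asm/bootparam.h>

    extern void reload_segments(void);      /* hypothetical stand-in */

    static void maybe_reload_segments(struct boot_params *bp)
    {
            /* bit 6 of loadflags == KEEP_SEGMENTS */
            if (!(bp->hdr.loadflags & (1 << 6)))
                    reload_segments();
    }
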
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 8353c81c41c0..6ef5a060fa11 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S | |||
@@ -173,7 +173,8 @@ ramdisk_size: .long 0 # its size in bytes | |||
173 | bootsect_kludge: | 173 | bootsect_kludge: |
174 | .long 0 # obsolete | 174 | .long 0 # obsolete |
175 | 175 | ||
176 | heap_end_ptr: .word _end+1024 # (Header version 0x0201 or later) | 176 | heap_end_ptr: .word _end+STACK_SIZE-512 |
177 | # (Header version 0x0201 or later) | ||
177 | # space from here (exclusive) down to | 178 | # space from here (exclusive) down to |
178 | # end of setup code can be used by setup | 179 | # end of setup code can be used by setup |
179 | # for local heap purposes. | 180 | # for local heap purposes. |
@@ -230,28 +231,53 @@ start_of_setup: | |||
230 | int $0x13 | 231 | int $0x13 |
231 | #endif | 232 | #endif |
232 | 233 | ||
233 | # We will have entered with %cs = %ds+0x20, normalize %cs so | ||
234 | # it is on par with the other segments. | ||
235 | pushw %ds | ||
236 | pushw $setup2 | ||
237 | lretw | ||
238 | |||
239 | setup2: | ||
240 | # Force %es = %ds | 234 | # Force %es = %ds |
241 | movw %ds, %ax | 235 | movw %ds, %ax |
242 | movw %ax, %es | 236 | movw %ax, %es |
243 | cld | 237 | cld |
244 | 238 | ||
245 | # Stack paranoia: align the stack and make sure it is good | 239 | # Apparently some ancient versions of LILO invoked the kernel |
246 | # for both 16- and 32-bit references. In particular, if we | 240 | # with %ss != %ds, which happened to work by accident for the |
247 | # were meant to have been using the full 16-bit segment, the | 241 | # old code. If the CAN_USE_HEAP flag is set in loadflags, or |
248 | # caller might have set %sp to zero, which breaks %esp-based | 242 | # %ss != %ds, then adjust the stack pointer. |
249 | # references. | 243 | |
250 | andw $~3, %sp # dword align (might as well...) | 244 | # Smallest possible stack we can tolerate |
251 | jnz 1f | 245 | movw $(_end+STACK_SIZE), %cx |
252 | movw $0xfffc, %sp # Make sure we're not zero | 246 | |
253 | 1: movzwl %sp, %esp # Clear upper half of %esp | 247 | movw heap_end_ptr, %dx |
254 | sti | 248 | addw $512, %dx |
249 | jnc 1f | ||
250 | xorw %dx, %dx # Wraparound - whole segment available | ||
251 | 1: testb $CAN_USE_HEAP, loadflags | ||
252 | jnz 2f | ||
253 | |||
254 | # No CAN_USE_HEAP | ||
255 | movw %ss, %dx | ||
256 | cmpw %ax, %dx # %ds == %ss? | ||
257 | movw %sp, %dx | ||
258 | # If so, assume %sp is reasonably set, otherwise use | ||
259 | # the smallest possible stack. | ||
260 | jne 4f # -> Smallest possible stack... | ||
261 | |||
262 | # Make sure the stack is at least minimum size. Take a value | ||
263 | # of zero to mean "full segment." | ||
264 | 2: | ||
265 | andw $~3, %dx # dword align (might as well...) | ||
266 | jnz 3f | ||
267 | movw $0xfffc, %dx # Make sure we're not zero | ||
268 | 3: cmpw %cx, %dx | ||
269 | jnb 5f | ||
270 | 4: movw %cx, %dx # Minimum value we can possibly use | ||
271 | 5: movw %ax, %ss | ||
272 | movzwl %dx, %esp # Clear upper half of %esp | ||
273 | sti # Now we should have a working stack | ||
274 | |||
275 | # We will have entered with %cs = %ds+0x20, normalize %cs so | ||
276 | # it is on par with the other segments. | ||
277 | pushw %ds | ||
278 | pushw $6f | ||
279 | lretw | ||
280 | 6: | ||
255 | 281 | ||
256 | # Check signature at end of setup | 282 | # Check signature at end of setup |
257 | cmpl $0x5a5aaa55, setup_sig | 283 | cmpl $0x5a5aaa55, setup_sig |
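Note: the rewritten stack setup copes with ancient LILO versions that enter the kernel with %ss != %ds. In outline: compute the smallest tolerable stack top (_end + STACK_SIZE); if CAN_USE_HEAP is set, trust heap_end_ptr + 512 (a 16-bit wrap means the whole segment is free); otherwise trust %sp only when %ss == %ds; then dword-align, treat zero as a full segment, and never go below the minimum. A hedged C rendering of that decision (names are descriptive, not the real symbols):

    /* Approximate C model of the %ss/%sp selection in the assembly above. */
    static unsigned short pick_stack_top(unsigned short ss, unsigned short ds,
                                         unsigned short sp,
                                         unsigned short heap_end,
                                         int can_use_heap,
                                         unsigned short minimum)
    {
            unsigned int top;

            if (can_use_heap) {
                    top = (unsigned int)heap_end + 512;
                    if (top > 0xffff)
                            top = 0;            /* wrapped: whole segment free */
            } else if (ss == ds) {
                    top = sp;                   /* caller's stack looks usable */
            } else {
                    return minimum;             /* unknown %ss: smallest stack */
            }

            top &= ~3u;                         /* dword align */
            if (top == 0)
                    top = 0xfffc;               /* zero means "full segment" */
            if (top < minimum)
                    top = minimum;

            return top;
    }
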
diff --git a/arch/x86/boot/video-bios.c b/arch/x86/boot/video-bios.c index 68e65d95cdfd..ed0672a81870 100644 --- a/arch/x86/boot/video-bios.c +++ b/arch/x86/boot/video-bios.c | |||
@@ -79,7 +79,7 @@ static int bios_probe(void) | |||
79 | video_bios.modes = GET_HEAP(struct mode_info, 0); | 79 | video_bios.modes = GET_HEAP(struct mode_info, 0); |
80 | 80 | ||
81 | for (mode = 0x14; mode <= 0x7f; mode++) { | 81 | for (mode = 0x14; mode <= 0x7f; mode++) { |
82 | if (heap_free() < sizeof(struct mode_info)) | 82 | if (!heap_free(sizeof(struct mode_info))) |
83 | break; | 83 | break; |
84 | 84 | ||
85 | if (mode_defined(VIDEO_FIRST_BIOS+mode)) | 85 | if (mode_defined(VIDEO_FIRST_BIOS+mode)) |
diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c index 192190710710..4716b9a96357 100644 --- a/arch/x86/boot/video-vesa.c +++ b/arch/x86/boot/video-vesa.c | |||
@@ -57,7 +57,7 @@ static int vesa_probe(void) | |||
57 | while ((mode = rdfs16(mode_ptr)) != 0xffff) { | 57 | while ((mode = rdfs16(mode_ptr)) != 0xffff) { |
58 | mode_ptr += 2; | 58 | mode_ptr += 2; |
59 | 59 | ||
60 | if (heap_free() < sizeof(struct mode_info)) | 60 | if (!heap_free(sizeof(struct mode_info))) |
61 | break; /* Heap full, can't save mode info */ | 61 | break; /* Heap full, can't save mode info */ |
62 | 62 | ||
63 | if (mode & ~0x1ff) | 63 | if (mode & ~0x1ff) |
diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c index e4ba897bf9a3..ad9712f01739 100644 --- a/arch/x86/boot/video.c +++ b/arch/x86/boot/video.c | |||
@@ -371,7 +371,7 @@ static void save_screen(void) | |||
371 | saved.curx = boot_params.screen_info.orig_x; | 371 | saved.curx = boot_params.screen_info.orig_x; |
372 | saved.cury = boot_params.screen_info.orig_y; | 372 | saved.cury = boot_params.screen_info.orig_y; |
373 | 373 | ||
374 | if (heap_free() < saved.x*saved.y*sizeof(u16)+512) | 374 | if (!heap_free(saved.x*saved.y*sizeof(u16)+512)) |
375 | return; /* Not enough heap to save the screen */ | 375 | return; /* Not enough heap to save the screen */ |
376 | 376 | ||
377 | saved.data = GET_HEAP(u16, saved.x*saved.y); | 377 | saved.data = GET_HEAP(u16, saved.x*saved.y); |
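Editor's note: the three call-site changes above switch heap_free() from "how many bytes are left?" to "are at least this many bytes left?". The sketch below shows the assumed shape of such a predicate; HEAP and heap_end are illustrative stand-ins for the boot heap cursor and limit, not the kernel's definitions.

/* Hypothetical shape of the new-style check the callers above rely on. */
#include <stddef.h>

static char *HEAP;        /* current allocation cursor (illustrative)   */
static char *heap_end;    /* one past the last usable heap byte         */

static int heap_free(size_t n)
{
    return (size_t)(heap_end - HEAP) >= n;
}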
diff --git a/arch/i386/defconfig b/arch/x86/configs/i386_defconfig index 54ee1764fdae..54ee1764fdae 100644 --- a/arch/i386/defconfig +++ b/arch/x86/configs/i386_defconfig | |||
diff --git a/arch/x86_64/defconfig b/arch/x86/configs/x86_64_defconfig index b091c5e35558..b091c5e35558 100644 --- a/arch/x86_64/defconfig +++ b/arch/x86/configs/x86_64_defconfig | |||
diff --git a/arch/x86/kernel/acpi/processor.c b/arch/x86/kernel/acpi/processor.c index 2ed0a4ce62f0..f63e5ff0aca1 100644 --- a/arch/x86/kernel/acpi/processor.c +++ b/arch/x86/kernel/acpi/processor.c | |||
@@ -62,8 +62,7 @@ static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c) | |||
62 | /* Initialize _PDC data based on the CPU vendor */ | 62 | /* Initialize _PDC data based on the CPU vendor */ |
63 | void arch_acpi_processor_init_pdc(struct acpi_processor *pr) | 63 | void arch_acpi_processor_init_pdc(struct acpi_processor *pr) |
64 | { | 64 | { |
65 | unsigned int cpu = pr->id; | 65 | struct cpuinfo_x86 *c = &cpu_data(pr->id); |
66 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
67 | 66 | ||
68 | pr->pdc = NULL; | 67 | pr->pdc = NULL; |
69 | if (c->x86_vendor == X86_VENDOR_INTEL) | 68 | if (c->x86_vendor == X86_VENDOR_INTEL) |
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 7e50bda565b4..d1b6ed98774e 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c | |||
@@ -15,12 +15,16 @@ | |||
15 | #include <asm/segment.h> | 15 | #include <asm/segment.h> |
16 | #include <asm/thread_info.h> | 16 | #include <asm/thread_info.h> |
17 | #include <asm/ia32.h> | 17 | #include <asm/ia32.h> |
18 | #include <asm/bootparam.h> | ||
18 | 19 | ||
19 | #define DEFINE(sym, val) \ | 20 | #define DEFINE(sym, val) \ |
20 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | 21 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) |
21 | 22 | ||
22 | #define BLANK() asm volatile("\n->" : : ) | 23 | #define BLANK() asm volatile("\n->" : : ) |
23 | 24 | ||
25 | #define OFFSET(sym, str, mem) \ | ||
26 | DEFINE(sym, offsetof(struct str, mem)) | ||
27 | |||
24 | #define __NO_STUBS 1 | 28 | #define __NO_STUBS 1 |
25 | #undef __SYSCALL | 29 | #undef __SYSCALL |
26 | #undef _ASM_X86_64_UNISTD_H_ | 30 | #undef _ASM_X86_64_UNISTD_H_ |
@@ -109,5 +113,11 @@ int main(void) | |||
109 | DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx)); | 113 | DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx)); |
110 | BLANK(); | 114 | BLANK(); |
111 | DEFINE(__NR_syscall_max, sizeof(syscalls) - 1); | 115 | DEFINE(__NR_syscall_max, sizeof(syscalls) - 1); |
116 | |||
117 | BLANK(); | ||
118 | OFFSET(BP_scratch, boot_params, scratch); | ||
119 | OFFSET(BP_loadflags, boot_params, hdr.loadflags); | ||
120 | OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch); | ||
121 | OFFSET(BP_version, boot_params, hdr.version); | ||
112 | return 0; | 122 | return 0; |
113 | } | 123 | } |
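Editor's note: the OFFSET(sym, str, mem) wrapper added above is just DEFINE() applied to offsetof(), so BP_scratch and friends become assembler-visible structure offsets. The stand-alone C program below shows the computation offsetof() performs; the struct is invented for the example and is not the real boot_params layout.

/* Stand-alone illustration of what OFFSET(sym, str, mem) computes. */
#include <stddef.h>
#include <stdio.h>

struct boot_params_like {
    unsigned int scratch;
    struct { unsigned char loadflags; unsigned short version; } hdr;
};

int main(void)
{
    printf("BP_scratch   %zu\n", offsetof(struct boot_params_like, scratch));
    printf("BP_loadflags %zu\n", offsetof(struct boot_params_like, hdr.loadflags));
    printf("BP_version   %zu\n", offsetof(struct boot_params_like, hdr.version));
    return 0;
}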
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index af0253f94a9a..8bb482ff091b 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <linux/kdebug.h> | 25 | #include <linux/kdebug.h> |
26 | #include <asm/smp.h> | 26 | #include <asm/smp.h> |
27 | 27 | ||
28 | #ifdef X86_32 | 28 | #ifdef CONFIG_X86_32 |
29 | #include <mach_ipi.h> | 29 | #include <mach_ipi.h> |
30 | #else | 30 | #else |
31 | #include <asm/mach_apic.h> | 31 | #include <asm/mach_apic.h> |
@@ -41,7 +41,7 @@ static int crash_nmi_callback(struct notifier_block *self, | |||
41 | unsigned long val, void *data) | 41 | unsigned long val, void *data) |
42 | { | 42 | { |
43 | struct pt_regs *regs; | 43 | struct pt_regs *regs; |
44 | #ifdef X86_32 | 44 | #ifdef CONFIG_X86_32 |
45 | struct pt_regs fixed_regs; | 45 | struct pt_regs fixed_regs; |
46 | #endif | 46 | #endif |
47 | int cpu; | 47 | int cpu; |
@@ -60,7 +60,7 @@ static int crash_nmi_callback(struct notifier_block *self, | |||
60 | return NOTIFY_STOP; | 60 | return NOTIFY_STOP; |
61 | local_irq_disable(); | 61 | local_irq_disable(); |
62 | 62 | ||
63 | #ifdef X86_32 | 63 | #ifdef CONFIG_X86_32 |
64 | if (!user_mode_vm(regs)) { | 64 | if (!user_mode_vm(regs)) { |
65 | crash_fixup_ss_esp(&fixed_regs, regs); | 65 | crash_fixup_ss_esp(&fixed_regs, regs); |
66 | regs = &fixed_regs; | 66 | regs = &fixed_regs; |
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index dc34acbd54aa..639e6320518e 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c | |||
@@ -35,12 +35,14 @@ static void __init via_bugs(void) | |||
35 | } | 35 | } |
36 | 36 | ||
37 | #ifdef CONFIG_ACPI | 37 | #ifdef CONFIG_ACPI |
38 | #ifdef CONFIG_X86_IO_APIC | ||
38 | 39 | ||
39 | static int __init nvidia_hpet_check(struct acpi_table_header *header) | 40 | static int __init nvidia_hpet_check(struct acpi_table_header *header) |
40 | { | 41 | { |
41 | return 0; | 42 | return 0; |
42 | } | 43 | } |
43 | #endif | 44 | #endif /* CONFIG_X86_IO_APIC */ |
45 | #endif /* CONFIG_ACPI */ | ||
44 | 46 | ||
45 | static void __init nvidia_bugs(void) | 47 | static void __init nvidia_bugs(void) |
46 | { | 48 | { |
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 0d8577f05422..aa3d2c8f7737 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c | |||
@@ -233,6 +233,8 @@ NORET_TYPE void machine_kexec(struct kimage *image) | |||
233 | 233 | ||
234 | void arch_crash_save_vmcoreinfo(void) | 234 | void arch_crash_save_vmcoreinfo(void) |
235 | { | 235 | { |
236 | VMCOREINFO_SYMBOL(init_level4_pgt); | ||
237 | |||
236 | #ifdef CONFIG_ARCH_DISCONTIGMEM_ENABLE | 238 | #ifdef CONFIG_ARCH_DISCONTIGMEM_ENABLE |
237 | VMCOREINFO_SYMBOL(node_data); | 239 | VMCOREINFO_SYMBOL(node_data); |
238 | VMCOREINFO_LENGTH(node_data, MAX_NUMNODES); | 240 | VMCOREINFO_LENGTH(node_data, MAX_NUMNODES); |
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index c56e9ee64964..79b514b381b1 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c | |||
@@ -338,7 +338,6 @@ static int __dma_map_cont(struct scatterlist *start, int nelems, | |||
338 | 338 | ||
339 | BUG_ON(s != start && s->offset); | 339 | BUG_ON(s != start && s->offset); |
340 | if (s == start) { | 340 | if (s == start) { |
341 | *sout = *s; | ||
342 | sout->dma_address = iommu_bus_base; | 341 | sout->dma_address = iommu_bus_base; |
343 | sout->dma_address += iommu_page*PAGE_SIZE + s->offset; | 342 | sout->dma_address += iommu_page*PAGE_SIZE + s->offset; |
344 | sout->dma_length = s->length; | 343 | sout->dma_length = s->length; |
@@ -365,7 +364,7 @@ static inline int dma_map_cont(struct scatterlist *start, int nelems, | |||
365 | { | 364 | { |
366 | if (!need) { | 365 | if (!need) { |
367 | BUG_ON(nelems != 1); | 366 | BUG_ON(nelems != 1); |
368 | *sout = *start; | 367 | sout->dma_address = start->dma_address; |
369 | sout->dma_length = start->length; | 368 | sout->dma_length = start->length; |
370 | return 0; | 369 | return 0; |
371 | } | 370 | } |
@@ -436,7 +435,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
436 | 435 | ||
437 | error: | 436 | error: |
438 | flush_gart(); | 437 | flush_gart(); |
439 | gart_unmap_sg(dev, sg, nents, dir); | 438 | gart_unmap_sg(dev, sg, out, dir); |
440 | /* When it was forced or merged try again in a dumb way */ | 439 | /* When it was forced or merged try again in a dumb way */ |
441 | if (force_iommu || iommu_merge) { | 440 | if (force_iommu || iommu_merge) { |
442 | out = dma_map_sg_nonforce(dev, sg, nents, dir); | 441 | out = dma_map_sg_nonforce(dev, sg, nents, dir); |
diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c index f32115308399..fcaa026eb807 100644 --- a/arch/x86/kernel/smp_32.c +++ b/arch/x86/kernel/smp_32.c | |||
@@ -708,10 +708,4 @@ struct smp_ops smp_ops = { | |||
708 | .smp_send_reschedule = native_smp_send_reschedule, | 708 | .smp_send_reschedule = native_smp_send_reschedule, |
709 | .smp_call_function_mask = native_smp_call_function_mask, | 709 | .smp_call_function_mask = native_smp_call_function_mask, |
710 | }; | 710 | }; |
711 | 711 | EXPORT_SYMBOL_GPL(smp_ops); | |
712 | int smp_call_function_mask(cpumask_t mask, void (*func) (void *info), | ||
713 | void *info, int wait) | ||
714 | { | ||
715 | return smp_ops.smp_call_function_mask(mask, func, info, wait); | ||
716 | } | ||
717 | EXPORT_SYMBOL(smp_call_function_mask); | ||
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index d2235db4085f..a55b0902f9d3 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -56,6 +56,7 @@ | |||
56 | #include <linux/lguest.h> | 56 | #include <linux/lguest.h> |
57 | #include <linux/lguest_launcher.h> | 57 | #include <linux/lguest_launcher.h> |
58 | #include <linux/virtio_console.h> | 58 | #include <linux/virtio_console.h> |
59 | #include <linux/pm.h> | ||
59 | #include <asm/paravirt.h> | 60 | #include <asm/paravirt.h> |
60 | #include <asm/param.h> | 61 | #include <asm/param.h> |
61 | #include <asm/page.h> | 62 | #include <asm/page.h> |
@@ -98,7 +99,7 @@ static cycle_t clock_base; | |||
98 | * When lazy_mode is set, it means we're allowed to defer all hypercalls and do | 99 | * When lazy_mode is set, it means we're allowed to defer all hypercalls and do |
99 | * them as a batch when lazy_mode is eventually turned off. Because hypercalls | 100 | * them as a batch when lazy_mode is eventually turned off. Because hypercalls |
100 | * are reasonably expensive, batching them up makes sense. For example, a | 101 | * are reasonably expensive, batching them up makes sense. For example, a |
101 | * large mmap might update dozens of page table entries: that code calls | 102 | * large munmap might update dozens of page table entries: that code calls |
102 | * paravirt_enter_lazy_mmu(), does the dozen updates, then calls | 103 | * paravirt_enter_lazy_mmu(), does the dozen updates, then calls |
103 | * lguest_leave_lazy_mode(). | 104 | * lguest_leave_lazy_mode(). |
104 | * | 105 | * |
@@ -163,8 +164,8 @@ void async_hcall(unsigned long call, | |||
163 | /*:*/ | 164 | /*:*/ |
164 | 165 | ||
165 | /*G:033 | 166 | /*G:033 |
166 | * Here are our first native-instruction replacements: four functions for | 167 | * After that diversion we return to our first native-instruction |
167 | * interrupt control. | 168 | * replacements: four functions for interrupt control. |
168 | * | 169 | * |
169 | * The simplest way of implementing these would be to have "turn interrupts | 170 | * The simplest way of implementing these would be to have "turn interrupts |
170 | * off" and "turn interrupts on" hypercalls. Unfortunately, this is too slow: | 171 | * off" and "turn interrupts on" hypercalls. Unfortunately, this is too slow: |
@@ -183,7 +184,7 @@ static unsigned long save_fl(void) | |||
183 | return lguest_data.irq_enabled; | 184 | return lguest_data.irq_enabled; |
184 | } | 185 | } |
185 | 186 | ||
186 | /* "restore_flags" just sets the flags back to the value given. */ | 187 | /* restore_flags() just sets the flags back to the value given. */ |
187 | static void restore_fl(unsigned long flags) | 188 | static void restore_fl(unsigned long flags) |
188 | { | 189 | { |
189 | lguest_data.irq_enabled = flags; | 190 | lguest_data.irq_enabled = flags; |
@@ -356,7 +357,7 @@ static void lguest_cpuid(unsigned int *eax, unsigned int *ebx, | |||
356 | * it. The Host needs to know when the Guest wants to change them, so we have | 357 | * it. The Host needs to know when the Guest wants to change them, so we have |
357 | * a whole series of functions like read_cr0() and write_cr0(). | 358 | * a whole series of functions like read_cr0() and write_cr0(). |
358 | * | 359 | * |
359 | * We start with CR0. CR0 allows you to turn on and off all kinds of basic | 360 | * We start with cr0. cr0 allows you to turn on and off all kinds of basic |
360 | * features, but Linux only really cares about one: the horrifically-named Task | 361 | * features, but Linux only really cares about one: the horrifically-named Task |
361 | * Switched (TS) bit at bit 3 (ie. 8) | 362 | * Switched (TS) bit at bit 3 (ie. 8) |
362 | * | 363 | * |
@@ -371,8 +372,7 @@ static void lguest_cpuid(unsigned int *eax, unsigned int *ebx, | |||
371 | static unsigned long current_cr0, current_cr3; | 372 | static unsigned long current_cr0, current_cr3; |
372 | static void lguest_write_cr0(unsigned long val) | 373 | static void lguest_write_cr0(unsigned long val) |
373 | { | 374 | { |
374 | /* 8 == TS bit. */ | 375 | lazy_hcall(LHCALL_TS, val & X86_CR0_TS, 0, 0); |
375 | lazy_hcall(LHCALL_TS, val & 8, 0, 0); | ||
376 | current_cr0 = val; | 376 | current_cr0 = val; |
377 | } | 377 | } |
378 | 378 | ||
@@ -387,10 +387,10 @@ static unsigned long lguest_read_cr0(void) | |||
387 | static void lguest_clts(void) | 387 | static void lguest_clts(void) |
388 | { | 388 | { |
389 | lazy_hcall(LHCALL_TS, 0, 0, 0); | 389 | lazy_hcall(LHCALL_TS, 0, 0, 0); |
390 | current_cr0 &= ~8U; | 390 | current_cr0 &= ~X86_CR0_TS; |
391 | } | 391 | } |
392 | 392 | ||
393 | /* CR2 is the virtual address of the last page fault, which the Guest only ever | 393 | /* cr2 is the virtual address of the last page fault, which the Guest only ever |
394 | * reads. The Host kindly writes this into our "struct lguest_data", so we | 394 | * reads. The Host kindly writes this into our "struct lguest_data", so we |
395 | * just read it out of there. */ | 395 | * just read it out of there. */ |
396 | static unsigned long lguest_read_cr2(void) | 396 | static unsigned long lguest_read_cr2(void) |
@@ -398,7 +398,7 @@ static unsigned long lguest_read_cr2(void) | |||
398 | return lguest_data.cr2; | 398 | return lguest_data.cr2; |
399 | } | 399 | } |
400 | 400 | ||
401 | /* CR3 is the current toplevel pagetable page: the principle is the same as | 401 | /* cr3 is the current toplevel pagetable page: the principle is the same as |
402 | * cr0. Keep a local copy, and tell the Host when it changes. */ | 402 | * cr0. Keep a local copy, and tell the Host when it changes. */ |
403 | static void lguest_write_cr3(unsigned long cr3) | 403 | static void lguest_write_cr3(unsigned long cr3) |
404 | { | 404 | { |
@@ -411,7 +411,7 @@ static unsigned long lguest_read_cr3(void) | |||
411 | return current_cr3; | 411 | return current_cr3; |
412 | } | 412 | } |
413 | 413 | ||
414 | /* CR4 is used to enable and disable PGE, but we don't care. */ | 414 | /* cr4 is used to enable and disable PGE, but we don't care. */ |
415 | static unsigned long lguest_read_cr4(void) | 415 | static unsigned long lguest_read_cr4(void) |
416 | { | 416 | { |
417 | return 0; | 417 | return 0; |
@@ -432,7 +432,7 @@ static void lguest_write_cr4(unsigned long val) | |||
432 | * maps virtual addresses to physical addresses using "page tables". We could | 432 | * maps virtual addresses to physical addresses using "page tables". We could |
433 | * use one huge index of 1 million entries: each address is 4 bytes, so that's | 433 | * use one huge index of 1 million entries: each address is 4 bytes, so that's |
434 | * 1024 pages just to hold the page tables. But since most virtual addresses | 434 | * 1024 pages just to hold the page tables. But since most virtual addresses |
435 | * are unused, we use a two level index which saves space. The CR3 register | 435 | * are unused, we use a two level index which saves space. The cr3 register |
436 | * contains the physical address of the top level "page directory" page, which | 436 | * contains the physical address of the top level "page directory" page, which |
437 | * contains physical addresses of up to 1024 second-level pages. Each of these | 437 | * contains physical addresses of up to 1024 second-level pages. Each of these |
438 | * second level pages contains up to 1024 physical addresses of actual pages, | 438 | * second level pages contains up to 1024 physical addresses of actual pages, |
@@ -440,7 +440,7 @@ static void lguest_write_cr4(unsigned long val) | |||
440 | * | 440 | * |
441 | * Here's a diagram, where arrows indicate physical addresses: | 441 | * Here's a diagram, where arrows indicate physical addresses: |
442 | * | 442 | * |
443 | * CR3 ---> +---------+ | 443 | * cr3 ---> +---------+ |
444 | * | --------->+---------+ | 444 | * | --------->+---------+ |
445 | * | | | PADDR1 | | 445 | * | | | PADDR1 | |
446 | * Top-level | | PADDR2 | | 446 | * Top-level | | PADDR2 | |
@@ -498,8 +498,7 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval) | |||
498 | * | 498 | * |
499 | * ... except in early boot when the kernel sets up the initial pagetables, | 499 | * ... except in early boot when the kernel sets up the initial pagetables, |
500 | * which makes booting astonishingly slow. So we don't even tell the Host | 500 | * which makes booting astonishingly slow. So we don't even tell the Host |
501 | * anything changed until we've done the first page table switch. | 501 | * anything changed until we've done the first page table switch. */ |
502 | */ | ||
503 | static void lguest_set_pte(pte_t *ptep, pte_t pteval) | 502 | static void lguest_set_pte(pte_t *ptep, pte_t pteval) |
504 | { | 503 | { |
505 | *ptep = pteval; | 504 | *ptep = pteval; |
@@ -720,10 +719,10 @@ static void lguest_time_init(void) | |||
720 | /* Set up the timer interrupt (0) to go to our simple timer routine */ | 719 | /* Set up the timer interrupt (0) to go to our simple timer routine */ |
721 | set_irq_handler(0, lguest_time_irq); | 720 | set_irq_handler(0, lguest_time_irq); |
722 | 721 | ||
723 | /* Our clock structure look like arch/i386/kernel/tsc.c if we can use | 722 | /* Our clock structure looks like arch/x86/kernel/tsc_32.c if we can |
724 | * the TSC, otherwise it's a dumb nanosecond-resolution clock. Either | 723 | * use the TSC, otherwise it's a dumb nanosecond-resolution clock. |
725 | * way, the "rating" is initialized so high that it's always chosen | 724 | * Either way, the "rating" is set so high that it's always chosen over |
726 | * over any other clocksource. */ | 725 | * any other clocksource. */ |
727 | if (lguest_data.tsc_khz) | 726 | if (lguest_data.tsc_khz) |
728 | lguest_clock.mult = clocksource_khz2mult(lguest_data.tsc_khz, | 727 | lguest_clock.mult = clocksource_khz2mult(lguest_data.tsc_khz, |
729 | lguest_clock.shift); | 728 | lguest_clock.shift); |
@@ -749,7 +748,7 @@ static void lguest_time_init(void) | |||
749 | * to work. They're pretty simple. | 748 | * to work. They're pretty simple. |
750 | */ | 749 | */ |
751 | 750 | ||
752 | /* The Guest needs to tell the host what stack it expects traps to use. For | 751 | /* The Guest needs to tell the Host what stack it expects traps to use. For |
753 | * native hardware, this is part of the Task State Segment mentioned above in | 752 | * native hardware, this is part of the Task State Segment mentioned above in |
754 | * lguest_load_tr_desc(), but to help hypervisors there's this special call. | 753 | * lguest_load_tr_desc(), but to help hypervisors there's this special call. |
755 | * | 754 | * |
@@ -850,13 +849,16 @@ static __init char *lguest_memory_setup(void) | |||
850 | return "LGUEST"; | 849 | return "LGUEST"; |
851 | } | 850 | } |
852 | 851 | ||
853 | /* Before virtqueues are set up, we use LHCALL_NOTIFY on normal memory to | 852 | /* We will eventually use the virtio console device to produce console output, |
854 | * produce console output. */ | 853 | * but before that is set up we use LHCALL_NOTIFY on normal memory to produce |
854 | * console output. */ | ||
855 | static __init int early_put_chars(u32 vtermno, const char *buf, int count) | 855 | static __init int early_put_chars(u32 vtermno, const char *buf, int count) |
856 | { | 856 | { |
857 | char scratch[17]; | 857 | char scratch[17]; |
858 | unsigned int len = count; | 858 | unsigned int len = count; |
859 | 859 | ||
860 | /* We use a nul-terminated string, so we have to make a copy. Icky, | ||
861 | * huh? */ | ||
860 | if (len > sizeof(scratch) - 1) | 862 | if (len > sizeof(scratch) - 1) |
861 | len = sizeof(scratch) - 1; | 863 | len = sizeof(scratch) - 1; |
862 | scratch[len] = '\0'; | 864 | scratch[len] = '\0'; |
@@ -883,7 +885,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) | |||
883 | * Our current solution is to allow the paravirt back end to optionally patch | 885 | * Our current solution is to allow the paravirt back end to optionally patch |
884 | * over the indirect calls to replace them with something more efficient. We | 886 | * over the indirect calls to replace them with something more efficient. We |
885 | * patch the four most commonly called functions: disable interrupts, enable | 887 | * patch the four most commonly called functions: disable interrupts, enable |
886 | * interrupts, restore interrupts and save interrupts. We usually have 10 | 888 | * interrupts, restore interrupts and save interrupts. We usually have 6 or 10 |
887 | * bytes to patch into: the Guest versions of these operations are small enough | 889 | * bytes to patch into: the Guest versions of these operations are small enough |
888 | * that we can fit comfortably. | 890 | * that we can fit comfortably. |
889 | * | 891 | * |
@@ -1015,7 +1017,7 @@ __init void lguest_init(void) | |||
1015 | asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_DS) : "memory"); | 1017 | asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_DS) : "memory"); |
1016 | 1018 | ||
1017 | /* The Host uses the top of the Guest's virtual address space for the | 1019 | /* The Host uses the top of the Guest's virtual address space for the |
1018 | * Host<->Guest Switcher, and it tells us how much it needs in | 1020 | * Host<->Guest Switcher, and it tells us how big that is in |
1019 | * lguest_data.reserve_mem, set up on the LGUEST_INIT hypercall. */ | 1021 | * lguest_data.reserve_mem, set up on the LGUEST_INIT hypercall. */ |
1020 | reserve_top_address(lguest_data.reserve_mem); | 1022 | reserve_top_address(lguest_data.reserve_mem); |
1021 | 1023 | ||
@@ -1065,6 +1067,6 @@ __init void lguest_init(void) | |||
1065 | /* | 1067 | /* |
1066 | * This marks the end of stage II of our journey, The Guest. | 1068 | * This marks the end of stage II of our journey, The Guest. |
1067 | * | 1069 | * |
1068 | * It is now time for us to explore the nooks and crannies of the three Guest | 1070 | * It is now time for us to explore the layer of virtual drivers and complete |
1069 | * devices and complete our understanding of the Guest in "make Drivers". | 1071 | * our understanding of the Guest in "make Drivers". |
1070 | */ | 1072 | */ |
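Editor's note: the lguest comments in the hunks above describe the classic i386 two-level page-table walk driven by cr3. The sketch below spells that walk out in C, assuming 4 kB pages, the usual 10/10/12 bit split, and a hypothetical read_phys() helper for reading a 32-bit word at a physical address; it is illustrative only, not the Host's or Guest's actual walker.

/* Two-level lookup as described above: cr3 -> page directory -> page table
 * -> physical page.  Only the "present" bit is checked. */
#include <stdint.h>

#define PTE_PRESENT 0x1u

/* read_phys() stands in for "read a 32-bit word at a physical address". */
extern uint32_t read_phys(uint32_t paddr);

int walk_two_level(uint32_t cr3, uint32_t vaddr, uint32_t *paddr_out)
{
    uint32_t pde = read_phys((cr3 & ~0xfffu) + ((vaddr >> 22) & 0x3ff) * 4);
    if (!(pde & PTE_PRESENT))
        return -1;

    uint32_t pte = read_phys((pde & ~0xfffu) + ((vaddr >> 12) & 0x3ff) * 4);
    if (!(pte & PTE_PRESENT))
        return -1;

    *paddr_out = (pte & ~0xfffu) | (vaddr & 0xfffu);
    return 0;
}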
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S index ebc6ac733899..95b6fbcded63 100644 --- a/arch/x86/lguest/i386_head.S +++ b/arch/x86/lguest/i386_head.S | |||
@@ -6,7 +6,7 @@ | |||
6 | #include <asm/processor-flags.h> | 6 | #include <asm/processor-flags.h> |
7 | 7 | ||
8 | /*G:020 This is where we begin: head.S notes that the boot header's platform | 8 | /*G:020 This is where we begin: head.S notes that the boot header's platform |
9 | * type field is "1" (lguest), so calls us here. The boot header is in %esi. | 9 | * type field is "1" (lguest), so calls us here. |
10 | * | 10 | * |
11 | * WARNING: be very careful here! We're running at addresses equal to physical | 11 | * WARNING: be very careful here! We're running at addresses equal to physical |
12 | * addesses (around 0), not above PAGE_OFFSET as most code expectes | 12 | * addesses (around 0), not above PAGE_OFFSET as most code expectes |
@@ -17,13 +17,15 @@ | |||
17 | * boot. */ | 17 | * boot. */ |
18 | .section .init.text, "ax", @progbits | 18 | .section .init.text, "ax", @progbits |
19 | ENTRY(lguest_entry) | 19 | ENTRY(lguest_entry) |
20 | /* Make initial hypercall now, so we can set up the pagetables. */ | 20 | /* We make the "initialization" hypercall now to tell the Host about |
21 | * us, and also find out where it put our page tables. */ | ||
21 | movl $LHCALL_LGUEST_INIT, %eax | 22 | movl $LHCALL_LGUEST_INIT, %eax |
22 | movl $lguest_data - __PAGE_OFFSET, %edx | 23 | movl $lguest_data - __PAGE_OFFSET, %edx |
23 | int $LGUEST_TRAP_ENTRY | 24 | int $LGUEST_TRAP_ENTRY |
24 | 25 | ||
25 | /* The Host put the toplevel pagetable in lguest_data.pgdir. The movsl | 26 | /* The Host put the toplevel pagetable in lguest_data.pgdir. The movsl |
26 | * instruction uses %esi implicitly. */ | 27 | * instruction uses %esi implicitly as the source for the copy we're |
28 | * about to do. */ | ||
27 | movl lguest_data - __PAGE_OFFSET + LGUEST_DATA_pgdir, %esi | 29 | movl lguest_data - __PAGE_OFFSET + LGUEST_DATA_pgdir, %esi |
28 | 30 | ||
29 | /* Copy first 32 entries of page directory to __PAGE_OFFSET entries. | 31 | /* Copy first 32 entries of page directory to __PAGE_OFFSET entries. |
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 361ac5107b33..69371434b0cf 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c | |||
@@ -29,14 +29,14 @@ | |||
29 | #include <asm/arch_hooks.h> | 29 | #include <asm/arch_hooks.h> |
30 | 30 | ||
31 | /* TLB state -- visible externally, indexed physically */ | 31 | /* TLB state -- visible externally, indexed physically */ |
32 | DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 }; | 32 | DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 }; |
33 | 33 | ||
34 | /* CPU IRQ affinity -- set to all ones initially */ | 34 | /* CPU IRQ affinity -- set to all ones initially */ |
35 | static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = ~0UL }; | 35 | static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = ~0UL }; |
36 | 36 | ||
37 | /* per CPU data structure (for /proc/cpuinfo et al), visible externally | 37 | /* per CPU data structure (for /proc/cpuinfo et al), visible externally |
38 | * indexed physically */ | 38 | * indexed physically */ |
39 | DEFINE_PER_CPU(cpuinfo_x86, cpu_info) __cacheline_aligned; | 39 | DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); |
40 | EXPORT_PER_CPU_SYMBOL(cpu_info); | 40 | EXPORT_PER_CPU_SYMBOL(cpu_info); |
41 | 41 | ||
42 | /* physical ID of the CPU used to boot the system */ | 42 | /* physical ID of the CPU used to boot the system */ |
diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c index 503dfc05111b..33563ee8eb0f 100644 --- a/arch/x86/mm/fault_32.c +++ b/arch/x86/mm/fault_32.c | |||
@@ -550,7 +550,7 @@ no_context: | |||
550 | page &= PAGE_MASK; | 550 | page &= PAGE_MASK; |
551 | page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT) | 551 | page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT) |
552 | & (PTRS_PER_PMD - 1)]; | 552 | & (PTRS_PER_PMD - 1)]; |
553 | printk(KERN_ALERT "*pde = %016Lx ", page); | 553 | printk(KERN_CONT "*pde = %016Lx ", page); |
554 | page &= ~_PAGE_NX; | 554 | page &= ~_PAGE_NX; |
555 | } | 555 | } |
556 | #else | 556 | #else |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 1e3862e41065..a7308b2cd058 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -728,12 +728,6 @@ int in_gate_area_no_task(unsigned long addr) | |||
728 | return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END); | 728 | return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END); |
729 | } | 729 | } |
730 | 730 | ||
731 | void * __init alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size) | ||
732 | { | ||
733 | return __alloc_bootmem_core(pgdat->bdata, size, | ||
734 | SMP_CACHE_BYTES, (4UL*1024*1024*1024), 0); | ||
735 | } | ||
736 | |||
737 | const char *arch_vma_name(struct vm_area_struct *vma) | 731 | const char *arch_vma_name(struct vm_area_struct *vma) |
738 | { | 732 | { |
739 | if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) | 733 | if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) |
diff --git a/arch/x86_64/.gitignore b/arch/x86_64/.gitignore deleted file mode 100644 index 36ef4c374d25..000000000000 --- a/arch/x86_64/.gitignore +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | boot | ||
diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug deleted file mode 100644 index 775d211a5cf9..000000000000 --- a/arch/x86_64/Kconfig.debug +++ /dev/null | |||
@@ -1,61 +0,0 @@ | |||
1 | menu "Kernel hacking" | ||
2 | |||
3 | config TRACE_IRQFLAGS_SUPPORT | ||
4 | bool | ||
5 | default y | ||
6 | |||
7 | source "lib/Kconfig.debug" | ||
8 | |||
9 | config DEBUG_RODATA | ||
10 | bool "Write protect kernel read-only data structures" | ||
11 | depends on DEBUG_KERNEL | ||
12 | help | ||
13 | Mark the kernel read-only data as write-protected in the pagetables, | ||
14 | in order to catch accidental (and incorrect) writes to such const data. | ||
15 | This option may have a slight performance impact because a portion | ||
16 | of the kernel code won't be covered by a 2MB TLB anymore. | ||
17 | If in doubt, say "N". | ||
18 | |||
19 | config IOMMU_DEBUG | ||
20 | depends on IOMMU && DEBUG_KERNEL | ||
21 | bool "Enable IOMMU debugging" | ||
22 | help | ||
23 | Force the IOMMU to on even when you have less than 4GB of | ||
24 | memory and add debugging code. On overflow always panic. And | ||
25 | allow to enable IOMMU leak tracing. Can be disabled at boot | ||
26 | time with iommu=noforce. This will also enable scatter gather | ||
27 | list merging. Currently not recommended for production | ||
28 | code. When you use it make sure you have a big enough | ||
29 | IOMMU/AGP aperture. Most of the options enabled by this can | ||
30 | be set more finegrained using the iommu= command line | ||
31 | options. See Documentation/x86_64/boot-options.txt for more | ||
32 | details. | ||
33 | |||
34 | config IOMMU_LEAK | ||
35 | bool "IOMMU leak tracing" | ||
36 | depends on DEBUG_KERNEL | ||
37 | depends on IOMMU_DEBUG | ||
38 | help | ||
39 | Add a simple leak tracer to the IOMMU code. This is useful when you | ||
40 | are debugging a buggy device driver that leaks IOMMU mappings. | ||
41 | |||
42 | config DEBUG_STACKOVERFLOW | ||
43 | bool "Check for stack overflows" | ||
44 | depends on DEBUG_KERNEL | ||
45 | help | ||
46 | This option will cause messages to be printed if free stack space | ||
47 | drops below a certain limit. | ||
48 | |||
49 | config DEBUG_STACK_USAGE | ||
50 | bool "Stack utilization instrumentation" | ||
51 | depends on DEBUG_KERNEL | ||
52 | help | ||
53 | Enables the display of the minimum amount of free stack which each | ||
54 | task has ever had available in the sysrq-T and sysrq-P debug output. | ||
55 | |||
56 | This option will slow down process creation somewhat. | ||
57 | |||
58 | #config X86_REMOTE_DEBUG | ||
59 | # bool "kgdb debugging stub" | ||
60 | |||
61 | endmenu | ||
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 54dc05439009..e47a9309eb48 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -1443,8 +1443,11 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk, | |||
1443 | cfqq = *async_cfqq; | 1443 | cfqq = *async_cfqq; |
1444 | } | 1444 | } |
1445 | 1445 | ||
1446 | if (!cfqq) | 1446 | if (!cfqq) { |
1447 | cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask); | 1447 | cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask); |
1448 | if (!cfqq) | ||
1449 | return NULL; | ||
1450 | } | ||
1448 | 1451 | ||
1449 | /* | 1452 | /* |
1450 | * pin the queue now that it's allocated, scheduler exit will prune it | 1453 | * pin the queue now that it's allocated, scheduler exit will prune it |
@@ -2053,7 +2056,7 @@ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) | |||
2053 | { | 2056 | { |
2054 | del_timer_sync(&cfqd->idle_slice_timer); | 2057 | del_timer_sync(&cfqd->idle_slice_timer); |
2055 | del_timer_sync(&cfqd->idle_class_timer); | 2058 | del_timer_sync(&cfqd->idle_class_timer); |
2056 | blk_sync_queue(cfqd->queue); | 2059 | kblockd_flush_work(&cfqd->unplug_work); |
2057 | } | 2060 | } |
2058 | 2061 | ||
2059 | static void cfq_put_async_queues(struct cfq_data *cfqd) | 2062 | static void cfq_put_async_queues(struct cfq_data *cfqd) |
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c index f84093b97f70..cae0a852619e 100644 --- a/block/compat_ioctl.c +++ b/block/compat_ioctl.c | |||
@@ -581,7 +581,7 @@ static int compat_blkdev_driver_ioctl(struct inode *inode, struct file *file, | |||
581 | { | 581 | { |
582 | int ret; | 582 | int ret; |
583 | 583 | ||
584 | switch (arg) { | 584 | switch (cmd) { |
585 | case HDIO_GET_UNMASKINTR: | 585 | case HDIO_GET_UNMASKINTR: |
586 | case HDIO_GET_MULTCOUNT: | 586 | case HDIO_GET_MULTCOUNT: |
587 | case HDIO_GET_KEEPSETTINGS: | 587 | case HDIO_GET_KEEPSETTINGS: |
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index de5ba479c224..56f2646612e6 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c | |||
@@ -39,7 +39,7 @@ | |||
39 | 39 | ||
40 | static void blk_unplug_work(struct work_struct *work); | 40 | static void blk_unplug_work(struct work_struct *work); |
41 | static void blk_unplug_timeout(unsigned long data); | 41 | static void blk_unplug_timeout(unsigned long data); |
42 | static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); | 42 | static void drive_stat_acct(struct request *rq, int new_io); |
43 | static void init_request_from_bio(struct request *req, struct bio *bio); | 43 | static void init_request_from_bio(struct request *req, struct bio *bio); |
44 | static int __make_request(struct request_queue *q, struct bio *bio); | 44 | static int __make_request(struct request_queue *q, struct bio *bio); |
45 | static struct io_context *current_io_context(gfp_t gfp_flags, int node); | 45 | static struct io_context *current_io_context(gfp_t gfp_flags, int node); |
@@ -791,7 +791,6 @@ static int __blk_free_tags(struct blk_queue_tag *bqt) | |||
791 | retval = atomic_dec_and_test(&bqt->refcnt); | 791 | retval = atomic_dec_and_test(&bqt->refcnt); |
792 | if (retval) { | 792 | if (retval) { |
793 | BUG_ON(bqt->busy); | 793 | BUG_ON(bqt->busy); |
794 | BUG_ON(!list_empty(&bqt->busy_list)); | ||
795 | 794 | ||
796 | kfree(bqt->tag_index); | 795 | kfree(bqt->tag_index); |
797 | bqt->tag_index = NULL; | 796 | bqt->tag_index = NULL; |
@@ -903,7 +902,6 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q, | |||
903 | if (init_tag_map(q, tags, depth)) | 902 | if (init_tag_map(q, tags, depth)) |
904 | goto fail; | 903 | goto fail; |
905 | 904 | ||
906 | INIT_LIST_HEAD(&tags->busy_list); | ||
907 | tags->busy = 0; | 905 | tags->busy = 0; |
908 | atomic_set(&tags->refcnt, 1); | 906 | atomic_set(&tags->refcnt, 1); |
909 | return tags; | 907 | return tags; |
@@ -954,6 +952,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth, | |||
954 | */ | 952 | */ |
955 | q->queue_tags = tags; | 953 | q->queue_tags = tags; |
956 | q->queue_flags |= (1 << QUEUE_FLAG_QUEUED); | 954 | q->queue_flags |= (1 << QUEUE_FLAG_QUEUED); |
955 | INIT_LIST_HEAD(&q->tag_busy_list); | ||
957 | return 0; | 956 | return 0; |
958 | fail: | 957 | fail: |
959 | kfree(tags); | 958 | kfree(tags); |
@@ -1057,18 +1056,16 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq) | |||
1057 | 1056 | ||
1058 | bqt->tag_index[tag] = NULL; | 1057 | bqt->tag_index[tag] = NULL; |
1059 | 1058 | ||
1060 | /* | 1059 | if (unlikely(!test_bit(tag, bqt->tag_map))) { |
1061 | * We use test_and_clear_bit's memory ordering properties here. | ||
1062 | * The tag_map bit acts as a lock for tag_index[bit], so we need | ||
1063 | * a barrer before clearing the bit (precisely: release semantics). | ||
1064 | * Could use clear_bit_unlock when it is merged. | ||
1065 | */ | ||
1066 | if (unlikely(!test_and_clear_bit(tag, bqt->tag_map))) { | ||
1067 | printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n", | 1060 | printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n", |
1068 | __FUNCTION__, tag); | 1061 | __FUNCTION__, tag); |
1069 | return; | 1062 | return; |
1070 | } | 1063 | } |
1071 | 1064 | /* | |
1065 | * The tag_map bit acts as a lock for tag_index[bit], so we need | ||
1066 | * unlock memory barrier semantics. | ||
1067 | */ | ||
1068 | clear_bit_unlock(tag, bqt->tag_map); | ||
1072 | bqt->busy--; | 1069 | bqt->busy--; |
1073 | } | 1070 | } |
1074 | 1071 | ||
@@ -1114,17 +1111,17 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq) | |||
1114 | if (tag >= bqt->max_depth) | 1111 | if (tag >= bqt->max_depth) |
1115 | return 1; | 1112 | return 1; |
1116 | 1113 | ||
1117 | } while (test_and_set_bit(tag, bqt->tag_map)); | 1114 | } while (test_and_set_bit_lock(tag, bqt->tag_map)); |
1118 | /* | 1115 | /* |
1119 | * We rely on test_and_set_bit providing lock memory ordering semantics | 1116 | * We need lock ordering semantics given by test_and_set_bit_lock. |
1120 | * (could use test_and_set_bit_lock when it is merged). | 1117 | * See blk_queue_end_tag for details. |
1121 | */ | 1118 | */ |
1122 | 1119 | ||
1123 | rq->cmd_flags |= REQ_QUEUED; | 1120 | rq->cmd_flags |= REQ_QUEUED; |
1124 | rq->tag = tag; | 1121 | rq->tag = tag; |
1125 | bqt->tag_index[tag] = rq; | 1122 | bqt->tag_index[tag] = rq; |
1126 | blkdev_dequeue_request(rq); | 1123 | blkdev_dequeue_request(rq); |
1127 | list_add(&rq->queuelist, &bqt->busy_list); | 1124 | list_add(&rq->queuelist, &q->tag_busy_list); |
1128 | bqt->busy++; | 1125 | bqt->busy++; |
1129 | return 0; | 1126 | return 0; |
1130 | } | 1127 | } |
@@ -1145,11 +1142,10 @@ EXPORT_SYMBOL(blk_queue_start_tag); | |||
1145 | **/ | 1142 | **/ |
1146 | void blk_queue_invalidate_tags(struct request_queue *q) | 1143 | void blk_queue_invalidate_tags(struct request_queue *q) |
1147 | { | 1144 | { |
1148 | struct blk_queue_tag *bqt = q->queue_tags; | ||
1149 | struct list_head *tmp, *n; | 1145 | struct list_head *tmp, *n; |
1150 | struct request *rq; | 1146 | struct request *rq; |
1151 | 1147 | ||
1152 | list_for_each_safe(tmp, n, &bqt->busy_list) { | 1148 | list_for_each_safe(tmp, n, &q->tag_busy_list) { |
1153 | rq = list_entry_rq(tmp); | 1149 | rq = list_entry_rq(tmp); |
1154 | 1150 | ||
1155 | if (rq->tag == -1) { | 1151 | if (rq->tag == -1) { |
@@ -1366,9 +1362,7 @@ new_segment: | |||
1366 | sg = sg_next(sg); | 1362 | sg = sg_next(sg); |
1367 | } | 1363 | } |
1368 | 1364 | ||
1369 | sg_set_page(sg, bvec->bv_page); | 1365 | sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset); |
1370 | sg->length = nbytes; | ||
1371 | sg->offset = bvec->bv_offset; | ||
1372 | nsegs++; | 1366 | nsegs++; |
1373 | } | 1367 | } |
1374 | bvprv = bvec; | 1368 | bvprv = bvec; |
@@ -1740,6 +1734,7 @@ EXPORT_SYMBOL(blk_stop_queue); | |||
1740 | void blk_sync_queue(struct request_queue *q) | 1734 | void blk_sync_queue(struct request_queue *q) |
1741 | { | 1735 | { |
1742 | del_timer_sync(&q->unplug_timer); | 1736 | del_timer_sync(&q->unplug_timer); |
1737 | kblockd_flush_work(&q->unplug_work); | ||
1743 | } | 1738 | } |
1744 | EXPORT_SYMBOL(blk_sync_queue); | 1739 | EXPORT_SYMBOL(blk_sync_queue); |
1745 | 1740 | ||
@@ -2343,7 +2338,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq, | |||
2343 | if (blk_rq_tagged(rq)) | 2338 | if (blk_rq_tagged(rq)) |
2344 | blk_queue_end_tag(q, rq); | 2339 | blk_queue_end_tag(q, rq); |
2345 | 2340 | ||
2346 | drive_stat_acct(rq, rq->nr_sectors, 1); | 2341 | drive_stat_acct(rq, 1); |
2347 | __elv_add_request(q, rq, where, 0); | 2342 | __elv_add_request(q, rq, where, 0); |
2348 | blk_start_queueing(q); | 2343 | blk_start_queueing(q); |
2349 | spin_unlock_irqrestore(q->queue_lock, flags); | 2344 | spin_unlock_irqrestore(q->queue_lock, flags); |
@@ -2738,7 +2733,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector) | |||
2738 | 2733 | ||
2739 | EXPORT_SYMBOL(blkdev_issue_flush); | 2734 | EXPORT_SYMBOL(blkdev_issue_flush); |
2740 | 2735 | ||
2741 | static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io) | 2736 | static void drive_stat_acct(struct request *rq, int new_io) |
2742 | { | 2737 | { |
2743 | int rw = rq_data_dir(rq); | 2738 | int rw = rq_data_dir(rq); |
2744 | 2739 | ||
@@ -2760,7 +2755,7 @@ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io) | |||
2760 | */ | 2755 | */ |
2761 | static inline void add_request(struct request_queue * q, struct request * req) | 2756 | static inline void add_request(struct request_queue * q, struct request * req) |
2762 | { | 2757 | { |
2763 | drive_stat_acct(req, req->nr_sectors, 1); | 2758 | drive_stat_acct(req, 1); |
2764 | 2759 | ||
2765 | /* | 2760 | /* |
2766 | * elevator indicated where it wants this request to be | 2761 | * elevator indicated where it wants this request to be |
@@ -3017,7 +3012,7 @@ static int __make_request(struct request_queue *q, struct bio *bio) | |||
3017 | req->biotail = bio; | 3012 | req->biotail = bio; |
3018 | req->nr_sectors = req->hard_nr_sectors += nr_sectors; | 3013 | req->nr_sectors = req->hard_nr_sectors += nr_sectors; |
3019 | req->ioprio = ioprio_best(req->ioprio, prio); | 3014 | req->ioprio = ioprio_best(req->ioprio, prio); |
3020 | drive_stat_acct(req, nr_sectors, 0); | 3015 | drive_stat_acct(req, 0); |
3021 | if (!attempt_back_merge(q, req)) | 3016 | if (!attempt_back_merge(q, req)) |
3022 | elv_merged_request(q, req, el_ret); | 3017 | elv_merged_request(q, req, el_ret); |
3023 | goto out; | 3018 | goto out; |
@@ -3044,7 +3039,7 @@ static int __make_request(struct request_queue *q, struct bio *bio) | |||
3044 | req->sector = req->hard_sector = bio->bi_sector; | 3039 | req->sector = req->hard_sector = bio->bi_sector; |
3045 | req->nr_sectors = req->hard_nr_sectors += nr_sectors; | 3040 | req->nr_sectors = req->hard_nr_sectors += nr_sectors; |
3046 | req->ioprio = ioprio_best(req->ioprio, prio); | 3041 | req->ioprio = ioprio_best(req->ioprio, prio); |
3047 | drive_stat_acct(req, nr_sectors, 0); | 3042 | drive_stat_acct(req, 0); |
3048 | if (!attempt_front_merge(q, req)) | 3043 | if (!attempt_front_merge(q, req)) |
3049 | elv_merged_request(q, req, el_ret); | 3044 | elv_merged_request(q, req, el_ret); |
3050 | goto out; | 3045 | goto out; |
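Editor's note: the tag-map changes above drop the open-coded barrier comments in favour of test_and_set_bit_lock()/clear_bit_unlock(), whose names carry the acquire/release pairing. The sketch below mimics that pairing with C11 atomics to show the intent; the kernel primitives provide the same guarantees, and the polarity of the return value here is chosen for readability rather than to match the kernel API.

/* The tag_map bit acts as a per-tag lock: taking a tag needs acquire
 * semantics, releasing it needs release semantics. */
#include <stdatomic.h>
#include <stdbool.h>

bool try_take_tag(atomic_flag *tag_bit)
{
    /* analogous to test_and_set_bit_lock(): acquire ordering; true means
     * the bit was clear and we now own the tag */
    return !atomic_flag_test_and_set_explicit(tag_bit, memory_order_acquire);
}

void release_tag(atomic_flag *tag_bit)
{
    /* analogous to clear_bit_unlock(): stores to tag_index[tag] made while
     * we held the tag become visible before the bit is observed clear */
    atomic_flag_clear_explicit(tag_bit, memory_order_release);
}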
diff --git a/crypto/hmac.c b/crypto/hmac.c index e4eb6ac53b5c..0f05be769c34 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c | |||
@@ -61,7 +61,7 @@ static int hmac_setkey(struct crypto_hash *parent, | |||
61 | desc.tfm = tfm; | 61 | desc.tfm = tfm; |
62 | desc.flags = crypto_hash_get_flags(parent); | 62 | desc.flags = crypto_hash_get_flags(parent); |
63 | desc.flags &= CRYPTO_TFM_REQ_MAY_SLEEP; | 63 | desc.flags &= CRYPTO_TFM_REQ_MAY_SLEEP; |
64 | sg_set_buf(&tmp, inkey, keylen); | 64 | sg_init_one(&tmp, inkey, keylen); |
65 | 65 | ||
66 | err = crypto_hash_digest(&desc, &tmp, keylen, digest); | 66 | err = crypto_hash_digest(&desc, &tmp, keylen, digest); |
67 | if (err) | 67 | if (err) |
@@ -96,7 +96,7 @@ static int hmac_init(struct hash_desc *pdesc) | |||
96 | 96 | ||
97 | desc.tfm = ctx->child; | 97 | desc.tfm = ctx->child; |
98 | desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | 98 | desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
99 | sg_set_buf(&tmp, ipad, bs); | 99 | sg_init_one(&tmp, ipad, bs); |
100 | 100 | ||
101 | err = crypto_hash_init(&desc); | 101 | err = crypto_hash_init(&desc); |
102 | if (unlikely(err)) | 102 | if (unlikely(err)) |
@@ -131,7 +131,7 @@ static int hmac_final(struct hash_desc *pdesc, u8 *out) | |||
131 | 131 | ||
132 | desc.tfm = ctx->child; | 132 | desc.tfm = ctx->child; |
133 | desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | 133 | desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
134 | sg_set_buf(&tmp, opad, bs + ds); | 134 | sg_init_one(&tmp, opad, bs + ds); |
135 | 135 | ||
136 | err = crypto_hash_final(&desc, digest); | 136 | err = crypto_hash_final(&desc, digest); |
137 | if (unlikely(err)) | 137 | if (unlikely(err)) |
@@ -158,10 +158,11 @@ static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg, | |||
158 | desc.tfm = ctx->child; | 158 | desc.tfm = ctx->child; |
159 | desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | 159 | desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; |
160 | 160 | ||
161 | sg_init_table(sg1, 2); | ||
161 | sg_set_buf(sg1, ipad, bs); | 162 | sg_set_buf(sg1, ipad, bs); |
163 | sg_set_page(&sg1[1], (void *) sg, 0, 0); | ||
162 | 164 | ||
163 | sg_set_page(&sg[1], (void *) sg); | 165 | sg_init_table(sg2, 1); |
164 | sg1[1].length = 0; | ||
165 | sg_set_buf(sg2, opad, bs + ds); | 166 | sg_set_buf(sg2, opad, bs + ds); |
166 | 167 | ||
167 | err = crypto_hash_digest(&desc, sg1, nbytes + bs, digest); | 168 | err = crypto_hash_digest(&desc, sg1, nbytes + bs, digest); |
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index d741c63af42c..24141fb6f5cb 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c | |||
@@ -139,7 +139,7 @@ static void test_hash(char *algo, struct hash_testvec *template, | |||
139 | printk("test %u:\n", i + 1); | 139 | printk("test %u:\n", i + 1); |
140 | memset(result, 0, 64); | 140 | memset(result, 0, 64); |
141 | 141 | ||
142 | sg_set_buf(&sg[0], hash_tv[i].plaintext, hash_tv[i].psize); | 142 | sg_init_one(&sg[0], hash_tv[i].plaintext, hash_tv[i].psize); |
143 | 143 | ||
144 | if (hash_tv[i].ksize) { | 144 | if (hash_tv[i].ksize) { |
145 | ret = crypto_hash_setkey(tfm, hash_tv[i].key, | 145 | ret = crypto_hash_setkey(tfm, hash_tv[i].key, |
@@ -176,6 +176,7 @@ static void test_hash(char *algo, struct hash_testvec *template, | |||
176 | memset(result, 0, 64); | 176 | memset(result, 0, 64); |
177 | 177 | ||
178 | temp = 0; | 178 | temp = 0; |
179 | sg_init_table(sg, hash_tv[i].np); | ||
179 | for (k = 0; k < hash_tv[i].np; k++) { | 180 | for (k = 0; k < hash_tv[i].np; k++) { |
180 | memcpy(&xbuf[IDX[k]], | 181 | memcpy(&xbuf[IDX[k]], |
181 | hash_tv[i].plaintext + temp, | 182 | hash_tv[i].plaintext + temp, |
@@ -289,8 +290,8 @@ static void test_cipher(char *algo, int enc, | |||
289 | goto out; | 290 | goto out; |
290 | } | 291 | } |
291 | 292 | ||
292 | sg_set_buf(&sg[0], cipher_tv[i].input, | 293 | sg_init_one(&sg[0], cipher_tv[i].input, |
293 | cipher_tv[i].ilen); | 294 | cipher_tv[i].ilen); |
294 | 295 | ||
295 | ablkcipher_request_set_crypt(req, sg, sg, | 296 | ablkcipher_request_set_crypt(req, sg, sg, |
296 | cipher_tv[i].ilen, | 297 | cipher_tv[i].ilen, |
@@ -353,6 +354,7 @@ static void test_cipher(char *algo, int enc, | |||
353 | } | 354 | } |
354 | 355 | ||
355 | temp = 0; | 356 | temp = 0; |
357 | sg_init_table(sg, cipher_tv[i].np); | ||
356 | for (k = 0; k < cipher_tv[i].np; k++) { | 358 | for (k = 0; k < cipher_tv[i].np; k++) { |
357 | memcpy(&xbuf[IDX[k]], | 359 | memcpy(&xbuf[IDX[k]], |
358 | cipher_tv[i].input + temp, | 360 | cipher_tv[i].input + temp, |
@@ -414,7 +416,7 @@ static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc, char *p, | |||
414 | int bcount; | 416 | int bcount; |
415 | int ret; | 417 | int ret; |
416 | 418 | ||
417 | sg_set_buf(sg, p, blen); | 419 | sg_init_one(sg, p, blen); |
418 | 420 | ||
419 | for (start = jiffies, end = start + sec * HZ, bcount = 0; | 421 | for (start = jiffies, end = start + sec * HZ, bcount = 0; |
420 | time_before(jiffies, end); bcount++) { | 422 | time_before(jiffies, end); bcount++) { |
@@ -440,7 +442,7 @@ static int test_cipher_cycles(struct blkcipher_desc *desc, int enc, char *p, | |||
440 | int ret = 0; | 442 | int ret = 0; |
441 | int i; | 443 | int i; |
442 | 444 | ||
443 | sg_set_buf(sg, p, blen); | 445 | sg_init_one(sg, p, blen); |
444 | 446 | ||
445 | local_bh_disable(); | 447 | local_bh_disable(); |
446 | local_irq_disable(); | 448 | local_irq_disable(); |
@@ -570,6 +572,8 @@ static int test_hash_jiffies_digest(struct hash_desc *desc, char *p, int blen, | |||
570 | int bcount; | 572 | int bcount; |
571 | int ret; | 573 | int ret; |
572 | 574 | ||
575 | sg_init_table(sg, 1); | ||
576 | |||
573 | for (start = jiffies, end = start + sec * HZ, bcount = 0; | 577 | for (start = jiffies, end = start + sec * HZ, bcount = 0; |
574 | time_before(jiffies, end); bcount++) { | 578 | time_before(jiffies, end); bcount++) { |
575 | sg_set_buf(sg, p, blen); | 579 | sg_set_buf(sg, p, blen); |
@@ -595,6 +599,8 @@ static int test_hash_jiffies(struct hash_desc *desc, char *p, int blen, | |||
595 | if (plen == blen) | 599 | if (plen == blen) |
596 | return test_hash_jiffies_digest(desc, p, blen, out, sec); | 600 | return test_hash_jiffies_digest(desc, p, blen, out, sec); |
597 | 601 | ||
602 | sg_init_table(sg, 1); | ||
603 | |||
598 | for (start = jiffies, end = start + sec * HZ, bcount = 0; | 604 | for (start = jiffies, end = start + sec * HZ, bcount = 0; |
599 | time_before(jiffies, end); bcount++) { | 605 | time_before(jiffies, end); bcount++) { |
600 | ret = crypto_hash_init(desc); | 606 | ret = crypto_hash_init(desc); |
@@ -626,6 +632,8 @@ static int test_hash_cycles_digest(struct hash_desc *desc, char *p, int blen, | |||
626 | int i; | 632 | int i; |
627 | int ret; | 633 | int ret; |
628 | 634 | ||
635 | sg_init_table(sg, 1); | ||
636 | |||
629 | local_bh_disable(); | 637 | local_bh_disable(); |
630 | local_irq_disable(); | 638 | local_irq_disable(); |
631 | 639 | ||
@@ -677,6 +685,8 @@ static int test_hash_cycles(struct hash_desc *desc, char *p, int blen, | |||
677 | if (plen == blen) | 685 | if (plen == blen) |
678 | return test_hash_cycles_digest(desc, p, blen, out); | 686 | return test_hash_cycles_digest(desc, p, blen, out); |
679 | 687 | ||
688 | sg_init_table(sg, 1); | ||
689 | |||
680 | local_bh_disable(); | 690 | local_bh_disable(); |
681 | local_irq_disable(); | 691 | local_irq_disable(); |
682 | 692 | ||
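Editor's note: the crypto hunks above consistently initialize scatterlists before use, converting bare sg_set_buf() calls to sg_init_one() and adding sg_init_table() where multiple entries are filled. As I read the (then-new) scatterlist API, sg_init_one() behaves like sg_init_table() on a one-entry list followed by sg_set_buf(); the sketch below uses minimal stand-in types so it is self-contained, and the real helpers live in <linux/scatterlist.h>.

/* Assumed relationship behind the conversions above (stand-in types). */
#include <stddef.h>

struct sg_entry {                 /* stand-in for struct scatterlist */
    const void  *buf;
    unsigned int length;
    int          is_last;
};

void sg_table_init(struct sg_entry *sg, unsigned int nents)
{
    for (unsigned int i = 0; i < nents; i++)
        sg[i] = (struct sg_entry){ 0 };
    sg[nents - 1].is_last = 1;    /* terminate the list */
}

void sg_buf_set(struct sg_entry *sg, const void *buf, unsigned int len)
{
    sg->buf = buf;
    sg->length = len;
}

void sg_one_init(struct sg_entry *sg, const void *buf, unsigned int len)
{
    sg_table_init(sg, 1);
    sg_buf_set(sg, buf, len);
}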
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c index 3839efd5eaea..1538355c266b 100644 --- a/drivers/acpi/sleep/proc.c +++ b/drivers/acpi/sleep/proc.c | |||
@@ -194,6 +194,23 @@ static int get_date_field(char **p, u32 * value) | |||
194 | return result; | 194 | return result; |
195 | } | 195 | } |
196 | 196 | ||
197 | /* Read a possibly BCD register, always return binary */ | ||
198 | static u32 cmos_bcd_read(int offset, int rtc_control) | ||
199 | { | ||
200 | u32 val = CMOS_READ(offset); | ||
201 | if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) | ||
202 | BCD_TO_BIN(val); | ||
203 | return val; | ||
204 | } | ||
205 | |||
206 | /* Write binary value into possibly BCD register */ | ||
207 | static void cmos_bcd_write(u32 val, int offset, int rtc_control) | ||
208 | { | ||
209 | if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) | ||
210 | BIN_TO_BCD(val); | ||
211 | CMOS_WRITE(val, offset); | ||
212 | } | ||
213 | |||
197 | static ssize_t | 214 | static ssize_t |
198 | acpi_system_write_alarm(struct file *file, | 215 | acpi_system_write_alarm(struct file *file, |
199 | const char __user * buffer, size_t count, loff_t * ppos) | 216 | const char __user * buffer, size_t count, loff_t * ppos) |
@@ -258,35 +275,18 @@ acpi_system_write_alarm(struct file *file, | |||
258 | spin_lock_irq(&rtc_lock); | 275 | spin_lock_irq(&rtc_lock); |
259 | 276 | ||
260 | rtc_control = CMOS_READ(RTC_CONTROL); | 277 | rtc_control = CMOS_READ(RTC_CONTROL); |
261 | if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { | ||
262 | BIN_TO_BCD(yr); | ||
263 | BIN_TO_BCD(mo); | ||
264 | BIN_TO_BCD(day); | ||
265 | BIN_TO_BCD(hr); | ||
266 | BIN_TO_BCD(min); | ||
267 | BIN_TO_BCD(sec); | ||
268 | } | ||
269 | 278 | ||
270 | if (adjust) { | 279 | if (adjust) { |
271 | yr += CMOS_READ(RTC_YEAR); | 280 | yr += cmos_bcd_read(RTC_YEAR, rtc_control); |
272 | mo += CMOS_READ(RTC_MONTH); | 281 | mo += cmos_bcd_read(RTC_MONTH, rtc_control); |
273 | day += CMOS_READ(RTC_DAY_OF_MONTH); | 282 | day += cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control); |
274 | hr += CMOS_READ(RTC_HOURS); | 283 | hr += cmos_bcd_read(RTC_HOURS, rtc_control); |
275 | min += CMOS_READ(RTC_MINUTES); | 284 | min += cmos_bcd_read(RTC_MINUTES, rtc_control); |
276 | sec += CMOS_READ(RTC_SECONDS); | 285 | sec += cmos_bcd_read(RTC_SECONDS, rtc_control); |
277 | } | 286 | } |
278 | 287 | ||
279 | spin_unlock_irq(&rtc_lock); | 288 | spin_unlock_irq(&rtc_lock); |
280 | 289 | ||
281 | if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { | ||
282 | BCD_TO_BIN(yr); | ||
283 | BCD_TO_BIN(mo); | ||
284 | BCD_TO_BIN(day); | ||
285 | BCD_TO_BIN(hr); | ||
286 | BCD_TO_BIN(min); | ||
287 | BCD_TO_BIN(sec); | ||
288 | } | ||
289 | |||
290 | if (sec > 59) { | 290 | if (sec > 59) { |
291 | min++; | 291 | min++; |
292 | sec -= 60; | 292 | sec -= 60; |
@@ -307,14 +307,6 @@ acpi_system_write_alarm(struct file *file, | |||
307 | yr++; | 307 | yr++; |
308 | mo -= 12; | 308 | mo -= 12; |
309 | } | 309 | } |
310 | if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { | ||
311 | BIN_TO_BCD(yr); | ||
312 | BIN_TO_BCD(mo); | ||
313 | BIN_TO_BCD(day); | ||
314 | BIN_TO_BCD(hr); | ||
315 | BIN_TO_BCD(min); | ||
316 | BIN_TO_BCD(sec); | ||
317 | } | ||
318 | 310 | ||
319 | spin_lock_irq(&rtc_lock); | 311 | spin_lock_irq(&rtc_lock); |
320 | /* | 312 | /* |
@@ -326,9 +318,9 @@ acpi_system_write_alarm(struct file *file, | |||
326 | CMOS_READ(RTC_INTR_FLAGS); | 318 | CMOS_READ(RTC_INTR_FLAGS); |
327 | 319 | ||
328 | /* write the fields the rtc knows about */ | 320 | /* write the fields the rtc knows about */ |
329 | CMOS_WRITE(hr, RTC_HOURS_ALARM); | 321 | cmos_bcd_write(hr, RTC_HOURS_ALARM, rtc_control); |
330 | CMOS_WRITE(min, RTC_MINUTES_ALARM); | 322 | cmos_bcd_write(min, RTC_MINUTES_ALARM, rtc_control); |
331 | CMOS_WRITE(sec, RTC_SECONDS_ALARM); | 323 | cmos_bcd_write(sec, RTC_SECONDS_ALARM, rtc_control); |
332 | 324 | ||
333 | /* | 325 | /* |
334 | * If the system supports an enhanced alarm it will have non-zero | 326 | * If the system supports an enhanced alarm it will have non-zero |
@@ -336,11 +328,11 @@ acpi_system_write_alarm(struct file *file, | |||
336 | * to the RTC area of memory. | 328 | * to the RTC area of memory. |
337 | */ | 329 | */ |
338 | if (acpi_gbl_FADT.day_alarm) | 330 | if (acpi_gbl_FADT.day_alarm) |
339 | CMOS_WRITE(day, acpi_gbl_FADT.day_alarm); | 331 | cmos_bcd_write(day, acpi_gbl_FADT.day_alarm, rtc_control); |
340 | if (acpi_gbl_FADT.month_alarm) | 332 | if (acpi_gbl_FADT.month_alarm) |
341 | CMOS_WRITE(mo, acpi_gbl_FADT.month_alarm); | 333 | cmos_bcd_write(mo, acpi_gbl_FADT.month_alarm, rtc_control); |
342 | if (acpi_gbl_FADT.century) | 334 | if (acpi_gbl_FADT.century) |
343 | CMOS_WRITE(yr / 100, acpi_gbl_FADT.century); | 335 | cmos_bcd_write(yr / 100, acpi_gbl_FADT.century, rtc_control); |
344 | /* enable the rtc alarm interrupt */ | 336 | /* enable the rtc alarm interrupt */ |
345 | rtc_control |= RTC_AIE; | 337 | rtc_control |= RTC_AIE; |
346 | CMOS_WRITE(rtc_control, RTC_CONTROL); | 338 | CMOS_WRITE(rtc_control, RTC_CONTROL); |
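Editor's note: the new cmos_bcd_read()/cmos_bcd_write() helpers above fold the repeated BCD handling into one place, since CMOS RTC fields may hold two-digit values in packed BCD depending on RTC_DM_BINARY. The self-contained program below illustrates the packed-BCD conversion those helpers perform; it is an example, not the kernel's BCD_TO_BIN/BIN_TO_BCD macros.

/* Packed-BCD conversion for two-digit CMOS values, e.g. 0x59 for 59. */
#include <assert.h>
#include <stdint.h>

uint8_t bcd_to_bin(uint8_t bcd) { return (bcd >> 4) * 10 + (bcd & 0x0f); }
uint8_t bin_to_bcd(uint8_t bin) { return ((bin / 10) << 4) | (bin % 10); }

int main(void)
{
    assert(bcd_to_bin(0x59) == 59);
    assert(bin_to_bcd(7) == 0x07);
    assert(bin_to_bcd(42) == 0x42);   /* 42 decimal -> 0x42 packed BCD */
    return 0;
}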
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 95229e77bffe..ed9b407e42d4 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/interrupt.h> | 41 | #include <linux/interrupt.h> |
42 | #include <linux/dma-mapping.h> | 42 | #include <linux/dma-mapping.h> |
43 | #include <linux/device.h> | 43 | #include <linux/device.h> |
44 | #include <linux/dmi.h> | ||
44 | #include <scsi/scsi_host.h> | 45 | #include <scsi/scsi_host.h> |
45 | #include <scsi/scsi_cmnd.h> | 46 | #include <scsi/scsi_cmnd.h> |
46 | #include <linux/libata.h> | 47 | #include <linux/libata.h> |
@@ -48,6 +49,9 @@ | |||
48 | #define DRV_NAME "ahci" | 49 | #define DRV_NAME "ahci" |
49 | #define DRV_VERSION "3.0" | 50 | #define DRV_VERSION "3.0" |
50 | 51 | ||
52 | static int ahci_enable_alpm(struct ata_port *ap, | ||
53 | enum link_pm policy); | ||
54 | static void ahci_disable_alpm(struct ata_port *ap); | ||
51 | 55 | ||
52 | enum { | 56 | enum { |
53 | AHCI_PCI_BAR = 5, | 57 | AHCI_PCI_BAR = 5, |
@@ -98,6 +102,7 @@ enum { | |||
98 | HOST_CAP_SSC = (1 << 14), /* Slumber capable */ | 102 | HOST_CAP_SSC = (1 << 14), /* Slumber capable */ |
99 | HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */ | 103 | HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */ |
100 | HOST_CAP_CLO = (1 << 24), /* Command List Override support */ | 104 | HOST_CAP_CLO = (1 << 24), /* Command List Override support */ |
105 | HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */ | ||
101 | HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */ | 106 | HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */ |
102 | HOST_CAP_SNTF = (1 << 29), /* SNotification register */ | 107 | HOST_CAP_SNTF = (1 << 29), /* SNotification register */ |
103 | HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */ | 108 | HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */ |
@@ -154,6 +159,8 @@ enum { | |||
154 | PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS, | 159 | PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS, |
155 | 160 | ||
156 | /* PORT_CMD bits */ | 161 | /* PORT_CMD bits */ |
162 | PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */ | ||
163 | PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */ | ||
157 | PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */ | 164 | PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */ |
158 | PORT_CMD_PMP = (1 << 17), /* PMP attached */ | 165 | PORT_CMD_PMP = (1 << 17), /* PMP attached */ |
159 | PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */ | 166 | PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */ |
@@ -177,13 +184,14 @@ enum { | |||
177 | AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */ | 184 | AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */ |
178 | AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ | 185 | AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ |
179 | AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ | 186 | AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ |
187 | AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */ | ||
180 | 188 | ||
181 | /* ap->flags bits */ | 189 | /* ap->flags bits */ |
182 | AHCI_FLAG_NO_HOTPLUG = (1 << 24), /* ignore PxSERR.DIAG.N */ | ||
183 | 190 | ||
184 | AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 191 | AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
185 | ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | | 192 | ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | |
186 | ATA_FLAG_ACPI_SATA | ATA_FLAG_AN, | 193 | ATA_FLAG_ACPI_SATA | ATA_FLAG_AN | |
194 | ATA_FLAG_IPM, | ||
187 | AHCI_LFLAG_COMMON = ATA_LFLAG_SKIP_D2H_BSY, | 195 | AHCI_LFLAG_COMMON = ATA_LFLAG_SKIP_D2H_BSY, |
188 | }; | 196 | }; |
189 | 197 | ||
@@ -241,6 +249,7 @@ static void ahci_pmp_attach(struct ata_port *ap); | |||
241 | static void ahci_pmp_detach(struct ata_port *ap); | 249 | static void ahci_pmp_detach(struct ata_port *ap); |
242 | static void ahci_error_handler(struct ata_port *ap); | 250 | static void ahci_error_handler(struct ata_port *ap); |
243 | static void ahci_vt8251_error_handler(struct ata_port *ap); | 251 | static void ahci_vt8251_error_handler(struct ata_port *ap); |
252 | static void ahci_p5wdh_error_handler(struct ata_port *ap); | ||
244 | static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); | 253 | static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); |
245 | static int ahci_port_resume(struct ata_port *ap); | 254 | static int ahci_port_resume(struct ata_port *ap); |
246 | static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl); | 255 | static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl); |
@@ -252,6 +261,11 @@ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); | |||
252 | static int ahci_pci_device_resume(struct pci_dev *pdev); | 261 | static int ahci_pci_device_resume(struct pci_dev *pdev); |
253 | #endif | 262 | #endif |
254 | 263 | ||
264 | static struct class_device_attribute *ahci_shost_attrs[] = { | ||
265 | &class_device_attr_link_power_management_policy, | ||
266 | NULL | ||
267 | }; | ||
268 | |||
255 | static struct scsi_host_template ahci_sht = { | 269 | static struct scsi_host_template ahci_sht = { |
256 | .module = THIS_MODULE, | 270 | .module = THIS_MODULE, |
257 | .name = DRV_NAME, | 271 | .name = DRV_NAME, |
@@ -269,6 +283,7 @@ static struct scsi_host_template ahci_sht = { | |||
269 | .slave_configure = ata_scsi_slave_config, | 283 | .slave_configure = ata_scsi_slave_config, |
270 | .slave_destroy = ata_scsi_slave_destroy, | 284 | .slave_destroy = ata_scsi_slave_destroy, |
271 | .bios_param = ata_std_bios_param, | 285 | .bios_param = ata_std_bios_param, |
286 | .shost_attrs = ahci_shost_attrs, | ||
272 | }; | 287 | }; |
273 | 288 | ||
274 | static const struct ata_port_operations ahci_ops = { | 289 | static const struct ata_port_operations ahci_ops = { |
@@ -300,6 +315,8 @@ static const struct ata_port_operations ahci_ops = { | |||
300 | .port_suspend = ahci_port_suspend, | 315 | .port_suspend = ahci_port_suspend, |
301 | .port_resume = ahci_port_resume, | 316 | .port_resume = ahci_port_resume, |
302 | #endif | 317 | #endif |
318 | .enable_pm = ahci_enable_alpm, | ||
319 | .disable_pm = ahci_disable_alpm, | ||
303 | 320 | ||
304 | .port_start = ahci_port_start, | 321 | .port_start = ahci_port_start, |
305 | .port_stop = ahci_port_stop, | 322 | .port_stop = ahci_port_stop, |
@@ -339,6 +356,40 @@ static const struct ata_port_operations ahci_vt8251_ops = { | |||
339 | .port_stop = ahci_port_stop, | 356 | .port_stop = ahci_port_stop, |
340 | }; | 357 | }; |
341 | 358 | ||
359 | static const struct ata_port_operations ahci_p5wdh_ops = { | ||
360 | .check_status = ahci_check_status, | ||
361 | .check_altstatus = ahci_check_status, | ||
362 | .dev_select = ata_noop_dev_select, | ||
363 | |||
364 | .tf_read = ahci_tf_read, | ||
365 | |||
366 | .qc_defer = sata_pmp_qc_defer_cmd_switch, | ||
367 | .qc_prep = ahci_qc_prep, | ||
368 | .qc_issue = ahci_qc_issue, | ||
369 | |||
370 | .irq_clear = ahci_irq_clear, | ||
371 | |||
372 | .scr_read = ahci_scr_read, | ||
373 | .scr_write = ahci_scr_write, | ||
374 | |||
375 | .freeze = ahci_freeze, | ||
376 | .thaw = ahci_thaw, | ||
377 | |||
378 | .error_handler = ahci_p5wdh_error_handler, | ||
379 | .post_internal_cmd = ahci_post_internal_cmd, | ||
380 | |||
381 | .pmp_attach = ahci_pmp_attach, | ||
382 | .pmp_detach = ahci_pmp_detach, | ||
383 | |||
384 | #ifdef CONFIG_PM | ||
385 | .port_suspend = ahci_port_suspend, | ||
386 | .port_resume = ahci_port_resume, | ||
387 | #endif | ||
388 | |||
389 | .port_start = ahci_port_start, | ||
390 | .port_stop = ahci_port_stop, | ||
391 | }; | ||
392 | |||
342 | #define AHCI_HFLAGS(flags) .private_data = (void *)(flags) | 393 | #define AHCI_HFLAGS(flags) .private_data = (void *)(flags) |
343 | 394 | ||
344 | static const struct ata_port_info ahci_port_info[] = { | 395 | static const struct ata_port_info ahci_port_info[] = { |
@@ -800,6 +851,130 @@ static void ahci_power_up(struct ata_port *ap) | |||
800 | writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD); | 851 | writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD); |
801 | } | 852 | } |
802 | 853 | ||
854 | static void ahci_disable_alpm(struct ata_port *ap) | ||
855 | { | ||
856 | struct ahci_host_priv *hpriv = ap->host->private_data; | ||
857 | void __iomem *port_mmio = ahci_port_base(ap); | ||
858 | u32 cmd; | ||
859 | struct ahci_port_priv *pp = ap->private_data; | ||
860 | |||
861 | /* IPM bits should be disabled by libata-core */ | ||
862 | /* get the existing command bits */ | ||
863 | cmd = readl(port_mmio + PORT_CMD); | ||
864 | |||
865 | /* disable ALPM and ASP */ | ||
866 | cmd &= ~PORT_CMD_ASP; | ||
867 | cmd &= ~PORT_CMD_ALPE; | ||
868 | |||
869 | /* force the interface back to active */ | ||
870 | cmd |= PORT_CMD_ICC_ACTIVE; | ||
871 | |||
872 | /* write out new cmd value */ | ||
873 | writel(cmd, port_mmio + PORT_CMD); | ||
874 | cmd = readl(port_mmio + PORT_CMD); | ||
875 | |||
876 | /* wait 10ms to be sure we've come out of any low power state */ | ||
877 | msleep(10); | ||
878 | |||
879 | /* clear out any PhyRdy stuff from interrupt status */ | ||
880 | writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT); | ||
881 | |||
882 | /* go ahead and clean out PhyRdy Change from Serror too */ | ||
883 | ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18))); | ||
884 | |||
885 | /* | ||
886 | 	 * Clear the flag that told us to ignore all PhyRdy | ||
887 | 	 * state changes, so hotplug handling resumes | ||
888 | */ | ||
889 | hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG; | ||
890 | |||
891 | /* | ||
892 | * Enable interrupts on Phy Ready. | ||
893 | */ | ||
894 | pp->intr_mask |= PORT_IRQ_PHYRDY; | ||
895 | writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); | ||
896 | |||
897 | /* | ||
898 | * don't change the link pm policy - we can be called | ||
899 | 	 * just to turn off link pm temporarily | ||
900 | */ | ||
901 | } | ||
902 | |||
903 | static int ahci_enable_alpm(struct ata_port *ap, | ||
904 | enum link_pm policy) | ||
905 | { | ||
906 | struct ahci_host_priv *hpriv = ap->host->private_data; | ||
907 | void __iomem *port_mmio = ahci_port_base(ap); | ||
908 | u32 cmd; | ||
909 | struct ahci_port_priv *pp = ap->private_data; | ||
910 | u32 asp; | ||
911 | |||
912 | /* Make sure the host is capable of link power management */ | ||
913 | if (!(hpriv->cap & HOST_CAP_ALPM)) | ||
914 | return -EINVAL; | ||
915 | |||
916 | switch (policy) { | ||
917 | case MAX_PERFORMANCE: | ||
918 | case NOT_AVAILABLE: | ||
919 | /* | ||
920 | * if we came here with NOT_AVAILABLE, | ||
921 | * it just means this is the first time we | ||
922 | * have tried to enable - default to max performance, | ||
923 | * and let the user go to lower power modes on request. | ||
924 | */ | ||
925 | ahci_disable_alpm(ap); | ||
926 | return 0; | ||
927 | case MIN_POWER: | ||
928 | /* configure HBA to enter SLUMBER */ | ||
929 | asp = PORT_CMD_ASP; | ||
930 | break; | ||
931 | case MEDIUM_POWER: | ||
932 | /* configure HBA to enter PARTIAL */ | ||
933 | asp = 0; | ||
934 | break; | ||
935 | default: | ||
936 | return -EINVAL; | ||
937 | } | ||
938 | |||
939 | /* | ||
940 | * Disable interrupts on Phy Ready. This keeps us from | ||
941 | 	 * getting woken up due to spurious phy ready interrupts. | ||
942 | * TBD - Hot plug should be done via polling now, is | ||
943 | * that even supported? | ||
944 | */ | ||
945 | pp->intr_mask &= ~PORT_IRQ_PHYRDY; | ||
946 | writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); | ||
947 | |||
948 | /* | ||
949 | * Set a flag to indicate that we should ignore all PhyRdy | ||
950 | * state changes since these can happen now whenever we | ||
951 | * change link state | ||
952 | */ | ||
953 | hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG; | ||
954 | |||
955 | /* get the existing command bits */ | ||
956 | cmd = readl(port_mmio + PORT_CMD); | ||
957 | |||
958 | /* | ||
959 | * Set ASP based on Policy | ||
960 | */ | ||
961 | cmd |= asp; | ||
962 | |||
963 | /* | ||
964 | * Setting this bit will instruct the HBA to aggressively | ||
965 | * enter a lower power link state when it's appropriate and | ||
966 | * based on the value set above for ASP | ||
967 | */ | ||
968 | cmd |= PORT_CMD_ALPE; | ||
969 | |||
970 | /* write out new cmd value */ | ||
971 | writel(cmd, port_mmio + PORT_CMD); | ||
972 | cmd = readl(port_mmio + PORT_CMD); | ||
973 | |||
974 | /* IPM bits should be set by libata-core */ | ||
975 | return 0; | ||
976 | } | ||
977 | |||
803 | #ifdef CONFIG_PM | 978 | #ifdef CONFIG_PM |
804 | static void ahci_power_down(struct ata_port *ap) | 979 | static void ahci_power_down(struct ata_port *ap) |
805 | { | 980 | { |
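Both ALPM helpers above finish each PORT_CMD update with a writel() immediately followed by a readl() of the same register. That is the usual posted-write flush idiom: the read forces the write out to the HBA before the driver sleeps or touches interrupt state. A hypothetical helper (not part of the patch) capturing the pattern:

static inline void example_port_cmd_update(void __iomem *port_mmio, u32 cmd)
{
	writel(cmd, port_mmio + PORT_CMD);
	(void)readl(port_mmio + PORT_CMD);	/* flush the posted MMIO write */
}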
@@ -862,8 +1037,10 @@ static int ahci_reset_controller(struct ata_host *host) | |||
862 | * AHCI-specific, such as HOST_RESET. | 1037 | * AHCI-specific, such as HOST_RESET. |
863 | */ | 1038 | */ |
864 | tmp = readl(mmio + HOST_CTL); | 1039 | tmp = readl(mmio + HOST_CTL); |
865 | if (!(tmp & HOST_AHCI_EN)) | 1040 | if (!(tmp & HOST_AHCI_EN)) { |
866 | writel(tmp | HOST_AHCI_EN, mmio + HOST_CTL); | 1041 | tmp |= HOST_AHCI_EN; |
1042 | writel(tmp, mmio + HOST_CTL); | ||
1043 | } | ||
867 | 1044 | ||
868 | /* global controller reset */ | 1045 | /* global controller reset */ |
869 | if ((tmp & HOST_RESET) == 0) { | 1046 | if ((tmp & HOST_RESET) == 0) { |
@@ -1117,15 +1294,8 @@ static int ahci_do_softreset(struct ata_link *link, unsigned int *class, | |||
1117 | tf.ctl &= ~ATA_SRST; | 1294 | tf.ctl &= ~ATA_SRST; |
1118 | ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0); | 1295 | ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0); |
1119 | 1296 | ||
1120 | /* spec mandates ">= 2ms" before checking status. | 1297 | /* wait a while before checking status */ |
1121 | * We wait 150ms, because that was the magic delay used for | 1298 | ata_wait_after_reset(ap, deadline); |
1122 | * ATAPI devices in Hale Landis's ATADRVR, for the period of time | ||
1123 | * between when the ATA command register is written, and then | ||
1124 | * status is checked. Because waiting for "a while" before | ||
1125 | * checking status is fine, post SRST, we perform this magic | ||
1126 | * delay here as well. | ||
1127 | */ | ||
1128 | msleep(150); | ||
1129 | 1299 | ||
1130 | rc = ata_wait_ready(ap, deadline); | 1300 | rc = ata_wait_ready(ap, deadline); |
1131 | /* link occupied, -ENODEV too is an error */ | 1301 | /* link occupied, -ENODEV too is an error */ |
@@ -1213,6 +1383,53 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, | |||
1213 | return rc ?: -EAGAIN; | 1383 | return rc ?: -EAGAIN; |
1214 | } | 1384 | } |
1215 | 1385 | ||
1386 | static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, | ||
1387 | unsigned long deadline) | ||
1388 | { | ||
1389 | struct ata_port *ap = link->ap; | ||
1390 | struct ahci_port_priv *pp = ap->private_data; | ||
1391 | u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; | ||
1392 | struct ata_taskfile tf; | ||
1393 | int rc; | ||
1394 | |||
1395 | ahci_stop_engine(ap); | ||
1396 | |||
1397 | /* clear D2H reception area to properly wait for D2H FIS */ | ||
1398 | ata_tf_init(link->device, &tf); | ||
1399 | tf.command = 0x80; | ||
1400 | ata_tf_to_fis(&tf, 0, 0, d2h_fis); | ||
1401 | |||
1402 | rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), | ||
1403 | deadline); | ||
1404 | |||
1405 | ahci_start_engine(ap); | ||
1406 | |||
1407 | if (rc || ata_link_offline(link)) | ||
1408 | return rc; | ||
1409 | |||
1410 | /* spec mandates ">= 2ms" before checking status */ | ||
1411 | msleep(150); | ||
1412 | |||
1413 | /* The pseudo configuration device on SIMG4726 attached to | ||
1414 | * ASUS P5W-DH Deluxe doesn't send signature FIS after | ||
1415 | * hardreset if no device is attached to the first downstream | ||
1416 | * port && the pseudo device locks up on SRST w/ PMP==0. To | ||
1417 | * work around this, wait for !BSY only briefly. If BSY isn't | ||
1418 | * cleared, perform CLO and proceed to IDENTIFY (achieved by | ||
1419 | * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA). | ||
1420 | * | ||
1421 | * Wait for two seconds. Devices attached to downstream port | ||
1422 | * which can't process the following IDENTIFY after this will | ||
1423 | * have to be reset again. For most cases, this should | ||
1424 | 	 * suffice while making probing snappy enough. | ||
1425 | */ | ||
1426 | rc = ata_wait_ready(ap, jiffies + 2 * HZ); | ||
1427 | if (rc) | ||
1428 | ahci_kick_engine(ap, 0); | ||
1429 | |||
1430 | return 0; | ||
1431 | } | ||
1432 | |||
1216 | static void ahci_postreset(struct ata_link *link, unsigned int *class) | 1433 | static void ahci_postreset(struct ata_link *link, unsigned int *class) |
1217 | { | 1434 | { |
1218 | struct ata_port *ap = link->ap; | 1435 | struct ata_port *ap = link->ap; |
@@ -1426,6 +1643,17 @@ static void ahci_port_intr(struct ata_port *ap) | |||
1426 | if (unlikely(resetting)) | 1643 | if (unlikely(resetting)) |
1427 | status &= ~PORT_IRQ_BAD_PMP; | 1644 | status &= ~PORT_IRQ_BAD_PMP; |
1428 | 1645 | ||
1646 | 	/* If we are getting PhyRdy, this is | ||
1647 | 	 * just a power state change; we should | ||
1648 | 	 * clear it out, along with the PhyRdy/Comm | ||
1649 | 	 * Wake bits from Serror | ||
1650 | */ | ||
1651 | if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) && | ||
1652 | (status & PORT_IRQ_PHYRDY)) { | ||
1653 | status &= ~PORT_IRQ_PHYRDY; | ||
1654 | ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18))); | ||
1655 | } | ||
1656 | |||
1429 | if (unlikely(status & PORT_IRQ_ERROR)) { | 1657 | if (unlikely(status & PORT_IRQ_ERROR)) { |
1430 | ahci_error_intr(ap, status); | 1658 | ahci_error_intr(ap, status); |
1431 | return; | 1659 | return; |
@@ -1670,6 +1898,19 @@ static void ahci_vt8251_error_handler(struct ata_port *ap) | |||
1670 | ahci_postreset); | 1898 | ahci_postreset); |
1671 | } | 1899 | } |
1672 | 1900 | ||
1901 | static void ahci_p5wdh_error_handler(struct ata_port *ap) | ||
1902 | { | ||
1903 | if (!(ap->pflags & ATA_PFLAG_FROZEN)) { | ||
1904 | /* restart engine */ | ||
1905 | ahci_stop_engine(ap); | ||
1906 | ahci_start_engine(ap); | ||
1907 | } | ||
1908 | |||
1909 | /* perform recovery */ | ||
1910 | ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_p5wdh_hardreset, | ||
1911 | ahci_postreset); | ||
1912 | } | ||
1913 | |||
1673 | static void ahci_post_internal_cmd(struct ata_queued_cmd *qc) | 1914 | static void ahci_post_internal_cmd(struct ata_queued_cmd *qc) |
1674 | { | 1915 | { |
1675 | struct ata_port *ap = qc->ap; | 1916 | struct ata_port *ap = qc->ap; |
@@ -1955,6 +2196,51 @@ static void ahci_print_info(struct ata_host *host) | |||
1955 | ); | 2196 | ); |
1956 | } | 2197 | } |
1957 | 2198 | ||
2199 | /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is | ||
2200 | * hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't | ||
2201 | * support PMP and the 4726 either directly exports the device | ||
2202 | * attached to the first downstream port or acts as a hardware storage | ||
2203 | 	 * controller and emulates a single ATA device (can be RAID 0/1 or some | ||
2204 | * other configuration). | ||
2205 | * | ||
2206 | * When there's no device attached to the first downstream port of the | ||
2207 | * 4726, "Config Disk" appears, which is a pseudo ATA device to | ||
2208 | * configure the 4726. However, ATA emulation of the device is very | ||
2209 | * lame. It doesn't send signature D2H Reg FIS after the initial | ||
2210 | 	 * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues. | ||
2211 | * | ||
2212 | * The following function works around the problem by always using | ||
2213 | * hardreset on the port and not depending on receiving signature FIS | ||
2214 | * afterward. If signature FIS isn't received soon, ATA class is | ||
2215 | * assumed without follow-up softreset. | ||
2216 | */ | ||
2217 | static void ahci_p5wdh_workaround(struct ata_host *host) | ||
2218 | { | ||
2219 | static struct dmi_system_id sysids[] = { | ||
2220 | { | ||
2221 | .ident = "P5W DH Deluxe", | ||
2222 | .matches = { | ||
2223 | DMI_MATCH(DMI_SYS_VENDOR, | ||
2224 | "ASUSTEK COMPUTER INC"), | ||
2225 | DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"), | ||
2226 | }, | ||
2227 | }, | ||
2228 | { } | ||
2229 | }; | ||
2230 | struct pci_dev *pdev = to_pci_dev(host->dev); | ||
2231 | |||
2232 | if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) && | ||
2233 | dmi_check_system(sysids)) { | ||
2234 | struct ata_port *ap = host->ports[1]; | ||
2235 | |||
2236 | dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH " | ||
2237 | "Deluxe on-board SIMG4726 workaround\n"); | ||
2238 | |||
2239 | ap->ops = &ahci_p5wdh_ops; | ||
2240 | ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA; | ||
2241 | } | ||
2242 | } | ||
2243 | |||
1958 | static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 2244 | static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
1959 | { | 2245 | { |
1960 | static int printed_version; | 2246 | static int printed_version; |
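The workaround is keyed on both a DMI match and the PCI address, so it cannot fire on a plug-in AHCI card that happens to sit in the same system. As a worked detail, PCI_DEVFN() packs the slot and function numbers; the helper below is purely illustrative and not part of the patch:

static void example_p5wdh_devfn(void)
{
	/* PCI_DEVFN(slot, fn) == (slot << 3) | fn, so 00:1f.2 is devfn 0xfa */
	BUILD_BUG_ON(PCI_DEVFN(0x1f, 2) != 0xfa);
}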
@@ -2015,6 +2301,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2015 | ata_port_pbar_desc(ap, AHCI_PCI_BAR, | 2301 | ata_port_pbar_desc(ap, AHCI_PCI_BAR, |
2016 | 0x100 + ap->port_no * 0x80, "port"); | 2302 | 0x100 + ap->port_no * 0x80, "port"); |
2017 | 2303 | ||
2304 | /* set initial link pm policy */ | ||
2305 | ap->pm_policy = NOT_AVAILABLE; | ||
2306 | |||
2018 | /* standard SATA port setup */ | 2307 | /* standard SATA port setup */ |
2019 | if (hpriv->port_map & (1 << i)) | 2308 | if (hpriv->port_map & (1 << i)) |
2020 | ap->ioaddr.cmd_addr = port_mmio; | 2309 | ap->ioaddr.cmd_addr = port_mmio; |
@@ -2024,6 +2313,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2024 | ap->ops = &ata_dummy_port_ops; | 2313 | ap->ops = &ata_dummy_port_ops; |
2025 | } | 2314 | } |
2026 | 2315 | ||
2316 | /* apply workaround for ASUS P5W DH Deluxe mainboard */ | ||
2317 | ahci_p5wdh_workaround(host); | ||
2318 | |||
2027 | /* initialize adapter */ | 2319 | /* initialize adapter */ |
2028 | rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64); | 2320 | rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64); |
2029 | if (rc) | 2321 | if (rc) |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 2d147b51c978..63035d71a61a 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -68,7 +68,8 @@ const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 }; | |||
68 | static unsigned int ata_dev_init_params(struct ata_device *dev, | 68 | static unsigned int ata_dev_init_params(struct ata_device *dev, |
69 | u16 heads, u16 sectors); | 69 | u16 heads, u16 sectors); |
70 | static unsigned int ata_dev_set_xfermode(struct ata_device *dev); | 70 | static unsigned int ata_dev_set_xfermode(struct ata_device *dev); |
71 | static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable); | 71 | static unsigned int ata_dev_set_feature(struct ata_device *dev, |
72 | u8 enable, u8 feature); | ||
72 | static void ata_dev_xfermask(struct ata_device *dev); | 73 | static void ata_dev_xfermask(struct ata_device *dev); |
73 | static unsigned long ata_dev_blacklisted(const struct ata_device *dev); | 74 | static unsigned long ata_dev_blacklisted(const struct ata_device *dev); |
74 | 75 | ||
@@ -619,6 +620,177 @@ void ata_dev_disable(struct ata_device *dev) | |||
619 | } | 620 | } |
620 | } | 621 | } |
621 | 622 | ||
623 | static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy) | ||
624 | { | ||
625 | struct ata_link *link = dev->link; | ||
626 | struct ata_port *ap = link->ap; | ||
627 | u32 scontrol; | ||
628 | unsigned int err_mask; | ||
629 | int rc; | ||
630 | |||
631 | /* | ||
632 | * disallow DIPM for drivers which haven't set | ||
633 | * ATA_FLAG_IPM. This is because when DIPM is enabled, | ||
634 | * phy ready will be set in the interrupt status on | ||
635 | * state changes, which will cause some drivers to | ||
636 | * think there are errors - additionally drivers will | ||
637 | * need to disable hot plug. | ||
638 | */ | ||
639 | if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) { | ||
640 | ap->pm_policy = NOT_AVAILABLE; | ||
641 | return -EINVAL; | ||
642 | } | ||
643 | |||
644 | /* | ||
645 | * For DIPM, we will only enable it for the | ||
646 | * min_power setting. | ||
647 | * | ||
648 | 	 * Why? Because disks are too stupid to know that | ||
649 | 	 * if the host rejects a request to go to SLUMBER | ||
650 | 	 * they should retry at PARTIAL; instead they | ||
651 | 	 * just give up. So, for medium_power to | ||
652 | 	 * work at all, we need to only allow HIPM. | ||
653 | */ | ||
654 | rc = sata_scr_read(link, SCR_CONTROL, &scontrol); | ||
655 | if (rc) | ||
656 | return rc; | ||
657 | |||
658 | switch (policy) { | ||
659 | case MIN_POWER: | ||
660 | /* no restrictions on IPM transitions */ | ||
661 | scontrol &= ~(0x3 << 8); | ||
662 | rc = sata_scr_write(link, SCR_CONTROL, scontrol); | ||
663 | if (rc) | ||
664 | return rc; | ||
665 | |||
666 | /* enable DIPM */ | ||
667 | if (dev->flags & ATA_DFLAG_DIPM) | ||
668 | err_mask = ata_dev_set_feature(dev, | ||
669 | SETFEATURES_SATA_ENABLE, SATA_DIPM); | ||
670 | break; | ||
671 | case MEDIUM_POWER: | ||
672 | /* allow IPM to PARTIAL */ | ||
673 | scontrol &= ~(0x1 << 8); | ||
674 | scontrol |= (0x2 << 8); | ||
675 | rc = sata_scr_write(link, SCR_CONTROL, scontrol); | ||
676 | if (rc) | ||
677 | return rc; | ||
678 | |||
679 | /* disable DIPM */ | ||
680 | if (ata_dev_enabled(dev) && (dev->flags & ATA_DFLAG_DIPM)) | ||
681 | err_mask = ata_dev_set_feature(dev, | ||
682 | SETFEATURES_SATA_DISABLE, SATA_DIPM); | ||
683 | break; | ||
684 | case NOT_AVAILABLE: | ||
685 | case MAX_PERFORMANCE: | ||
686 | /* disable all IPM transitions */ | ||
687 | scontrol |= (0x3 << 8); | ||
688 | rc = sata_scr_write(link, SCR_CONTROL, scontrol); | ||
689 | if (rc) | ||
690 | return rc; | ||
691 | |||
692 | /* disable DIPM */ | ||
693 | if (ata_dev_enabled(dev) && (dev->flags & ATA_DFLAG_DIPM)) | ||
694 | err_mask = ata_dev_set_feature(dev, | ||
695 | SETFEATURES_SATA_DISABLE, SATA_DIPM); | ||
696 | break; | ||
697 | } | ||
698 | |||
699 | /* FIXME: handle SET FEATURES failure */ | ||
700 | (void) err_mask; | ||
701 | |||
702 | return 0; | ||
703 | } | ||
704 | |||
705 | /** | ||
706 | * ata_dev_enable_pm - enable SATA interface power management | ||
707 | 	 * @dev: device to enable ipm for | ||
708 | 	 * @policy: the link power management policy | ||
709 | * | ||
710 | * Enable SATA Interface power management. This will enable | ||
711 | * Device Interface Power Management (DIPM) for min_power | ||
712 | * policy, and then call driver specific callbacks for | ||
713 | * enabling Host Initiated Power management. | ||
714 | * | ||
715 | * Locking: Caller. | ||
716 | * Returns: -EINVAL if IPM is not supported, 0 otherwise. | ||
717 | */ | ||
718 | void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy) | ||
719 | { | ||
720 | int rc = 0; | ||
721 | struct ata_port *ap = dev->link->ap; | ||
722 | |||
723 | /* set HIPM first, then DIPM */ | ||
724 | if (ap->ops->enable_pm) | ||
725 | rc = ap->ops->enable_pm(ap, policy); | ||
726 | if (rc) | ||
727 | goto enable_pm_out; | ||
728 | rc = ata_dev_set_dipm(dev, policy); | ||
729 | |||
730 | enable_pm_out: | ||
731 | if (rc) | ||
732 | ap->pm_policy = MAX_PERFORMANCE; | ||
733 | else | ||
734 | ap->pm_policy = policy; | ||
735 | return /* rc */; /* hopefully we can use 'rc' eventually */ | ||
736 | } | ||
737 | |||
738 | /** | ||
739 | * ata_dev_disable_pm - disable SATA interface power management | ||
740 | 	 * @dev: device to disable ipm for | ||
741 | * | ||
742 | * Disable SATA Interface power management. This will disable | ||
743 | * Device Interface Power Management (DIPM) without changing | ||
744 | 	 * policy, then call driver specific callbacks for disabling Host | ||
745 | * Initiated Power management. | ||
746 | * | ||
747 | * Locking: Caller. | ||
748 | * Returns: void | ||
749 | */ | ||
750 | static void ata_dev_disable_pm(struct ata_device *dev) | ||
751 | { | ||
752 | struct ata_port *ap = dev->link->ap; | ||
753 | |||
754 | ata_dev_set_dipm(dev, MAX_PERFORMANCE); | ||
755 | if (ap->ops->disable_pm) | ||
756 | ap->ops->disable_pm(ap); | ||
757 | } | ||
758 | |||
759 | void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy) | ||
760 | { | ||
761 | ap->pm_policy = policy; | ||
762 | ap->link.eh_info.action |= ATA_EHI_LPM; | ||
763 | ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY; | ||
764 | ata_port_schedule_eh(ap); | ||
765 | } | ||
766 | |||
767 | static void ata_lpm_enable(struct ata_host *host) | ||
768 | { | ||
769 | struct ata_link *link; | ||
770 | struct ata_port *ap; | ||
771 | struct ata_device *dev; | ||
772 | int i; | ||
773 | |||
774 | for (i = 0; i < host->n_ports; i++) { | ||
775 | ap = host->ports[i]; | ||
776 | ata_port_for_each_link(link, ap) { | ||
777 | ata_link_for_each_dev(dev, link) | ||
778 | ata_dev_disable_pm(dev); | ||
779 | } | ||
780 | } | ||
781 | } | ||
782 | |||
783 | static void ata_lpm_disable(struct ata_host *host) | ||
784 | { | ||
785 | int i; | ||
786 | |||
787 | for (i = 0; i < host->n_ports; i++) { | ||
788 | struct ata_port *ap = host->ports[i]; | ||
789 | ata_lpm_schedule(ap, ap->pm_policy); | ||
790 | } | ||
791 | } | ||
792 | |||
793 | |||
622 | /** | 794 | /** |
623 | * ata_devchk - PATA device presence detection | 795 | * ata_devchk - PATA device presence detection |
624 | * @ap: ATA channel to examine | 796 | * @ap: ATA channel to examine |
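The three policy branches in ata_dev_set_dipm() above all manipulate the same two-bit IPM field in SControl (bits 9:8). A sketch of the encoding assumed by that code, plus a hypothetical helper that writes one mask (the function name is invented for illustration):

/*
 * SControl.IPM (bits 9:8), as used above:
 *   0x0 - no restrictions (PARTIAL and SLUMBER allowed)  -> MIN_POWER
 *   0x2 - transitions to SLUMBER disabled                -> MEDIUM_POWER
 *   0x3 - PARTIAL and SLUMBER both disabled              -> MAX_PERFORMANCE
 */
static int example_set_ipm_mask(struct ata_link *link, u32 ipm)
{
	u32 scontrol;
	int rc = sata_scr_read(link, SCR_CONTROL, &scontrol);

	if (rc)
		return rc;
	scontrol = (scontrol & ~(0x3 << 8)) | ((ipm & 0x3) << 8);
	return sata_scr_write(link, SCR_CONTROL, scontrol);
}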
@@ -1799,13 +1971,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, | |||
1799 | * SET_FEATURES spin-up subcommand before it will accept | 1971 | * SET_FEATURES spin-up subcommand before it will accept |
1800 | * anything other than the original IDENTIFY command. | 1972 | * anything other than the original IDENTIFY command. |
1801 | */ | 1973 | */ |
1802 | ata_tf_init(dev, &tf); | 1974 | err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0); |
1803 | tf.command = ATA_CMD_SET_FEATURES; | ||
1804 | tf.feature = SETFEATURES_SPINUP; | ||
1805 | tf.protocol = ATA_PROT_NODATA; | ||
1806 | tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; | ||
1807 | err_mask = ata_exec_internal(dev, &tf, NULL, | ||
1808 | DMA_NONE, NULL, 0, 0); | ||
1809 | if (err_mask && id[2] != 0x738c) { | 1975 | if (err_mask && id[2] != 0x738c) { |
1810 | rc = -EIO; | 1976 | rc = -EIO; |
1811 | reason = "SPINUP failed"; | 1977 | reason = "SPINUP failed"; |
@@ -2075,7 +2241,8 @@ int ata_dev_configure(struct ata_device *dev) | |||
2075 | unsigned int err_mask; | 2241 | unsigned int err_mask; |
2076 | 2242 | ||
2077 | /* issue SET feature command to turn this on */ | 2243 | /* issue SET feature command to turn this on */ |
2078 | err_mask = ata_dev_set_AN(dev, SETFEATURES_SATA_ENABLE); | 2244 | err_mask = ata_dev_set_feature(dev, |
2245 | SETFEATURES_SATA_ENABLE, SATA_AN); | ||
2079 | if (err_mask) | 2246 | if (err_mask) |
2080 | ata_dev_printk(dev, KERN_ERR, | 2247 | ata_dev_printk(dev, KERN_ERR, |
2081 | "failed to enable ATAPI AN " | 2248 | "failed to enable ATAPI AN " |
@@ -2105,6 +2272,13 @@ int ata_dev_configure(struct ata_device *dev) | |||
2105 | if (dev->flags & ATA_DFLAG_LBA48) | 2272 | if (dev->flags & ATA_DFLAG_LBA48) |
2106 | dev->max_sectors = ATA_MAX_SECTORS_LBA48; | 2273 | dev->max_sectors = ATA_MAX_SECTORS_LBA48; |
2107 | 2274 | ||
2275 | if (!(dev->horkage & ATA_HORKAGE_IPM)) { | ||
2276 | if (ata_id_has_hipm(dev->id)) | ||
2277 | dev->flags |= ATA_DFLAG_HIPM; | ||
2278 | if (ata_id_has_dipm(dev->id)) | ||
2279 | dev->flags |= ATA_DFLAG_DIPM; | ||
2280 | } | ||
2281 | |||
2108 | if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { | 2282 | if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { |
2109 | /* Let the user know. We don't want to disallow opens for | 2283 | /* Let the user know. We don't want to disallow opens for |
2110 | rescue purposes, or in case the vendor is just a blithering | 2284 | rescue purposes, or in case the vendor is just a blithering |
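ata_id_has_hipm() and ata_id_has_dipm() are not shown in this excerpt; the real helpers live in <linux/ata.h>. A sketch of what they plausibly test, assuming the usual SATA capability words in the IDENTIFY data (word 76 for host-initiated, word 78 for device-initiated power management; the exact bit positions below are an assumption):

static inline int example_id_has_hipm(const u16 *id)
{
	u16 val = id[76];

	if (val == 0 || val == 0xffff)	/* SATA capability words not valid */
		return 0;
	return val & (1 << 9);
}

static inline int example_id_has_dipm(const u16 *id)
{
	u16 val = id[78];

	if (val == 0 || val == 0xffff)
		return 0;
	return val & (1 << 3);
}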
@@ -2130,6 +2304,13 @@ int ata_dev_configure(struct ata_device *dev) | |||
2130 | dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, | 2304 | dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, |
2131 | dev->max_sectors); | 2305 | dev->max_sectors); |
2132 | 2306 | ||
2307 | if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) { | ||
2308 | dev->horkage |= ATA_HORKAGE_IPM; | ||
2309 | |||
2310 | /* reset link pm_policy for this port to no pm */ | ||
2311 | ap->pm_policy = MAX_PERFORMANCE; | ||
2312 | } | ||
2313 | |||
2133 | if (ap->ops->dev_config) | 2314 | if (ap->ops->dev_config) |
2134 | ap->ops->dev_config(dev); | 2315 | ap->ops->dev_config(dev); |
2135 | 2316 | ||
@@ -2223,6 +2404,25 @@ int ata_bus_probe(struct ata_port *ap) | |||
2223 | tries[dev->devno] = ATA_PROBE_MAX_TRIES; | 2404 | tries[dev->devno] = ATA_PROBE_MAX_TRIES; |
2224 | 2405 | ||
2225 | retry: | 2406 | retry: |
2407 | ata_link_for_each_dev(dev, &ap->link) { | ||
2408 | /* If we issue an SRST then an ATA drive (not ATAPI) | ||
2409 | * may change configuration and be in PIO0 timing. If | ||
2410 | * we do a hard reset (or are coming from power on) | ||
2411 | * this is true for ATA or ATAPI. Until we've set a | ||
2412 | * suitable controller mode we should not touch the | ||
2413 | * bus as we may be talking too fast. | ||
2414 | */ | ||
2415 | dev->pio_mode = XFER_PIO_0; | ||
2416 | |||
2417 | /* If the controller has a pio mode setup function | ||
2418 | * then use it to set the chipset to rights. Don't | ||
2419 | * touch the DMA setup as that will be dealt with when | ||
2420 | * configuring devices. | ||
2421 | */ | ||
2422 | if (ap->ops->set_piomode) | ||
2423 | ap->ops->set_piomode(ap, dev); | ||
2424 | } | ||
2425 | |||
2226 | /* reset and determine device classes */ | 2426 | /* reset and determine device classes */ |
2227 | ap->ops->phy_reset(ap); | 2427 | ap->ops->phy_reset(ap); |
2228 | 2428 | ||
@@ -2238,12 +2438,6 @@ int ata_bus_probe(struct ata_port *ap) | |||
2238 | 2438 | ||
2239 | ata_port_probe(ap); | 2439 | ata_port_probe(ap); |
2240 | 2440 | ||
2241 | /* after the reset the device state is PIO 0 and the controller | ||
2242 | state is undefined. Record the mode */ | ||
2243 | |||
2244 | ata_link_for_each_dev(dev, &ap->link) | ||
2245 | dev->pio_mode = XFER_PIO_0; | ||
2246 | |||
2247 | /* read IDENTIFY page and configure devices. We have to do the identify | 2441 | /* read IDENTIFY page and configure devices. We have to do the identify |
2248 | specific sequence bass-ackwards so that PDIAG- is released by | 2442 | specific sequence bass-ackwards so that PDIAG- is released by |
2249 | the slave device */ | 2443 | the slave device */ |
@@ -2886,6 +3080,13 @@ static int ata_dev_set_mode(struct ata_device *dev) | |||
2886 | dev->pio_mode <= XFER_PIO_2) | 3080 | dev->pio_mode <= XFER_PIO_2) |
2887 | err_mask &= ~AC_ERR_DEV; | 3081 | err_mask &= ~AC_ERR_DEV; |
2888 | 3082 | ||
3083 | /* Early MWDMA devices do DMA but don't allow DMA mode setting. | ||
3084 | Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ | ||
3085 | if (dev->xfer_shift == ATA_SHIFT_MWDMA && | ||
3086 | dev->dma_mode == XFER_MW_DMA_0 && | ||
3087 | (dev->id[63] >> 8) & 1) | ||
3088 | err_mask &= ~AC_ERR_DEV; | ||
3089 | |||
2889 | if (err_mask) { | 3090 | if (err_mask) { |
2890 | ata_dev_printk(dev, KERN_ERR, "failed to set xfermode " | 3091 | ata_dev_printk(dev, KERN_ERR, "failed to set xfermode " |
2891 | "(err_mask=0x%x)\n", err_mask); | 3092 | "(err_mask=0x%x)\n", err_mask); |
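The (dev->id[63] >> 8) & 1 test above relies on the layout of IDENTIFY word 63, where the low byte lists the Multiword DMA modes the device supports and the high byte reports the mode currently selected. A small sketch (helper name invented for illustration):

static inline int example_mwdma0_selected(const u16 *id)
{
	/* word 63: bits 0-2 = MWDMA modes supported, bits 8-10 = mode selected */
	return (id[63] >> 8) & 1;	/* non-zero if MWDMA0 is already active */
}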
@@ -3115,6 +3316,55 @@ int ata_busy_sleep(struct ata_port *ap, | |||
3115 | } | 3316 | } |
3116 | 3317 | ||
3117 | /** | 3318 | /** |
3319 | * ata_wait_after_reset - wait before checking status after reset | ||
3320 | * @ap: port containing status register to be polled | ||
3321 | * @deadline: deadline jiffies for the operation | ||
3322 | * | ||
3323 | * After reset, we need to pause a while before reading status. | ||
3324 | * Also, certain combination of controller and device report 0xff | ||
3325 | * for some duration (e.g. until SATA PHY is up and running) | ||
3326 | * which is interpreted as empty port in ATA world. This | ||
3327 | * function also waits for such devices to get out of 0xff | ||
3328 | * status. | ||
3329 | * | ||
3330 | * LOCKING: | ||
3331 | * Kernel thread context (may sleep). | ||
3332 | */ | ||
3333 | void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline) | ||
3334 | { | ||
3335 | unsigned long until = jiffies + ATA_TMOUT_FF_WAIT; | ||
3336 | |||
3337 | if (time_before(until, deadline)) | ||
3338 | deadline = until; | ||
3339 | |||
3340 | /* Spec mandates ">= 2ms" before checking status. We wait | ||
3341 | * 150ms, because that was the magic delay used for ATAPI | ||
3342 | * devices in Hale Landis's ATADRVR, for the period of time | ||
3343 | * between when the ATA command register is written, and then | ||
3344 | * status is checked. Because waiting for "a while" before | ||
3345 | * checking status is fine, post SRST, we perform this magic | ||
3346 | * delay here as well. | ||
3347 | * | ||
3348 | * Old drivers/ide uses the 2mS rule and then waits for ready. | ||
3349 | */ | ||
3350 | msleep(150); | ||
3351 | |||
3352 | /* Wait for 0xff to clear. Some SATA devices take a long time | ||
3353 | * to clear 0xff after reset. For example, HHD424020F7SV00 | ||
3354 | 	 * iVDR needs >= 800ms, while Quantum GoVault needs even more | ||
3355 | * than that. | ||
3356 | */ | ||
3357 | while (1) { | ||
3358 | u8 status = ata_chk_status(ap); | ||
3359 | |||
3360 | if (status != 0xff || time_after(jiffies, deadline)) | ||
3361 | return; | ||
3362 | |||
3363 | msleep(50); | ||
3364 | } | ||
3365 | } | ||
3366 | |||
3367 | /** | ||
3118 | * ata_wait_ready - sleep until BSY clears, or timeout | 3368 | * ata_wait_ready - sleep until BSY clears, or timeout |
3119 | * @ap: port containing status register to be polled | 3369 | * @ap: port containing status register to be polled |
3120 | * @deadline: deadline jiffies for the operation | 3370 | * @deadline: deadline jiffies for the operation |
@@ -3220,8 +3470,6 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, | |||
3220 | unsigned long deadline) | 3470 | unsigned long deadline) |
3221 | { | 3471 | { |
3222 | struct ata_ioports *ioaddr = &ap->ioaddr; | 3472 | struct ata_ioports *ioaddr = &ap->ioaddr; |
3223 | struct ata_device *dev; | ||
3224 | int i = 0; | ||
3225 | 3473 | ||
3226 | DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); | 3474 | DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); |
3227 | 3475 | ||
@@ -3232,36 +3480,8 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, | |||
3232 | udelay(20); /* FIXME: flush */ | 3480 | udelay(20); /* FIXME: flush */ |
3233 | iowrite8(ap->ctl, ioaddr->ctl_addr); | 3481 | iowrite8(ap->ctl, ioaddr->ctl_addr); |
3234 | 3482 | ||
3235 | /* If we issued an SRST then an ATA drive (not ATAPI) | 3483 | /* wait a while before checking status */ |
3236 | * may have changed configuration and be in PIO0 timing. If | 3484 | ata_wait_after_reset(ap, deadline); |
3237 | * we did a hard reset (or are coming from power on) this is | ||
3238 | * true for ATA or ATAPI. Until we've set a suitable controller | ||
3239 | * mode we should not touch the bus as we may be talking too fast. | ||
3240 | */ | ||
3241 | |||
3242 | ata_link_for_each_dev(dev, &ap->link) | ||
3243 | dev->pio_mode = XFER_PIO_0; | ||
3244 | |||
3245 | /* If the controller has a pio mode setup function then use | ||
3246 | it to set the chipset to rights. Don't touch the DMA setup | ||
3247 | as that will be dealt with when revalidating */ | ||
3248 | if (ap->ops->set_piomode) { | ||
3249 | ata_link_for_each_dev(dev, &ap->link) | ||
3250 | if (devmask & (1 << i++)) | ||
3251 | ap->ops->set_piomode(ap, dev); | ||
3252 | } | ||
3253 | |||
3254 | /* spec mandates ">= 2ms" before checking status. | ||
3255 | * We wait 150ms, because that was the magic delay used for | ||
3256 | * ATAPI devices in Hale Landis's ATADRVR, for the period of time | ||
3257 | * between when the ATA command register is written, and then | ||
3258 | * status is checked. Because waiting for "a while" before | ||
3259 | * checking status is fine, post SRST, we perform this magic | ||
3260 | * delay here as well. | ||
3261 | * | ||
3262 | * Old drivers/ide uses the 2mS rule and then waits for ready | ||
3263 | */ | ||
3264 | msleep(150); | ||
3265 | 3485 | ||
3266 | /* Before we perform post reset processing we want to see if | 3486 | /* Before we perform post reset processing we want to see if |
3267 | * the bus shows 0xFF because the odd clown forgets the D7 | 3487 | * the bus shows 0xFF because the odd clown forgets the D7 |
@@ -3688,8 +3908,8 @@ int sata_std_hardreset(struct ata_link *link, unsigned int *class, | |||
3688 | return 0; | 3908 | return 0; |
3689 | } | 3909 | } |
3690 | 3910 | ||
3691 | /* wait a while before checking status, see SRST for more info */ | 3911 | /* wait a while before checking status */ |
3692 | msleep(150); | 3912 | ata_wait_after_reset(ap, deadline); |
3693 | 3913 | ||
3694 | /* If PMP is supported, we have to do follow-up SRST. Note | 3914 | /* If PMP is supported, we have to do follow-up SRST. Note |
3695 | * that some PMPs don't send D2H Reg FIS after hardreset at | 3915 | * that some PMPs don't send D2H Reg FIS after hardreset at |
@@ -3947,9 +4167,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
3947 | { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, | 4167 | { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, |
3948 | { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, | 4168 | { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, |
3949 | { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, | 4169 | { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, |
3950 | { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */ | ||
3951 | { "IOMEGA ZIP 250 ATAPI Floppy", | ||
3952 | NULL, ATA_HORKAGE_NODMA }, | ||
3953 | /* Odd clown on sil3726/4726 PMPs */ | 4170 | /* Odd clown on sil3726/4726 PMPs */ |
3954 | { "Config Disk", NULL, ATA_HORKAGE_NODMA | | 4171 | { "Config Disk", NULL, ATA_HORKAGE_NODMA | |
3955 | ATA_HORKAGE_SKIP_PM }, | 4172 | ATA_HORKAGE_SKIP_PM }, |
@@ -3992,6 +4209,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
3992 | { "ST3160812AS", "3.ADJ", ATA_HORKAGE_NONCQ, }, | 4209 | { "ST3160812AS", "3.ADJ", ATA_HORKAGE_NONCQ, }, |
3993 | { "ST980813AS", "3.ADB", ATA_HORKAGE_NONCQ, }, | 4210 | { "ST980813AS", "3.ADB", ATA_HORKAGE_NONCQ, }, |
3994 | { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, }, | 4211 | { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, }, |
4212 | { "Maxtor 7V300F0", "VA111900", ATA_HORKAGE_NONCQ, }, | ||
3995 | 4213 | ||
3996 | /* devices which puke on READ_NATIVE_MAX */ | 4214 | /* devices which puke on READ_NATIVE_MAX */ |
3997 | { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, | 4215 | { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, |
@@ -4007,7 +4225,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4007 | { } | 4225 | { } |
4008 | }; | 4226 | }; |
4009 | 4227 | ||
4010 | int strn_pattern_cmp(const char *patt, const char *name, int wildchar) | 4228 | static int strn_pattern_cmp(const char *patt, const char *name, int wildchar) |
4011 | { | 4229 | { |
4012 | const char *p; | 4230 | const char *p; |
4013 | int len; | 4231 | int len; |
@@ -4181,15 +4399,14 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev) | |||
4181 | DPRINTK("EXIT, err_mask=%x\n", err_mask); | 4399 | DPRINTK("EXIT, err_mask=%x\n", err_mask); |
4182 | return err_mask; | 4400 | return err_mask; |
4183 | } | 4401 | } |
4184 | |||
4185 | /** | 4402 | /** |
4186 | * ata_dev_set_AN - Issue SET FEATURES - SATA FEATURES | 4403 | * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES |
4187 | * @dev: Device to which command will be sent | 4404 | * @dev: Device to which command will be sent |
4188 | * @enable: Whether to enable or disable the feature | 4405 | * @enable: Whether to enable or disable the feature |
4406 | 	 * @feature: The feature to set, passed in the sector count field | ||
4189 | * | 4407 | * |
4190 | * Issue SET FEATURES - SATA FEATURES command to device @dev | 4408 | * Issue SET FEATURES - SATA FEATURES command to device @dev |
4191 | 	 * on port @ap with sector count set to indicate Asynchronous | 4409 | 	 * on port @ap with sector count set to @feature |
4192 | * Notification feature | ||
4193 | * | 4410 | * |
4194 | * LOCKING: | 4411 | * LOCKING: |
4195 | * PCI/etc. bus probe sem. | 4412 | * PCI/etc. bus probe sem. |
@@ -4197,7 +4414,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev) | |||
4197 | * RETURNS: | 4414 | * RETURNS: |
4198 | * 0 on success, AC_ERR_* mask otherwise. | 4415 | * 0 on success, AC_ERR_* mask otherwise. |
4199 | */ | 4416 | */ |
4200 | static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable) | 4417 | static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, |
4418 | u8 feature) | ||
4201 | { | 4419 | { |
4202 | struct ata_taskfile tf; | 4420 | struct ata_taskfile tf; |
4203 | unsigned int err_mask; | 4421 | unsigned int err_mask; |
@@ -4210,7 +4428,7 @@ static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable) | |||
4210 | tf.feature = enable; | 4428 | tf.feature = enable; |
4211 | tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; | 4429 | tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; |
4212 | tf.protocol = ATA_PROT_NODATA; | 4430 | tf.protocol = ATA_PROT_NODATA; |
4213 | tf.nsect = SATA_AN; | 4431 | tf.nsect = feature; |
4214 | 4432 | ||
4215 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); | 4433 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); |
4216 | 4434 | ||
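For reference, the call sites introduced elsewhere in this patch show how the generalized helper is used: the enable/disable subcommand goes into the feature register and the SATA feature identifier into the sector count.

/* examples taken from this patch */
err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, SATA_DIPM); /* DIPM on  */
err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, SATA_AN);   /* ATAPI AN */
err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);              /* spin-up  */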
@@ -4689,8 +4907,9 @@ static int ata_sg_setup(struct ata_queued_cmd *qc) | |||
4689 | * data in this function or read data in ata_sg_clean. | 4907 | * data in this function or read data in ata_sg_clean. |
4690 | */ | 4908 | */ |
4691 | offset = lsg->offset + lsg->length - qc->pad_len; | 4909 | offset = lsg->offset + lsg->length - qc->pad_len; |
4692 | sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT)); | 4910 | sg_init_table(psg, 1); |
4693 | psg->offset = offset_in_page(offset); | 4911 | sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT), |
4912 | qc->pad_len, offset_in_page(offset)); | ||
4694 | 4913 | ||
4695 | if (qc->tf.flags & ATA_TFLAG_WRITE) { | 4914 | if (qc->tf.flags & ATA_TFLAG_WRITE) { |
4696 | void *addr = kmap_atomic(sg_page(psg), KM_IRQ0); | 4915 | void *addr = kmap_atomic(sg_page(psg), KM_IRQ0); |
@@ -5594,6 +5813,9 @@ void ata_qc_complete(struct ata_queued_cmd *qc) | |||
5594 | * taken care of. | 5813 | * taken care of. |
5595 | */ | 5814 | */ |
5596 | if (ap->ops->error_handler) { | 5815 | if (ap->ops->error_handler) { |
5816 | struct ata_device *dev = qc->dev; | ||
5817 | struct ata_eh_info *ehi = &dev->link->eh_info; | ||
5818 | |||
5597 | WARN_ON(ap->pflags & ATA_PFLAG_FROZEN); | 5819 | WARN_ON(ap->pflags & ATA_PFLAG_FROZEN); |
5598 | 5820 | ||
5599 | if (unlikely(qc->err_mask)) | 5821 | if (unlikely(qc->err_mask)) |
@@ -5612,6 +5834,27 @@ void ata_qc_complete(struct ata_queued_cmd *qc) | |||
5612 | if (qc->flags & ATA_QCFLAG_RESULT_TF) | 5834 | if (qc->flags & ATA_QCFLAG_RESULT_TF) |
5613 | fill_result_tf(qc); | 5835 | fill_result_tf(qc); |
5614 | 5836 | ||
5837 | /* Some commands need post-processing after successful | ||
5838 | * completion. | ||
5839 | */ | ||
5840 | switch (qc->tf.command) { | ||
5841 | case ATA_CMD_SET_FEATURES: | ||
5842 | if (qc->tf.feature != SETFEATURES_WC_ON && | ||
5843 | qc->tf.feature != SETFEATURES_WC_OFF) | ||
5844 | break; | ||
5845 | /* fall through */ | ||
5846 | case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ | ||
5847 | case ATA_CMD_SET_MULTI: /* multi_count changed */ | ||
5848 | /* revalidate device */ | ||
5849 | ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE; | ||
5850 | ata_port_schedule_eh(ap); | ||
5851 | break; | ||
5852 | |||
5853 | case ATA_CMD_SLEEP: | ||
5854 | dev->flags |= ATA_DFLAG_SLEEPING; | ||
5855 | break; | ||
5856 | } | ||
5857 | |||
5615 | __ata_qc_complete(qc); | 5858 | __ata_qc_complete(qc); |
5616 | } else { | 5859 | } else { |
5617 | if (qc->flags & ATA_QCFLAG_EH_SCHEDULED) | 5860 | if (qc->flags & ATA_QCFLAG_EH_SCHEDULED) |
@@ -5749,6 +5992,14 @@ void ata_qc_issue(struct ata_queued_cmd *qc) | |||
5749 | qc->flags &= ~ATA_QCFLAG_DMAMAP; | 5992 | qc->flags &= ~ATA_QCFLAG_DMAMAP; |
5750 | } | 5993 | } |
5751 | 5994 | ||
5995 | /* if device is sleeping, schedule softreset and abort the link */ | ||
5996 | if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { | ||
5997 | link->eh_info.action |= ATA_EH_SOFTRESET; | ||
5998 | ata_ehi_push_desc(&link->eh_info, "waking up from sleep"); | ||
5999 | ata_link_abort(link); | ||
6000 | return; | ||
6001 | } | ||
6002 | |||
5752 | ap->ops->qc_prep(qc); | 6003 | ap->ops->qc_prep(qc); |
5753 | 6004 | ||
5754 | qc->err_mask |= ap->ops->qc_issue(qc); | 6005 | qc->err_mask |= ap->ops->qc_issue(qc); |
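A short flow sketch, pieced together from the hunks in this patch that touch ATA_DFLAG_SLEEPING, of how a drive put to sleep is woken up transparently:

/*
 * 1. ata_qc_complete():  the command was ATA_CMD_SLEEP
 *        dev->flags |= ATA_DFLAG_SLEEPING;
 *
 * 2. ata_qc_issue():     the next command finds the device asleep
 *        link->eh_info.action |= ATA_EH_SOFTRESET;
 *        ata_link_abort(link);            // EH retries the command
 *
 * 3. ata_eh_reset():     the reset wakes the drive
 *        dev->flags &= ~ATA_DFLAG_SLEEPING;
 */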
@@ -6296,6 +6547,12 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg) | |||
6296 | { | 6547 | { |
6297 | int rc; | 6548 | int rc; |
6298 | 6549 | ||
6550 | /* | ||
6551 | * disable link pm on all ports before requesting | ||
6552 | * any pm activity | ||
6553 | */ | ||
6554 | ata_lpm_enable(host); | ||
6555 | |||
6299 | rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); | 6556 | rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); |
6300 | if (rc == 0) | 6557 | if (rc == 0) |
6301 | host->dev->power.power_state = mesg; | 6558 | host->dev->power.power_state = mesg; |
@@ -6318,6 +6575,9 @@ void ata_host_resume(struct ata_host *host) | |||
6318 | ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET, | 6575 | ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET, |
6319 | ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); | 6576 | ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); |
6320 | host->dev->power.power_state = PMSG_ON; | 6577 | host->dev->power.power_state = PMSG_ON; |
6578 | |||
6579 | /* reenable link pm */ | ||
6580 | ata_lpm_disable(host); | ||
6321 | } | 6581 | } |
6322 | #endif | 6582 | #endif |
6323 | 6583 | ||
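The helper names in the suspend/resume hunks read backwards at first glance, so here is what the patch actually wires up, as a sketch:

/*
 * ata_host_suspend()
 *     ata_lpm_enable(host);    // walks every device and calls
 *                              // ata_dev_disable_pm(): DIPM off, HIPM hook
 *                              // disabled, so no link PM during suspend
 *
 * ata_host_resume()
 *     ata_lpm_disable(host);   // re-schedules each port's saved
 *                              // ap->pm_policy through EH (ATA_EHI_LPM)
 */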
@@ -6860,6 +7120,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) | |||
6860 | struct ata_port *ap = host->ports[i]; | 7120 | struct ata_port *ap = host->ports[i]; |
6861 | 7121 | ||
6862 | ata_scsi_scan_host(ap, 1); | 7122 | ata_scsi_scan_host(ap, 1); |
7123 | ata_lpm_schedule(ap, ap->pm_policy); | ||
6863 | } | 7124 | } |
6864 | 7125 | ||
6865 | return 0; | 7126 | return 0; |
@@ -6921,7 +7182,7 @@ int ata_host_activate(struct ata_host *host, int irq, | |||
6921 | * LOCKING: | 7182 | * LOCKING: |
6922 | * Kernel thread context (may sleep). | 7183 | * Kernel thread context (may sleep). |
6923 | */ | 7184 | */ |
6924 | void ata_port_detach(struct ata_port *ap) | 7185 | static void ata_port_detach(struct ata_port *ap) |
6925 | { | 7186 | { |
6926 | unsigned long flags; | 7187 | unsigned long flags; |
6927 | struct ata_link *link; | 7188 | struct ata_link *link; |
@@ -7256,7 +7517,6 @@ const struct ata_port_info ata_dummy_port_info = { | |||
7256 | * likely to change as new drivers are added and updated. | 7517 | * likely to change as new drivers are added and updated. |
7257 | * Do not depend on ABI/API stability. | 7518 | * Do not depend on ABI/API stability. |
7258 | */ | 7519 | */ |
7259 | |||
7260 | EXPORT_SYMBOL_GPL(sata_deb_timing_normal); | 7520 | EXPORT_SYMBOL_GPL(sata_deb_timing_normal); |
7261 | EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); | 7521 | EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); |
7262 | EXPORT_SYMBOL_GPL(sata_deb_timing_long); | 7522 | EXPORT_SYMBOL_GPL(sata_deb_timing_long); |
@@ -7326,6 +7586,7 @@ EXPORT_SYMBOL_GPL(ata_port_disable); | |||
7326 | EXPORT_SYMBOL_GPL(ata_ratelimit); | 7586 | EXPORT_SYMBOL_GPL(ata_ratelimit); |
7327 | EXPORT_SYMBOL_GPL(ata_wait_register); | 7587 | EXPORT_SYMBOL_GPL(ata_wait_register); |
7328 | EXPORT_SYMBOL_GPL(ata_busy_sleep); | 7588 | EXPORT_SYMBOL_GPL(ata_busy_sleep); |
7589 | EXPORT_SYMBOL_GPL(ata_wait_after_reset); | ||
7329 | EXPORT_SYMBOL_GPL(ata_wait_ready); | 7590 | EXPORT_SYMBOL_GPL(ata_wait_ready); |
7330 | EXPORT_SYMBOL_GPL(ata_port_queue_task); | 7591 | EXPORT_SYMBOL_GPL(ata_port_queue_task); |
7331 | EXPORT_SYMBOL_GPL(ata_scsi_ioctl); | 7592 | EXPORT_SYMBOL_GPL(ata_scsi_ioctl); |
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 93e2b545b439..fefea7470e51 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
@@ -2071,7 +2071,7 @@ int ata_eh_reset(struct ata_link *link, int classify, | |||
2071 | int try = 0; | 2071 | int try = 0; |
2072 | struct ata_device *dev; | 2072 | struct ata_device *dev; |
2073 | unsigned long deadline; | 2073 | unsigned long deadline; |
2074 | unsigned int action; | 2074 | unsigned int tmp_action; |
2075 | ata_reset_fn_t reset; | 2075 | ata_reset_fn_t reset; |
2076 | unsigned long flags; | 2076 | unsigned long flags; |
2077 | int rc; | 2077 | int rc; |
@@ -2083,17 +2083,36 @@ int ata_eh_reset(struct ata_link *link, int classify, | |||
2083 | 2083 | ||
2084 | ata_eh_about_to_do(link, NULL, ehc->i.action & ATA_EH_RESET_MASK); | 2084 | ata_eh_about_to_do(link, NULL, ehc->i.action & ATA_EH_RESET_MASK); |
2085 | 2085 | ||
2086 | ata_link_for_each_dev(dev, link) { | ||
2087 | /* If we issue an SRST then an ATA drive (not ATAPI) | ||
2088 | * may change configuration and be in PIO0 timing. If | ||
2089 | * we do a hard reset (or are coming from power on) | ||
2090 | * this is true for ATA or ATAPI. Until we've set a | ||
2091 | * suitable controller mode we should not touch the | ||
2092 | * bus as we may be talking too fast. | ||
2093 | */ | ||
2094 | dev->pio_mode = XFER_PIO_0; | ||
2095 | |||
2096 | /* If the controller has a pio mode setup function | ||
2097 | * then use it to set the chipset to rights. Don't | ||
2098 | * touch the DMA setup as that will be dealt with when | ||
2099 | * configuring devices. | ||
2100 | */ | ||
2101 | if (ap->ops->set_piomode) | ||
2102 | ap->ops->set_piomode(ap, dev); | ||
2103 | } | ||
2104 | |||
2086 | /* Determine which reset to use and record in ehc->i.action. | 2105 | /* Determine which reset to use and record in ehc->i.action. |
2087 | * prereset() may examine and modify it. | 2106 | * prereset() may examine and modify it. |
2088 | */ | 2107 | */ |
2089 | action = ehc->i.action; | ||
2090 | ehc->i.action &= ~ATA_EH_RESET_MASK; | ||
2091 | if (softreset && (!hardreset || (!(link->flags & ATA_LFLAG_NO_SRST) && | 2108 | if (softreset && (!hardreset || (!(link->flags & ATA_LFLAG_NO_SRST) && |
2092 | !sata_set_spd_needed(link) && | 2109 | !sata_set_spd_needed(link) && |
2093 | !(action & ATA_EH_HARDRESET)))) | 2110 | !(ehc->i.action & ATA_EH_HARDRESET)))) |
2094 | ehc->i.action |= ATA_EH_SOFTRESET; | 2111 | tmp_action = ATA_EH_SOFTRESET; |
2095 | else | 2112 | else |
2096 | ehc->i.action |= ATA_EH_HARDRESET; | 2113 | tmp_action = ATA_EH_HARDRESET; |
2114 | |||
2115 | ehc->i.action = (ehc->i.action & ~ATA_EH_RESET_MASK) | tmp_action; | ||
2097 | 2116 | ||
2098 | if (prereset) { | 2117 | if (prereset) { |
2099 | rc = prereset(link, jiffies + ATA_EH_PRERESET_TIMEOUT); | 2118 | rc = prereset(link, jiffies + ATA_EH_PRERESET_TIMEOUT); |
@@ -2208,9 +2227,11 @@ int ata_eh_reset(struct ata_link *link, int classify, | |||
2208 | ata_link_for_each_dev(dev, link) { | 2227 | ata_link_for_each_dev(dev, link) { |
2209 | /* After the reset, the device state is PIO 0 | 2228 | /* After the reset, the device state is PIO 0 |
2210 | * and the controller state is undefined. | 2229 | * and the controller state is undefined. |
2211 | * Record the mode. | 2230 | * Reset also wakes up drives from sleeping |
2231 | * mode. | ||
2212 | */ | 2232 | */ |
2213 | dev->pio_mode = XFER_PIO_0; | 2233 | dev->pio_mode = XFER_PIO_0; |
2234 | dev->flags &= ~ATA_DFLAG_SLEEPING; | ||
2214 | 2235 | ||
2215 | if (ata_link_offline(link)) | 2236 | if (ata_link_offline(link)) |
2216 | continue; | 2237 | continue; |
@@ -2416,7 +2437,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) | |||
2416 | /* give it just one more chance */ | 2437 | /* give it just one more chance */ |
2417 | ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); | 2438 | ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); |
2418 | case -EIO: | 2439 | case -EIO: |
2419 | if (ehc->tries[dev->devno] == 1) { | 2440 | if (ehc->tries[dev->devno] == 1 && dev->pio_mode > XFER_PIO_0) { |
2420 | /* This is the last chance, better to slow | 2441 | /* This is the last chance, better to slow |
2421 | * down than lose it. | 2442 | * down than lose it. |
2422 | */ | 2443 | */ |
@@ -2607,6 +2628,10 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
2607 | ehc->i.flags &= ~ATA_EHI_SETMODE; | 2628 | ehc->i.flags &= ~ATA_EHI_SETMODE; |
2608 | } | 2629 | } |
2609 | 2630 | ||
2631 | if (ehc->i.action & ATA_EHI_LPM) | ||
2632 | ata_link_for_each_dev(dev, link) | ||
2633 | ata_dev_enable_pm(dev, ap->pm_policy); | ||
2634 | |||
2610 | /* this link is okay now */ | 2635 | /* this link is okay now */ |
2611 | ehc->i.flags = 0; | 2636 | ehc->i.flags = 0; |
2612 | continue; | 2637 | continue; |
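An end-to-end sketch of how a policy change travels through the pieces added by this patch, from the sysfs store routine (added to libata-scsi.c below) down to the AHCI hooks:

/*
 * echo min_power > /sys/class/scsi_host/hostN/link_power_management_policy
 *
 *   ata_scsi_lpm_put()                        libata-scsi.c
 *     -> ata_lpm_schedule(ap, MIN_POWER)      sets ATA_EHI_LPM, kicks EH
 *       -> ata_eh_recover()                   sees ATA_EHI_LPM
 *         -> ata_dev_enable_pm(dev, policy)   per device on each link
 *           -> ap->ops->enable_pm()           e.g. ahci_enable_alpm()
 *           -> ata_dev_set_dipm()             SET FEATURES + SControl.IPM
 */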
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index f5d5420a1ba2..93bd36c19690 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
@@ -110,6 +110,74 @@ static struct scsi_transport_template ata_scsi_transport_template = { | |||
110 | }; | 110 | }; |
111 | 111 | ||
112 | 112 | ||
113 | static const struct { | ||
114 | enum link_pm value; | ||
115 | const char *name; | ||
116 | } link_pm_policy[] = { | ||
117 | { NOT_AVAILABLE, "max_performance" }, | ||
118 | { MIN_POWER, "min_power" }, | ||
119 | { MAX_PERFORMANCE, "max_performance" }, | ||
120 | { MEDIUM_POWER, "medium_power" }, | ||
121 | }; | ||
122 | |||
123 | const char *ata_scsi_lpm_get(enum link_pm policy) | ||
124 | { | ||
125 | int i; | ||
126 | |||
127 | for (i = 0; i < ARRAY_SIZE(link_pm_policy); i++) | ||
128 | if (link_pm_policy[i].value == policy) | ||
129 | return link_pm_policy[i].name; | ||
130 | |||
131 | return NULL; | ||
132 | } | ||
133 | |||
134 | static ssize_t ata_scsi_lpm_put(struct class_device *class_dev, | ||
135 | const char *buf, size_t count) | ||
136 | { | ||
137 | struct Scsi_Host *shost = class_to_shost(class_dev); | ||
138 | struct ata_port *ap = ata_shost_to_port(shost); | ||
139 | enum link_pm policy = 0; | ||
140 | int i; | ||
141 | |||
142 | /* | ||
143 | * we are skipping array location 0 on purpose - this | ||
144 | * is because a value of NOT_AVAILABLE is displayed | ||
145 | * to the user as max_performance, but when the user | ||
146 | * writes "max_performance", they actually want the | ||
147 | * value to match MAX_PERFORMANCE. | ||
148 | */ | ||
149 | for (i = 1; i < ARRAY_SIZE(link_pm_policy); i++) { | ||
150 | const int len = strlen(link_pm_policy[i].name); | ||
151 | if (strncmp(link_pm_policy[i].name, buf, len) == 0 && | ||
152 | buf[len] == '\n') { | ||
153 | policy = link_pm_policy[i].value; | ||
154 | break; | ||
155 | } | ||
156 | } | ||
157 | if (!policy) | ||
158 | return -EINVAL; | ||
159 | |||
160 | ata_lpm_schedule(ap, policy); | ||
161 | return count; | ||
162 | } | ||
163 | |||
164 | static ssize_t | ||
165 | ata_scsi_lpm_show(struct class_device *class_dev, char *buf) | ||
166 | { | ||
167 | struct Scsi_Host *shost = class_to_shost(class_dev); | ||
168 | struct ata_port *ap = ata_shost_to_port(shost); | ||
169 | const char *policy = | ||
170 | ata_scsi_lpm_get(ap->pm_policy); | ||
171 | |||
172 | if (!policy) | ||
173 | return -EINVAL; | ||
174 | |||
175 | return snprintf(buf, 23, "%s\n", policy); | ||
176 | } | ||
177 | CLASS_DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR, | ||
178 | ata_scsi_lpm_show, ata_scsi_lpm_put); | ||
179 | EXPORT_SYMBOL_GPL(class_device_attr_link_power_management_policy); | ||
180 | |||
113 | static void ata_scsi_invalid_field(struct scsi_cmnd *cmd, | 181 | static void ata_scsi_invalid_field(struct scsi_cmnd *cmd, |
114 | void (*done)(struct scsi_cmnd *)) | 182 | void (*done)(struct scsi_cmnd *)) |
115 | { | 183 | { |
@@ -1361,33 +1429,10 @@ nothing_to_do: | |||
1361 | static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) | 1429 | static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) |
1362 | { | 1430 | { |
1363 | struct ata_port *ap = qc->ap; | 1431 | struct ata_port *ap = qc->ap; |
1364 | struct ata_eh_info *ehi = &qc->dev->link->eh_info; | ||
1365 | struct scsi_cmnd *cmd = qc->scsicmd; | 1432 | struct scsi_cmnd *cmd = qc->scsicmd; |
1366 | u8 *cdb = cmd->cmnd; | 1433 | u8 *cdb = cmd->cmnd; |
1367 | int need_sense = (qc->err_mask != 0); | 1434 | int need_sense = (qc->err_mask != 0); |
1368 | 1435 | ||
1369 | /* We snoop the SET_FEATURES - Write Cache ON/OFF command, and | ||
1370 | * schedule EH_REVALIDATE operation to update the IDENTIFY DEVICE | ||
1371 | * cache | ||
1372 | */ | ||
1373 | if (ap->ops->error_handler && !need_sense) { | ||
1374 | switch (qc->tf.command) { | ||
1375 | case ATA_CMD_SET_FEATURES: | ||
1376 | if ((qc->tf.feature == SETFEATURES_WC_ON) || | ||
1377 | (qc->tf.feature == SETFEATURES_WC_OFF)) { | ||
1378 | ehi->action |= ATA_EH_REVALIDATE; | ||
1379 | ata_port_schedule_eh(ap); | ||
1380 | } | ||
1381 | break; | ||
1382 | |||
1383 | case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ | ||
1384 | case ATA_CMD_SET_MULTI: /* multi_count changed */ | ||
1385 | ehi->action |= ATA_EH_REVALIDATE; | ||
1386 | ata_port_schedule_eh(ap); | ||
1387 | break; | ||
1388 | } | ||
1389 | } | ||
1390 | |||
1391 | /* For ATA pass thru (SAT) commands, generate a sense block if | 1436 | /* For ATA pass thru (SAT) commands, generate a sense block if |
1392 | * user mandated it or if there's an error. Note that if we | 1437 | * user mandated it or if there's an error. Note that if we |
1393 | * generate because the user forced us to, a check condition | 1438 | * generate because the user forced us to, a check condition |
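The libata-scsi.c additions above expose the link power management policy as a host sysfs attribute (typically /sys/class/scsi_host/hostN/link_power_management_policy) backed by a small name/value table, where entry 0 exists only so that NOT_AVAILABLE displays as "max_performance" and is deliberately skipped when a write is parsed. A standalone sketch of that lookup logic follows; the enum values and helper names here are invented for illustration and are not the kernel's enum link_pm:

#include <stdio.h>
#include <string.h>

enum pm { PM_NOT_AVAILABLE, PM_MIN_POWER, PM_MAX_PERFORMANCE, PM_MEDIUM_POWER };

static const struct { enum pm value; const char *name; } policies[] = {
	{ PM_NOT_AVAILABLE,   "max_performance" }, /* shown, never matched on write */
	{ PM_MIN_POWER,       "min_power" },
	{ PM_MAX_PERFORMANCE, "max_performance" },
	{ PM_MEDIUM_POWER,    "medium_power" },
};

/* Parse a sysfs write of the form "name\n"; index 0 is skipped so that
 * writing "max_performance" always maps to PM_MAX_PERFORMANCE. */
static int parse_policy(const char *buf, enum pm *out)
{
	size_t i;

	for (i = 1; i < sizeof(policies) / sizeof(policies[0]); i++) {
		size_t len = strlen(policies[i].name);

		if (strncmp(policies[i].name, buf, len) == 0 && buf[len] == '\n') {
			*out = policies[i].value;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	enum pm p;

	if (parse_policy("min_power\n", &p) == 0)
		printf("parsed policy %d\n", p);	/* prints 1 */
	return 0;
}

From user space the attribute would be driven along the lines of
echo min_power > /sys/class/scsi_host/host0/link_power_management_policy.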
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 90df58a3edc9..0e6cf3a484dc 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h | |||
@@ -101,6 +101,8 @@ extern int sata_link_init_spd(struct ata_link *link); | |||
101 | extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg); | 101 | extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg); |
102 | extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg); | 102 | extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg); |
103 | extern struct ata_port *ata_port_alloc(struct ata_host *host); | 103 | extern struct ata_port *ata_port_alloc(struct ata_host *host); |
104 | extern void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy); | ||
105 | extern void ata_lpm_schedule(struct ata_port *ap, enum link_pm); | ||
104 | 106 | ||
105 | /* libata-acpi.c */ | 107 | /* libata-acpi.c */ |
106 | #ifdef CONFIG_ATA_ACPI | 108 | #ifdef CONFIG_ATA_ACPI |
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c index 0f6f7bcc3def..e4542ab9c7f8 100644 --- a/drivers/ata/pata_acpi.c +++ b/drivers/ata/pata_acpi.c | |||
@@ -181,7 +181,7 @@ static void pacpi_set_piomode(struct ata_port *ap, struct ata_device *adev) | |||
181 | int unit = adev->devno; | 181 | int unit = adev->devno; |
182 | struct pata_acpi *acpi = ap->private_data; | 182 | struct pata_acpi *acpi = ap->private_data; |
183 | 183 | ||
184 | if(!(acpi->gtm.flags & 0x10)) | 184 | if (!(acpi->gtm.flags & 0x10)) |
185 | unit = 0; | 185 | unit = 0; |
186 | 186 | ||
187 | /* Now stuff the nS values into the structure */ | 187 | /* Now stuff the nS values into the structure */ |
@@ -202,7 +202,7 @@ static void pacpi_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |||
202 | int unit = adev->devno; | 202 | int unit = adev->devno; |
203 | struct pata_acpi *acpi = ap->private_data; | 203 | struct pata_acpi *acpi = ap->private_data; |
204 | 204 | ||
205 | if(!(acpi->gtm.flags & 0x10)) | 205 | if (!(acpi->gtm.flags & 0x10)) |
206 | unit = 0; | 206 | unit = 0; |
207 | 207 | ||
208 | /* Now stuff the nS values into the structure */ | 208 | /* Now stuff the nS values into the structure */ |
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c index be30923566c5..842fe08a3c13 100644 --- a/drivers/ata/pata_icside.c +++ b/drivers/ata/pata_icside.c | |||
@@ -332,12 +332,13 @@ static void ata_dummy_noret(struct ata_port *port) | |||
332 | { | 332 | { |
333 | } | 333 | } |
334 | 334 | ||
335 | static void pata_icside_postreset(struct ata_port *ap, unsigned int *classes) | 335 | static void pata_icside_postreset(struct ata_link *link, unsigned int *classes) |
336 | { | 336 | { |
337 | struct ata_port *ap = link->ap; | ||
337 | struct pata_icside_state *state = ap->host->private_data; | 338 | struct pata_icside_state *state = ap->host->private_data; |
338 | 339 | ||
339 | if (classes[0] != ATA_DEV_NONE || classes[1] != ATA_DEV_NONE) | 340 | if (classes[0] != ATA_DEV_NONE || classes[1] != ATA_DEV_NONE) |
340 | return ata_std_postreset(ap, classes); | 341 | return ata_std_postreset(link, classes); |
341 | 342 | ||
342 | state->port[ap->port_no].disabled = 1; | 343 | state->port[ap->port_no].disabled = 1; |
343 | 344 | ||
@@ -395,29 +396,30 @@ static struct ata_port_operations pata_icside_port_ops = { | |||
395 | 396 | ||
396 | static void __devinit | 397 | static void __devinit |
397 | pata_icside_setup_ioaddr(struct ata_port *ap, void __iomem *base, | 398 | pata_icside_setup_ioaddr(struct ata_port *ap, void __iomem *base, |
398 | const struct portinfo *info) | 399 | struct pata_icside_info *info, |
400 | const struct portinfo *port) | ||
399 | { | 401 | { |
400 | struct ata_ioports *ioaddr = &ap->ioaddr; | 402 | struct ata_ioports *ioaddr = &ap->ioaddr; |
401 | void __iomem *cmd = base + info->dataoffset; | 403 | void __iomem *cmd = base + port->dataoffset; |
402 | 404 | ||
403 | ioaddr->cmd_addr = cmd; | 405 | ioaddr->cmd_addr = cmd; |
404 | ioaddr->data_addr = cmd + (ATA_REG_DATA << info->stepping); | 406 | ioaddr->data_addr = cmd + (ATA_REG_DATA << port->stepping); |
405 | ioaddr->error_addr = cmd + (ATA_REG_ERR << info->stepping); | 407 | ioaddr->error_addr = cmd + (ATA_REG_ERR << port->stepping); |
406 | ioaddr->feature_addr = cmd + (ATA_REG_FEATURE << info->stepping); | 408 | ioaddr->feature_addr = cmd + (ATA_REG_FEATURE << port->stepping); |
407 | ioaddr->nsect_addr = cmd + (ATA_REG_NSECT << info->stepping); | 409 | ioaddr->nsect_addr = cmd + (ATA_REG_NSECT << port->stepping); |
408 | ioaddr->lbal_addr = cmd + (ATA_REG_LBAL << info->stepping); | 410 | ioaddr->lbal_addr = cmd + (ATA_REG_LBAL << port->stepping); |
409 | ioaddr->lbam_addr = cmd + (ATA_REG_LBAM << info->stepping); | 411 | ioaddr->lbam_addr = cmd + (ATA_REG_LBAM << port->stepping); |
410 | ioaddr->lbah_addr = cmd + (ATA_REG_LBAH << info->stepping); | 412 | ioaddr->lbah_addr = cmd + (ATA_REG_LBAH << port->stepping); |
411 | ioaddr->device_addr = cmd + (ATA_REG_DEVICE << info->stepping); | 413 | ioaddr->device_addr = cmd + (ATA_REG_DEVICE << port->stepping); |
412 | ioaddr->status_addr = cmd + (ATA_REG_STATUS << info->stepping); | 414 | ioaddr->status_addr = cmd + (ATA_REG_STATUS << port->stepping); |
413 | ioaddr->command_addr = cmd + (ATA_REG_CMD << info->stepping); | 415 | ioaddr->command_addr = cmd + (ATA_REG_CMD << port->stepping); |
414 | 416 | ||
415 | ioaddr->ctl_addr = base + info->ctrloffset; | 417 | ioaddr->ctl_addr = base + port->ctrloffset; |
416 | ioaddr->altstatus_addr = ioaddr->ctl_addr; | 418 | ioaddr->altstatus_addr = ioaddr->ctl_addr; |
417 | 419 | ||
418 | ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", | 420 | ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", |
419 | info->raw_base + info->dataoffset, | 421 | info->raw_base + port->dataoffset, |
420 | info->raw_base + info->ctrloffset); | 422 | info->raw_base + port->ctrloffset); |
421 | 423 | ||
422 | if (info->raw_ioc_base) | 424 | if (info->raw_ioc_base) |
423 | ata_port_desc(ap, "iocbase 0x%lx", info->raw_ioc_base); | 425 | ata_port_desc(ap, "iocbase 0x%lx", info->raw_ioc_base); |
@@ -441,7 +443,7 @@ static int __devinit pata_icside_register_v5(struct pata_icside_info *info) | |||
441 | info->nr_ports = 1; | 443 | info->nr_ports = 1; |
442 | info->port[0] = &pata_icside_portinfo_v5; | 444 | info->port[0] = &pata_icside_portinfo_v5; |
443 | 445 | ||
444 | info->raw_base = ecard_resource_start(ec, ECARD_RES_MEMC); | 446 | info->raw_base = ecard_resource_start(info->ec, ECARD_RES_MEMC); |
445 | 447 | ||
446 | return 0; | 448 | return 0; |
447 | } | 449 | } |
@@ -522,7 +524,7 @@ static int __devinit pata_icside_add_ports(struct pata_icside_info *info) | |||
522 | ap->flags |= ATA_FLAG_SLAVE_POSS; | 524 | ap->flags |= ATA_FLAG_SLAVE_POSS; |
523 | ap->ops = &pata_icside_port_ops; | 525 | ap->ops = &pata_icside_port_ops; |
524 | 526 | ||
525 | pata_icside_setup_ioaddr(ap, info->base, info->port[i]); | 527 | pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]); |
526 | } | 528 | } |
527 | 529 | ||
528 | return ata_host_activate(host, ec->irq, ata_interrupt, 0, | 530 | return ata_host_activate(host, ec->irq, ata_interrupt, 0, |
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c index b9a17eb100d0..d0e2e50823b1 100644 --- a/drivers/ata/pata_ns87415.c +++ b/drivers/ata/pata_ns87415.c | |||
@@ -215,6 +215,8 @@ static int ns87415_check_atapi_dma(struct ata_queued_cmd *qc) | |||
215 | 215 | ||
216 | #include <asm/superio.h> | 216 | #include <asm/superio.h> |
217 | 217 | ||
218 | #define SUPERIO_IDE_MAX_RETRIES 25 | ||
219 | |||
218 | /** | 220 | /** |
219 | * ns87560_read_buggy - workaround buggy Super I/O chip | 221 | * ns87560_read_buggy - workaround buggy Super I/O chip |
220 | * @port: Port to read | 222 | * @port: Port to read |
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c index 6b07b5b48532..f9b485a487ae 100644 --- a/drivers/ata/pata_optidma.c +++ b/drivers/ata/pata_optidma.c | |||
@@ -449,7 +449,7 @@ static int optiplus_with_udma(struct pci_dev *pdev) | |||
449 | 449 | ||
450 | /* Find function 1 */ | 450 | /* Find function 1 */ |
451 | dev1 = pci_get_device(0x1045, 0xC701, NULL); | 451 | dev1 = pci_get_device(0x1045, 0xC701, NULL); |
452 | if(dev1 == NULL) | 452 | if (dev1 == NULL) |
453 | return 0; | 453 | return 0; |
454 | 454 | ||
455 | /* Rev must be >= 0x10 */ | 455 | /* Rev must be >= 0x10 */ |
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c index 5db2013230b3..fd36099428a4 100644 --- a/drivers/ata/pata_pcmcia.c +++ b/drivers/ata/pata_pcmcia.c | |||
@@ -74,8 +74,7 @@ static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_d | |||
74 | return ata_do_set_mode(link, r_failed_dev); | 74 | return ata_do_set_mode(link, r_failed_dev); |
75 | 75 | ||
76 | if (memcmp(master->id + ATA_ID_FW_REV, slave->id + ATA_ID_FW_REV, | 76 | if (memcmp(master->id + ATA_ID_FW_REV, slave->id + ATA_ID_FW_REV, |
77 | ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0) | 77 | ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0) { |
78 | { | ||
79 | /* Suspicious match, but could be two cards from | 78 | /* Suspicious match, but could be two cards from |
80 | the same vendor - check serial */ | 79 | the same vendor - check serial */ |
81 | if (memcmp(master->id + ATA_ID_SERNO, slave->id + ATA_ID_SERNO, | 80 | if (memcmp(master->id + ATA_ID_SERNO, slave->id + ATA_ID_SERNO, |
@@ -248,7 +247,8 @@ static int pcmcia_init_one(struct pcmcia_device *pdev) | |||
248 | goto next_entry; | 247 | goto next_entry; |
249 | io_base = pdev->io.BasePort1; | 248 | io_base = pdev->io.BasePort1; |
250 | ctl_base = pdev->io.BasePort1 + 0x0e; | 249 | ctl_base = pdev->io.BasePort1 + 0x0e; |
251 | } else goto next_entry; | 250 | } else |
251 | goto next_entry; | ||
252 | /* If we've got this far, we're done */ | 252 | /* If we've got this far, we're done */ |
253 | break; | 253 | break; |
254 | } | 254 | } |
@@ -285,8 +285,8 @@ next_entry: | |||
285 | printk(KERN_WARNING DRV_NAME ": second channel not yet supported.\n"); | 285 | printk(KERN_WARNING DRV_NAME ": second channel not yet supported.\n"); |
286 | 286 | ||
287 | /* | 287 | /* |
288 | * Having done the PCMCIA plumbing the ATA side is relatively | 288 | * Having done the PCMCIA plumbing the ATA side is relatively |
289 | * sane. | 289 | * sane. |
290 | */ | 290 | */ |
291 | ret = -ENOMEM; | 291 | ret = -ENOMEM; |
292 | host = ata_host_alloc(&pdev->dev, 1); | 292 | host = ata_host_alloc(&pdev->dev, 1); |
@@ -363,7 +363,7 @@ static struct pcmcia_device_id pcmcia_devices[] = { | |||
363 | PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */ | 363 | PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */ |
364 | PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d), | 364 | PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d), |
365 | PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */ | 365 | PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */ |
366 | PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */ | 366 | PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */ |
367 | PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001), | 367 | PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001), |
368 | PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100), /* Viking CFA */ | 368 | PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100), /* Viking CFA */ |
369 | PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar, Viking CFA */ | 369 | PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar, Viking CFA */ |
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c index 3d3f1558cdee..2622577521a1 100644 --- a/drivers/ata/pata_pdc2027x.c +++ b/drivers/ata/pata_pdc2027x.c | |||
@@ -348,7 +348,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long | |||
348 | ata_id_c_string(pair->id, model_num, ATA_ID_PROD, | 348 | ata_id_c_string(pair->id, model_num, ATA_ID_PROD, |
349 | ATA_ID_PROD_LEN + 1); | 349 | ATA_ID_PROD_LEN + 1); |
350 | /* If the master is a maxtor in UDMA6 then the slave should not use UDMA 6 */ | 350 | /* If the master is a maxtor in UDMA6 then the slave should not use UDMA 6 */ |
351 | if(strstr(model_num, "Maxtor") == 0 && pair->dma_mode == XFER_UDMA_6) | 351 | if (strstr(model_num, "Maxtor") == 0 && pair->dma_mode == XFER_UDMA_6) |
352 | mask &= ~ (1 << (6 + ATA_SHIFT_UDMA)); | 352 | mask &= ~ (1 << (6 + ATA_SHIFT_UDMA)); |
353 | 353 | ||
354 | return ata_pci_default_filter(adev, mask); | 354 | return ata_pci_default_filter(adev, mask); |
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c index 65d951618c60..bc7c2d5d8d5e 100644 --- a/drivers/ata/pata_pdc202xx_old.c +++ b/drivers/ata/pata_pdc202xx_old.c | |||
@@ -351,9 +351,9 @@ static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id | |||
351 | struct pci_dev *bridge = dev->bus->self; | 351 | struct pci_dev *bridge = dev->bus->self; |
352 | /* Don't grab anything behind a Promise I2O RAID */ | 352 | /* Don't grab anything behind a Promise I2O RAID */ |
353 | if (bridge && bridge->vendor == PCI_VENDOR_ID_INTEL) { | 353 | if (bridge && bridge->vendor == PCI_VENDOR_ID_INTEL) { |
354 | if( bridge->device == PCI_DEVICE_ID_INTEL_I960) | 354 | if (bridge->device == PCI_DEVICE_ID_INTEL_I960) |
355 | return -ENODEV; | 355 | return -ENODEV; |
356 | if( bridge->device == PCI_DEVICE_ID_INTEL_I960RM) | 356 | if (bridge->device == PCI_DEVICE_ID_INTEL_I960RM) |
357 | return -ENODEV; | 357 | return -ENODEV; |
358 | } | 358 | } |
359 | } | 359 | } |
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c index 55576138faea..ea2ef9fc15be 100644 --- a/drivers/ata/pata_scc.c +++ b/drivers/ata/pata_scc.c | |||
@@ -570,17 +570,8 @@ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask, | |||
570 | udelay(20); | 570 | udelay(20); |
571 | out_be32(ioaddr->ctl_addr, ap->ctl); | 571 | out_be32(ioaddr->ctl_addr, ap->ctl); |
572 | 572 | ||
573 | /* spec mandates ">= 2ms" before checking status. | 573 | /* wait a while before checking status */ |
574 | * We wait 150ms, because that was the magic delay used for | 574 | ata_wait_after_reset(ap, deadline); |
575 | * ATAPI devices in Hale Landis's ATADRVR, for the period of time | ||
576 | * between when the ATA command register is written, and then | ||
577 | * status is checked. Because waiting for "a while" before | ||
578 | * checking status is fine, post SRST, we perform this magic | ||
579 | * delay here as well. | ||
580 | * | ||
581 | * Old drivers/ide uses the 2mS rule and then waits for ready | ||
582 | */ | ||
583 | msleep(150); | ||
584 | 575 | ||
585 | /* Before we perform post reset processing we want to see if | 576 | /* Before we perform post reset processing we want to see if |
586 | * the bus shows 0xFF because the odd clown forgets the D7 | 577 | * the bus shows 0xFF because the odd clown forgets the D7 |
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index ea7a9a652e61..a4175fbdd170 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c | |||
@@ -176,7 +176,7 @@ static int via_cable_detect(struct ata_port *ap) { | |||
176 | if ((config->flags & VIA_UDMA) < VIA_UDMA_66) | 176 | if ((config->flags & VIA_UDMA) < VIA_UDMA_66) |
177 | return ATA_CBL_PATA40; | 177 | return ATA_CBL_PATA40; |
178 | /* UDMA 66 chips have only drive side logic */ | 178 | /* UDMA 66 chips have only drive side logic */ |
179 | else if((config->flags & VIA_UDMA) < VIA_UDMA_100) | 179 | else if ((config->flags & VIA_UDMA) < VIA_UDMA_100) |
180 | return ATA_CBL_PATA_UNK; | 180 | return ATA_CBL_PATA_UNK; |
181 | /* UDMA 100 or later */ | 181 | /* UDMA 100 or later */ |
182 | pci_read_config_dword(pdev, 0x50, &ata66); | 182 | pci_read_config_dword(pdev, 0x50, &ata66); |
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c index 549cbbe9fd07..311cdb3a5566 100644 --- a/drivers/ata/pata_winbond.c +++ b/drivers/ata/pata_winbond.c | |||
@@ -279,7 +279,7 @@ static __init int winbond_init(void) | |||
279 | 279 | ||
280 | if (request_region(port, 2, "pata_winbond")) { | 280 | if (request_region(port, 2, "pata_winbond")) { |
281 | ret = winbond_init_one(port); | 281 | ret = winbond_init_one(port); |
282 | if(ret <= 0) | 282 | if (ret <= 0) |
283 | release_region(port, 2); | 283 | release_region(port, 2); |
284 | else ct+= ret; | 284 | else ct+= ret; |
285 | } | 285 | } |
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c index 199f7e150eb3..bd4c2a3c88d7 100644 --- a/drivers/ata/pdc_adma.c +++ b/drivers/ata/pdc_adma.c | |||
@@ -47,10 +47,10 @@ | |||
47 | #define DRV_VERSION "1.0" | 47 | #define DRV_VERSION "1.0" |
48 | 48 | ||
49 | /* macro to calculate base address for ATA regs */ | 49 | /* macro to calculate base address for ATA regs */ |
50 | #define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40)) | 50 | #define ADMA_ATA_REGS(base, port_no) ((base) + ((port_no) * 0x40)) |
51 | 51 | ||
52 | /* macro to calculate base address for ADMA regs */ | 52 | /* macro to calculate base address for ADMA regs */ |
53 | #define ADMA_REGS(base,port_no) ((base) + 0x80 + ((port_no) * 0x20)) | 53 | #define ADMA_REGS(base, port_no) ((base) + 0x80 + ((port_no) * 0x20)) |
54 | 54 | ||
55 | /* macro to obtain addresses from ata_port */ | 55 | /* macro to obtain addresses from ata_port */ |
56 | #define ADMA_PORT_REGS(ap) \ | 56 | #define ADMA_PORT_REGS(ap) \ |
@@ -128,7 +128,7 @@ struct adma_port_priv { | |||
128 | adma_state_t state; | 128 | adma_state_t state; |
129 | }; | 129 | }; |
130 | 130 | ||
131 | static int adma_ata_init_one (struct pci_dev *pdev, | 131 | static int adma_ata_init_one(struct pci_dev *pdev, |
132 | const struct pci_device_id *ent); | 132 | const struct pci_device_id *ent); |
133 | static int adma_port_start(struct ata_port *ap); | 133 | static int adma_port_start(struct ata_port *ap); |
134 | static void adma_host_stop(struct ata_host *host); | 134 | static void adma_host_stop(struct ata_host *host); |
@@ -340,8 +340,8 @@ static int adma_fill_sg(struct ata_queued_cmd *qc) | |||
340 | buf[i++] = 0; /* pPKLW */ | 340 | buf[i++] = 0; /* pPKLW */ |
341 | buf[i++] = 0; /* reserved */ | 341 | buf[i++] = 0; /* reserved */ |
342 | 342 | ||
343 | *(__le32 *)(buf + i) | 343 | *(__le32 *)(buf + i) = |
344 | = (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4); | 344 | (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4); |
345 | i += 4; | 345 | i += 4; |
346 | 346 | ||
347 | VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4, | 347 | VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4, |
@@ -617,7 +617,7 @@ static int adma_port_start(struct ata_port *ap) | |||
617 | return -ENOMEM; | 617 | return -ENOMEM; |
618 | /* paranoia? */ | 618 | /* paranoia? */ |
619 | if ((pp->pkt_dma & 7) != 0) { | 619 | if ((pp->pkt_dma & 7) != 0) { |
620 | printk("bad alignment for pp->pkt_dma: %08x\n", | 620 | printk(KERN_ERR "bad alignment for pp->pkt_dma: %08x\n", |
621 | (u32)pp->pkt_dma); | 621 | (u32)pp->pkt_dma); |
622 | return -ENOMEM; | 622 | return -ENOMEM; |
623 | } | 623 | } |
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c index 08595f34b3e8..323c087e8cc1 100644 --- a/drivers/ata/sata_inic162x.c +++ b/drivers/ata/sata_inic162x.c | |||
@@ -143,7 +143,7 @@ static const int scr_map[] = { | |||
143 | [SCR_CONTROL] = 2, | 143 | [SCR_CONTROL] = 2, |
144 | }; | 144 | }; |
145 | 145 | ||
146 | static void __iomem * inic_port_base(struct ata_port *ap) | 146 | static void __iomem *inic_port_base(struct ata_port *ap) |
147 | { | 147 | { |
148 | return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE; | 148 | return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE; |
149 | } | 149 | } |
@@ -448,7 +448,7 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class, | |||
448 | struct ata_taskfile tf; | 448 | struct ata_taskfile tf; |
449 | 449 | ||
450 | /* wait a while before checking status */ | 450 | /* wait a while before checking status */ |
451 | msleep(150); | 451 | ata_wait_after_reset(ap, deadline); |
452 | 452 | ||
453 | rc = ata_wait_ready(ap, deadline); | 453 | rc = ata_wait_ready(ap, deadline); |
454 | /* link occupied, -ENODEV too is an error */ | 454 | /* link occupied, -ENODEV too is an error */ |
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index b39648f0914b..a43f64d2775b 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
@@ -1156,7 +1156,7 @@ static void mv_fill_sg(struct ata_queued_cmd *qc) | |||
1156 | last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL); | 1156 | last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL); |
1157 | } | 1157 | } |
1158 | 1158 | ||
1159 | static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) | 1159 | static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) |
1160 | { | 1160 | { |
1161 | u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | | 1161 | u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | |
1162 | (last ? CRQB_CMD_LAST : 0); | 1162 | (last ? CRQB_CMD_LAST : 0); |
@@ -2429,7 +2429,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) | |||
2429 | struct mv_host_priv *hpriv = host->private_data; | 2429 | struct mv_host_priv *hpriv = host->private_data; |
2430 | u32 hp_flags = hpriv->hp_flags; | 2430 | u32 hp_flags = hpriv->hp_flags; |
2431 | 2431 | ||
2432 | switch(board_idx) { | 2432 | switch (board_idx) { |
2433 | case chip_5080: | 2433 | case chip_5080: |
2434 | hpriv->ops = &mv5xxx_ops; | 2434 | hpriv->ops = &mv5xxx_ops; |
2435 | hp_flags |= MV_HP_GEN_I; | 2435 | hp_flags |= MV_HP_GEN_I; |
@@ -2510,7 +2510,8 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) | |||
2510 | break; | 2510 | break; |
2511 | 2511 | ||
2512 | default: | 2512 | default: |
2513 | printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx); | 2513 | dev_printk(KERN_ERR, &pdev->dev, |
2514 | "BUG: invalid board index %u\n", board_idx); | ||
2514 | return 1; | 2515 | return 1; |
2515 | } | 2516 | } |
2516 | 2517 | ||
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 2e0279fdd7aa..35b2df297527 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c | |||
@@ -291,7 +291,7 @@ struct nv_swncq_port_priv { | |||
291 | }; | 291 | }; |
292 | 292 | ||
293 | 293 | ||
294 | #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT))))) | 294 | #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT))))) |
295 | 295 | ||
296 | static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); | 296 | static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
297 | #ifdef CONFIG_PM | 297 | #ifdef CONFIG_PM |
@@ -365,9 +365,9 @@ static const struct pci_device_id nv_pci_tbl[] = { | |||
365 | { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ }, | 365 | { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ }, |
366 | { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ }, | 366 | { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ }, |
367 | { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ }, | 367 | { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ }, |
368 | { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), SWNCQ }, | 368 | { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC }, |
369 | { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), SWNCQ }, | 369 | { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC }, |
370 | { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), SWNCQ }, | 370 | { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC }, |
371 | 371 | ||
372 | { } /* terminate list */ | 372 | { } /* terminate list */ |
373 | }; | 373 | }; |
@@ -884,8 +884,9 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err) | |||
884 | /* Notifier bits set without a command may indicate the drive | 884 | /* Notifier bits set without a command may indicate the drive |
885 | is misbehaving. Raise host state machine violation on this | 885 | is misbehaving. Raise host state machine violation on this |
886 | condition. */ | 886 | condition. */ |
887 | ata_port_printk(ap, KERN_ERR, "notifier for tag %d with no command?\n", | 887 | ata_port_printk(ap, KERN_ERR, |
888 | cpb_num); | 888 | "notifier for tag %d with no cmd?\n", |
889 | cpb_num); | ||
889 | ehi->err_mask |= AC_ERR_HSM; | 890 | ehi->err_mask |= AC_ERR_HSM; |
890 | ehi->action |= ATA_EH_SOFTRESET; | 891 | ehi->action |= ATA_EH_SOFTRESET; |
891 | ata_port_freeze(ap); | 892 | ata_port_freeze(ap); |
@@ -1012,7 +1013,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) | |||
1012 | u32 check_commands; | 1013 | u32 check_commands; |
1013 | int pos, error = 0; | 1014 | int pos, error = 0; |
1014 | 1015 | ||
1015 | if(ata_tag_valid(ap->link.active_tag)) | 1016 | if (ata_tag_valid(ap->link.active_tag)) |
1016 | check_commands = 1 << ap->link.active_tag; | 1017 | check_commands = 1 << ap->link.active_tag; |
1017 | else | 1018 | else |
1018 | check_commands = ap->link.sactive; | 1019 | check_commands = ap->link.sactive; |
@@ -1021,14 +1022,14 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) | |||
1021 | while ((pos = ffs(check_commands)) && !error) { | 1022 | while ((pos = ffs(check_commands)) && !error) { |
1022 | pos--; | 1023 | pos--; |
1023 | error = nv_adma_check_cpb(ap, pos, | 1024 | error = nv_adma_check_cpb(ap, pos, |
1024 | notifier_error & (1 << pos) ); | 1025 | notifier_error & (1 << pos)); |
1025 | check_commands &= ~(1 << pos ); | 1026 | check_commands &= ~(1 << pos); |
1026 | } | 1027 | } |
1027 | } | 1028 | } |
1028 | } | 1029 | } |
1029 | } | 1030 | } |
1030 | 1031 | ||
1031 | if(notifier_clears[0] || notifier_clears[1]) { | 1032 | if (notifier_clears[0] || notifier_clears[1]) { |
1032 | /* Note: Both notifier clear registers must be written | 1033 | /* Note: Both notifier clear registers must be written |
1033 | if either is set, even if one is zero, according to NVIDIA. */ | 1034 | if either is set, even if one is zero, according to NVIDIA. */ |
1034 | struct nv_adma_port_priv *pp = host->ports[0]->private_data; | 1035 | struct nv_adma_port_priv *pp = host->ports[0]->private_data; |
@@ -1061,7 +1062,7 @@ static void nv_adma_freeze(struct ata_port *ap) | |||
1061 | tmp = readw(mmio + NV_ADMA_CTL); | 1062 | tmp = readw(mmio + NV_ADMA_CTL); |
1062 | writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN), | 1063 | writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN), |
1063 | mmio + NV_ADMA_CTL); | 1064 | mmio + NV_ADMA_CTL); |
1064 | readw(mmio + NV_ADMA_CTL ); /* flush posted write */ | 1065 | readw(mmio + NV_ADMA_CTL); /* flush posted write */ |
1065 | } | 1066 | } |
1066 | 1067 | ||
1067 | static void nv_adma_thaw(struct ata_port *ap) | 1068 | static void nv_adma_thaw(struct ata_port *ap) |
@@ -1079,7 +1080,7 @@ static void nv_adma_thaw(struct ata_port *ap) | |||
1079 | tmp = readw(mmio + NV_ADMA_CTL); | 1080 | tmp = readw(mmio + NV_ADMA_CTL); |
1080 | writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN), | 1081 | writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN), |
1081 | mmio + NV_ADMA_CTL); | 1082 | mmio + NV_ADMA_CTL); |
1082 | readw(mmio + NV_ADMA_CTL ); /* flush posted write */ | 1083 | readw(mmio + NV_ADMA_CTL); /* flush posted write */ |
1083 | } | 1084 | } |
1084 | 1085 | ||
1085 | static void nv_adma_irq_clear(struct ata_port *ap) | 1086 | static void nv_adma_irq_clear(struct ata_port *ap) |
@@ -1119,7 +1120,7 @@ static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc) | |||
1119 | { | 1120 | { |
1120 | struct nv_adma_port_priv *pp = qc->ap->private_data; | 1121 | struct nv_adma_port_priv *pp = qc->ap->private_data; |
1121 | 1122 | ||
1122 | if(pp->flags & NV_ADMA_PORT_REGISTER_MODE) | 1123 | if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) |
1123 | ata_bmdma_post_internal_cmd(qc); | 1124 | ata_bmdma_post_internal_cmd(qc); |
1124 | } | 1125 | } |
1125 | 1126 | ||
@@ -1165,7 +1166,7 @@ static int nv_adma_port_start(struct ata_port *ap) | |||
1165 | pp->cpb_dma = mem_dma; | 1166 | pp->cpb_dma = mem_dma; |
1166 | 1167 | ||
1167 | writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW); | 1168 | writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW); |
1168 | writel((mem_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH); | 1169 | writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH); |
1169 | 1170 | ||
1170 | mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ; | 1171 | mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ; |
1171 | mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ; | 1172 | mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ; |
@@ -1189,15 +1190,15 @@ static int nv_adma_port_start(struct ata_port *ap) | |||
1189 | 1190 | ||
1190 | /* clear GO for register mode, enable interrupt */ | 1191 | /* clear GO for register mode, enable interrupt */ |
1191 | tmp = readw(mmio + NV_ADMA_CTL); | 1192 | tmp = readw(mmio + NV_ADMA_CTL); |
1192 | writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN | | 1193 | writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN | |
1193 | NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL); | 1194 | NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL); |
1194 | 1195 | ||
1195 | tmp = readw(mmio + NV_ADMA_CTL); | 1196 | tmp = readw(mmio + NV_ADMA_CTL); |
1196 | writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); | 1197 | writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); |
1197 | readw( mmio + NV_ADMA_CTL ); /* flush posted write */ | 1198 | readw(mmio + NV_ADMA_CTL); /* flush posted write */ |
1198 | udelay(1); | 1199 | udelay(1); |
1199 | writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); | 1200 | writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); |
1200 | readw( mmio + NV_ADMA_CTL ); /* flush posted write */ | 1201 | readw(mmio + NV_ADMA_CTL); /* flush posted write */ |
1201 | 1202 | ||
1202 | return 0; | 1203 | return 0; |
1203 | } | 1204 | } |
@@ -1237,7 +1238,7 @@ static int nv_adma_port_resume(struct ata_port *ap) | |||
1237 | 1238 | ||
1238 | /* set CPB block location */ | 1239 | /* set CPB block location */ |
1239 | writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW); | 1240 | writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW); |
1240 | writel((pp->cpb_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH); | 1241 | writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH); |
1241 | 1242 | ||
1242 | /* clear any outstanding interrupt conditions */ | 1243 | /* clear any outstanding interrupt conditions */ |
1243 | writew(0xffff, mmio + NV_ADMA_STAT); | 1244 | writew(0xffff, mmio + NV_ADMA_STAT); |
@@ -1250,15 +1251,15 @@ static int nv_adma_port_resume(struct ata_port *ap) | |||
1250 | 1251 | ||
1251 | /* clear GO for register mode, enable interrupt */ | 1252 | /* clear GO for register mode, enable interrupt */ |
1252 | tmp = readw(mmio + NV_ADMA_CTL); | 1253 | tmp = readw(mmio + NV_ADMA_CTL); |
1253 | writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN | | 1254 | writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN | |
1254 | NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL); | 1255 | NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL); |
1255 | 1256 | ||
1256 | tmp = readw(mmio + NV_ADMA_CTL); | 1257 | tmp = readw(mmio + NV_ADMA_CTL); |
1257 | writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); | 1258 | writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); |
1258 | readw( mmio + NV_ADMA_CTL ); /* flush posted write */ | 1259 | readw(mmio + NV_ADMA_CTL); /* flush posted write */ |
1259 | udelay(1); | 1260 | udelay(1); |
1260 | writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); | 1261 | writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); |
1261 | readw( mmio + NV_ADMA_CTL ); /* flush posted write */ | 1262 | readw(mmio + NV_ADMA_CTL); /* flush posted write */ |
1262 | 1263 | ||
1263 | return 0; | 1264 | return 0; |
1264 | } | 1265 | } |
@@ -1342,7 +1343,8 @@ static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb) | |||
1342 | idx = 0; | 1343 | idx = 0; |
1343 | 1344 | ||
1344 | ata_for_each_sg(sg, qc) { | 1345 | ata_for_each_sg(sg, qc) { |
1345 | aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)]; | 1346 | aprd = (idx < 5) ? &cpb->aprd[idx] : |
1347 | &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)]; | ||
1346 | nv_adma_fill_aprd(qc, sg, idx, aprd); | 1348 | nv_adma_fill_aprd(qc, sg, idx, aprd); |
1347 | idx++; | 1349 | idx++; |
1348 | } | 1350 | } |
@@ -1359,12 +1361,12 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc) | |||
1359 | /* ADMA engine can only be used for non-ATAPI DMA commands, | 1361 | /* ADMA engine can only be used for non-ATAPI DMA commands, |
1360 | or interrupt-driven no-data commands, where a result taskfile | 1362 | or interrupt-driven no-data commands, where a result taskfile |
1361 | is not required. */ | 1363 | is not required. */ |
1362 | if((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) || | 1364 | if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) || |
1363 | (qc->tf.flags & ATA_TFLAG_POLLING) || | 1365 | (qc->tf.flags & ATA_TFLAG_POLLING) || |
1364 | (qc->flags & ATA_QCFLAG_RESULT_TF)) | 1366 | (qc->flags & ATA_QCFLAG_RESULT_TF)) |
1365 | return 1; | 1367 | return 1; |
1366 | 1368 | ||
1367 | if((qc->flags & ATA_QCFLAG_DMAMAP) || | 1369 | if ((qc->flags & ATA_QCFLAG_DMAMAP) || |
1368 | (qc->tf.protocol == ATA_PROT_NODATA)) | 1370 | (qc->tf.protocol == ATA_PROT_NODATA)) |
1369 | return 0; | 1371 | return 0; |
1370 | 1372 | ||
@@ -1401,14 +1403,14 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc) | |||
1401 | 1403 | ||
1402 | nv_adma_tf_to_cpb(&qc->tf, cpb->tf); | 1404 | nv_adma_tf_to_cpb(&qc->tf, cpb->tf); |
1403 | 1405 | ||
1404 | if(qc->flags & ATA_QCFLAG_DMAMAP) { | 1406 | if (qc->flags & ATA_QCFLAG_DMAMAP) { |
1405 | nv_adma_fill_sg(qc, cpb); | 1407 | nv_adma_fill_sg(qc, cpb); |
1406 | ctl_flags |= NV_CPB_CTL_APRD_VALID; | 1408 | ctl_flags |= NV_CPB_CTL_APRD_VALID; |
1407 | } else | 1409 | } else |
1408 | memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5); | 1410 | memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5); |
1409 | 1411 | ||
1410 | /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are | 1412 | /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID |
1411 | finished filling in all of the contents */ | 1413 | until we are finished filling in all of the contents */ |
1412 | wmb(); | 1414 | wmb(); |
1413 | cpb->ctl_flags = ctl_flags; | 1415 | cpb->ctl_flags = ctl_flags; |
1414 | wmb(); | 1416 | wmb(); |
@@ -1435,16 +1437,16 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) | |||
1435 | and (number of cpbs to append -1) in top 8 bits */ | 1437 | and (number of cpbs to append -1) in top 8 bits */ |
1436 | wmb(); | 1438 | wmb(); |
1437 | 1439 | ||
1438 | if(curr_ncq != pp->last_issue_ncq) { | 1440 | if (curr_ncq != pp->last_issue_ncq) { |
1439 | /* Seems to need some delay before switching between NCQ and non-NCQ | 1441 | /* Seems to need some delay before switching between NCQ and |
1440 | commands, else we get command timeouts and such. */ | 1442 | non-NCQ commands, else we get command timeouts and such. */ |
1441 | udelay(20); | 1443 | udelay(20); |
1442 | pp->last_issue_ncq = curr_ncq; | 1444 | pp->last_issue_ncq = curr_ncq; |
1443 | } | 1445 | } |
1444 | 1446 | ||
1445 | writew(qc->tag, mmio + NV_ADMA_APPEND); | 1447 | writew(qc->tag, mmio + NV_ADMA_APPEND); |
1446 | 1448 | ||
1447 | DPRINTK("Issued tag %u\n",qc->tag); | 1449 | DPRINTK("Issued tag %u\n", qc->tag); |
1448 | 1450 | ||
1449 | return 0; | 1451 | return 0; |
1450 | } | 1452 | } |
@@ -1641,12 +1643,12 @@ static void nv_error_handler(struct ata_port *ap) | |||
1641 | static void nv_adma_error_handler(struct ata_port *ap) | 1643 | static void nv_adma_error_handler(struct ata_port *ap) |
1642 | { | 1644 | { |
1643 | struct nv_adma_port_priv *pp = ap->private_data; | 1645 | struct nv_adma_port_priv *pp = ap->private_data; |
1644 | if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) { | 1646 | if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) { |
1645 | void __iomem *mmio = pp->ctl_block; | 1647 | void __iomem *mmio = pp->ctl_block; |
1646 | int i; | 1648 | int i; |
1647 | u16 tmp; | 1649 | u16 tmp; |
1648 | 1650 | ||
1649 | if(ata_tag_valid(ap->link.active_tag) || ap->link.sactive) { | 1651 | if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) { |
1650 | u32 notifier = readl(mmio + NV_ADMA_NOTIFIER); | 1652 | u32 notifier = readl(mmio + NV_ADMA_NOTIFIER); |
1651 | u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR); | 1653 | u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR); |
1652 | u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); | 1654 | u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); |
@@ -1654,16 +1656,17 @@ static void nv_adma_error_handler(struct ata_port *ap) | |||
1654 | u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT); | 1656 | u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT); |
1655 | u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX); | 1657 | u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX); |
1656 | 1658 | ||
1657 | ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X " | 1659 | ata_port_printk(ap, KERN_ERR, |
1660 | "EH in ADMA mode, notifier 0x%X " | ||
1658 | "notifier_error 0x%X gen_ctl 0x%X status 0x%X " | 1661 | "notifier_error 0x%X gen_ctl 0x%X status 0x%X " |
1659 | "next cpb count 0x%X next cpb idx 0x%x\n", | 1662 | "next cpb count 0x%X next cpb idx 0x%x\n", |
1660 | notifier, notifier_error, gen_ctl, status, | 1663 | notifier, notifier_error, gen_ctl, status, |
1661 | cpb_count, next_cpb_idx); | 1664 | cpb_count, next_cpb_idx); |
1662 | 1665 | ||
1663 | for( i=0;i<NV_ADMA_MAX_CPBS;i++) { | 1666 | for (i = 0; i < NV_ADMA_MAX_CPBS; i++) { |
1664 | struct nv_adma_cpb *cpb = &pp->cpb[i]; | 1667 | struct nv_adma_cpb *cpb = &pp->cpb[i]; |
1665 | if( (ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) || | 1668 | if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) || |
1666 | ap->link.sactive & (1 << i) ) | 1669 | ap->link.sactive & (1 << i)) |
1667 | ata_port_printk(ap, KERN_ERR, | 1670 | ata_port_printk(ap, KERN_ERR, |
1668 | "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n", | 1671 | "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n", |
1669 | i, cpb->ctl_flags, cpb->resp_flags); | 1672 | i, cpb->ctl_flags, cpb->resp_flags); |
@@ -1673,8 +1676,9 @@ static void nv_adma_error_handler(struct ata_port *ap) | |||
1673 | /* Push us back into port register mode for error handling. */ | 1676 | /* Push us back into port register mode for error handling. */ |
1674 | nv_adma_register_mode(ap); | 1677 | nv_adma_register_mode(ap); |
1675 | 1678 | ||
1676 | /* Mark all of the CPBs as invalid to prevent them from being executed */ | 1679 | /* Mark all of the CPBs as invalid to prevent them from |
1677 | for( i=0;i<NV_ADMA_MAX_CPBS;i++) | 1680 | being executed */ |
1681 | for (i = 0; i < NV_ADMA_MAX_CPBS; i++) | ||
1678 | pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID; | 1682 | pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID; |
1679 | 1683 | ||
1680 | /* clear CPB fetch count */ | 1684 | /* clear CPB fetch count */ |
@@ -1683,10 +1687,10 @@ static void nv_adma_error_handler(struct ata_port *ap) | |||
1683 | /* Reset channel */ | 1687 | /* Reset channel */ |
1684 | tmp = readw(mmio + NV_ADMA_CTL); | 1688 | tmp = readw(mmio + NV_ADMA_CTL); |
1685 | writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); | 1689 | writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); |
1686 | readw( mmio + NV_ADMA_CTL ); /* flush posted write */ | 1690 | readw(mmio + NV_ADMA_CTL); /* flush posted write */ |
1687 | udelay(1); | 1691 | udelay(1); |
1688 | writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); | 1692 | writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); |
1689 | readw( mmio + NV_ADMA_CTL ); /* flush posted write */ | 1693 | readw(mmio + NV_ADMA_CTL); /* flush posted write */ |
1690 | } | 1694 | } |
1691 | 1695 | ||
1692 | ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, | 1696 | ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, |
@@ -2350,9 +2354,9 @@ static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance) | |||
2350 | return IRQ_RETVAL(handled); | 2354 | return IRQ_RETVAL(handled); |
2351 | } | 2355 | } |
2352 | 2356 | ||
2353 | static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | 2357 | static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
2354 | { | 2358 | { |
2355 | static int printed_version = 0; | 2359 | static int printed_version; |
2356 | const struct ata_port_info *ppi[] = { NULL, NULL }; | 2360 | const struct ata_port_info *ppi[] = { NULL, NULL }; |
2357 | struct ata_host *host; | 2361 | struct ata_host *host; |
2358 | struct nv_host_priv *hpriv; | 2362 | struct nv_host_priv *hpriv; |
@@ -2364,7 +2368,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2364 | // Make sure this is a SATA controller by counting the number of bars | 2368 | // Make sure this is a SATA controller by counting the number of bars |
2365 | // (NVIDIA SATA controllers will always have six bars). Otherwise, | 2369 | // (NVIDIA SATA controllers will always have six bars). Otherwise, |
2366 | // it's an IDE controller and we ignore it. | 2370 | // it's an IDE controller and we ignore it. |
2367 | for (bar=0; bar<6; bar++) | 2371 | for (bar = 0; bar < 6; bar++) |
2368 | if (pci_resource_start(pdev, bar) == 0) | 2372 | if (pci_resource_start(pdev, bar) == 0) |
2369 | return -ENODEV; | 2373 | return -ENODEV; |
2370 | 2374 | ||
@@ -2381,6 +2385,14 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2381 | type = ADMA; | 2385 | type = ADMA; |
2382 | } | 2386 | } |
2383 | 2387 | ||
2388 | if (type == SWNCQ) { | ||
2389 | if (swncq_enabled) | ||
2390 | dev_printk(KERN_NOTICE, &pdev->dev, | ||
2391 | "Using SWNCQ mode\n"); | ||
2392 | else | ||
2393 | type = GENERIC; | ||
2394 | } | ||
2395 | |||
2384 | ppi[0] = &nv_port_info[type]; | 2396 | ppi[0] = &nv_port_info[type]; |
2385 | rc = ata_pci_prepare_sff_host(pdev, ppi, &host); | 2397 | rc = ata_pci_prepare_sff_host(pdev, ppi, &host); |
2386 | if (rc) | 2398 | if (rc) |
@@ -2422,10 +2434,8 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2422 | rc = nv_adma_host_init(host); | 2434 | rc = nv_adma_host_init(host); |
2423 | if (rc) | 2435 | if (rc) |
2424 | return rc; | 2436 | return rc; |
2425 | } else if (type == SWNCQ && swncq_enabled) { | 2437 | } else if (type == SWNCQ) |
2426 | dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n"); | ||
2427 | nv_swncq_host_init(host); | 2438 | nv_swncq_host_init(host); |
2428 | } | ||
2429 | 2439 | ||
2430 | pci_set_master(pdev); | 2440 | pci_set_master(pdev); |
2431 | return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler, | 2441 | return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler, |
@@ -2440,37 +2450,37 @@ static int nv_pci_device_resume(struct pci_dev *pdev) | |||
2440 | int rc; | 2450 | int rc; |
2441 | 2451 | ||
2442 | rc = ata_pci_device_do_resume(pdev); | 2452 | rc = ata_pci_device_do_resume(pdev); |
2443 | if(rc) | 2453 | if (rc) |
2444 | return rc; | 2454 | return rc; |
2445 | 2455 | ||
2446 | if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { | 2456 | if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { |
2447 | if(hpriv->type >= CK804) { | 2457 | if (hpriv->type >= CK804) { |
2448 | u8 regval; | 2458 | u8 regval; |
2449 | 2459 | ||
2450 | pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val); | 2460 | pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val); |
2451 | regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN; | 2461 | regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN; |
2452 | pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval); | 2462 | pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval); |
2453 | } | 2463 | } |
2454 | if(hpriv->type == ADMA) { | 2464 | if (hpriv->type == ADMA) { |
2455 | u32 tmp32; | 2465 | u32 tmp32; |
2456 | struct nv_adma_port_priv *pp; | 2466 | struct nv_adma_port_priv *pp; |
2457 | /* enable/disable ADMA on the ports appropriately */ | 2467 | /* enable/disable ADMA on the ports appropriately */ |
2458 | pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32); | 2468 | pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32); |
2459 | 2469 | ||
2460 | pp = host->ports[0]->private_data; | 2470 | pp = host->ports[0]->private_data; |
2461 | if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) | 2471 | if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) |
2462 | tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN | | 2472 | tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN | |
2463 | NV_MCP_SATA_CFG_20_PORT0_PWB_EN); | 2473 | NV_MCP_SATA_CFG_20_PORT0_PWB_EN); |
2464 | else | 2474 | else |
2465 | tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN | | 2475 | tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN | |
2466 | NV_MCP_SATA_CFG_20_PORT0_PWB_EN); | 2476 | NV_MCP_SATA_CFG_20_PORT0_PWB_EN); |
2467 | pp = host->ports[1]->private_data; | 2477 | pp = host->ports[1]->private_data; |
2468 | if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) | 2478 | if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) |
2469 | tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN | | 2479 | tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN | |
2470 | NV_MCP_SATA_CFG_20_PORT1_PWB_EN); | 2480 | NV_MCP_SATA_CFG_20_PORT1_PWB_EN); |
2471 | else | 2481 | else |
2472 | tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN | | 2482 | tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN | |
2473 | NV_MCP_SATA_CFG_20_PORT1_PWB_EN); | 2483 | NV_MCP_SATA_CFG_20_PORT1_PWB_EN); |
2474 | 2484 | ||
2475 | pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32); | 2485 | pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32); |
2476 | } | 2486 | } |
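The sata_nv.c probe hunks above move the swncq_enabled check ahead of the port-info selection, so a SWNCQ-flavored controller is downgraded to GENERIC before ppi[0] is chosen rather than after the NCQ-capable port info has already been picked. A compressed sketch of that ordering, with invented type names standing in for the driver's enum and tables:

#include <stdio.h>

/* Invented stand-ins for the driver's controller types and port-info table. */
enum ctrl_type { TYPE_GENERIC, TYPE_ADMA, TYPE_SWNCQ };

static const char *port_info[] = {
	[TYPE_GENERIC] = "generic ops",
	[TYPE_ADMA]    = "adma ops",
	[TYPE_SWNCQ]   = "swncq ops",
};

static int swncq_enabled = 0;	/* a module parameter in the real driver */

int main(void)
{
	enum ctrl_type type = TYPE_SWNCQ;

	/* Finalize the type first: if software NCQ is disabled, fall back to
	 * GENERIC before the type is used to pick the port operations. */
	if (type == TYPE_SWNCQ) {
		if (swncq_enabled)
			printf("Using SWNCQ mode\n");
		else
			type = TYPE_GENERIC;
	}

	printf("selected: %s\n", port_info[type]);
	return 0;
}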
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c index 903213153b5d..deb26f04f2d7 100644 --- a/drivers/ata/sata_promise.c +++ b/drivers/ata/sata_promise.c | |||
@@ -83,10 +83,12 @@ enum { | |||
83 | PDC_PCI_SYS_ERR = (1 << 22), /* PCI system error */ | 83 | PDC_PCI_SYS_ERR = (1 << 22), /* PCI system error */ |
84 | PDC1_PCI_PARITY_ERR = (1 << 23), /* PCI parity error (from SATA150 driver) */ | 84 | PDC1_PCI_PARITY_ERR = (1 << 23), /* PCI parity error (from SATA150 driver) */ |
85 | PDC1_ERR_MASK = PDC1_PCI_PARITY_ERR, | 85 | PDC1_ERR_MASK = PDC1_PCI_PARITY_ERR, |
86 | PDC2_ERR_MASK = PDC2_HTO_ERR | PDC2_ATA_HBA_ERR | PDC2_ATA_DMA_CNT_ERR, | 86 | PDC2_ERR_MASK = PDC2_HTO_ERR | PDC2_ATA_HBA_ERR | |
87 | PDC_ERR_MASK = (PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR | PDC_OVERRUN_ERR | 87 | PDC2_ATA_DMA_CNT_ERR, |
88 | | PDC_UNDERRUN_ERR | PDC_DRIVE_ERR | PDC_PCI_SYS_ERR | 88 | PDC_ERR_MASK = PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR | |
89 | | PDC1_ERR_MASK | PDC2_ERR_MASK), | 89 | PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR | |
90 | PDC_DRIVE_ERR | PDC_PCI_SYS_ERR | | ||
91 | PDC1_ERR_MASK | PDC2_ERR_MASK, | ||
90 | 92 | ||
91 | board_2037x = 0, /* FastTrak S150 TX2plus */ | 93 | board_2037x = 0, /* FastTrak S150 TX2plus */ |
92 | board_2037x_pata = 1, /* FastTrak S150 TX2plus PATA port */ | 94 | board_2037x_pata = 1, /* FastTrak S150 TX2plus PATA port */ |
@@ -695,19 +697,20 @@ static void pdc_irq_clear(struct ata_port *ap) | |||
695 | readl(mmio + PDC_INT_SEQMASK); | 697 | readl(mmio + PDC_INT_SEQMASK); |
696 | } | 698 | } |
697 | 699 | ||
698 | static inline int pdc_is_sataii_tx4(unsigned long flags) | 700 | static int pdc_is_sataii_tx4(unsigned long flags) |
699 | { | 701 | { |
700 | const unsigned long mask = PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS; | 702 | const unsigned long mask = PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS; |
701 | return (flags & mask) == mask; | 703 | return (flags & mask) == mask; |
702 | } | 704 | } |
703 | 705 | ||
704 | static inline unsigned int pdc_port_no_to_ata_no(unsigned int port_no, int is_sataii_tx4) | 706 | static unsigned int pdc_port_no_to_ata_no(unsigned int port_no, |
707 | int is_sataii_tx4) | ||
705 | { | 708 | { |
706 | static const unsigned char sataii_tx4_port_remap[4] = { 3, 1, 0, 2}; | 709 | static const unsigned char sataii_tx4_port_remap[4] = { 3, 1, 0, 2}; |
707 | return is_sataii_tx4 ? sataii_tx4_port_remap[port_no] : port_no; | 710 | return is_sataii_tx4 ? sataii_tx4_port_remap[port_no] : port_no; |
708 | } | 711 | } |
709 | 712 | ||
710 | static irqreturn_t pdc_interrupt (int irq, void *dev_instance) | 713 | static irqreturn_t pdc_interrupt(int irq, void *dev_instance) |
711 | { | 714 | { |
712 | struct ata_host *host = dev_instance; | 715 | struct ata_host *host = dev_instance; |
713 | struct ata_port *ap; | 716 | struct ata_port *ap; |
@@ -839,15 +842,16 @@ static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc) | |||
839 | 842 | ||
840 | static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) | 843 | static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) |
841 | { | 844 | { |
842 | WARN_ON (tf->protocol == ATA_PROT_DMA || | 845 | WARN_ON(tf->protocol == ATA_PROT_DMA || |
843 | tf->protocol == ATA_PROT_ATAPI_DMA); | 846 | tf->protocol == ATA_PROT_ATAPI_DMA); |
844 | ata_tf_load(ap, tf); | 847 | ata_tf_load(ap, tf); |
845 | } | 848 | } |
846 | 849 | ||
847 | static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf) | 850 | static void pdc_exec_command_mmio(struct ata_port *ap, |
851 | const struct ata_taskfile *tf) | ||
848 | { | 852 | { |
849 | WARN_ON (tf->protocol == ATA_PROT_DMA || | 853 | WARN_ON(tf->protocol == ATA_PROT_DMA || |
850 | tf->protocol == ATA_PROT_ATAPI_DMA); | 854 | tf->protocol == ATA_PROT_ATAPI_DMA); |
851 | ata_exec_command(ap, tf); | 855 | ata_exec_command(ap, tf); |
852 | } | 856 | } |
853 | 857 | ||
@@ -870,8 +874,11 @@ static int pdc_check_atapi_dma(struct ata_queued_cmd *qc) | |||
870 | } | 874 | } |
871 | /* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */ | 875 | /* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */ |
872 | if (scsicmd[0] == WRITE_10) { | 876 | if (scsicmd[0] == WRITE_10) { |
873 | unsigned int lba; | 877 | unsigned int lba = |
874 | lba = (scsicmd[2] << 24) | (scsicmd[3] << 16) | (scsicmd[4] << 8) | scsicmd[5]; | 878 | (scsicmd[2] << 24) | |
879 | (scsicmd[3] << 16) | | ||
880 | (scsicmd[4] << 8) | | ||
881 | scsicmd[5]; | ||
875 | if (lba >= 0xFFFF4FA2) | 882 | if (lba >= 0xFFFF4FA2) |
876 | pio = 1; | 883 | pio = 1; |
877 | } | 884 | } |
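The pdc_check_atapi_dma() hunk above only reflows the big-endian LBA assembly from the WRITE(10) CDB; the values are unchanged. For reference, a standalone sketch of the same arithmetic, showing where the 45150-sector figure in the comment comes from (the CDB contents here are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* WRITE (10) carries the LBA big-endian in CDB bytes 2..5. */
	uint8_t cdb[10] = { 0x2a, 0, 0xff, 0xff, 0x4f, 0xa2, 0, 0, 8, 0 };

	uint32_t lba = ((uint32_t)cdb[2] << 24) | ((uint32_t)cdb[3] << 16) |
		       ((uint32_t)cdb[4] << 8) | cdb[5];

	/* 0xFFFF4FA2 .. 0xFFFFFFFF is the last 45150 sectors of the 32-bit
	 * LBA space, the range the driver steers to PIO. */
	printf("lba=0x%08x, sectors to end of 32-bit space: %u\n",
	       lba, 0xFFFFFFFFu - lba + 1);
	return 0;
}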
@@ -956,7 +963,8 @@ static void pdc_host_init(struct ata_host *host) | |||
956 | writel(tmp, mmio + PDC_SLEW_CTL); | 963 | writel(tmp, mmio + PDC_SLEW_CTL); |
957 | } | 964 | } |
958 | 965 | ||
959 | static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | 966 | static int pdc_ata_init_one(struct pci_dev *pdev, |
967 | const struct pci_device_id *ent) | ||
960 | { | 968 | { |
961 | static int printed_version; | 969 | static int printed_version; |
962 | const struct ata_port_info *pi = &pdc_port_info[ent->driver_data]; | 970 | const struct ata_port_info *pi = &pdc_port_info[ent->driver_data]; |
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c index c4c4cd29eebb..6d43ba79e154 100644 --- a/drivers/ata/sata_qstor.c +++ b/drivers/ata/sata_qstor.c | |||
@@ -113,7 +113,7 @@ struct qs_port_priv { | |||
113 | 113 | ||
114 | static int qs_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); | 114 | static int qs_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); |
115 | static int qs_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); | 115 | static int qs_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); |
116 | static int qs_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); | 116 | static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
117 | static int qs_port_start(struct ata_port *ap); | 117 | static int qs_port_start(struct ata_port *ap); |
118 | static void qs_host_stop(struct ata_host *host); | 118 | static void qs_host_stop(struct ata_host *host); |
119 | static void qs_phy_reset(struct ata_port *ap); | 119 | static void qs_phy_reset(struct ata_port *ap); |
@@ -135,7 +135,6 @@ static struct scsi_host_template qs_ata_sht = { | |||
135 | .sg_tablesize = QS_MAX_PRD, | 135 | .sg_tablesize = QS_MAX_PRD, |
136 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | 136 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, |
137 | .emulated = ATA_SHT_EMULATED, | 137 | .emulated = ATA_SHT_EMULATED, |
138 | //FIXME .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
139 | .use_clustering = ENABLE_CLUSTERING, | 138 | .use_clustering = ENABLE_CLUSTERING, |
140 | .proc_name = DRV_NAME, | 139 | .proc_name = DRV_NAME, |
141 | .dma_boundary = QS_DMA_BOUNDARY, | 140 | .dma_boundary = QS_DMA_BOUNDARY, |
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index ea3a0ab7e027..4e6e381279cc 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c | |||
@@ -111,7 +111,7 @@ enum { | |||
111 | SIL_QUIRK_UDMA5MAX = (1 << 1), | 111 | SIL_QUIRK_UDMA5MAX = (1 << 1), |
112 | }; | 112 | }; |
113 | 113 | ||
114 | static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); | 114 | static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
115 | #ifdef CONFIG_PM | 115 | #ifdef CONFIG_PM |
116 | static int sil_pci_device_resume(struct pci_dev *pdev); | 116 | static int sil_pci_device_resume(struct pci_dev *pdev); |
117 | #endif | 117 | #endif |
@@ -138,7 +138,7 @@ static const struct pci_device_id sil_pci_tbl[] = { | |||
138 | 138 | ||
139 | /* TODO firmware versions should be added - eric */ | 139 | /* TODO firmware versions should be added - eric */ |
140 | static const struct sil_drivelist { | 140 | static const struct sil_drivelist { |
141 | const char * product; | 141 | const char *product; |
142 | unsigned int quirk; | 142 | unsigned int quirk; |
143 | } sil_blacklist [] = { | 143 | } sil_blacklist [] = { |
144 | { "ST320012AS", SIL_QUIRK_MOD15WRITE }, | 144 | { "ST320012AS", SIL_QUIRK_MOD15WRITE }, |
@@ -279,7 +279,7 @@ MODULE_LICENSE("GPL"); | |||
279 | MODULE_DEVICE_TABLE(pci, sil_pci_tbl); | 279 | MODULE_DEVICE_TABLE(pci, sil_pci_tbl); |
280 | MODULE_VERSION(DRV_VERSION); | 280 | MODULE_VERSION(DRV_VERSION); |
281 | 281 | ||
282 | static int slow_down = 0; | 282 | static int slow_down; |
283 | module_param(slow_down, int, 0444); | 283 | module_param(slow_down, int, 0444); |
284 | MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)"); | 284 | MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)"); |
285 | 285 | ||
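The slow_down change above (and the printed_version change in sata_sil24 further down) drops an explicit "= 0" initializer: file-scope statics are zero-initialized by the C standard, so the initializer adds nothing, checkpatch warns about it, and historically it pushed the object into .data rather than .bss. The preferred form is simply:

    static int slow_down;                   /* implicitly zero, lives in .bss */
    module_param(slow_down, int, 0444);     /* read-only via sysfs */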
@@ -332,7 +332,8 @@ static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed) | |||
332 | return 0; | 332 | return 0; |
333 | } | 333 | } |
334 | 334 | ||
335 | static inline void __iomem *sil_scr_addr(struct ata_port *ap, unsigned int sc_reg) | 335 | static inline void __iomem *sil_scr_addr(struct ata_port *ap, |
336 | unsigned int sc_reg) | ||
336 | { | 337 | { |
337 | void __iomem *offset = ap->ioaddr.scr_addr; | 338 | void __iomem *offset = ap->ioaddr.scr_addr; |
338 | 339 | ||
@@ -643,7 +644,7 @@ static void sil_init_controller(struct ata_host *host) | |||
643 | } | 644 | } |
644 | } | 645 | } |
645 | 646 | ||
646 | static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | 647 | static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
647 | { | 648 | { |
648 | static int printed_version; | 649 | static int printed_version; |
649 | int board_id = ent->driver_data; | 650 | int board_id = ent->driver_data; |
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index 26ebffc10f3e..3c481e0e0c03 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c | |||
@@ -674,7 +674,7 @@ static int sil24_do_softreset(struct ata_link *link, unsigned int *class, | |||
674 | 674 | ||
675 | /* put the port into known state */ | 675 | /* put the port into known state */ |
676 | if (sil24_init_port(ap)) { | 676 | if (sil24_init_port(ap)) { |
677 | reason ="port not ready"; | 677 | reason = "port not ready"; |
678 | goto err; | 678 | goto err; |
679 | } | 679 | } |
680 | 680 | ||
@@ -756,7 +756,8 @@ static int sil24_hardreset(struct ata_link *link, unsigned int *class, | |||
756 | 756 | ||
757 | writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT); | 757 | writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT); |
758 | tmp = ata_wait_register(port + PORT_CTRL_STAT, | 758 | tmp = ata_wait_register(port + PORT_CTRL_STAT, |
759 | PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10, tout_msec); | 759 | PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10, |
760 | tout_msec); | ||
760 | 761 | ||
761 | /* SStatus oscillates between zero and valid status after | 762 | /* SStatus oscillates between zero and valid status after |
762 | * DEV_RST, debounce it. | 763 | * DEV_RST, debounce it. |
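The ata_wait_register() call above is libata's poll-until-changed helper: it re-reads the register until the masked bits no longer equal the given value, sleeping between reads, bounded by a millisecond timeout. Roughly the following idiom (a sketch with an invented name, not the libata implementation):

    static u32 wait_register_sketch(void __iomem *reg, u32 mask, u32 val,
                                    unsigned long interval_msec,
                                    unsigned long timeout_msec)
    {
        unsigned long deadline = jiffies + msecs_to_jiffies(timeout_msec);
        u32 tmp = readl(reg);

        /* keep polling while the masked bits still match `val` */
        while ((tmp & mask) == val && time_before(jiffies, deadline)) {
            msleep(interval_msec);
            tmp = readl(reg);
        }
        return tmp;
    }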
@@ -1270,7 +1271,7 @@ static void sil24_init_controller(struct ata_host *host) | |||
1270 | PORT_CS_PORT_RST, 10, 100); | 1271 | PORT_CS_PORT_RST, 10, 100); |
1271 | if (tmp & PORT_CS_PORT_RST) | 1272 | if (tmp & PORT_CS_PORT_RST) |
1272 | dev_printk(KERN_ERR, host->dev, | 1273 | dev_printk(KERN_ERR, host->dev, |
1273 | "failed to clear port RST\n"); | 1274 | "failed to clear port RST\n"); |
1274 | } | 1275 | } |
1275 | 1276 | ||
1276 | /* configure port */ | 1277 | /* configure port */ |
@@ -1283,7 +1284,7 @@ static void sil24_init_controller(struct ata_host *host) | |||
1283 | 1284 | ||
1284 | static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 1285 | static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
1285 | { | 1286 | { |
1286 | static int printed_version = 0; | 1287 | static int printed_version; |
1287 | struct ata_port_info pi = sil24_port_info[ent->driver_data]; | 1288 | struct ata_port_info pi = sil24_port_info[ent->driver_data]; |
1288 | const struct ata_port_info *ppi[] = { &pi, NULL }; | 1289 | const struct ata_port_info *ppi[] = { &pi, NULL }; |
1289 | void __iomem * const *iomap; | 1290 | void __iomem * const *iomap; |
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c index f147dc7bf464..a01260a56432 100644 --- a/drivers/ata/sata_sis.c +++ b/drivers/ata/sata_sis.c | |||
@@ -63,17 +63,17 @@ enum { | |||
63 | GENCTL_IOMAPPED_SCR = (1 << 26), /* if set, SCRs are in IO space */ | 63 | GENCTL_IOMAPPED_SCR = (1 << 26), /* if set, SCRs are in IO space */ |
64 | }; | 64 | }; |
65 | 65 | ||
66 | static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); | 66 | static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
67 | static int sis_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val); | 67 | static int sis_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); |
68 | static int sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); | 68 | static int sis_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); |
69 | 69 | ||
70 | static const struct pci_device_id sis_pci_tbl[] = { | 70 | static const struct pci_device_id sis_pci_tbl[] = { |
71 | { PCI_VDEVICE(SI, 0x0180), sis_180 }, /* SiS 964/180 */ | 71 | { PCI_VDEVICE(SI, 0x0180), sis_180 }, /* SiS 964/180 */ |
72 | { PCI_VDEVICE(SI, 0x0181), sis_180 }, /* SiS 964/180 */ | 72 | { PCI_VDEVICE(SI, 0x0181), sis_180 }, /* SiS 964/180 */ |
73 | { PCI_VDEVICE(SI, 0x0182), sis_180 }, /* SiS 965/965L */ | 73 | { PCI_VDEVICE(SI, 0x0182), sis_180 }, /* SiS 965/965L */ |
74 | { PCI_VDEVICE(SI, 0x0183), sis_180 }, /* SiS 965/965L */ | 74 | { PCI_VDEVICE(SI, 0x0183), sis_180 }, /* SiS 965/965L */ |
75 | { PCI_VDEVICE(SI, 0x1182), sis_180 }, /* SiS 966/680 */ | 75 | { PCI_VDEVICE(SI, 0x1182), sis_180 }, /* SiS 966/680 */ |
76 | { PCI_VDEVICE(SI, 0x1183), sis_180 }, /* SiS 966/966L/968/680 */ | 76 | { PCI_VDEVICE(SI, 0x1183), sis_180 }, /* SiS 966/966L/968/680 */ |
77 | 77 | ||
78 | { } /* terminate list */ | 78 | { } /* terminate list */ |
79 | }; | 79 | }; |
@@ -149,24 +149,24 @@ static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg) | |||
149 | 149 | ||
150 | if (ap->port_no) { | 150 | if (ap->port_no) { |
151 | switch (pdev->device) { | 151 | switch (pdev->device) { |
152 | case 0x0180: | 152 | case 0x0180: |
153 | case 0x0181: | 153 | case 0x0181: |
154 | pci_read_config_byte(pdev, SIS_PMR, &pmr); | 154 | pci_read_config_byte(pdev, SIS_PMR, &pmr); |
155 | if ((pmr & SIS_PMR_COMBINED) == 0) | 155 | if ((pmr & SIS_PMR_COMBINED) == 0) |
156 | addr += SIS180_SATA1_OFS; | 156 | addr += SIS180_SATA1_OFS; |
157 | break; | 157 | break; |
158 | 158 | ||
159 | case 0x0182: | 159 | case 0x0182: |
160 | case 0x0183: | 160 | case 0x0183: |
161 | case 0x1182: | 161 | case 0x1182: |
162 | addr += SIS182_SATA1_OFS; | 162 | addr += SIS182_SATA1_OFS; |
163 | break; | 163 | break; |
164 | } | 164 | } |
165 | } | 165 | } |
166 | return addr; | 166 | return addr; |
167 | } | 167 | } |
168 | 168 | ||
169 | static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg, u32 *val) | 169 | static u32 sis_scr_cfg_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) |
170 | { | 170 | { |
171 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 171 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
172 | unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg); | 172 | unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg); |
@@ -190,7 +190,7 @@ static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg, u32 *val) | |||
190 | return 0; | 190 | return 0; |
191 | } | 191 | } |
192 | 192 | ||
193 | static void sis_scr_cfg_write (struct ata_port *ap, unsigned int sc_reg, u32 val) | 193 | static void sis_scr_cfg_write(struct ata_port *ap, unsigned int sc_reg, u32 val) |
194 | { | 194 | { |
195 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 195 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
196 | unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg); | 196 | unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg); |
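On these SiS parts the SCR registers are not always reachable through a BAR; the GENCTL_IOMAPPED_SCR bit noted earlier decides whether they sit in I/O space or have to be accessed through PCI configuration space, which is why the helpers above compute a per-port config offset and use pci_read_config_dword()/pci_write_config_dword(). A sketch of the resulting dispatch (the flag name here is illustrative, not the driver's):

    static int scr_read_sketch(struct ata_port *ap, unsigned int sc_reg, u32 *val)
    {
        if (ap->flags & USE_CFG_SPACE_SCR)      /* illustrative flag */
            pci_read_config_dword(to_pci_dev(ap->host->dev),
                                  get_scr_cfg_addr(ap, sc_reg), val);
        else
            *val = ioread32(ap->ioaddr.scr_addr + sc_reg * 4);
        return 0;
    }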
@@ -253,7 +253,7 @@ static int sis_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) | |||
253 | return 0; | 253 | return 0; |
254 | } | 254 | } |
255 | 255 | ||
256 | static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | 256 | static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
257 | { | 257 | { |
258 | static int printed_version; | 258 | static int printed_version; |
259 | struct ata_port_info pi = sis_port_info; | 259 | struct ata_port_info pi = sis_port_info; |
@@ -309,29 +309,33 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
309 | } else { | 309 | } else { |
310 | dev_printk(KERN_INFO, &pdev->dev, | 310 | dev_printk(KERN_INFO, &pdev->dev, |
311 | "Detected SiS 180/181 chipset in combined mode\n"); | 311 | "Detected SiS 180/181 chipset in combined mode\n"); |
312 | port2_start=0; | 312 | port2_start = 0; |
313 | pi.flags |= ATA_FLAG_SLAVE_POSS; | 313 | pi.flags |= ATA_FLAG_SLAVE_POSS; |
314 | } | 314 | } |
315 | break; | 315 | break; |
316 | 316 | ||
317 | case 0x0182: | 317 | case 0x0182: |
318 | case 0x0183: | 318 | case 0x0183: |
319 | pci_read_config_dword ( pdev, 0x6C, &val); | 319 | pci_read_config_dword(pdev, 0x6C, &val); |
320 | if (val & (1L << 31)) { | 320 | if (val & (1L << 31)) { |
321 | dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182/965 chipset\n"); | 321 | dev_printk(KERN_INFO, &pdev->dev, |
322 | "Detected SiS 182/965 chipset\n"); | ||
322 | pi.flags |= ATA_FLAG_SLAVE_POSS; | 323 | pi.flags |= ATA_FLAG_SLAVE_POSS; |
323 | } else { | 324 | } else { |
324 | dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182/965L chipset\n"); | 325 | dev_printk(KERN_INFO, &pdev->dev, |
326 | "Detected SiS 182/965L chipset\n"); | ||
325 | } | 327 | } |
326 | break; | 328 | break; |
327 | 329 | ||
328 | case 0x1182: | 330 | case 0x1182: |
329 | dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 1182/966/680 SATA controller\n"); | 331 | dev_printk(KERN_INFO, &pdev->dev, |
332 | "Detected SiS 1182/966/680 SATA controller\n"); | ||
330 | pi.flags |= ATA_FLAG_SLAVE_POSS; | 333 | pi.flags |= ATA_FLAG_SLAVE_POSS; |
331 | break; | 334 | break; |
332 | 335 | ||
333 | case 0x1183: | 336 | case 0x1183: |
334 | dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 1183/966/966L/968/680 controller in PATA mode\n"); | 337 | dev_printk(KERN_INFO, &pdev->dev, |
338 | "Detected SiS 1183/966/966L/968/680 controller in PATA mode\n"); | ||
335 | ppi[0] = &sis_info133_for_sata; | 339 | ppi[0] = &sis_info133_for_sata; |
336 | ppi[1] = &sis_info133_for_sata; | 340 | ppi[1] = &sis_info133_for_sata; |
337 | break; | 341 | break; |
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index 12d613c48c19..69f651e0bc98 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c | |||
@@ -182,7 +182,7 @@ static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | |||
182 | tf->hob_lbal = lbal >> 8; | 182 | tf->hob_lbal = lbal >> 8; |
183 | tf->hob_lbam = lbam >> 8; | 183 | tf->hob_lbam = lbam >> 8; |
184 | tf->hob_lbah = lbah >> 8; | 184 | tf->hob_lbah = lbah >> 8; |
185 | } | 185 | } |
186 | } | 186 | } |
187 | 187 | ||
188 | /** | 188 | /** |
@@ -193,7 +193,7 @@ static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | |||
193 | * spin_lock_irqsave(host lock) | 193 | * spin_lock_irqsave(host lock) |
194 | */ | 194 | */ |
195 | 195 | ||
196 | static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc) | 196 | static void k2_bmdma_setup_mmio(struct ata_queued_cmd *qc) |
197 | { | 197 | { |
198 | struct ata_port *ap = qc->ap; | 198 | struct ata_port *ap = qc->ap; |
199 | unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); | 199 | unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); |
@@ -224,7 +224,7 @@ static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc) | |||
224 | * spin_lock_irqsave(host lock) | 224 | * spin_lock_irqsave(host lock) |
225 | */ | 225 | */ |
226 | 226 | ||
227 | static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc) | 227 | static void k2_bmdma_start_mmio(struct ata_queued_cmd *qc) |
228 | { | 228 | { |
229 | struct ata_port *ap = qc->ap; | 229 | struct ata_port *ap = qc->ap; |
230 | void __iomem *mmio = ap->ioaddr.bmdma_addr; | 230 | void __iomem *mmio = ap->ioaddr.bmdma_addr; |
@@ -255,7 +255,7 @@ static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc) | |||
255 | 255 | ||
256 | static u8 k2_stat_check_status(struct ata_port *ap) | 256 | static u8 k2_stat_check_status(struct ata_port *ap) |
257 | { | 257 | { |
258 | return readl(ap->ioaddr.status_addr); | 258 | return readl(ap->ioaddr.status_addr); |
259 | } | 259 | } |
260 | 260 | ||
261 | #ifdef CONFIG_PPC_OF | 261 | #ifdef CONFIG_PPC_OF |
@@ -395,7 +395,7 @@ static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base) | |||
395 | } | 395 | } |
396 | 396 | ||
397 | 397 | ||
398 | static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | 398 | static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
399 | { | 399 | { |
400 | static int printed_version; | 400 | static int printed_version; |
401 | const struct ata_port_info *ppi[] = | 401 | const struct ata_port_info *ppi[] = |
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c index b6026bceccd1..4d857185f33b 100644 --- a/drivers/ata/sata_sx4.c +++ b/drivers/ata/sata_sx4.c | |||
@@ -212,9 +212,9 @@ struct pdc_host_priv { | |||
212 | }; | 212 | }; |
213 | 213 | ||
214 | 214 | ||
215 | static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); | 215 | static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
216 | static void pdc_eng_timeout(struct ata_port *ap); | 216 | static void pdc_eng_timeout(struct ata_port *ap); |
217 | static void pdc_20621_phy_reset (struct ata_port *ap); | 217 | static void pdc_20621_phy_reset(struct ata_port *ap); |
218 | static int pdc_port_start(struct ata_port *ap); | 218 | static int pdc_port_start(struct ata_port *ap); |
219 | static void pdc20621_qc_prep(struct ata_queued_cmd *qc); | 219 | static void pdc20621_qc_prep(struct ata_queued_cmd *qc); |
220 | static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); | 220 | static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); |
@@ -320,16 +320,16 @@ static int pdc_port_start(struct ata_port *ap) | |||
320 | return 0; | 320 | return 0; |
321 | } | 321 | } |
322 | 322 | ||
323 | static void pdc_20621_phy_reset (struct ata_port *ap) | 323 | static void pdc_20621_phy_reset(struct ata_port *ap) |
324 | { | 324 | { |
325 | VPRINTK("ENTER\n"); | 325 | VPRINTK("ENTER\n"); |
326 | ap->cbl = ATA_CBL_SATA; | 326 | ap->cbl = ATA_CBL_SATA; |
327 | ata_port_probe(ap); | 327 | ata_port_probe(ap); |
328 | ata_bus_reset(ap); | 328 | ata_bus_reset(ap); |
329 | } | 329 | } |
330 | 330 | ||
331 | static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf, | 331 | static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf, |
332 | unsigned int portno, | 332 | unsigned int portno, |
333 | unsigned int total_len) | 333 | unsigned int total_len) |
334 | { | 334 | { |
335 | u32 addr; | 335 | u32 addr; |
@@ -351,7 +351,7 @@ static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf, | |||
351 | } | 351 | } |
352 | 352 | ||
353 | static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf, | 353 | static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf, |
354 | unsigned int portno, | 354 | unsigned int portno, |
355 | unsigned int total_len) | 355 | unsigned int total_len) |
356 | { | 356 | { |
357 | u32 addr; | 357 | u32 addr; |
@@ -711,8 +711,8 @@ static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc) | |||
711 | return ata_qc_issue_prot(qc); | 711 | return ata_qc_issue_prot(qc); |
712 | } | 712 | } |
713 | 713 | ||
714 | static inline unsigned int pdc20621_host_intr( struct ata_port *ap, | 714 | static inline unsigned int pdc20621_host_intr(struct ata_port *ap, |
715 | struct ata_queued_cmd *qc, | 715 | struct ata_queued_cmd *qc, |
716 | unsigned int doing_hdma, | 716 | unsigned int doing_hdma, |
717 | void __iomem *mmio) | 717 | void __iomem *mmio) |
718 | { | 718 | { |
@@ -803,7 +803,7 @@ static void pdc20621_irq_clear(struct ata_port *ap) | |||
803 | readl(mmio + PDC_20621_SEQMASK); | 803 | readl(mmio + PDC_20621_SEQMASK); |
804 | } | 804 | } |
805 | 805 | ||
806 | static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance) | 806 | static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance) |
807 | { | 807 | { |
808 | struct ata_host *host = dev_instance; | 808 | struct ata_host *host = dev_instance; |
809 | struct ata_port *ap; | 809 | struct ata_port *ap; |
@@ -836,9 +836,9 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance) | |||
836 | return IRQ_NONE; | 836 | return IRQ_NONE; |
837 | } | 837 | } |
838 | 838 | ||
839 | spin_lock(&host->lock); | 839 | spin_lock(&host->lock); |
840 | 840 | ||
841 | for (i = 1; i < 9; i++) { | 841 | for (i = 1; i < 9; i++) { |
842 | port_no = i - 1; | 842 | port_no = i - 1; |
843 | if (port_no > 3) | 843 | if (port_no > 3) |
844 | port_no -= 4; | 844 | port_no -= 4; |
@@ -859,7 +859,7 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance) | |||
859 | } | 859 | } |
860 | } | 860 | } |
861 | 861 | ||
862 | spin_unlock(&host->lock); | 862 | spin_unlock(&host->lock); |
863 | 863 | ||
864 | VPRINTK("mask == 0x%x\n", mask); | 864 | VPRINTK("mask == 0x%x\n", mask); |
865 | 865 | ||
@@ -906,16 +906,16 @@ static void pdc_eng_timeout(struct ata_port *ap) | |||
906 | 906 | ||
907 | static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) | 907 | static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) |
908 | { | 908 | { |
909 | WARN_ON (tf->protocol == ATA_PROT_DMA || | 909 | WARN_ON(tf->protocol == ATA_PROT_DMA || |
910 | tf->protocol == ATA_PROT_NODATA); | 910 | tf->protocol == ATA_PROT_NODATA); |
911 | ata_tf_load(ap, tf); | 911 | ata_tf_load(ap, tf); |
912 | } | 912 | } |
913 | 913 | ||
914 | 914 | ||
915 | static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf) | 915 | static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf) |
916 | { | 916 | { |
917 | WARN_ON (tf->protocol == ATA_PROT_DMA || | 917 | WARN_ON(tf->protocol == ATA_PROT_DMA || |
918 | tf->protocol == ATA_PROT_NODATA); | 918 | tf->protocol == ATA_PROT_NODATA); |
919 | ata_exec_command(ap, tf); | 919 | ata_exec_command(ap, tf); |
920 | } | 920 | } |
921 | 921 | ||
@@ -953,7 +953,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource, | |||
953 | mmio += PDC_CHIP0_OFS; | 953 | mmio += PDC_CHIP0_OFS; |
954 | 954 | ||
955 | page_mask = 0x00; | 955 | page_mask = 0x00; |
956 | window_size = 0x2000 * 4; /* 32K byte uchar size */ | 956 | window_size = 0x2000 * 4; /* 32K byte uchar size */ |
957 | idx = (u16) (offset / window_size); | 957 | idx = (u16) (offset / window_size); |
958 | 958 | ||
959 | writel(0x01, mmio + PDC_GENERAL_CTLR); | 959 | writel(0x01, mmio + PDC_GENERAL_CTLR); |
@@ -979,7 +979,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource, | |||
979 | window_size / 4); | 979 | window_size / 4); |
980 | psource += window_size; | 980 | psource += window_size; |
981 | size -= window_size; | 981 | size -= window_size; |
982 | idx ++; | 982 | idx++; |
983 | } | 983 | } |
984 | 984 | ||
985 | if (size) { | 985 | if (size) { |
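The DIMM copy routines here reach the card's local SDRAM through a sliding window: window_size is 0x2000 * 4 = 32 KiB, idx selects which 32 KiB page of board memory the window exposes, and the loop advances a chunk at a time. The arithmetic in isolation (the helper and its names are illustrative, not driver code):

    static void dimm_copy_sketch(void *dst, unsigned int offset, unsigned int size)
    {
        const unsigned int window_size = 0x2000 * 4;    /* 32 KiB window  */
        unsigned int idx = offset / window_size;        /* which window   */
        unsigned int pos = offset % window_size;        /* offset inside  */

        while (size) {
            unsigned int chunk = min(size, window_size - pos);

            /* program the window register with `idx`, then copy `chunk`
             * bytes starting at `pos` inside the window */
            dst = (char *)dst + chunk;
            size -= chunk;
            pos = 0;
            idx++;
        }
    }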
@@ -1008,7 +1008,7 @@ static void pdc20621_put_to_dimm(struct ata_host *host, void *psource, | |||
1008 | mmio += PDC_CHIP0_OFS; | 1008 | mmio += PDC_CHIP0_OFS; |
1009 | 1009 | ||
1010 | page_mask = 0x00; | 1010 | page_mask = 0x00; |
1011 | window_size = 0x2000 * 4; /* 32K byte uchar size */ | 1011 | window_size = 0x2000 * 4; /* 32K byte uchar size */ |
1012 | idx = (u16) (offset / window_size); | 1012 | idx = (u16) (offset / window_size); |
1013 | 1013 | ||
1014 | writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); | 1014 | writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); |
@@ -1031,7 +1031,7 @@ static void pdc20621_put_to_dimm(struct ata_host *host, void *psource, | |||
1031 | readl(mmio + PDC_GENERAL_CTLR); | 1031 | readl(mmio + PDC_GENERAL_CTLR); |
1032 | psource += window_size; | 1032 | psource += window_size; |
1033 | size -= window_size; | 1033 | size -= window_size; |
1034 | idx ++; | 1034 | idx++; |
1035 | } | 1035 | } |
1036 | 1036 | ||
1037 | if (size) { | 1037 | if (size) { |
@@ -1050,7 +1050,7 @@ static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device, | |||
1050 | void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; | 1050 | void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; |
1051 | u32 i2creg = 0; | 1051 | u32 i2creg = 0; |
1052 | u32 status; | 1052 | u32 status; |
1053 | u32 count =0; | 1053 | u32 count = 0; |
1054 | 1054 | ||
1055 | /* hard-code chip #0 */ | 1055 | /* hard-code chip #0 */ |
1056 | mmio += PDC_CHIP0_OFS; | 1056 | mmio += PDC_CHIP0_OFS; |
@@ -1082,21 +1082,21 @@ static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device, | |||
1082 | 1082 | ||
1083 | static int pdc20621_detect_dimm(struct ata_host *host) | 1083 | static int pdc20621_detect_dimm(struct ata_host *host) |
1084 | { | 1084 | { |
1085 | u32 data=0 ; | 1085 | u32 data = 0; |
1086 | if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, | 1086 | if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, |
1087 | PDC_DIMM_SPD_SYSTEM_FREQ, &data)) { | 1087 | PDC_DIMM_SPD_SYSTEM_FREQ, &data)) { |
1088 | if (data == 100) | 1088 | if (data == 100) |
1089 | return 100; | 1089 | return 100; |
1090 | } else | 1090 | } else |
1091 | return 0; | 1091 | return 0; |
1092 | 1092 | ||
1093 | if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) { | 1093 | if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) { |
1094 | if(data <= 0x75) | 1094 | if (data <= 0x75) |
1095 | return 133; | 1095 | return 133; |
1096 | } else | 1096 | } else |
1097 | return 0; | 1097 | return 0; |
1098 | 1098 | ||
1099 | return 0; | 1099 | return 0; |
1100 | } | 1100 | } |
1101 | 1101 | ||
1102 | 1102 | ||
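The detection logic above reads the DIMM's SPD EEPROM over I2C: if the SYSTEM_FREQ byte reports 100 the module runs at 100 MHz; otherwise SPD byte 9, the minimum clock cycle time (upper nibble in ns, lower nibble in tenths of ns under the JEDEC SDR layout), is checked. 0x75 decodes to 7.5 ns, roughly 133 MHz, so any value at or below it qualifies the module for 133 MHz operation. A tiny illustrative conversion:

    /* Illustrative only: convert SPD byte 9 to an approximate clock rate. */
    static unsigned int spd_cycle_to_mhz(u8 spd9)
    {
        unsigned int tenths_of_ns = (spd9 >> 4) * 10 + (spd9 & 0x0f);

        return tenths_of_ns ? 10000 / tenths_of_ns : 0;    /* 0x75 -> 133 */
    }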
@@ -1104,8 +1104,8 @@ static int pdc20621_prog_dimm0(struct ata_host *host) | |||
1104 | { | 1104 | { |
1105 | u32 spd0[50]; | 1105 | u32 spd0[50]; |
1106 | u32 data = 0; | 1106 | u32 data = 0; |
1107 | int size, i; | 1107 | int size, i; |
1108 | u8 bdimmsize; | 1108 | u8 bdimmsize; |
1109 | void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; | 1109 | void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; |
1110 | static const struct { | 1110 | static const struct { |
1111 | unsigned int reg; | 1111 | unsigned int reg; |
@@ -1128,40 +1128,40 @@ static int pdc20621_prog_dimm0(struct ata_host *host) | |||
1128 | /* hard-code chip #0 */ | 1128 | /* hard-code chip #0 */ |
1129 | mmio += PDC_CHIP0_OFS; | 1129 | mmio += PDC_CHIP0_OFS; |
1130 | 1130 | ||
1131 | for(i=0; i<ARRAY_SIZE(pdc_i2c_read_data); i++) | 1131 | for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++) |
1132 | pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, | 1132 | pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, |
1133 | pdc_i2c_read_data[i].reg, | 1133 | pdc_i2c_read_data[i].reg, |
1134 | &spd0[pdc_i2c_read_data[i].ofs]); | 1134 | &spd0[pdc_i2c_read_data[i].ofs]); |
1135 | 1135 | ||
1136 | data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4); | 1136 | data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4); |
1137 | data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) | | 1137 | data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) | |
1138 | ((((spd0[27] + 9) / 10) - 1) << 8) ; | 1138 | ((((spd0[27] + 9) / 10) - 1) << 8) ; |
1139 | data |= (((((spd0[29] > spd0[28]) | 1139 | data |= (((((spd0[29] > spd0[28]) |
1140 | ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10; | 1140 | ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10; |
1141 | data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12; | 1141 | data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12; |
1142 | 1142 | ||
1143 | if (spd0[18] & 0x08) | 1143 | if (spd0[18] & 0x08) |
1144 | data |= ((0x03) << 14); | 1144 | data |= ((0x03) << 14); |
1145 | else if (spd0[18] & 0x04) | 1145 | else if (spd0[18] & 0x04) |
1146 | data |= ((0x02) << 14); | 1146 | data |= ((0x02) << 14); |
1147 | else if (spd0[18] & 0x01) | 1147 | else if (spd0[18] & 0x01) |
1148 | data |= ((0x01) << 14); | 1148 | data |= ((0x01) << 14); |
1149 | else | 1149 | else |
1150 | data |= (0 << 14); | 1150 | data |= (0 << 14); |
1151 | 1151 | ||
1152 | /* | 1152 | /* |
1153 | Calculate the size of bDIMMSize (power of 2) and | 1153 | Calculate the size of bDIMMSize (power of 2) and |
1154 | merge the DIMM size by program start/end address. | 1154 | merge the DIMM size by program start/end address. |
1155 | */ | 1155 | */ |
1156 | 1156 | ||
1157 | bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3; | 1157 | bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3; |
1158 | size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */ | 1158 | size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */ |
1159 | data |= (((size / 16) - 1) << 16); | 1159 | data |= (((size / 16) - 1) << 16); |
1160 | data |= (0 << 23); | 1160 | data |= (0 << 23); |
1161 | data |= 8; | 1161 | data |= 8; |
1162 | writel(data, mmio + PDC_DIMM0_CONTROL); | 1162 | writel(data, mmio + PDC_DIMM0_CONTROL); |
1163 | readl(mmio + PDC_DIMM0_CONTROL); | 1163 | readl(mmio + PDC_DIMM0_CONTROL); |
1164 | return size; | 1164 | return size; |
1165 | } | 1165 | } |
1166 | 1166 | ||
1167 | 1167 | ||
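The size computation in pdc20621_prog_dimm0() treats bdimmsize as log2 of the module size in bytes: shifting 1 left by it gives bytes, shifting right by 20 gives whole megabytes, and (size / 16 - 1) packs the count of 16 MB units into bits 16 and up of the DIMM0 control word. Worked through for an assumed 256 MB module (example values only; a real bdimmsize comes from the SPD data):

    unsigned int bdimmsize = 28;                /* log2(256 MB)            */
    unsigned int bytes = 1u << bdimmsize;       /* 0x10000000              */
    unsigned int mb = bytes >> 20;              /* 256                     */
    u32 field = ((mb / 16) - 1) << 16;          /* 15 << 16 = 0x000F0000   */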
@@ -1172,9 +1172,9 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host) | |||
1172 | void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; | 1172 | void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; |
1173 | 1173 | ||
1174 | /* hard-code chip #0 */ | 1174 | /* hard-code chip #0 */ |
1175 | mmio += PDC_CHIP0_OFS; | 1175 | mmio += PDC_CHIP0_OFS; |
1176 | 1176 | ||
1177 | /* | 1177 | /* |
1178 | Set To Default : DIMM Module Global Control Register (0x022259F1) | 1178 | Set To Default : DIMM Module Global Control Register (0x022259F1) |
1179 | DIMM Arbitration Disable (bit 20) | 1179 | DIMM Arbitration Disable (bit 20) |
1180 | DIMM Data/Control Output Driving Selection (bit12 - bit15) | 1180 | DIMM Data/Control Output Driving Selection (bit12 - bit15) |
@@ -1193,40 +1193,40 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host) | |||
1193 | writel(data, mmio + PDC_SDRAM_CONTROL); | 1193 | writel(data, mmio + PDC_SDRAM_CONTROL); |
1194 | readl(mmio + PDC_SDRAM_CONTROL); | 1194 | readl(mmio + PDC_SDRAM_CONTROL); |
1195 | printk(KERN_ERR "Local DIMM ECC Enabled\n"); | 1195 | printk(KERN_ERR "Local DIMM ECC Enabled\n"); |
1196 | } | 1196 | } |
1197 | 1197 | ||
1198 | /* DIMM Initialization Select/Enable (bit 18/19) */ | 1198 | /* DIMM Initialization Select/Enable (bit 18/19) */ |
1199 | data &= (~(1<<18)); | 1199 | data &= (~(1<<18)); |
1200 | data |= (1<<19); | 1200 | data |= (1<<19); |
1201 | writel(data, mmio + PDC_SDRAM_CONTROL); | 1201 | writel(data, mmio + PDC_SDRAM_CONTROL); |
1202 | 1202 | ||
1203 | error = 1; | 1203 | error = 1; |
1204 | for (i = 1; i <= 10; i++) { /* polling ~5 secs */ | 1204 | for (i = 1; i <= 10; i++) { /* polling ~5 secs */ |
1205 | data = readl(mmio + PDC_SDRAM_CONTROL); | 1205 | data = readl(mmio + PDC_SDRAM_CONTROL); |
1206 | if (!(data & (1<<19))) { | 1206 | if (!(data & (1<<19))) { |
1207 | error = 0; | 1207 | error = 0; |
1208 | break; | 1208 | break; |
1209 | } | 1209 | } |
1210 | msleep(i*100); | 1210 | msleep(i*100); |
1211 | } | 1211 | } |
1212 | return error; | 1212 | return error; |
1213 | } | 1213 | } |
1214 | 1214 | ||
1215 | 1215 | ||
1216 | static unsigned int pdc20621_dimm_init(struct ata_host *host) | 1216 | static unsigned int pdc20621_dimm_init(struct ata_host *host) |
1217 | { | 1217 | { |
1218 | int speed, size, length; | 1218 | int speed, size, length; |
1219 | u32 addr,spd0,pci_status; | 1219 | u32 addr, spd0, pci_status; |
1220 | u32 tmp=0; | 1220 | u32 tmp = 0; |
1221 | u32 time_period=0; | 1221 | u32 time_period = 0; |
1222 | u32 tcount=0; | 1222 | u32 tcount = 0; |
1223 | u32 ticks=0; | 1223 | u32 ticks = 0; |
1224 | u32 clock=0; | 1224 | u32 clock = 0; |
1225 | u32 fparam=0; | 1225 | u32 fparam = 0; |
1226 | void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; | 1226 | void __iomem *mmio = host->iomap[PDC_MMIO_BAR]; |
1227 | 1227 | ||
1228 | /* hard-code chip #0 */ | 1228 | /* hard-code chip #0 */ |
1229 | mmio += PDC_CHIP0_OFS; | 1229 | mmio += PDC_CHIP0_OFS; |
1230 | 1230 | ||
1231 | /* Initialize PLL based upon PCI Bus Frequency */ | 1231 | /* Initialize PLL based upon PCI Bus Frequency */ |
1232 | 1232 | ||
@@ -1254,7 +1254,7 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host) | |||
1254 | If SX4 is on PCI-X bus, after 3 seconds, the timer counter | 1254 | If SX4 is on PCI-X bus, after 3 seconds, the timer counter |
1255 | register should be >= (0xffffffff - 3x10^8). | 1255 | register should be >= (0xffffffff - 3x10^8). |
1256 | */ | 1256 | */ |
1257 | if(tcount >= PCI_X_TCOUNT) { | 1257 | if (tcount >= PCI_X_TCOUNT) { |
1258 | ticks = (time_period - tcount); | 1258 | ticks = (time_period - tcount); |
1259 | VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks); | 1259 | VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks); |
1260 | 1260 | ||
@@ -1285,41 +1285,43 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host) | |||
1285 | if (!(speed = pdc20621_detect_dimm(host))) { | 1285 | if (!(speed = pdc20621_detect_dimm(host))) { |
1286 | printk(KERN_ERR "Detect Local DIMM Fail\n"); | 1286 | printk(KERN_ERR "Detect Local DIMM Fail\n"); |
1287 | return 1; /* DIMM error */ | 1287 | return 1; /* DIMM error */ |
1288 | } | 1288 | } |
1289 | VPRINTK("Local DIMM Speed = %d\n", speed); | 1289 | VPRINTK("Local DIMM Speed = %d\n", speed); |
1290 | 1290 | ||
1291 | /* Programming DIMM0 Module Control Register (index_CID0:80h) */ | 1291 | /* Programming DIMM0 Module Control Register (index_CID0:80h) */ |
1292 | size = pdc20621_prog_dimm0(host); | 1292 | size = pdc20621_prog_dimm0(host); |
1293 | VPRINTK("Local DIMM Size = %dMB\n",size); | 1293 | VPRINTK("Local DIMM Size = %dMB\n", size); |
1294 | 1294 | ||
1295 | /* Programming DIMM Module Global Control Register (index_CID0:88h) */ | 1295 | /* Programming DIMM Module Global Control Register (index_CID0:88h) */ |
1296 | if (pdc20621_prog_dimm_global(host)) { | 1296 | if (pdc20621_prog_dimm_global(host)) { |
1297 | printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n"); | 1297 | printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n"); |
1298 | return 1; | 1298 | return 1; |
1299 | } | 1299 | } |
1300 | 1300 | ||
1301 | #ifdef ATA_VERBOSE_DEBUG | 1301 | #ifdef ATA_VERBOSE_DEBUG |
1302 | { | 1302 | { |
1303 | u8 test_parttern1[40] = {0x55,0xAA,'P','r','o','m','i','s','e',' ', | 1303 | u8 test_parttern1[40] = |
1304 | 'N','o','t',' ','Y','e','t',' ','D','e','f','i','n','e','d',' ', | 1304 | {0x55,0xAA,'P','r','o','m','i','s','e',' ', |
1305 | '1','.','1','0', | 1305 | 'N','o','t',' ','Y','e','t',' ', |
1306 | '9','8','0','3','1','6','1','2',0,0}; | 1306 | 'D','e','f','i','n','e','d',' ', |
1307 | '1','.','1','0', | ||
1308 | '9','8','0','3','1','6','1','2',0,0}; | ||
1307 | u8 test_parttern2[40] = {0}; | 1309 | u8 test_parttern2[40] = {0}; |
1308 | 1310 | ||
1309 | pdc20621_put_to_dimm(host, (void *) test_parttern2, 0x10040, 40); | 1311 | pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40); |
1310 | pdc20621_put_to_dimm(host, (void *) test_parttern2, 0x40, 40); | 1312 | pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40); |
1311 | 1313 | ||
1312 | pdc20621_put_to_dimm(host, (void *) test_parttern1, 0x10040, 40); | 1314 | pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40); |
1313 | pdc20621_get_from_dimm(host, (void *) test_parttern2, 0x40, 40); | 1315 | pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40); |
1314 | printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], | 1316 | printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], |
1315 | test_parttern2[1], &(test_parttern2[2])); | 1317 | test_parttern2[1], &(test_parttern2[2])); |
1316 | pdc20621_get_from_dimm(host, (void *) test_parttern2, 0x10040, | 1318 | pdc20621_get_from_dimm(host, test_parttern2, 0x10040, |
1317 | 40); | 1319 | 40); |
1318 | printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], | 1320 | printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], |
1319 | test_parttern2[1], &(test_parttern2[2])); | 1321 | test_parttern2[1], &(test_parttern2[2])); |
1320 | 1322 | ||
1321 | pdc20621_put_to_dimm(host, (void *) test_parttern1, 0x40, 40); | 1323 | pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40); |
1322 | pdc20621_get_from_dimm(host, (void *) test_parttern2, 0x40, 40); | 1324 | pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40); |
1323 | printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], | 1325 | printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], |
1324 | test_parttern2[1], &(test_parttern2[2])); | 1326 | test_parttern2[1], &(test_parttern2[2])); |
1325 | } | 1327 | } |
@@ -1375,7 +1377,8 @@ static void pdc_20621_init(struct ata_host *host) | |||
1375 | readl(mmio + PDC_HDMA_CTLSTAT); /* flush */ | 1377 | readl(mmio + PDC_HDMA_CTLSTAT); /* flush */ |
1376 | } | 1378 | } |
1377 | 1379 | ||
1378 | static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | 1380 | static int pdc_sata_init_one(struct pci_dev *pdev, |
1381 | const struct pci_device_id *ent) | ||
1379 | { | 1382 | { |
1380 | static int printed_version; | 1383 | static int printed_version; |
1381 | const struct ata_port_info *ppi[] = | 1384 | const struct ata_port_info *ppi[] = |
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c index d394da085ae4..e710e71b7b92 100644 --- a/drivers/ata/sata_uli.c +++ b/drivers/ata/sata_uli.c | |||
@@ -56,9 +56,9 @@ struct uli_priv { | |||
56 | unsigned int scr_cfg_addr[uli_max_ports]; | 56 | unsigned int scr_cfg_addr[uli_max_ports]; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); | 59 | static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
60 | static int uli_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val); | 60 | static int uli_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); |
61 | static int uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); | 61 | static int uli_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); |
62 | 62 | ||
63 | static const struct pci_device_id uli_pci_tbl[] = { | 63 | static const struct pci_device_id uli_pci_tbl[] = { |
64 | { PCI_VDEVICE(AL, 0x5289), uli_5289 }, | 64 | { PCI_VDEVICE(AL, 0x5289), uli_5289 }, |
@@ -143,7 +143,7 @@ static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg) | |||
143 | return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg); | 143 | return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg); |
144 | } | 144 | } |
145 | 145 | ||
146 | static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg) | 146 | static u32 uli_scr_cfg_read(struct ata_port *ap, unsigned int sc_reg) |
147 | { | 147 | { |
148 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 148 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
149 | unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg); | 149 | unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg); |
@@ -153,7 +153,7 @@ static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg) | |||
153 | return val; | 153 | return val; |
154 | } | 154 | } |
155 | 155 | ||
156 | static void uli_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val) | 156 | static void uli_scr_cfg_write(struct ata_port *ap, unsigned int scr, u32 val) |
157 | { | 157 | { |
158 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | 158 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
159 | unsigned int cfg_addr = get_scr_cfg_addr(ap, scr); | 159 | unsigned int cfg_addr = get_scr_cfg_addr(ap, scr); |
@@ -161,7 +161,7 @@ static void uli_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val) | |||
161 | pci_write_config_dword(pdev, cfg_addr, val); | 161 | pci_write_config_dword(pdev, cfg_addr, val); |
162 | } | 162 | } |
163 | 163 | ||
164 | static int uli_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val) | 164 | static int uli_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) |
165 | { | 165 | { |
166 | if (sc_reg > SCR_CONTROL) | 166 | if (sc_reg > SCR_CONTROL) |
167 | return -EINVAL; | 167 | return -EINVAL; |
@@ -170,16 +170,16 @@ static int uli_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val) | |||
170 | return 0; | 170 | return 0; |
171 | } | 171 | } |
172 | 172 | ||
173 | static int uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) | 173 | static int uli_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) |
174 | { | 174 | { |
175 | if (sc_reg > SCR_CONTROL) //SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0 | 175 | if (sc_reg > SCR_CONTROL) //SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0 |
176 | return -EINVAL; | 176 | return -EINVAL; |
177 | 177 | ||
178 | uli_scr_cfg_write(ap, sc_reg, val); | 178 | uli_scr_cfg_write(ap, sc_reg, val); |
179 | return 0; | 179 | return 0; |
180 | } | 180 | } |
181 | 181 | ||
182 | static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | 182 | static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
183 | { | 183 | { |
184 | static int printed_version; | 184 | static int printed_version; |
185 | const struct ata_port_info *ppi[] = { &uli_port_info, NULL }; | 185 | const struct ata_port_info *ppi[] = { &uli_port_info, NULL }; |
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index cc6ee0890f56..3ef072ff319d 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Maintained by: Jeff Garzik <jgarzik@pobox.com> | 4 | * Maintained by: Jeff Garzik <jgarzik@pobox.com> |
5 | * Please ALWAYS copy linux-ide@vger.kernel.org | 5 | * Please ALWAYS copy linux-ide@vger.kernel.org |
6 | on emails. | 6 | * on emails. |
7 | * | 7 | * |
8 | * Copyright 2003-2004 Red Hat, Inc. All rights reserved. | 8 | * Copyright 2003-2004 Red Hat, Inc. All rights reserved. |
9 | * Copyright 2003-2004 Jeff Garzik | 9 | * Copyright 2003-2004 Jeff Garzik |
@@ -69,7 +69,7 @@ enum { | |||
69 | SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */ | 69 | SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */ |
70 | }; | 70 | }; |
71 | 71 | ||
72 | static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); | 72 | static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
73 | static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); | 73 | static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); |
74 | static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); | 74 | static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); |
75 | static void svia_noop_freeze(struct ata_port *ap); | 75 | static void svia_noop_freeze(struct ata_port *ap); |
@@ -372,12 +372,12 @@ static const unsigned int vt6421_bar_sizes[] = { | |||
372 | 16, 16, 16, 16, 32, 128 | 372 | 16, 16, 16, 16, 32, 128 |
373 | }; | 373 | }; |
374 | 374 | ||
375 | static void __iomem * svia_scr_addr(void __iomem *addr, unsigned int port) | 375 | static void __iomem *svia_scr_addr(void __iomem *addr, unsigned int port) |
376 | { | 376 | { |
377 | return addr + (port * 128); | 377 | return addr + (port * 128); |
378 | } | 378 | } |
379 | 379 | ||
380 | static void __iomem * vt6421_scr_addr(void __iomem *addr, unsigned int port) | 380 | static void __iomem *vt6421_scr_addr(void __iomem *addr, unsigned int port) |
381 | { | 381 | { |
382 | return addr + (port * 64); | 382 | return addr + (port * 64); |
383 | } | 383 | } |
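The only difference between the two helpers above is the per-port stride of the SCR register block: 128 bytes on the original layout versus 64 bytes on the VT6421, so lookup is plain pointer arithmetic on the mapped base (the base pointer below is illustrative):

    void __iomem *scr2 = vt6421_scr_addr(scr_base, 2);    /* scr_base + 2 * 64  */
    void __iomem *scr1 = svia_scr_addr(scr_base, 1);      /* scr_base + 1 * 128 */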
@@ -472,7 +472,7 @@ static void svia_configure(struct pci_dev *pdev) | |||
472 | if ((tmp8 & ALL_PORTS) != ALL_PORTS) { | 472 | if ((tmp8 & ALL_PORTS) != ALL_PORTS) { |
473 | dev_printk(KERN_DEBUG, &pdev->dev, | 473 | dev_printk(KERN_DEBUG, &pdev->dev, |
474 | "enabling SATA channels (0x%x)\n", | 474 | "enabling SATA channels (0x%x)\n", |
475 | (int) tmp8); | 475 | (int) tmp8); |
476 | tmp8 |= ALL_PORTS; | 476 | tmp8 |= ALL_PORTS; |
477 | pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8); | 477 | pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8); |
478 | } | 478 | } |
@@ -482,7 +482,7 @@ static void svia_configure(struct pci_dev *pdev) | |||
482 | if ((tmp8 & ALL_PORTS) != ALL_PORTS) { | 482 | if ((tmp8 & ALL_PORTS) != ALL_PORTS) { |
483 | dev_printk(KERN_DEBUG, &pdev->dev, | 483 | dev_printk(KERN_DEBUG, &pdev->dev, |
484 | "enabling SATA channel interrupts (0x%x)\n", | 484 | "enabling SATA channel interrupts (0x%x)\n", |
485 | (int) tmp8); | 485 | (int) tmp8); |
486 | tmp8 |= ALL_PORTS; | 486 | tmp8 |= ALL_PORTS; |
487 | pci_write_config_byte(pdev, SATA_INT_GATE, tmp8); | 487 | pci_write_config_byte(pdev, SATA_INT_GATE, tmp8); |
488 | } | 488 | } |
@@ -492,13 +492,13 @@ static void svia_configure(struct pci_dev *pdev) | |||
492 | if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) { | 492 | if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) { |
493 | dev_printk(KERN_DEBUG, &pdev->dev, | 493 | dev_printk(KERN_DEBUG, &pdev->dev, |
494 | "enabling SATA channel native mode (0x%x)\n", | 494 | "enabling SATA channel native mode (0x%x)\n", |
495 | (int) tmp8); | 495 | (int) tmp8); |
496 | tmp8 |= NATIVE_MODE_ALL; | 496 | tmp8 |= NATIVE_MODE_ALL; |
497 | pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8); | 497 | pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8); |
498 | } | 498 | } |
499 | } | 499 | } |
500 | 500 | ||
501 | static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | 501 | static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
502 | { | 502 | { |
503 | static int printed_version; | 503 | static int printed_version; |
504 | unsigned int i; | 504 | unsigned int i; |
@@ -525,8 +525,8 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
525 | dev_printk(KERN_ERR, &pdev->dev, | 525 | dev_printk(KERN_ERR, &pdev->dev, |
526 | "invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n", | 526 | "invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n", |
527 | i, | 527 | i, |
528 | (unsigned long long)pci_resource_start(pdev, i), | 528 | (unsigned long long)pci_resource_start(pdev, i), |
529 | (unsigned long long)pci_resource_len(pdev, i)); | 529 | (unsigned long long)pci_resource_len(pdev, i)); |
530 | return -ENODEV; | 530 | return -ENODEV; |
531 | } | 531 | } |
532 | 532 | ||
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c index 0d9be1684873..95ae3ed24a9d 100644 --- a/drivers/ata/sata_vsc.c +++ b/drivers/ata/sata_vsc.c | |||
@@ -162,7 +162,8 @@ static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) | |||
162 | /* | 162 | /* |
163 | * The only thing the ctl register is used for is SRST. | 163 | * The only thing the ctl register is used for is SRST. |
164 | * That is not enabled or disabled via tf_load. | 164 | * That is not enabled or disabled via tf_load. |
165 | * However, if ATA_NIEN is changed, then we need to change the interrupt register. | 165 | * However, if ATA_NIEN is changed, then we need to change |
166 | * the interrupt register. | ||
166 | */ | 167 | */ |
167 | if ((tf->ctl & ATA_NIEN) != (ap->last_ctl & ATA_NIEN)) { | 168 | if ((tf->ctl & ATA_NIEN) != (ap->last_ctl & ATA_NIEN)) { |
168 | ap->last_ctl = tf->ctl; | 169 | ap->last_ctl = tf->ctl; |
@@ -219,7 +220,7 @@ static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | |||
219 | tf->hob_lbal = lbal >> 8; | 220 | tf->hob_lbal = lbal >> 8; |
220 | tf->hob_lbam = lbam >> 8; | 221 | tf->hob_lbam = lbam >> 8; |
221 | tf->hob_lbah = lbah >> 8; | 222 | tf->hob_lbah = lbah >> 8; |
222 | } | 223 | } |
223 | } | 224 | } |
224 | 225 | ||
225 | static inline void vsc_error_intr(u8 port_status, struct ata_port *ap) | 226 | static inline void vsc_error_intr(u8 port_status, struct ata_port *ap) |
@@ -256,9 +257,10 @@ static void vsc_port_intr(u8 port_status, struct ata_port *ap) | |||
256 | /* | 257 | /* |
257 | * vsc_sata_interrupt | 258 | * vsc_sata_interrupt |
258 | * | 259 | * |
259 | * Read the interrupt register and process for the devices that have them pending. | 260 | * Read the interrupt register and process for the devices that have |
261 | * them pending. | ||
260 | */ | 262 | */ |
261 | static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance) | 263 | static irqreturn_t vsc_sata_interrupt(int irq, void *dev_instance) |
262 | { | 264 | { |
263 | struct ata_host *host = dev_instance; | 265 | struct ata_host *host = dev_instance; |
264 | unsigned int i; | 266 | unsigned int i; |
@@ -287,7 +289,7 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance) | |||
287 | handled++; | 289 | handled++; |
288 | } else | 290 | } else |
289 | dev_printk(KERN_ERR, host->dev, | 291 | dev_printk(KERN_ERR, host->dev, |
290 | ": interrupt from disabled port %d\n", i); | 292 | "interrupt from disabled port %d\n", i); |
291 | } | 293 | } |
292 | } | 294 | } |
293 | 295 | ||
@@ -363,7 +365,8 @@ static void __devinit vsc_sata_setup_port(struct ata_ioports *port, | |||
363 | } | 365 | } |
364 | 366 | ||
365 | 367 | ||
366 | static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | 368 | static int __devinit vsc_sata_init_one(struct pci_dev *pdev, |
369 | const struct pci_device_id *ent) | ||
367 | { | 370 | { |
368 | static const struct ata_port_info pi = { | 371 | static const struct ata_port_info pi = { |
369 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 372 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 5a6fe17fc638..7d704968765f 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
@@ -1,20 +1,20 @@ | |||
1 | /* | 1 | /* |
2 | * Disk Array driver for HP SA 5xxx and 6xxx Controllers | 2 | * Disk Array driver for HP Smart Array controllers. |
3 | * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P. | 3 | * (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
7 | * the Free Software Foundation; either version 2 of the License, or | 7 | * the Free Software Foundation; version 2 of the License. |
8 | * (at your option) any later version. | ||
9 | * | 8 | * |
10 | * This program is distributed in the hope that it will be useful, | 9 | * This program is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | 11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | * NON INFRINGEMENT. See the GNU General Public License for more details. | 12 | * General Public License for more details. |
14 | * | 13 | * |
15 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
16 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
17 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA |
17 | * 02111-1307, USA. | ||
18 | * | 18 | * |
19 | * Questions/Comments/Bugfixes to iss_storagedev@hp.com | 19 | * Questions/Comments/Bugfixes to iss_storagedev@hp.com |
20 | * | 20 | * |
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index 4aca7ddfdddf..63ee6c076cb3 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c | |||
@@ -1,20 +1,20 @@ | |||
1 | /* | 1 | /* |
2 | * Disk Array driver for Compaq SA53xx Controllers, SCSI Tape module | 2 | * Disk Array driver for HP Smart Array controllers, SCSI Tape module. |
3 | * Copyright 2001 Compaq Computer Corporation | 3 | * (C) Copyright 2001, 2007 Hewlett-Packard Development Company, L.P. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
7 | * the Free Software Foundation; either version 2 of the License, or | 7 | * the Free Software Foundation; version 2 of the License. |
8 | * (at your option) any later version. | ||
9 | * | 8 | * |
10 | * This program is distributed in the hope that it will be useful, | 9 | * This program is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | 11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | * NON INFRINGEMENT. See the GNU General Public License for more details. | 12 | * General Public License for more details. |
14 | * | 13 | * |
15 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
16 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
17 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 16 | * Foundation, Inc., 59 Temple Place, Suite 300, Boston, MA |
17 | * 02111-1307, USA. | ||
18 | * | 18 | * |
19 | * Questions/Comments/Bugfixes to iss_storagedev@hp.com | 19 | * Questions/Comments/Bugfixes to iss_storagedev@hp.com |
20 | * | 20 | * |
diff --git a/drivers/block/cciss_scsi.h b/drivers/block/cciss_scsi.h index 5e7e06c07d6c..d9c2c586502f 100644 --- a/drivers/block/cciss_scsi.h +++ b/drivers/block/cciss_scsi.h | |||
@@ -1,20 +1,20 @@ | |||
1 | /* | 1 | /* |
2 | * Disk Array driver for Compaq SA53xx Controllers, SCSI Tape module | 2 | * Disk Array driver for HP Smart Array controllers, SCSI Tape module. |
3 | * Copyright 2001 Compaq Computer Corporation | 3 | * (C) Copyright 2001, 2007 Hewlett-Packard Development Company, L.P. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
7 | * the Free Software Foundation; either version 2 of the License, or | 7 | * the Free Software Foundation; version 2 of the License. |
8 | * (at your option) any later version. | ||
9 | * | 8 | * |
10 | * This program is distributed in the hope that it will be useful, | 9 | * This program is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | 11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | * NON INFRINGEMENT. See the GNU General Public License for more details. | 12 | * General Public License for more details. |
14 | * | 13 | * |
15 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
16 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
17 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 16 | * Foundation, Inc., 59 Temple Place, Suite 300, Boston, MA |
17 | * 02111-1307, USA. | ||
18 | * | 18 | * |
19 | * Questions/Comments/Bugfixes to iss_storagedev@hp.com | 19 | * Questions/Comments/Bugfixes to iss_storagedev@hp.com |
20 | * | 20 | * |
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c index 1b58b010797f..241167878edf 100644 --- a/drivers/block/cryptoloop.c +++ b/drivers/block/cryptoloop.c | |||
@@ -150,13 +150,8 @@ cryptoloop_transfer(struct loop_device *lo, int cmd, | |||
150 | u32 iv[4] = { 0, }; | 150 | u32 iv[4] = { 0, }; |
151 | iv[0] = cpu_to_le32(IV & 0xffffffff); | 151 | iv[0] = cpu_to_le32(IV & 0xffffffff); |
152 | 152 | ||
153 | sg_set_page(&sg_in, in_page); | 153 | sg_set_page(&sg_in, in_page, sz, in_offs); |
154 | sg_in.offset = in_offs; | 154 | sg_set_page(&sg_out, out_page, sz, out_offs); |
155 | sg_in.length = sz; | ||
156 | |||
157 | sg_set_page(&sg_out, out_page); | ||
158 | sg_out.offset = out_offs; | ||
159 | sg_out.length = sz; | ||
160 | 155 | ||
161 | desc.info = iv; | 156 | desc.info = iv; |
162 | err = encdecfunc(&desc, &sg_out, &sg_in, sz); | 157 | err = encdecfunc(&desc, &sg_out, &sg_in, sz); |
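This hunk, like the ub, virtio_blk and viocd conversions below, tracks the 2.6.24-era scatterlist rework: sg_set_page() now takes the length and offset along with the page, and entries are expected to live in a table set up with sg_init_table() so the end-of-table marking used by the chained-scatterlist iterators is in place. The new idiom in minimal form (page, len and offset are placeholders):

    struct scatterlist sg;

    sg_init_table(&sg, 1);                   /* zero the entry, mark table end   */
    sg_set_page(&sg, page, len, offset);     /* page, length, offset in one call */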
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 7276f7d207c2..fac4c6cd04f7 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/list.h> | 17 | #include <linux/list.h> |
18 | #include <linux/scatterlist.h> | ||
18 | 19 | ||
19 | #include <asm/vio.h> | 20 | #include <asm/vio.h> |
20 | #include <asm/ldc.h> | 21 | #include <asm/ldc.h> |
diff --git a/drivers/block/ub.c b/drivers/block/ub.c index 14143f2c484d..08e909dc7944 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c | |||
@@ -1428,9 +1428,9 @@ static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |||
1428 | scmd->state = UB_CMDST_INIT; | 1428 | scmd->state = UB_CMDST_INIT; |
1429 | scmd->nsg = 1; | 1429 | scmd->nsg = 1; |
1430 | sg = &scmd->sgv[0]; | 1430 | sg = &scmd->sgv[0]; |
1431 | sg_set_page(sg, virt_to_page(sc->top_sense)); | 1431 | sg_init_table(sg, UB_MAX_REQ_SG); |
1432 | sg->offset = (unsigned long)sc->top_sense & (PAGE_SIZE-1); | 1432 | sg_set_page(sg, virt_to_page(sc->top_sense), UB_SENSE_SIZE, |
1433 | sg->length = UB_SENSE_SIZE; | 1433 | (unsigned long)sc->top_sense & (PAGE_SIZE-1)); |
1434 | scmd->len = UB_SENSE_SIZE; | 1434 | scmd->len = UB_SENSE_SIZE; |
1435 | scmd->lun = cmd->lun; | 1435 | scmd->lun = cmd->lun; |
1436 | scmd->done = ub_top_sense_done; | 1436 | scmd->done = ub_top_sense_done; |
@@ -1864,9 +1864,8 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, | |||
1864 | cmd->state = UB_CMDST_INIT; | 1864 | cmd->state = UB_CMDST_INIT; |
1865 | cmd->nsg = 1; | 1865 | cmd->nsg = 1; |
1866 | sg = &cmd->sgv[0]; | 1866 | sg = &cmd->sgv[0]; |
1867 | sg_set_page(sg, virt_to_page(p)); | 1867 | sg_init_table(sg, UB_MAX_REQ_SG); |
1868 | sg->offset = (unsigned long)p & (PAGE_SIZE-1); | 1868 | sg_set_page(sg, virt_to_page(p), 8, (unsigned long)p & (PAGE_SIZE-1)); |
1869 | sg->length = 8; | ||
1870 | cmd->len = 8; | 1869 | cmd->len = 8; |
1871 | cmd->lun = lun; | 1870 | cmd->lun = lun; |
1872 | cmd->done = ub_probe_done; | 1871 | cmd->done = ub_probe_done; |
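The ub conversion also calls sg_init_table() on the whole sgv[] array before filling the first entry: with chained scatterlists the table must be initialised, and its last entry marked as the end, before anything walks it. A rough sketch of the pattern, assuming a kmalloc'ed buffer and a made-up table size:

#include <linux/mm.h>
#include <linux/scatterlist.h>

#define NR_SG	4	/* hypothetical size, standing in for UB_MAX_REQ_SG */

static void describe_single_buffer(struct scatterlist *sgv, void *buf,
				   unsigned int len)
{
	/* Clear every entry and set the end marker on the last one. */
	sg_init_table(sgv, NR_SG);
	/* 'buf' must be direct-mapped (e.g. kmalloc'ed) for virt_to_page(). */
	sg_set_page(&sgv[0], virt_to_page(buf), len,
		    (unsigned long)buf & (PAGE_SIZE - 1));
}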
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index a901eee64ba5..3cf7129d83e6 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
@@ -4,7 +4,9 @@ | |||
4 | #include <linux/hdreg.h> | 4 | #include <linux/hdreg.h> |
5 | #include <linux/virtio.h> | 5 | #include <linux/virtio.h> |
6 | #include <linux/virtio_blk.h> | 6 | #include <linux/virtio_blk.h> |
7 | #include <linux/virtio_blk.h> | 7 | #include <linux/scatterlist.h> |
8 | |||
9 | #define VIRTIO_MAX_SG (3+MAX_PHYS_SEGMENTS) | ||
8 | 10 | ||
9 | static unsigned char virtblk_index = 'a'; | 11 | static unsigned char virtblk_index = 'a'; |
10 | struct virtio_blk | 12 | struct virtio_blk |
@@ -23,7 +25,7 @@ struct virtio_blk | |||
23 | mempool_t *pool; | 25 | mempool_t *pool; |
24 | 26 | ||
25 | /* Scatterlist: can be too big for stack. */ | 27 | /* Scatterlist: can be too big for stack. */ |
26 | struct scatterlist sg[3+MAX_PHYS_SEGMENTS]; | 28 | struct scatterlist sg[VIRTIO_MAX_SG]; |
27 | }; | 29 | }; |
28 | 30 | ||
29 | struct virtblk_req | 31 | struct virtblk_req |
@@ -94,8 +96,8 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, | |||
94 | if (blk_barrier_rq(vbr->req)) | 96 | if (blk_barrier_rq(vbr->req)) |
95 | vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER; | 97 | vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER; |
96 | 98 | ||
97 | /* We have to zero this, otherwise blk_rq_map_sg gets upset. */ | 99 | /* This init could be done at vblk creation time */ |
98 | memset(vblk->sg, 0, sizeof(vblk->sg)); | 100 | sg_init_table(vblk->sg, VIRTIO_MAX_SG); |
99 | sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr)); | 101 | sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr)); |
100 | num = blk_rq_map_sg(q, vbr->req, vblk->sg+1); | 102 | num = blk_rq_map_sg(q, vbr->req, vblk->sg+1); |
101 | sg_set_buf(&vblk->sg[num+1], &vbr->in_hdr, sizeof(vbr->in_hdr)); | 103 | sg_set_buf(&vblk->sg[num+1], &vbr->in_hdr, sizeof(vbr->in_hdr)); |
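For virtio_blk the layout after the conversion is one out-header entry, the mapped request data, then one in-header/status entry, with sg_init_table() replacing the old memset(). A sketch of that shape; the wrapper function and its parameters are invented for illustration:

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

static unsigned int map_block_request(struct request_queue *q,
				      struct request *req,
				      struct scatterlist *sg,
				      unsigned int max_sg,
				      void *out_hdr, size_t out_len,
				      void *in_hdr, size_t in_len)
{
	unsigned int num;

	sg_init_table(sg, max_sg);		   /* was: memset(sg, 0, ...) */
	sg_set_buf(&sg[0], out_hdr, out_len);	   /* request header */
	num = blk_rq_map_sg(q, req, sg + 1);	   /* data payload */
	sg_set_buf(&sg[num + 1], in_hdr, in_len);  /* status/footer */

	return num + 2;		/* entries used in total */
}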
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index 880b5dce3a62..d8bb44b98a6a 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c | |||
@@ -41,9 +41,9 @@ | |||
41 | #include <linux/completion.h> | 41 | #include <linux/completion.h> |
42 | #include <linux/proc_fs.h> | 42 | #include <linux/proc_fs.h> |
43 | #include <linux/seq_file.h> | 43 | #include <linux/seq_file.h> |
44 | #include <linux/scatterlist.h> | ||
44 | 45 | ||
45 | #include <asm/vio.h> | 46 | #include <asm/vio.h> |
46 | #include <asm/scatterlist.h> | ||
47 | #include <asm/iseries/hv_types.h> | 47 | #include <asm/iseries/hv_types.h> |
48 | #include <asm/iseries/hv_lp_event.h> | 48 | #include <asm/iseries/hv_lp_event.h> |
49 | #include <asm/iseries/vio.h> | 49 | #include <asm/iseries/vio.h> |
@@ -258,6 +258,7 @@ static int send_request(struct request *req) | |||
258 | cmd = viomajorsubtype_cdio | viocdwrite; | 258 | cmd = viomajorsubtype_cdio | viocdwrite; |
259 | } | 259 | } |
260 | 260 | ||
261 | sg_init_table(&sg, 1); | ||
261 | if (blk_rq_map_sg(req->q, req, &sg) == 0) { | 262 | if (blk_rq_map_sg(req->q, req, &sg) == 0) { |
262 | printk(VIOCD_KERN_WARNING | 263 | printk(VIOCD_KERN_WARNING |
263 | "error setting up scatter/gather list\n"); | 264 | "error setting up scatter/gather list\n"); |
diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 0e937f64a789..20070b7c573d 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c | |||
@@ -41,7 +41,7 @@ | |||
41 | */ | 41 | */ |
42 | static inline int uncached_access(struct file *file, unsigned long addr) | 42 | static inline int uncached_access(struct file *file, unsigned long addr) |
43 | { | 43 | { |
44 | #if defined(__i386__) | 44 | #if defined(__i386__) && !defined(__arch_um__) |
45 | /* | 45 | /* |
46 | * On the PPro and successors, the MTRRs are used to set | 46 | * On the PPro and successors, the MTRRs are used to set |
47 | * memory types for physical addresses outside main memory, | 47 | * memory types for physical addresses outside main memory, |
@@ -57,7 +57,7 @@ static inline int uncached_access(struct file *file, unsigned long addr) | |||
57 | test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) || | 57 | test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) || |
58 | test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) ) | 58 | test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) ) |
59 | && addr >= __pa(high_memory); | 59 | && addr >= __pa(high_memory); |
60 | #elif defined(__x86_64__) | 60 | #elif defined(__x86_64__) && !defined(__arch_um__) |
61 | /* | 61 | /* |
62 | * This is broken because it can generate memory type aliases, | 62 | * This is broken because it can generate memory type aliases, |
63 | * which can cause cache corruptions | 63 | * which can cause cache corruptions |
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index 4e8de162fc12..c666b4e0933e 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c | |||
@@ -55,7 +55,7 @@ static void padlock_sha_bypass(struct crypto_tfm *tfm) | |||
55 | if (ctx(tfm)->data && ctx(tfm)->used) { | 55 | if (ctx(tfm)->data && ctx(tfm)->used) { |
56 | struct scatterlist sg; | 56 | struct scatterlist sg; |
57 | 57 | ||
58 | sg_set_buf(&sg, ctx(tfm)->data, ctx(tfm)->used); | 58 | sg_init_one(&sg, ctx(tfm)->data, ctx(tfm)->used); |
59 | crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length); | 59 | crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length); |
60 | } | 60 | } |
61 | 61 | ||
@@ -79,7 +79,7 @@ static void padlock_sha_update(struct crypto_tfm *tfm, | |||
79 | 79 | ||
80 | if (unlikely(ctx(tfm)->bypass)) { | 80 | if (unlikely(ctx(tfm)->bypass)) { |
81 | struct scatterlist sg; | 81 | struct scatterlist sg; |
82 | sg_set_buf(&sg, (uint8_t *)data, length); | 82 | sg_init_one(&sg, (uint8_t *)data, length); |
83 | crypto_hash_update(&ctx(tfm)->fallback, &sg, length); | 83 | crypto_hash_update(&ctx(tfm)->fallback, &sg, length); |
84 | return; | 84 | return; |
85 | } | 85 | } |
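sg_init_one() as used in padlock-sha is shorthand for initialising a one-entry table and pointing it at a buffer, which is why the bare sg_set_buf() calls above were no longer sufficient once end markers became mandatory. Roughly (names illustrative):

#include <linux/scatterlist.h>

static void describe_buffer(struct scatterlist *sg, const void *data,
			    unsigned int len)
{
	/* Equivalent to:
	 *	sg_init_table(sg, 1);
	 *	sg_set_buf(sg, data, len);
	 */
	sg_init_one(sg, data, len);
}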
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c index 410a0d13e35e..93f71fcfc04d 100644 --- a/drivers/ide/arm/icside.c +++ b/drivers/ide/arm/icside.c | |||
@@ -316,13 +316,13 @@ static int icside_dma_end(ide_drive_t *drive) | |||
316 | 316 | ||
317 | drive->waiting_for_dma = 0; | 317 | drive->waiting_for_dma = 0; |
318 | 318 | ||
319 | disable_dma(state->dev->dma); | 319 | disable_dma(ECARD_DEV(state->dev)->dma); |
320 | 320 | ||
321 | /* Teardown mappings after DMA has completed. */ | 321 | /* Teardown mappings after DMA has completed. */ |
322 | dma_unmap_sg(state->dev, hwif->sg_table, hwif->sg_nents, | 322 | dma_unmap_sg(state->dev, hwif->sg_table, hwif->sg_nents, |
323 | hwif->sg_dma_direction); | 323 | hwif->sg_dma_direction); |
324 | 324 | ||
325 | return get_dma_residue(state->dev->dma) != 0; | 325 | return get_dma_residue(ECARD_DEV(state->dev)->dma) != 0; |
326 | } | 326 | } |
327 | 327 | ||
328 | static void icside_dma_start(ide_drive_t *drive) | 328 | static void icside_dma_start(ide_drive_t *drive) |
@@ -331,8 +331,8 @@ static void icside_dma_start(ide_drive_t *drive) | |||
331 | struct icside_state *state = hwif->hwif_data; | 331 | struct icside_state *state = hwif->hwif_data; |
332 | 332 | ||
333 | /* We can not enable DMA on both channels simultaneously. */ | 333 | /* We can not enable DMA on both channels simultaneously. */ |
334 | BUG_ON(dma_channel_active(state->dev->dma)); | 334 | BUG_ON(dma_channel_active(ECARD_DEV(state->dev)->dma)); |
335 | enable_dma(state->dev->dma); | 335 | enable_dma(ECARD_DEV(state->dev)->dma); |
336 | } | 336 | } |
337 | 337 | ||
338 | static int icside_dma_setup(ide_drive_t *drive) | 338 | static int icside_dma_setup(ide_drive_t *drive) |
@@ -350,7 +350,7 @@ static int icside_dma_setup(ide_drive_t *drive) | |||
350 | /* | 350 | /* |
351 | * We can not enable DMA on both channels. | 351 | * We can not enable DMA on both channels. |
352 | */ | 352 | */ |
353 | BUG_ON(dma_channel_active(state->dev->dma)); | 353 | BUG_ON(dma_channel_active(ECARD_DEV(state->dev)->dma)); |
354 | 354 | ||
355 | icside_build_sglist(drive, rq); | 355 | icside_build_sglist(drive, rq); |
356 | 356 | ||
@@ -367,14 +367,14 @@ static int icside_dma_setup(ide_drive_t *drive) | |||
367 | /* | 367 | /* |
368 | * Select the correct timing for this drive. | 368 | * Select the correct timing for this drive. |
369 | */ | 369 | */ |
370 | set_dma_speed(state->dev->dma, drive->drive_data); | 370 | set_dma_speed(ECARD_DEV(state->dev)->dma, drive->drive_data); |
371 | 371 | ||
372 | /* | 372 | /* |
373 | * Tell the DMA engine about the SG table and | 373 | * Tell the DMA engine about the SG table and |
374 | * data direction. | 374 | * data direction. |
375 | */ | 375 | */ |
376 | set_dma_sg(state->dev->dma, hwif->sg_table, hwif->sg_nents); | 376 | set_dma_sg(ECARD_DEV(state->dev)->dma, hwif->sg_table, hwif->sg_nents); |
377 | set_dma_mode(state->dev->dma, dma_mode); | 377 | set_dma_mode(ECARD_DEV(state->dev)->dma, dma_mode); |
378 | 378 | ||
379 | drive->waiting_for_dma = 1; | 379 | drive->waiting_for_dma = 1; |
380 | 380 | ||
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c index 95168833d069..dcda0f109df5 100644 --- a/drivers/ide/ide-iops.c +++ b/drivers/ide/ide-iops.c | |||
@@ -582,9 +582,12 @@ EXPORT_SYMBOL_GPL(ide_in_drive_list); | |||
582 | /* | 582 | /* |
583 | * Early UDMA66 devices don't set bit14 to 1, only bit13 is valid. | 583 | * Early UDMA66 devices don't set bit14 to 1, only bit13 is valid. |
584 | * We list them here and depend on the device side cable detection for them. | 584 | * We list them here and depend on the device side cable detection for them. |
585 | * | ||
586 | * Some optical devices with the buggy firmwares have the same problem. | ||
585 | */ | 587 | */ |
586 | static const struct drive_list_entry ivb_list[] = { | 588 | static const struct drive_list_entry ivb_list[] = { |
587 | { "QUANTUM FIREBALLlct10 05" , "A03.0900" }, | 589 | { "QUANTUM FIREBALLlct10 05" , "A03.0900" }, |
590 | { "TSSTcorp CDDVDW SH-S202J" , "SB00" }, | ||
588 | { NULL , NULL } | 591 | { NULL , NULL } |
589 | }; | 592 | }; |
590 | 593 | ||
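The ivb_list[] table above is consulted through ide_in_drive_list(), whose EXPORT_SYMBOL_GPL appears at the top of this hunk. A rough sketch of how such a quirk lookup is typically used; the exact signature of ide_in_drive_list() and the drive fields are assumed here, not taken from this patch:

/* Hedged sketch: does this drive need the IVB (bit13/bit14) workaround? */
static int drive_needs_ivb_quirk(ide_drive_t *drive)
{
	/* Non-zero when the drive's model and firmware revision strings
	 * match an entry in ivb_list[]. */
	return ide_in_drive_list(drive->id, ivb_list);
}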
diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/pci/cy82c693.c index 3ef4fc10fe2c..1cd4e9cb0521 100644 --- a/drivers/ide/pci/cy82c693.c +++ b/drivers/ide/pci/cy82c693.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * linux/drivers/ide/pci/cy82c693.c Version 0.41 Aug 27, 2007 | 2 | * linux/drivers/ide/pci/cy82c693.c Version 0.42 Oct 23, 2007 |
3 | * | 3 | * |
4 | * Copyright (C) 1998-2000 Andreas S. Krebs (akrebs@altavista.net), Maintainer | 4 | * Copyright (C) 1998-2000 Andreas S. Krebs (akrebs@altavista.net), Maintainer |
5 | * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>, Integrator | 5 | * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>, Integrator |
@@ -436,10 +436,10 @@ static void __devinit init_hwif_cy82c693(ide_hwif_t *hwif) | |||
436 | hwif->ide_dma_on = &cy82c693_ide_dma_on; | 436 | hwif->ide_dma_on = &cy82c693_ide_dma_on; |
437 | } | 437 | } |
438 | 438 | ||
439 | static __devinitdata ide_hwif_t *primary; | ||
440 | |||
441 | static void __devinit init_iops_cy82c693(ide_hwif_t *hwif) | 439 | static void __devinit init_iops_cy82c693(ide_hwif_t *hwif) |
442 | { | 440 | { |
441 | static ide_hwif_t *primary; | ||
442 | |||
443 | if (PCI_FUNC(hwif->pci_dev->devfn) == 1) | 443 | if (PCI_FUNC(hwif->pci_dev->devfn) == 1) |
444 | primary = hwif; | 444 | primary = hwif; |
445 | else { | 445 | else { |
diff --git a/drivers/ide/pci/generic.c b/drivers/ide/pci/generic.c index f44d70852c3c..06885697ed7b 100644 --- a/drivers/ide/pci/generic.c +++ b/drivers/ide/pci/generic.c | |||
@@ -49,7 +49,7 @@ static int __init ide_generic_all_on(char *unused) | |||
49 | printk(KERN_INFO "IDE generic will claim all unknown PCI IDE storage controllers.\n"); | 49 | printk(KERN_INFO "IDE generic will claim all unknown PCI IDE storage controllers.\n"); |
50 | return 1; | 50 | return 1; |
51 | } | 51 | } |
52 | __setup("all-generic-ide", ide_generic_all_on); | 52 | const __setup("all-generic-ide", ide_generic_all_on); |
53 | #endif | 53 | #endif |
54 | module_param_named(all_generic_ide, ide_generic_all, bool, 0444); | 54 | module_param_named(all_generic_ide, ide_generic_all, bool, 0444); |
55 | MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE storage controllers."); | 55 | MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE storage controllers."); |
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c index 612b795241bf..5682895d36d9 100644 --- a/drivers/ide/pci/hpt366.c +++ b/drivers/ide/pci/hpt366.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * linux/drivers/ide/pci/hpt366.c Version 1.20 Oct 1, 2007 | 2 | * linux/drivers/ide/pci/hpt366.c Version 1.21 Oct 23, 2007 |
3 | * | 3 | * |
4 | * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> | 4 | * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> |
5 | * Portions Copyright (C) 2001 Sun Microsystems, Inc. | 5 | * Portions Copyright (C) 2001 Sun Microsystems, Inc. |
@@ -433,7 +433,7 @@ static u32 *hpt37x_settings[NUM_ATA_CLOCKS] = { | |||
433 | sixty_six_base_hpt37x | 433 | sixty_six_base_hpt37x |
434 | }; | 434 | }; |
435 | 435 | ||
436 | static struct hpt_info hpt36x __devinitdata = { | 436 | static const struct hpt_info hpt36x __devinitdata = { |
437 | .chip_name = "HPT36x", | 437 | .chip_name = "HPT36x", |
438 | .chip_type = HPT36x, | 438 | .chip_type = HPT36x, |
439 | .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2, | 439 | .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2, |
@@ -441,7 +441,7 @@ static struct hpt_info hpt36x __devinitdata = { | |||
441 | .settings = hpt36x_settings | 441 | .settings = hpt36x_settings |
442 | }; | 442 | }; |
443 | 443 | ||
444 | static struct hpt_info hpt370 __devinitdata = { | 444 | static const struct hpt_info hpt370 __devinitdata = { |
445 | .chip_name = "HPT370", | 445 | .chip_name = "HPT370", |
446 | .chip_type = HPT370, | 446 | .chip_type = HPT370, |
447 | .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4, | 447 | .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4, |
@@ -449,7 +449,7 @@ static struct hpt_info hpt370 __devinitdata = { | |||
449 | .settings = hpt37x_settings | 449 | .settings = hpt37x_settings |
450 | }; | 450 | }; |
451 | 451 | ||
452 | static struct hpt_info hpt370a __devinitdata = { | 452 | static const struct hpt_info hpt370a __devinitdata = { |
453 | .chip_name = "HPT370A", | 453 | .chip_name = "HPT370A", |
454 | .chip_type = HPT370A, | 454 | .chip_type = HPT370A, |
455 | .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4, | 455 | .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4, |
@@ -457,7 +457,7 @@ static struct hpt_info hpt370a __devinitdata = { | |||
457 | .settings = hpt37x_settings | 457 | .settings = hpt37x_settings |
458 | }; | 458 | }; |
459 | 459 | ||
460 | static struct hpt_info hpt374 __devinitdata = { | 460 | static const struct hpt_info hpt374 __devinitdata = { |
461 | .chip_name = "HPT374", | 461 | .chip_name = "HPT374", |
462 | .chip_type = HPT374, | 462 | .chip_type = HPT374, |
463 | .udma_mask = ATA_UDMA5, | 463 | .udma_mask = ATA_UDMA5, |
@@ -465,7 +465,7 @@ static struct hpt_info hpt374 __devinitdata = { | |||
465 | .settings = hpt37x_settings | 465 | .settings = hpt37x_settings |
466 | }; | 466 | }; |
467 | 467 | ||
468 | static struct hpt_info hpt372 __devinitdata = { | 468 | static const struct hpt_info hpt372 __devinitdata = { |
469 | .chip_name = "HPT372", | 469 | .chip_name = "HPT372", |
470 | .chip_type = HPT372, | 470 | .chip_type = HPT372, |
471 | .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, | 471 | .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, |
@@ -473,7 +473,7 @@ static struct hpt_info hpt372 __devinitdata = { | |||
473 | .settings = hpt37x_settings | 473 | .settings = hpt37x_settings |
474 | }; | 474 | }; |
475 | 475 | ||
476 | static struct hpt_info hpt372a __devinitdata = { | 476 | static const struct hpt_info hpt372a __devinitdata = { |
477 | .chip_name = "HPT372A", | 477 | .chip_name = "HPT372A", |
478 | .chip_type = HPT372A, | 478 | .chip_type = HPT372A, |
479 | .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, | 479 | .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, |
@@ -481,7 +481,7 @@ static struct hpt_info hpt372a __devinitdata = { | |||
481 | .settings = hpt37x_settings | 481 | .settings = hpt37x_settings |
482 | }; | 482 | }; |
483 | 483 | ||
484 | static struct hpt_info hpt302 __devinitdata = { | 484 | static const struct hpt_info hpt302 __devinitdata = { |
485 | .chip_name = "HPT302", | 485 | .chip_name = "HPT302", |
486 | .chip_type = HPT302, | 486 | .chip_type = HPT302, |
487 | .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, | 487 | .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, |
@@ -489,7 +489,7 @@ static struct hpt_info hpt302 __devinitdata = { | |||
489 | .settings = hpt37x_settings | 489 | .settings = hpt37x_settings |
490 | }; | 490 | }; |
491 | 491 | ||
492 | static struct hpt_info hpt371 __devinitdata = { | 492 | static const struct hpt_info hpt371 __devinitdata = { |
493 | .chip_name = "HPT371", | 493 | .chip_name = "HPT371", |
494 | .chip_type = HPT371, | 494 | .chip_type = HPT371, |
495 | .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, | 495 | .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, |
@@ -497,7 +497,7 @@ static struct hpt_info hpt371 __devinitdata = { | |||
497 | .settings = hpt37x_settings | 497 | .settings = hpt37x_settings |
498 | }; | 498 | }; |
499 | 499 | ||
500 | static struct hpt_info hpt372n __devinitdata = { | 500 | static const struct hpt_info hpt372n __devinitdata = { |
501 | .chip_name = "HPT372N", | 501 | .chip_name = "HPT372N", |
502 | .chip_type = HPT372N, | 502 | .chip_type = HPT372N, |
503 | .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, | 503 | .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, |
@@ -505,7 +505,7 @@ static struct hpt_info hpt372n __devinitdata = { | |||
505 | .settings = hpt37x_settings | 505 | .settings = hpt37x_settings |
506 | }; | 506 | }; |
507 | 507 | ||
508 | static struct hpt_info hpt302n __devinitdata = { | 508 | static const struct hpt_info hpt302n __devinitdata = { |
509 | .chip_name = "HPT302N", | 509 | .chip_name = "HPT302N", |
510 | .chip_type = HPT302N, | 510 | .chip_type = HPT302N, |
511 | .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, | 511 | .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, |
@@ -513,7 +513,7 @@ static struct hpt_info hpt302n __devinitdata = { | |||
513 | .settings = hpt37x_settings | 513 | .settings = hpt37x_settings |
514 | }; | 514 | }; |
515 | 515 | ||
516 | static struct hpt_info hpt371n __devinitdata = { | 516 | static const struct hpt_info hpt371n __devinitdata = { |
517 | .chip_name = "HPT371N", | 517 | .chip_name = "HPT371N", |
518 | .chip_type = HPT371N, | 518 | .chip_type = HPT371N, |
519 | .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, | 519 | .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, |
@@ -1508,7 +1508,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = { | |||
1508 | */ | 1508 | */ |
1509 | static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_device_id *id) | 1509 | static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
1510 | { | 1510 | { |
1511 | struct hpt_info *info = NULL; | 1511 | const struct hpt_info *info = NULL; |
1512 | struct pci_dev *dev2 = NULL; | 1512 | struct pci_dev *dev2 = NULL; |
1513 | struct ide_port_info d; | 1513 | struct ide_port_info d; |
1514 | u8 idx = id->driver_data; | 1514 | u8 idx = id->driver_data; |
@@ -1522,7 +1522,7 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic | |||
1522 | if (rev < 3) | 1522 | if (rev < 3) |
1523 | info = &hpt36x; | 1523 | info = &hpt36x; |
1524 | else { | 1524 | else { |
1525 | static struct hpt_info *hpt37x_info[] = | 1525 | static const struct hpt_info *hpt37x_info[] = |
1526 | { &hpt370, &hpt370a, &hpt372, &hpt372n }; | 1526 | { &hpt370, &hpt370a, &hpt372, &hpt372n }; |
1527 | 1527 | ||
1528 | info = hpt37x_info[min_t(u8, rev, 6) - 3]; | 1528 | info = hpt37x_info[min_t(u8, rev, 6) - 3]; |
@@ -1552,7 +1552,7 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic | |||
1552 | d.name = info->chip_name; | 1552 | d.name = info->chip_name; |
1553 | d.udma_mask = info->udma_mask; | 1553 | d.udma_mask = info->udma_mask; |
1554 | 1554 | ||
1555 | pci_set_drvdata(dev, info); | 1555 | pci_set_drvdata(dev, (void *)info); |
1556 | 1556 | ||
1557 | if (info == &hpt36x || info == &hpt374) | 1557 | if (info == &hpt36x || info == &hpt374) |
1558 | dev2 = pci_get_slot(dev->bus, dev->devfn + 1); | 1558 | dev2 = pci_get_slot(dev->bus, dev->devfn + 1); |
@@ -1560,7 +1560,7 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic | |||
1560 | if (dev2) { | 1560 | if (dev2) { |
1561 | int ret; | 1561 | int ret; |
1562 | 1562 | ||
1563 | pci_set_drvdata(dev2, info); | 1563 | pci_set_drvdata(dev2, (void *)info); |
1564 | 1564 | ||
1565 | if (info == &hpt374) | 1565 | if (info == &hpt374) |
1566 | hpt374_init(dev, dev2); | 1566 | hpt374_init(dev, dev2); |
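The hpt366 changes constify the chip descriptors, so storing one as PCI driver data needs an explicit cast, since pci_set_drvdata() takes a plain void *. A small sketch of that idiom with a stand-in structure:

#include <linux/pci.h>
#include <linux/types.h>

struct chip_info {			/* stand-in for struct hpt_info */
	const char *chip_name;
	u8 udma_mask;
};

static const struct chip_info example_chip = {
	.chip_name	= "EXAMPLE",
	.udma_mask	= 0x3f,
};

static void stash_chip_info(struct pci_dev *dev, const struct chip_info *info)
{
	/* Casting away const is deliberate: the driver only ever reads
	 * the descriptor back via pci_get_drvdata(). */
	pci_set_drvdata(dev, (void *)info);
}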
diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/pci/sc1200.c index d2c8b5524f28..0a7b3202066d 100644 --- a/drivers/ide/pci/sc1200.c +++ b/drivers/ide/pci/sc1200.c | |||
@@ -324,17 +324,18 @@ static int sc1200_suspend (struct pci_dev *dev, pm_message_t state) | |||
324 | 324 | ||
325 | pci_disable_device(dev); | 325 | pci_disable_device(dev); |
326 | pci_set_power_state(dev, pci_choose_state(dev, state)); | 326 | pci_set_power_state(dev, pci_choose_state(dev, state)); |
327 | dev->current_state = state.event; | ||
328 | return 0; | 327 | return 0; |
329 | } | 328 | } |
330 | 329 | ||
331 | static int sc1200_resume (struct pci_dev *dev) | 330 | static int sc1200_resume (struct pci_dev *dev) |
332 | { | 331 | { |
333 | ide_hwif_t *hwif = NULL; | 332 | ide_hwif_t *hwif = NULL; |
333 | int i; | ||
334 | |||
335 | i = pci_enable_device(dev); | ||
336 | if (i) | ||
337 | return i; | ||
334 | 338 | ||
335 | pci_set_power_state(dev, PCI_D0); // bring chip back from sleep state | ||
336 | dev->current_state = PM_EVENT_ON; | ||
337 | pci_enable_device(dev); | ||
338 | // | 339 | // |
339 | // loop over all interfaces that are part of this pci device: | 340 | // loop over all interfaces that are part of this pci device: |
340 | // | 341 | // |
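The sc1200 resume fix boils down to propagating the pci_enable_device() return value instead of ignoring it (the hand-rolled power-state bookkeeping goes away as well). The resulting shape of a minimal resume handler, with everything device-specific elided:

#include <linux/pci.h>

static int example_resume(struct pci_dev *dev)
{
	int ret;

	ret = pci_enable_device(dev);
	if (ret)
		return ret;	/* don't touch a device that never woke up */

	/* ... re-program controller registers here ... */
	return 0;
}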
diff --git a/drivers/ieee1394/dma.c b/drivers/ieee1394/dma.c index 3051e312fdc8..f5f4983dfbf3 100644 --- a/drivers/ieee1394/dma.c +++ b/drivers/ieee1394/dma.c | |||
@@ -111,8 +111,8 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, | |||
111 | unsigned long va = | 111 | unsigned long va = |
112 | (unsigned long)dma->kvirt + (i << PAGE_SHIFT); | 112 | (unsigned long)dma->kvirt + (i << PAGE_SHIFT); |
113 | 113 | ||
114 | sg_set_page(&dma->sglist[i], vmalloc_to_page((void *)va)); | 114 | sg_set_page(&dma->sglist[i], vmalloc_to_page((void *)va), |
115 | dma->sglist[i].length = PAGE_SIZE; | 115 | PAGE_SIZE, 0); |
116 | } | 116 | } |
117 | 117 | ||
118 | /* map sglist to the IOMMU */ | 118 | /* map sglist to the IOMMU */ |
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 14159ff29408..4e3128ff73c1 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c | |||
@@ -171,9 +171,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
171 | if (vma_list && | 171 | if (vma_list && |
172 | !is_vm_hugetlb_page(vma_list[i + off])) | 172 | !is_vm_hugetlb_page(vma_list[i + off])) |
173 | umem->hugetlb = 0; | 173 | umem->hugetlb = 0; |
174 | sg_set_page(&chunk->page_list[i], page_list[i + off]); | 174 | sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0); |
175 | chunk->page_list[i].offset = 0; | ||
176 | chunk->page_list[i].length = PAGE_SIZE; | ||
177 | } | 175 | } |
178 | 176 | ||
179 | chunk->nmap = ib_dma_map_sg(context->device, | 177 | chunk->nmap = ib_dma_map_sg(context->device, |
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c index 007b38157fc4..1f4d27d7c16d 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.c +++ b/drivers/infiniband/hw/mthca/mthca_memfree.c | |||
@@ -113,9 +113,7 @@ static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_m | |||
113 | if (!page) | 113 | if (!page) |
114 | return -ENOMEM; | 114 | return -ENOMEM; |
115 | 115 | ||
116 | sg_set_page(mem, page); | 116 | sg_set_page(mem, page, PAGE_SIZE << order, 0); |
117 | mem->length = PAGE_SIZE << order; | ||
118 | mem->offset = 0; | ||
119 | return 0; | 117 | return 0; |
120 | } | 118 | } |
121 | 119 | ||
@@ -481,9 +479,8 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar, | |||
481 | if (ret < 0) | 479 | if (ret < 0) |
482 | goto out; | 480 | goto out; |
483 | 481 | ||
484 | sg_set_page(&db_tab->page[i].mem, pages[0]); | 482 | sg_set_page(&db_tab->page[i].mem, pages[0], MTHCA_ICM_PAGE_SIZE, |
485 | db_tab->page[i].mem.length = MTHCA_ICM_PAGE_SIZE; | 483 | uaddr & ~PAGE_MASK); |
486 | db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK; | ||
487 | 484 | ||
488 | ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); | 485 | ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); |
489 | if (ret < 0) { | 486 | if (ret < 0) { |
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c index 476012b6dfac..48c1775ef5b3 100644 --- a/drivers/isdn/capi/capidrv.c +++ b/drivers/isdn/capi/capidrv.c | |||
@@ -1843,6 +1843,7 @@ static int if_sendbuf(int id, int channel, int doack, struct sk_buff *skb) | |||
1843 | int msglen; | 1843 | int msglen; |
1844 | u16 errcode; | 1844 | u16 errcode; |
1845 | u16 datahandle; | 1845 | u16 datahandle; |
1846 | u32 data; | ||
1846 | 1847 | ||
1847 | if (!card) { | 1848 | if (!card) { |
1848 | printk(KERN_ERR "capidrv: if_sendbuf called with invalid driverId %d!\n", | 1849 | printk(KERN_ERR "capidrv: if_sendbuf called with invalid driverId %d!\n", |
@@ -1860,9 +1861,26 @@ static int if_sendbuf(int id, int channel, int doack, struct sk_buff *skb) | |||
1860 | return 0; | 1861 | return 0; |
1861 | } | 1862 | } |
1862 | datahandle = nccip->datahandle; | 1863 | datahandle = nccip->datahandle; |
1864 | |||
1865 | /* | ||
1866 | * Here we copy pointer skb->data into the 32-bit 'Data' field. | ||
1867 | * The 'Data' field is not used in practice in linux kernel | ||
1868 | * (neither in 32 or 64 bit), but should have some value, | ||
1869 | * since a CAPI message trace will display it. | ||
1870 | * | ||
1871 | * The correct value in the 32 bit case is the address of the | ||
1872 | * data, in 64 bit it makes no sense, we use 0 there. | ||
1873 | */ | ||
1874 | |||
1875 | #ifdef CONFIG_64BIT | ||
1876 | data = 0; | ||
1877 | #else | ||
1878 | data = (unsigned long) skb->data; | ||
1879 | #endif | ||
1880 | |||
1863 | capi_fill_DATA_B3_REQ(&sendcmsg, global.ap.applid, card->msgid++, | 1881 | capi_fill_DATA_B3_REQ(&sendcmsg, global.ap.applid, card->msgid++, |
1864 | nccip->ncci, /* adr */ | 1882 | nccip->ncci, /* adr */ |
1865 | (u32) skb->data, /* Data */ | 1883 | data, /* Data */ |
1866 | skb->len, /* DataLength */ | 1884 | skb->len, /* DataLength */ |
1867 | datahandle, /* DataHandle */ | 1885 | datahandle, /* DataHandle */ |
1868 | 0 /* Flags */ | 1886 | 0 /* Flags */ |
@@ -2123,7 +2141,10 @@ static int capidrv_delcontr(u16 contr) | |||
2123 | printk(KERN_ERR "capidrv: delcontr: no contr %u\n", contr); | 2141 | printk(KERN_ERR "capidrv: delcontr: no contr %u\n", contr); |
2124 | return -1; | 2142 | return -1; |
2125 | } | 2143 | } |
2126 | #warning FIXME: maybe a race condition the card should be removed here from global list /kkeil | 2144 | |
2145 | /* FIXME: maybe a race condition the card should be removed | ||
2146 | * here from global list /kkeil | ||
2147 | */ | ||
2127 | spin_unlock_irqrestore(&global_lock, flags); | 2148 | spin_unlock_irqrestore(&global_lock, flags); |
2128 | 2149 | ||
2129 | del_timer(&card->listentimer); | 2150 | del_timer(&card->listentimer); |
diff --git a/drivers/isdn/sc/shmem.c b/drivers/isdn/sc/shmem.c index 034d41a61ae1..e0331e0094f1 100644 --- a/drivers/isdn/sc/shmem.c +++ b/drivers/isdn/sc/shmem.c | |||
@@ -28,15 +28,15 @@ void memcpy_toshmem(int card, void *dest, const void *src, size_t n) | |||
28 | { | 28 | { |
29 | unsigned long flags; | 29 | unsigned long flags; |
30 | unsigned char ch; | 30 | unsigned char ch; |
31 | unsigned long dest_rem = ((unsigned long) dest) % 0x4000; | ||
31 | 32 | ||
32 | if(!IS_VALID_CARD(card)) { | 33 | if (!IS_VALID_CARD(card)) { |
33 | pr_debug("Invalid param: %d is not a valid card id\n", card); | 34 | pr_debug("Invalid param: %d is not a valid card id\n", card); |
34 | return; | 35 | return; |
35 | } | 36 | } |
36 | 37 | ||
37 | if(n > SRAM_PAGESIZE) { | 38 | if (n > SRAM_PAGESIZE) |
38 | return; | 39 | return; |
39 | } | ||
40 | 40 | ||
41 | /* | 41 | /* |
42 | * determine the page to load from the address | 42 | * determine the page to load from the address |
@@ -50,8 +50,7 @@ void memcpy_toshmem(int card, void *dest, const void *src, size_t n) | |||
50 | 50 | ||
51 | outb(((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE) >> 14) | 0x80, | 51 | outb(((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE) >> 14) | 0x80, |
52 | sc_adapter[card]->ioport[sc_adapter[card]->shmem_pgport]); | 52 | sc_adapter[card]->ioport[sc_adapter[card]->shmem_pgport]); |
53 | memcpy_toio(sc_adapter[card]->rambase + | 53 | memcpy_toio(sc_adapter[card]->rambase + dest_rem, src, n); |
54 | ((unsigned long) dest % 0x4000), src, n); | ||
55 | spin_unlock_irqrestore(&sc_adapter[card]->lock, flags); | 54 | spin_unlock_irqrestore(&sc_adapter[card]->lock, flags); |
56 | pr_debug("%s: set page to %#x\n",sc_adapter[card]->devicename, | 55 | pr_debug("%s: set page to %#x\n",sc_adapter[card]->devicename, |
57 | ((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE)>>14)|0x80); | 56 | ((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE)>>14)|0x80); |
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index 35d19ae58de7..cb4c67025d52 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c | |||
@@ -128,9 +128,12 @@ static void unmap_switcher(void) | |||
128 | __free_pages(switcher_page[i], 0); | 128 | __free_pages(switcher_page[i], 0); |
129 | } | 129 | } |
130 | 130 | ||
131 | /*L:305 | 131 | /*H:032 |
132 | * Dealing With Guest Memory. | 132 | * Dealing With Guest Memory. |
133 | * | 133 | * |
134 | * Before we go too much further into the Host, we need to grok the routines | ||
135 | * we use to deal with Guest memory. | ||
136 | * | ||
134 | * When the Guest gives us (what it thinks is) a physical address, we can use | 137 | * When the Guest gives us (what it thinks is) a physical address, we can use |
135 | * the normal copy_from_user() & copy_to_user() on the corresponding place in | 138 | * the normal copy_from_user() & copy_to_user() on the corresponding place in |
136 | * the memory region allocated by the Launcher. | 139 | * the memory region allocated by the Launcher. |
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c index 9d5184c7c14a..b478affe8f91 100644 --- a/drivers/lguest/hypercalls.c +++ b/drivers/lguest/hypercalls.c | |||
@@ -90,6 +90,7 @@ static void do_hcall(struct lguest *lg, struct hcall_args *args) | |||
90 | lg->pending_notify = args->arg1; | 90 | lg->pending_notify = args->arg1; |
91 | break; | 91 | break; |
92 | default: | 92 | default: |
93 | /* It should be an architecture-specific hypercall. */ | ||
93 | if (lguest_arch_do_hcall(lg, args)) | 94 | if (lguest_arch_do_hcall(lg, args)) |
94 | kill_guest(lg, "Bad hypercall %li\n", args->arg0); | 95 | kill_guest(lg, "Bad hypercall %li\n", args->arg0); |
95 | } | 96 | } |
@@ -157,7 +158,6 @@ static void do_async_hcalls(struct lguest *lg) | |||
157 | * Guest makes a hypercall, we end up here to set things up: */ | 158 | * Guest makes a hypercall, we end up here to set things up: */ |
158 | static void initialize(struct lguest *lg) | 159 | static void initialize(struct lguest *lg) |
159 | { | 160 | { |
160 | |||
161 | /* You can't do anything until you're initialized. The Guest knows the | 161 | /* You can't do anything until you're initialized. The Guest knows the |
162 | * rules, so we're unforgiving here. */ | 162 | * rules, so we're unforgiving here. */ |
163 | if (lg->hcall->arg0 != LHCALL_LGUEST_INIT) { | 163 | if (lg->hcall->arg0 != LHCALL_LGUEST_INIT) { |
@@ -174,7 +174,8 @@ static void initialize(struct lguest *lg) | |||
174 | || get_user(lg->noirq_end, &lg->lguest_data->noirq_end)) | 174 | || get_user(lg->noirq_end, &lg->lguest_data->noirq_end)) |
175 | kill_guest(lg, "bad guest page %p", lg->lguest_data); | 175 | kill_guest(lg, "bad guest page %p", lg->lguest_data); |
176 | 176 | ||
177 | /* We write the current time into the Guest's data page once now. */ | 177 | /* We write the current time into the Guest's data page once so it can |
178 | * set its clock. */ | ||
178 | write_timestamp(lg); | 179 | write_timestamp(lg); |
179 | 180 | ||
180 | /* page_tables.c will also do some setup. */ | 181 | /* page_tables.c will also do some setup. */ |
@@ -182,8 +183,8 @@ static void initialize(struct lguest *lg) | |||
182 | 183 | ||
183 | /* This is the one case where the above accesses might have been the | 184 | /* This is the one case where the above accesses might have been the |
184 | * first write to a Guest page. This may have caused a copy-on-write | 185 | * first write to a Guest page. This may have caused a copy-on-write |
185 | * fault, but the Guest might be referring to the old (read-only) | 186 | * fault, but the old page might be (read-only) in the Guest |
186 | * page. */ | 187 | * pagetable. */ |
187 | guest_pagetable_clear_all(lg); | 188 | guest_pagetable_clear_all(lg); |
188 | } | 189 | } |
189 | 190 | ||
@@ -220,7 +221,7 @@ void do_hypercalls(struct lguest *lg) | |||
220 | * Normally it doesn't matter: the Guest will run again and | 221 | * Normally it doesn't matter: the Guest will run again and |
221 | * update the trap number before we come back here. | 222 | * update the trap number before we come back here. |
222 | * | 223 | * |
223 | * However, if we are signalled or the Guest sends DMA to the | 224 | * However, if we are signalled or the Guest sends I/O to the |
224 | * Launcher, the run_guest() loop will exit without running the | 225 | * Launcher, the run_guest() loop will exit without running the |
225 | * Guest. When it comes back it would try to re-run the | 226 | * Guest. When it comes back it would try to re-run the |
226 | * hypercall. */ | 227 | * hypercall. */ |
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c index 82966982cb38..2b66f79c208b 100644 --- a/drivers/lguest/interrupts_and_traps.c +++ b/drivers/lguest/interrupts_and_traps.c | |||
@@ -92,8 +92,8 @@ static void set_guest_interrupt(struct lguest *lg, u32 lo, u32 hi, int has_err) | |||
92 | 92 | ||
93 | /* Remember that we never let the Guest actually disable interrupts, so | 93 | /* Remember that we never let the Guest actually disable interrupts, so |
94 | * the "Interrupt Flag" bit is always set. We copy that bit from the | 94 | * the "Interrupt Flag" bit is always set. We copy that bit from the |
95 | * Guest's "irq_enabled" field into the eflags word: the Guest copies | 95 | * Guest's "irq_enabled" field into the eflags word: we saw the Guest |
96 | * it back in "lguest_iret". */ | 96 | * copy it back in "lguest_iret". */ |
97 | eflags = lg->regs->eflags; | 97 | eflags = lg->regs->eflags; |
98 | if (get_user(irq_enable, &lg->lguest_data->irq_enabled) == 0 | 98 | if (get_user(irq_enable, &lg->lguest_data->irq_enabled) == 0 |
99 | && !(irq_enable & X86_EFLAGS_IF)) | 99 | && !(irq_enable & X86_EFLAGS_IF)) |
@@ -124,7 +124,7 @@ static void set_guest_interrupt(struct lguest *lg, u32 lo, u32 hi, int has_err) | |||
124 | kill_guest(lg, "Disabling interrupts"); | 124 | kill_guest(lg, "Disabling interrupts"); |
125 | } | 125 | } |
126 | 126 | ||
127 | /*H:200 | 127 | /*H:205 |
128 | * Virtual Interrupts. | 128 | * Virtual Interrupts. |
129 | * | 129 | * |
130 | * maybe_do_interrupt() gets called before every entry to the Guest, to see if | 130 | * maybe_do_interrupt() gets called before every entry to the Guest, to see if |
@@ -256,19 +256,21 @@ int deliver_trap(struct lguest *lg, unsigned int num) | |||
256 | * bogus one in): if we fail here, the Guest will be killed. */ | 256 | * bogus one in): if we fail here, the Guest will be killed. */ |
257 | if (!idt_present(lg->arch.idt[num].a, lg->arch.idt[num].b)) | 257 | if (!idt_present(lg->arch.idt[num].a, lg->arch.idt[num].b)) |
258 | return 0; | 258 | return 0; |
259 | set_guest_interrupt(lg, lg->arch.idt[num].a, lg->arch.idt[num].b, has_err(num)); | 259 | set_guest_interrupt(lg, lg->arch.idt[num].a, lg->arch.idt[num].b, |
260 | has_err(num)); | ||
260 | return 1; | 261 | return 1; |
261 | } | 262 | } |
262 | 263 | ||
263 | /*H:250 Here's the hard part: returning to the Host every time a trap happens | 264 | /*H:250 Here's the hard part: returning to the Host every time a trap happens |
264 | * and then calling deliver_trap() and re-entering the Guest is slow. | 265 | * and then calling deliver_trap() and re-entering the Guest is slow. |
265 | * Particularly because Guest userspace system calls are traps (trap 128). | 266 | * Particularly because Guest userspace system calls are traps (usually trap |
267 | * 128). | ||
266 | * | 268 | * |
267 | * So we'd like to set up the IDT to tell the CPU to deliver traps directly | 269 | * So we'd like to set up the IDT to tell the CPU to deliver traps directly |
268 | * into the Guest. This is possible, but the complexities cause the size of | 270 | * into the Guest. This is possible, but the complexities cause the size of |
269 | * this file to double! However, 150 lines of code is worth writing for taking | 271 | * this file to double! However, 150 lines of code is worth writing for taking |
270 | * system calls down from 1750ns to 270ns. Plus, if lguest didn't do it, all | 272 | * system calls down from 1750ns to 270ns. Plus, if lguest didn't do it, all |
271 | * the other hypervisors would tease it. | 273 | * the other hypervisors would beat it up at lunchtime. |
272 | * | 274 | * |
273 | * This routine indicates if a particular trap number could be delivered | 275 | * This routine indicates if a particular trap number could be delivered |
274 | * directly. */ | 276 | * directly. */ |
@@ -331,7 +333,7 @@ void pin_stack_pages(struct lguest *lg) | |||
331 | * change stacks on each context switch. */ | 333 | * change stacks on each context switch. */ |
332 | void guest_set_stack(struct lguest *lg, u32 seg, u32 esp, unsigned int pages) | 334 | void guest_set_stack(struct lguest *lg, u32 seg, u32 esp, unsigned int pages) |
333 | { | 335 | { |
334 | /* You are not allowd have a stack segment with privilege level 0: bad | 336 | /* You are not allowed have a stack segment with privilege level 0: bad |
335 | * Guest! */ | 337 | * Guest! */ |
336 | if ((seg & 0x3) != GUEST_PL) | 338 | if ((seg & 0x3) != GUEST_PL) |
337 | kill_guest(lg, "bad stack segment %i", seg); | 339 | kill_guest(lg, "bad stack segment %i", seg); |
@@ -350,7 +352,7 @@ void guest_set_stack(struct lguest *lg, u32 seg, u32 esp, unsigned int pages) | |||
350 | * part of the Host: page table handling. */ | 352 | * part of the Host: page table handling. */ |
351 | 353 | ||
352 | /*H:235 This is the routine which actually checks the Guest's IDT entry and | 354 | /*H:235 This is the routine which actually checks the Guest's IDT entry and |
353 | * transfers it into our entry in "struct lguest": */ | 355 | * transfers it into the entry in "struct lguest": */ |
354 | static void set_trap(struct lguest *lg, struct desc_struct *trap, | 356 | static void set_trap(struct lguest *lg, struct desc_struct *trap, |
355 | unsigned int num, u32 lo, u32 hi) | 357 | unsigned int num, u32 lo, u32 hi) |
356 | { | 358 | { |
@@ -456,6 +458,18 @@ void copy_traps(const struct lguest *lg, struct desc_struct *idt, | |||
456 | } | 458 | } |
457 | } | 459 | } |
458 | 460 | ||
461 | /*H:200 | ||
462 | * The Guest Clock. | ||
463 | * | ||
464 | * There are two sources of virtual interrupts. We saw one in lguest_user.c: | ||
465 | * the Launcher sending interrupts for virtual devices. The other is the Guest | ||
466 | * timer interrupt. | ||
467 | * | ||
468 | * The Guest uses the LHCALL_SET_CLOCKEVENT hypercall to tell us how long to | ||
469 | * the next timer interrupt (in nanoseconds). We use the high-resolution timer | ||
470 | * infrastructure to set a callback at that time. | ||
471 | * | ||
472 | * 0 means "turn off the clock". */ | ||
459 | void guest_set_clockevent(struct lguest *lg, unsigned long delta) | 473 | void guest_set_clockevent(struct lguest *lg, unsigned long delta) |
460 | { | 474 | { |
461 | ktime_t expires; | 475 | ktime_t expires; |
@@ -466,20 +480,27 @@ void guest_set_clockevent(struct lguest *lg, unsigned long delta) | |||
466 | return; | 480 | return; |
467 | } | 481 | } |
468 | 482 | ||
483 | /* We use wallclock time here, so the Guest might not be running for | ||
484 | * all the time between now and the timer interrupt it asked for. This | ||
485 | * is almost always the right thing to do. */ | ||
469 | expires = ktime_add_ns(ktime_get_real(), delta); | 486 | expires = ktime_add_ns(ktime_get_real(), delta); |
470 | hrtimer_start(&lg->hrt, expires, HRTIMER_MODE_ABS); | 487 | hrtimer_start(&lg->hrt, expires, HRTIMER_MODE_ABS); |
471 | } | 488 | } |
472 | 489 | ||
490 | /* This is the function called when the Guest's timer expires. */ | ||
473 | static enum hrtimer_restart clockdev_fn(struct hrtimer *timer) | 491 | static enum hrtimer_restart clockdev_fn(struct hrtimer *timer) |
474 | { | 492 | { |
475 | struct lguest *lg = container_of(timer, struct lguest, hrt); | 493 | struct lguest *lg = container_of(timer, struct lguest, hrt); |
476 | 494 | ||
495 | /* Remember the first interrupt is the timer interrupt. */ | ||
477 | set_bit(0, lg->irqs_pending); | 496 | set_bit(0, lg->irqs_pending); |
497 | /* If the Guest is actually stopped, we need to wake it up. */ | ||
478 | if (lg->halted) | 498 | if (lg->halted) |
479 | wake_up_process(lg->tsk); | 499 | wake_up_process(lg->tsk); |
480 | return HRTIMER_NORESTART; | 500 | return HRTIMER_NORESTART; |
481 | } | 501 | } |
482 | 502 | ||
503 | /* This sets up the timer for this Guest. */ | ||
483 | void init_clockdev(struct lguest *lg) | 504 | void init_clockdev(struct lguest *lg) |
484 | { | 505 | { |
485 | hrtimer_init(&lg->hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS); | 506 | hrtimer_init(&lg->hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS); |
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h index d9144beca82c..86924891b5eb 100644 --- a/drivers/lguest/lg.h +++ b/drivers/lguest/lg.h | |||
@@ -74,9 +74,6 @@ struct lguest | |||
74 | u32 pgdidx; | 74 | u32 pgdidx; |
75 | struct pgdir pgdirs[4]; | 75 | struct pgdir pgdirs[4]; |
76 | 76 | ||
77 | /* Cached wakeup: we hold a reference to this task. */ | ||
78 | struct task_struct *wake; | ||
79 | |||
80 | unsigned long noirq_start, noirq_end; | 77 | unsigned long noirq_start, noirq_end; |
81 | unsigned long pending_notify; /* pfn from LHCALL_NOTIFY */ | 78 | unsigned long pending_notify; /* pfn from LHCALL_NOTIFY */ |
82 | 79 | ||
@@ -103,7 +100,7 @@ int lguest_address_ok(const struct lguest *lg, | |||
103 | void __lgread(struct lguest *, void *, unsigned long, unsigned); | 100 | void __lgread(struct lguest *, void *, unsigned long, unsigned); |
104 | void __lgwrite(struct lguest *, unsigned long, const void *, unsigned); | 101 | void __lgwrite(struct lguest *, unsigned long, const void *, unsigned); |
105 | 102 | ||
106 | /*L:306 Using memory-copy operations like that is usually inconvient, so we | 103 | /*H:035 Using memory-copy operations like that is usually inconvient, so we |
107 | * have the following helper macros which read and write a specific type (often | 104 | * have the following helper macros which read and write a specific type (often |
108 | * an unsigned long). | 105 | * an unsigned long). |
109 | * | 106 | * |
@@ -191,7 +188,7 @@ void write_timestamp(struct lguest *lg); | |||
191 | * Let's step aside for the moment, to study one important routine that's used | 188 | * Let's step aside for the moment, to study one important routine that's used |
192 | * widely in the Host code. | 189 | * widely in the Host code. |
193 | * | 190 | * |
194 | * There are many cases where the Guest does something invalid, like pass crap | 191 | * There are many cases where the Guest can do something invalid, like pass crap |
195 | * to a hypercall. Since only the Guest kernel can make hypercalls, it's quite | 192 | * to a hypercall. Since only the Guest kernel can make hypercalls, it's quite |
196 | * acceptable to simply terminate the Guest and give the Launcher a nicely | 193 | * acceptable to simply terminate the Guest and give the Launcher a nicely |
197 | * formatted reason. It's also simpler for the Guest itself, which doesn't | 194 | * formatted reason. It's also simpler for the Guest itself, which doesn't |
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c index 71c64837b437..8904f72f97c6 100644 --- a/drivers/lguest/lguest_device.c +++ b/drivers/lguest/lguest_device.c | |||
@@ -53,7 +53,8 @@ struct lguest_device { | |||
53 | * Device configurations | 53 | * Device configurations |
54 | * | 54 | * |
55 | * The configuration information for a device consists of a series of fields. | 55 | * The configuration information for a device consists of a series of fields. |
56 | * The device will look for these fields during setup. | 56 | * We don't really care what they are: the Launcher set them up, and the driver |
57 | * will look at them during setup. | ||
57 | * | 58 | * |
58 | * For us these fields come immediately after that device's descriptor in the | 59 | * For us these fields come immediately after that device's descriptor in the |
59 | * lguest_devices page. | 60 | * lguest_devices page. |
@@ -122,8 +123,8 @@ static void lg_set_status(struct virtio_device *vdev, u8 status) | |||
122 | * The other piece of infrastructure virtio needs is a "virtqueue": a way of | 123 | * The other piece of infrastructure virtio needs is a "virtqueue": a way of |
123 | * the Guest device registering buffers for the other side to read from or | 124 | * the Guest device registering buffers for the other side to read from or |
124 | * write into (ie. send and receive buffers). Each device can have multiple | 125 | * write into (ie. send and receive buffers). Each device can have multiple |
125 | * virtqueues: for example the console has one queue for sending and one for | 126 | * virtqueues: for example the console driver uses one queue for sending and |
126 | * receiving. | 127 | * another for receiving. |
127 | * | 128 | * |
128 | * Fortunately for us, a very fast shared-memory-plus-descriptors virtqueue | 129 | * Fortunately for us, a very fast shared-memory-plus-descriptors virtqueue |
129 | * already exists in virtio_ring.c. We just need to connect it up. | 130 | * already exists in virtio_ring.c. We just need to connect it up. |
@@ -158,7 +159,7 @@ static void lg_notify(struct virtqueue *vq) | |||
158 | * | 159 | * |
159 | * This is kind of an ugly duckling. It'd be nicer to have a standard | 160 | * This is kind of an ugly duckling. It'd be nicer to have a standard |
160 | * representation of a virtqueue in the configuration space, but it seems that | 161 | * representation of a virtqueue in the configuration space, but it seems that |
161 | * everyone wants to do it differently. The KVM guys want the Guest to | 162 | * everyone wants to do it differently. The KVM coders want the Guest to |
162 | * allocate its own pages and tell the Host where they are, but for lguest it's | 163 | * allocate its own pages and tell the Host where they are, but for lguest it's |
163 | * simpler for the Host to simply tell us where the pages are. | 164 | * simpler for the Host to simply tell us where the pages are. |
164 | * | 165 | * |
@@ -284,6 +285,8 @@ static void add_lguest_device(struct lguest_device_desc *d) | |||
284 | { | 285 | { |
285 | struct lguest_device *ldev; | 286 | struct lguest_device *ldev; |
286 | 287 | ||
288 | /* Start with zeroed memory; Linux's device layer seems to count on | ||
289 | * it. */ | ||
287 | ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); | 290 | ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); |
288 | if (!ldev) { | 291 | if (!ldev) { |
289 | printk(KERN_EMERG "Cannot allocate lguest dev %u\n", | 292 | printk(KERN_EMERG "Cannot allocate lguest dev %u\n", |
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index ee405b38383d..9d716fa42cad 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c | |||
@@ -8,20 +8,22 @@ | |||
8 | #include <linux/fs.h> | 8 | #include <linux/fs.h> |
9 | #include "lg.h" | 9 | #include "lg.h" |
10 | 10 | ||
11 | /*L:315 To force the Guest to stop running and return to the Launcher, the | 11 | /*L:055 When something happens, the Waker process needs a way to stop the |
12 | * Waker sets writes LHREQ_BREAK and the value "1" to /dev/lguest. The | 12 | * kernel running the Guest and return to the Launcher. So the Waker writes |
13 | * Launcher then writes LHREQ_BREAK and "0" to release the Waker. */ | 13 | * LHREQ_BREAK and the value "1" to /dev/lguest to do this. Once the Launcher |
14 | * has done whatever needs attention, it writes LHREQ_BREAK and "0" to release | ||
15 | * the Waker. */ | ||
14 | static int break_guest_out(struct lguest *lg, const unsigned long __user *input) | 16 | static int break_guest_out(struct lguest *lg, const unsigned long __user *input) |
15 | { | 17 | { |
16 | unsigned long on; | 18 | unsigned long on; |
17 | 19 | ||
18 | /* Fetch whether they're turning break on or off.. */ | 20 | /* Fetch whether they're turning break on or off. */ |
19 | if (get_user(on, input) != 0) | 21 | if (get_user(on, input) != 0) |
20 | return -EFAULT; | 22 | return -EFAULT; |
21 | 23 | ||
22 | if (on) { | 24 | if (on) { |
23 | lg->break_out = 1; | 25 | lg->break_out = 1; |
24 | /* Pop it out (may be running on different CPU) */ | 26 | /* Pop it out of the Guest (may be running on different CPU) */ |
25 | wake_up_process(lg->tsk); | 27 | wake_up_process(lg->tsk); |
26 | /* Wait for them to reset it */ | 28 | /* Wait for them to reset it */ |
27 | return wait_event_interruptible(lg->break_wq, !lg->break_out); | 29 | return wait_event_interruptible(lg->break_wq, !lg->break_out); |
@@ -58,7 +60,7 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) | |||
58 | if (!lg) | 60 | if (!lg) |
59 | return -EINVAL; | 61 | return -EINVAL; |
60 | 62 | ||
61 | /* If you're not the task which owns the guest, go away. */ | 63 | /* If you're not the task which owns the Guest, go away. */ |
62 | if (current != lg->tsk) | 64 | if (current != lg->tsk) |
63 | return -EPERM; | 65 | return -EPERM; |
64 | 66 | ||
@@ -92,8 +94,8 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) | |||
92 | * base: The start of the Guest-physical memory inside the Launcher memory. | 94 | * base: The start of the Guest-physical memory inside the Launcher memory. |
93 | * | 95 | * |
94 | * pfnlimit: The highest (Guest-physical) page number the Guest should be | 96 | * pfnlimit: The highest (Guest-physical) page number the Guest should be |
95 | * allowed to access. The Launcher has to live in Guest memory, so it sets | 97 | * allowed to access. The Guest memory lives inside the Launcher, so it sets |
96 | * this to ensure the Guest can't reach it. | 98 | * this to ensure the Guest can only reach its own memory. |
97 | * | 99 | * |
98 | * pgdir: The (Guest-physical) address of the top of the initial Guest | 100 | * pgdir: The (Guest-physical) address of the top of the initial Guest |
99 | * pagetables (which are set up by the Launcher). | 101 | * pagetables (which are set up by the Launcher). |
@@ -189,7 +191,7 @@ unlock: | |||
189 | } | 191 | } |
190 | 192 | ||
191 | /*L:010 The first operation the Launcher does must be a write. All writes | 193 | /*L:010 The first operation the Launcher does must be a write. All writes |
192 | * start with a 32 bit number: for the first write this must be | 194 | * start with an unsigned long number: for the first write this must be |
193 | * LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use | 195 | * LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use |
194 | * writes of other values to send interrupts. */ | 196 | * writes of other values to send interrupts. */ |
195 | static ssize_t write(struct file *file, const char __user *in, | 197 | static ssize_t write(struct file *file, const char __user *in, |
@@ -275,8 +277,7 @@ static int close(struct inode *inode, struct file *file) | |||
275 | * The Launcher is the Host userspace program which sets up, runs and services | 277 | * The Launcher is the Host userspace program which sets up, runs and services |
276 | * the Guest. In fact, many comments in the Drivers which refer to "the Host" | 278 | * the Guest. In fact, many comments in the Drivers which refer to "the Host" |
277 | * doing things are inaccurate: the Launcher does all the device handling for | 279 | * doing things are inaccurate: the Launcher does all the device handling for |
278 | * the Guest. The Guest can't tell what's done by the the Launcher and what by | 280 | * the Guest, but the Guest can't know that. |
279 | * the Host. | ||
280 | * | 281 | * |
281 | * Just to confuse you: to the Host kernel, the Launcher *is* the Guest and we | 282 | * Just to confuse you: to the Host kernel, the Launcher *is* the Guest and we |
282 | * shall see more of that later. | 283 | * shall see more of that later. |
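The L:055 comment above describes the only use of LHREQ_BREAK: the Waker writes the request plus "1" to stop the Guest, and later writes the request plus "0" to release it. In Launcher/Waker userspace that handshake is just a write() of two unsigned longs to /dev/lguest, roughly as follows (error handling trimmed, the file descriptor assumed already open):

#include <unistd.h>
#include "linux/lguest_launcher.h"	/* LHREQ_BREAK */

static void set_break(int lguest_fd, unsigned long on)
{
	/* First word selects the request, second is the on/off value. */
	unsigned long args[] = { LHREQ_BREAK, on };

	if (write(lguest_fd, args, sizeof(args)) < 0)
		return;		/* a real Launcher would err() out here */
}

set_break(fd, 1) pops the Guest out to the Launcher; set_break(fd, 0) lets it run again, matching the protocol described in the comment.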
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index 2a45f0691c9b..fffabb327157 100644 --- a/drivers/lguest/page_tables.c +++ b/drivers/lguest/page_tables.c | |||
@@ -26,7 +26,8 @@ | |||
26 | * | 26 | * |
27 | * We use two-level page tables for the Guest. If you're not entirely | 27 | * We use two-level page tables for the Guest. If you're not entirely |
28 | * comfortable with virtual addresses, physical addresses and page tables then | 28 | * comfortable with virtual addresses, physical addresses and page tables then |
29 | * I recommend you review lguest.c's "Page Table Handling" (with diagrams!). | 29 | * I recommend you review arch/x86/lguest/boot.c's "Page Table Handling" (with |
30 | * diagrams!). | ||
30 | * | 31 | * |
31 | * The Guest keeps page tables, but we maintain the actual ones here: these are | 32 | * The Guest keeps page tables, but we maintain the actual ones here: these are |
32 | * called "shadow" page tables. Which is a very Guest-centric name: these are | 33 | * called "shadow" page tables. Which is a very Guest-centric name: these are |
@@ -36,11 +37,11 @@ | |||
36 | * | 37 | * |
37 | * Anyway, this is the most complicated part of the Host code. There are seven | 38 | * Anyway, this is the most complicated part of the Host code. There are seven |
38 | * parts to this: | 39 | * parts to this: |
39 | * (i) Setting up a page table entry for the Guest when it faults, | 40 | * (i) Looking up a page table entry when the Guest faults, |
40 | * (ii) Setting up the page table entry for the Guest stack, | 41 | * (ii) Making sure the Guest stack is mapped, |
41 | * (iii) Setting up a page table entry when the Guest tells us it has changed, | 42 | * (iii) Setting up a page table entry when the Guest tells us one has changed, |
42 | * (iv) Switching page tables, | 43 | * (iv) Switching page tables, |
43 | * (v) Flushing (thowing away) page tables, | 44 | * (v) Flushing (throwing away) page tables, |
44 | * (vi) Mapping the Switcher when the Guest is about to run, | 45 | * (vi) Mapping the Switcher when the Guest is about to run, |
45 | * (vii) Setting up the page tables initially. | 46 | * (vii) Setting up the page tables initially. |
46 | :*/ | 47 | :*/ |
@@ -57,16 +58,15 @@ | |||
57 | static DEFINE_PER_CPU(pte_t *, switcher_pte_pages); | 58 | static DEFINE_PER_CPU(pte_t *, switcher_pte_pages); |
58 | #define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu) | 59 | #define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu) |
59 | 60 | ||
60 | /*H:320 With our shadow and Guest types established, we need to deal with | 61 | /*H:320 The page table code is curly enough to need helper functions to keep it |
61 | * them: the page table code is curly enough to need helper functions to keep | 62 | * clear and clean. |
62 | * it clear and clean. | ||
63 | * | 63 | * |
64 | * There are two functions which return pointers to the shadow (aka "real") | 64 | * There are two functions which return pointers to the shadow (aka "real") |
65 | * page tables. | 65 | * page tables. |
66 | * | 66 | * |
67 | * spgd_addr() takes the virtual address and returns a pointer to the top-level | 67 | * spgd_addr() takes the virtual address and returns a pointer to the top-level |
68 | * page directory entry for that address. Since we keep track of several page | 68 | * page directory entry (PGD) for that address. Since we keep track of several |
69 | * tables, the "i" argument tells us which one we're interested in (it's | 69 | * page tables, the "i" argument tells us which one we're interested in (it's |
70 | * usually the current one). */ | 70 | * usually the current one). */ |
71 | static pgd_t *spgd_addr(struct lguest *lg, u32 i, unsigned long vaddr) | 71 | static pgd_t *spgd_addr(struct lguest *lg, u32 i, unsigned long vaddr) |
72 | { | 72 | { |
@@ -81,9 +81,9 @@ static pgd_t *spgd_addr(struct lguest *lg, u32 i, unsigned long vaddr) | |||
81 | return &lg->pgdirs[i].pgdir[index]; | 81 | return &lg->pgdirs[i].pgdir[index]; |
82 | } | 82 | } |
83 | 83 | ||
84 | /* This routine then takes the PGD entry given above, which contains the | 84 | /* This routine then takes the page directory entry returned above, which |
85 | * address of the PTE page. It then returns a pointer to the PTE entry for the | 85 | * contains the address of the page table entry (PTE) page. It then returns a |
86 | * given address. */ | 86 | * pointer to the PTE entry for the given address. */ |
87 | static pte_t *spte_addr(struct lguest *lg, pgd_t spgd, unsigned long vaddr) | 87 | static pte_t *spte_addr(struct lguest *lg, pgd_t spgd, unsigned long vaddr) |
88 | { | 88 | { |
89 | pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT); | 89 | pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT); |
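For readers rusty on the 32-bit two-level layout these helpers assume: the top 10 bits of a virtual address pick the PGD slot, the next 10 bits pick the PTE within that page, and the low 12 bits are the byte offset. A reference sketch of the index math (the constants are the classic i386 values, not quoted from lg.h):

    /* Illustrative index split behind spgd_addr()/spte_addr(). */
    #define PTRS_PER_PTE 1024

    static inline unsigned int pgd_slot(unsigned long vaddr)
    {
            return vaddr >> 22;                        /* top 10 bits */
    }

    static inline unsigned int pte_slot(unsigned long vaddr)
    {
            return (vaddr >> 12) & (PTRS_PER_PTE - 1); /* middle 10 bits */
    }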
@@ -191,7 +191,7 @@ static void check_gpgd(struct lguest *lg, pgd_t gpgd) | |||
191 | } | 191 | } |
192 | 192 | ||
193 | /*H:330 | 193 | /*H:330 |
194 | * (i) Setting up a page table entry for the Guest when it faults | 194 | * (i) Looking up a page table entry when the Guest faults. |
195 | * | 195 | * |
196 | * We saw this call in run_guest(): when we see a page fault in the Guest, we | 196 | * We saw this call in run_guest(): when we see a page fault in the Guest, we |
197 | * come here. That's because we only set up the shadow page tables lazily as | 197 | * come here. That's because we only set up the shadow page tables lazily as |
@@ -199,7 +199,7 @@ static void check_gpgd(struct lguest *lg, pgd_t gpgd) | |||
199 | * and return to the Guest without it knowing. | 199 | * and return to the Guest without it knowing. |
200 | * | 200 | * |
201 | * If we fixed up the fault (ie. we mapped the address), this routine returns | 201 | * If we fixed up the fault (ie. we mapped the address), this routine returns |
202 | * true. */ | 202 | * true. Otherwise, it was a real fault and we need to tell the Guest. */ |
203 | int demand_page(struct lguest *lg, unsigned long vaddr, int errcode) | 203 | int demand_page(struct lguest *lg, unsigned long vaddr, int errcode) |
204 | { | 204 | { |
205 | pgd_t gpgd; | 205 | pgd_t gpgd; |
@@ -246,16 +246,16 @@ int demand_page(struct lguest *lg, unsigned long vaddr, int errcode) | |||
246 | if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW)) | 246 | if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW)) |
247 | return 0; | 247 | return 0; |
248 | 248 | ||
249 | /* User access to a kernel page? (bit 3 == user access) */ | 249 | /* User access to a kernel-only page? (bit 3 == user access) */ |
250 | if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER)) | 250 | if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER)) |
251 | return 0; | 251 | return 0; |
252 | 252 | ||
253 | /* Check that the Guest PTE flags are OK, and the page number is below | 253 | /* Check that the Guest PTE flags are OK, and the page number is below |
254 | * the pfn_limit (ie. not mapping the Launcher binary). */ | 254 | * the pfn_limit (ie. not mapping the Launcher binary). */ |
255 | check_gpte(lg, gpte); | 255 | check_gpte(lg, gpte); |
256 | |||
256 | /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */ | 257 | /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */ |
257 | gpte = pte_mkyoung(gpte); | 258 | gpte = pte_mkyoung(gpte); |
258 | |||
259 | if (errcode & 2) | 259 | if (errcode & 2) |
260 | gpte = pte_mkdirty(gpte); | 260 | gpte = pte_mkdirty(gpte); |
261 | 261 | ||
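The errcode tests above rely on the x86 page-fault error code layout. A small reference sketch of the architectural bits (standard meanings, not lguest-specific definitions):

    /* x86 page-fault error code, as delivered with trap 14: */
    #define PF_PROT  0x1   /* 0: page not present, 1: protection violation */
    #define PF_WRITE 0x2   /* the faulting access was a write              */
    #define PF_USER  0x4   /* the access came from user (CPL 3) code       */

    /* So "errcode & 2" above asks "was this a write?" and "errcode & 4"
     * asks "did userspace do it?". */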
@@ -272,23 +272,28 @@ int demand_page(struct lguest *lg, unsigned long vaddr, int errcode) | |||
272 | else | 272 | else |
273 | /* If this is a read, don't set the "writable" bit in the page | 273 | /* If this is a read, don't set the "writable" bit in the page |
274 | * table entry, even if the Guest says it's writable. That way | 274 | * table entry, even if the Guest says it's writable. That way |
275 | * we come back here when a write does actually ocur, so we can | 275 | * we will come back here when a write does actually occur, so |
276 | * update the Guest's _PAGE_DIRTY flag. */ | 276 | * we can update the Guest's _PAGE_DIRTY flag. */ |
277 | *spte = gpte_to_spte(lg, pte_wrprotect(gpte), 0); | 277 | *spte = gpte_to_spte(lg, pte_wrprotect(gpte), 0); |
278 | 278 | ||
279 | /* Finally, we write the Guest PTE entry back: we've set the | 279 | /* Finally, we write the Guest PTE entry back: we've set the |
280 | * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */ | 280 | * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */ |
281 | lgwrite(lg, gpte_ptr, pte_t, gpte); | 281 | lgwrite(lg, gpte_ptr, pte_t, gpte); |
282 | 282 | ||
283 | /* We succeeded in mapping the page! */ | 283 | /* The fault is fixed, the page table is populated, the mapping |
284 | * manipulated, the result returned and the code complete. A small | ||
285 | * delay and a trace of alliteration are the only indications the Guest | ||
286 | * has that a page fault occurred at all. */ | ||
284 | return 1; | 287 | return 1; |
285 | } | 288 | } |
286 | 289 | ||
287 | /*H:360 (ii) Setting up the page table entry for the Guest stack. | 290 | /*H:360 |
291 | * (ii) Making sure the Guest stack is mapped. | ||
288 | * | 292 | * |
289 | * Remember pin_stack_pages() which makes sure the stack is mapped? It could | 293 | * Remember that direct traps into the Guest need a mapped Guest kernel stack. |
290 | * simply call demand_page(), but as we've seen that logic is quite long, and | 294 | * pin_stack_pages() calls us here: we could simply call demand_page(), but as |
291 | * usually the stack pages are already mapped anyway, so it's not required. | 295 | * we've seen that logic is quite long, and usually the stack pages are already |
296 | * mapped, so it's overkill. | ||
292 | * | 297 | * |
293 | * This is a quick version which answers the question: is this virtual address | 298 | * This is a quick version which answers the question: is this virtual address |
294 | * mapped by the shadow page tables, and is it writable? */ | 299 | * mapped by the shadow page tables, and is it writable? */ |
@@ -297,7 +302,7 @@ static int page_writable(struct lguest *lg, unsigned long vaddr) | |||
297 | pgd_t *spgd; | 302 | pgd_t *spgd; |
298 | unsigned long flags; | 303 | unsigned long flags; |
299 | 304 | ||
300 | /* Look at the top level entry: is it present? */ | 305 | /* Look at the current top level entry: is it present? */ |
301 | spgd = spgd_addr(lg, lg->pgdidx, vaddr); | 306 | spgd = spgd_addr(lg, lg->pgdidx, vaddr); |
302 | if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) | 307 | if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) |
303 | return 0; | 308 | return 0; |
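Condensed, the whole quick check reads roughly as below; the helper names are the ones introduced earlier and the flag tests mirror the comments, so treat this as a sketch rather than a quote of page_writable():

    /* Sketch: is vaddr mapped writable in the current shadow page table? */
    static int writable_sketch(struct lguest *lg, unsigned long vaddr)
    {
            pgd_t *spgd = spgd_addr(lg, lg->pgdidx, vaddr);
            unsigned long flags;

            if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
                    return 0;

            flags = pte_flags(*spte_addr(lg, *spgd, vaddr));
            return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
    }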
@@ -333,15 +338,14 @@ static void release_pgd(struct lguest *lg, pgd_t *spgd) | |||
333 | release_pte(ptepage[i]); | 338 | release_pte(ptepage[i]); |
334 | /* Now we can free the page of PTEs */ | 339 | /* Now we can free the page of PTEs */ |
335 | free_page((long)ptepage); | 340 | free_page((long)ptepage); |
336 | /* And zero out the PGD entry we we never release it twice. */ | 341 | /* And zero out the PGD entry so we never release it twice. */ |
337 | *spgd = __pgd(0); | 342 | *spgd = __pgd(0); |
338 | } | 343 | } |
339 | } | 344 | } |
340 | 345 | ||
341 | /*H:440 (v) Flushing (thowing away) page tables, | 346 | /*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings() |
342 | * | 347 | * hypercall and once in new_pgdir() when we re-used a top-level pgdir page. |
343 | * We saw flush_user_mappings() called when we re-used a top-level pgdir page. | 348 | * It simply releases every PTE page from 0 up to the Guest's kernel address. */ |
344 | * It simply releases every PTE page from 0 up to the kernel address. */ | ||
345 | static void flush_user_mappings(struct lguest *lg, int idx) | 349 | static void flush_user_mappings(struct lguest *lg, int idx) |
346 | { | 350 | { |
347 | unsigned int i; | 351 | unsigned int i; |
@@ -350,8 +354,10 @@ static void flush_user_mappings(struct lguest *lg, int idx) | |||
350 | release_pgd(lg, lg->pgdirs[idx].pgdir + i); | 354 | release_pgd(lg, lg->pgdirs[idx].pgdir + i); |
351 | } | 355 | } |
352 | 356 | ||
353 | /* The Guest also has a hypercall to do this manually: it's used when a large | 357 | /*H:440 (v) Flushing (throwing away) page tables, |
354 | * number of mappings have been changed. */ | 358 | * |
359 | * The Guest has a hypercall to throw away the page tables: it's used when a | ||
360 | * large number of mappings have been changed. */ | ||
355 | void guest_pagetable_flush_user(struct lguest *lg) | 361 | void guest_pagetable_flush_user(struct lguest *lg) |
356 | { | 362 | { |
357 | /* Drop the userspace part of the current page table. */ | 363 | /* Drop the userspace part of the current page table. */ |
@@ -423,8 +429,9 @@ static unsigned int new_pgdir(struct lguest *lg, | |||
423 | 429 | ||
424 | /*H:430 (iv) Switching page tables | 430 | /*H:430 (iv) Switching page tables |
425 | * | 431 | * |
426 | * This is what happens when the Guest changes page tables (ie. changes the | 432 | * Now we've seen all the page table setting and manipulation, let's see |
427 | * top-level pgdir). This happens on almost every context switch. */ | 433 | * what happens when the Guest changes page tables (ie. changes the top-level |
434 | * pgdir). This occurs on almost every context switch. */ | ||
428 | void guest_new_pagetable(struct lguest *lg, unsigned long pgtable) | 435 | void guest_new_pagetable(struct lguest *lg, unsigned long pgtable) |
429 | { | 436 | { |
430 | int newpgdir, repin = 0; | 437 | int newpgdir, repin = 0; |
@@ -443,7 +450,8 @@ void guest_new_pagetable(struct lguest *lg, unsigned long pgtable) | |||
443 | } | 450 | } |
444 | 451 | ||
445 | /*H:470 Finally, a routine which throws away everything: all PGD entries in all | 452 | /*H:470 Finally, a routine which throws away everything: all PGD entries in all |
446 | * the shadow page tables. This is used when we destroy the Guest. */ | 453 | * the shadow page tables, including the Guest's kernel mappings. This is used |
454 | * when we destroy the Guest. */ | ||
447 | static void release_all_pagetables(struct lguest *lg) | 455 | static void release_all_pagetables(struct lguest *lg) |
448 | { | 456 | { |
449 | unsigned int i, j; | 457 | unsigned int i, j; |
@@ -458,13 +466,22 @@ static void release_all_pagetables(struct lguest *lg) | |||
458 | 466 | ||
459 | /* We also throw away everything when a Guest tells us it's changed a kernel | 467 | /* We also throw away everything when a Guest tells us it's changed a kernel |
460 | * mapping. Since kernel mappings are in every page table, it's easiest to | 468 | * mapping. Since kernel mappings are in every page table, it's easiest to |
461 | * throw them all away. This is amazingly slow, but thankfully rare. */ | 469 | * throw them all away. This traps the Guest in amber for a while as |
470 | * everything faults back in, but it's rare. */ | ||
462 | void guest_pagetable_clear_all(struct lguest *lg) | 471 | void guest_pagetable_clear_all(struct lguest *lg) |
463 | { | 472 | { |
464 | release_all_pagetables(lg); | 473 | release_all_pagetables(lg); |
465 | /* We need the Guest kernel stack mapped again. */ | 474 | /* We need the Guest kernel stack mapped again. */ |
466 | pin_stack_pages(lg); | 475 | pin_stack_pages(lg); |
467 | } | 476 | } |
477 | /*:*/ | ||
478 | /*M:009 Since we throw away all mappings when a kernel mapping changes, our | ||
479 | * performance sucks for guests using highmem. In fact, a guest with | ||
480 | * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is | ||
481 | * usually slower than a Guest with less memory. | ||
482 | * | ||
483 | * This, of course, cannot be fixed. It would take some kind of... well, I | ||
484 | * don't know, but the term "puissant code-fu" comes to mind. :*/ | ||
468 | 485 | ||
469 | /*H:420 This is the routine which actually sets the page table entry for the | 486 | /*H:420 This is the routine which actually sets the page table entry for the |
470 | * "idx"'th shadow page table. | 487 | * "idx"'th shadow page table. |
@@ -483,7 +500,7 @@ void guest_pagetable_clear_all(struct lguest *lg) | |||
483 | static void do_set_pte(struct lguest *lg, int idx, | 500 | static void do_set_pte(struct lguest *lg, int idx, |
484 | unsigned long vaddr, pte_t gpte) | 501 | unsigned long vaddr, pte_t gpte) |
485 | { | 502 | { |
486 | /* Look up the matching shadow page directot entry. */ | 503 | /* Look up the matching shadow page directory entry. */ |
487 | pgd_t *spgd = spgd_addr(lg, idx, vaddr); | 504 | pgd_t *spgd = spgd_addr(lg, idx, vaddr); |
488 | 505 | ||
489 | /* If the top level isn't present, there's no entry to update. */ | 506 | /* If the top level isn't present, there's no entry to update. */ |
@@ -500,7 +517,8 @@ static void do_set_pte(struct lguest *lg, int idx, | |||
500 | *spte = gpte_to_spte(lg, gpte, | 517 | *spte = gpte_to_spte(lg, gpte, |
501 | pte_flags(gpte) & _PAGE_DIRTY); | 518 | pte_flags(gpte) & _PAGE_DIRTY); |
502 | } else | 519 | } else |
503 | /* Otherwise we can demand_page() it in later. */ | 520 | /* Otherwise kill it and we can demand_page() it in |
521 | * later. */ | ||
504 | *spte = __pte(0); | 522 | *spte = __pte(0); |
505 | } | 523 | } |
506 | } | 524 | } |
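Pulling the pieces of this hunk together, the update path amounts to the following; the guard on the Guest flags is paraphrased from memory of the driver, so treat it as an assumption rather than the exact condition:

    /* Summary sketch of do_set_pte()'s decision. */
    if (pgd_flags(*spgd) & _PAGE_PRESENT) {
            pte_t *spte = spte_addr(lg, *spgd, vaddr);

            release_pte(*spte);
            if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
                    /* Worth installing now: the Guest already touched it. */
                    check_gpte(lg, gpte);
                    *spte = gpte_to_spte(lg, gpte, pte_flags(gpte) & _PAGE_DIRTY);
            } else {
                    /* Clear it; demand_page() will fill it in on first use. */
                    *spte = __pte(0);
            }
    }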
@@ -535,7 +553,7 @@ void guest_set_pte(struct lguest *lg, | |||
535 | } | 553 | } |
536 | 554 | ||
537 | /*H:400 | 555 | /*H:400 |
538 | * (iii) Setting up a page table entry when the Guest tells us it has changed. | 556 | * (iii) Setting up a page table entry when the Guest tells us one has changed. |
539 | * | 557 | * |
540 | * Just like we did in interrupts_and_traps.c, it makes sense for us to deal | 558 | * Just like we did in interrupts_and_traps.c, it makes sense for us to deal |
541 | * with the other side of page tables while we're here: what happens when the | 559 | * with the other side of page tables while we're here: what happens when the |
@@ -612,9 +630,10 @@ void free_guest_pagetable(struct lguest *lg) | |||
612 | 630 | ||
613 | /*H:480 (vi) Mapping the Switcher when the Guest is about to run. | 631 | /*H:480 (vi) Mapping the Switcher when the Guest is about to run. |
614 | * | 632 | * |
615 | * The Switcher and the two pages for this CPU need to be available to the | 633 | * The Switcher and the two pages for this CPU need to be visible in the |
616 | * Guest (and not the pages for other CPUs). We have the appropriate PTE pages | 634 | * Guest (and not the pages for other CPUs). We have the appropriate PTE pages |
617 | * for each CPU already set up, we just need to hook them in. */ | 635 | * for each CPU already set up, we just need to hook them in now we know which |
636 | * Guest is about to run on this CPU. */ | ||
618 | void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages) | 637 | void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages) |
619 | { | 638 | { |
620 | pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages); | 639 | pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages); |
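The "hook them in" step boils down to pointing one shadow PGD slot at this CPU's ready-made Switcher PTE page. A sketch under the assumption that the Switcher occupies the topmost PGD slot (the slot name and flag value here are assumptions, not quotes from the driver):

    /* Sketch: map this CPU's Switcher PTE page into the current shadow pgdir. */
    #define SWITCHER_SLOT (PTRS_PER_PGD - 1)          /* assumed: topmost entry */

    pte_t *switcher_ptes = __get_cpu_var(switcher_pte_pages);

    lg->pgdirs[lg->pgdidx].pgdir[SWITCHER_SLOT] =
            __pgd(__pa(switcher_ptes) | _PAGE_KERNEL);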
@@ -677,6 +696,18 @@ static __init void populate_switcher_pte_page(unsigned int cpu, | |||
677 | __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)); | 696 | __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)); |
678 | } | 697 | } |
679 | 698 | ||
699 | /* We've made it through the page table code. Perhaps our tired brains are | ||
700 | * still processing the details, or perhaps we're simply glad it's over. | ||
701 | * | ||
702 | * If nothing else, note that all this complexity in juggling shadow page | ||
703 | * tables in sync with the Guest's page tables is for one reason: for most | ||
704 | * Guests this page table dance determines how bad performance will be. This | ||
705 | * is why Xen uses exotic direct Guest pagetable manipulation, and why both | ||
706 | * Intel and AMD have implemented shadow page table support directly into | ||
707 | * hardware. | ||
708 | * | ||
709 | * There is just one file remaining in the Host. */ | ||
710 | |||
680 | /*H:510 At boot or module load time, init_pagetables() allocates and populates | 711 | /*H:510 At boot or module load time, init_pagetables() allocates and populates |
681 | * the Switcher PTE page for each CPU. */ | 712 | * the Switcher PTE page for each CPU. */ |
682 | __init int init_pagetables(struct page **switcher_page, unsigned int pages) | 713 | __init int init_pagetables(struct page **switcher_page, unsigned int pages) |
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c index c2434ec99f7b..9e189cbec7dd 100644 --- a/drivers/lguest/segments.c +++ b/drivers/lguest/segments.c | |||
@@ -12,8 +12,6 @@ | |||
12 | #include "lg.h" | 12 | #include "lg.h" |
13 | 13 | ||
14 | /*H:600 | 14 | /*H:600 |
15 | * We've almost completed the Host; there's just one file to go! | ||
16 | * | ||
17 | * Segments & The Global Descriptor Table | 15 | * Segments & The Global Descriptor Table |
18 | * | 16 | * |
19 | * (That title sounds like a bad Nerdcore group. Not to suggest that there are | 17 | * (That title sounds like a bad Nerdcore group. Not to suggest that there are |
@@ -55,7 +53,7 @@ static int ignored_gdt(unsigned int num) | |||
55 | || num == GDT_ENTRY_DOUBLEFAULT_TSS); | 53 | || num == GDT_ENTRY_DOUBLEFAULT_TSS); |
56 | } | 54 | } |
57 | 55 | ||
58 | /*H:610 Once the GDT has been changed, we fix the new entries up a little. We | 56 | /*H:630 Once the Guest gives us new GDT entries, we fix them up a little. We |
59 | * don't care if they're invalid: the worst that can happen is a General | 57 | * don't care if they're invalid: the worst that can happen is a General |
60 | * Protection Fault in the Switcher when it restores a Guest segment register | 58 | * Protection Fault in the Switcher when it restores a Guest segment register |
61 | * which tries to use that entry. Then we kill the Guest for causing such a | 59 | * which tries to use that entry. Then we kill the Guest for causing such a |
@@ -84,25 +82,33 @@ static void fixup_gdt_table(struct lguest *lg, unsigned start, unsigned end) | |||
84 | } | 82 | } |
85 | } | 83 | } |
86 | 84 | ||
87 | /* This routine is called at boot or modprobe time for each CPU to set up the | 85 | /*H:610 Like the IDT, we never simply use the GDT the Guest gives us. We keep |
88 | * "constant" GDT entries for Guests running on that CPU. */ | 86 | * a GDT for each CPU, and copy across the Guest's entries each time we want to |
87 | * run the Guest on that CPU. | ||
88 | * | ||
89 | * This routine is called at boot or modprobe time for each CPU to set up the | ||
90 | * constant GDT entries: the ones which are the same no matter what Guest we're | ||
91 | * running. */ | ||
89 | void setup_default_gdt_entries(struct lguest_ro_state *state) | 92 | void setup_default_gdt_entries(struct lguest_ro_state *state) |
90 | { | 93 | { |
91 | struct desc_struct *gdt = state->guest_gdt; | 94 | struct desc_struct *gdt = state->guest_gdt; |
92 | unsigned long tss = (unsigned long)&state->guest_tss; | 95 | unsigned long tss = (unsigned long)&state->guest_tss; |
93 | 96 | ||
94 | /* The hypervisor segments are full 0-4G segments, privilege level 0 */ | 97 | /* The Switcher segments are full 0-4G segments, privilege level 0 */ |
95 | gdt[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT; | 98 | gdt[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT; |
96 | gdt[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT; | 99 | gdt[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT; |
97 | 100 | ||
98 | /* The TSS segment refers to the TSS entry for this CPU, so we cannot | 101 | /* The TSS segment refers to the TSS entry for this particular CPU. |
99 | * copy it from the Guest. Forgive the magic flags */ | 102 | * Forgive the magic flags: the 0x8900 means the entry is Present, it's |
103 | * privilege level 0 Available 386 TSS system segment, and the 0x67 | ||
104 | * means Saturn is eclipsed by Mercury in the twelfth house. */ | ||
100 | gdt[GDT_ENTRY_TSS].a = 0x00000067 | (tss << 16); | 105 | gdt[GDT_ENTRY_TSS].a = 0x00000067 | (tss << 16); |
101 | gdt[GDT_ENTRY_TSS].b = 0x00008900 | (tss & 0xFF000000) | 106 | gdt[GDT_ENTRY_TSS].b = 0x00008900 | (tss & 0xFF000000) |
102 | | ((tss >> 16) & 0x000000FF); | 107 | | ((tss >> 16) & 0x000000FF); |
103 | } | 108 | } |
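For readers who want the magic decoded: an i386 descriptor spreads a 32-bit base and a limit across two 32-bit words. 0x67 is the 104-byte TSS size minus one, and 0x89 (Present, DPL 0, available 32-bit TSS) sits in bits 8-15 of the high word, hence the 0x8900. A reference sketch of the packing (the layout is architectural, not driver code):

    /* How the two words of the TSS descriptor above are assembled. */
    static void pack_tss_descriptor(struct desc_struct *d, unsigned long base)
    {
            unsigned int limit = 0x67;                 /* sizeof(TSS) - 1 */

            d->a = (limit & 0xFFFF) | ((base & 0xFFFF) << 16);  /* limit, base 15..0   */
            d->b = ((base >> 16) & 0xFF)                        /* base 23..16         */
                 | 0x8900                                       /* present, DPL 0, TSS */
                 | (base & 0xFF000000);                         /* base 31..24         */
    }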
104 | 109 | ||
105 | /* This routine is called before the Guest is run for the first time. */ | 110 | /* This routine sets up the initial Guest GDT for booting. All entries start |
111 | * as 0 (unusable). */ | ||
106 | void setup_guest_gdt(struct lguest *lg) | 112 | void setup_guest_gdt(struct lguest *lg) |
107 | { | 113 | { |
108 | /* Start with full 0-4G segments... */ | 114 | /* Start with full 0-4G segments... */ |
@@ -114,13 +120,8 @@ void setup_guest_gdt(struct lguest *lg) | |||
114 | lg->arch.gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13); | 120 | lg->arch.gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13); |
115 | } | 121 | } |
116 | 122 | ||
117 | /* Like the IDT, we never simply use the GDT the Guest gives us. We set up the | 123 | /*H:650 An optimization of copy_gdt(), for just the three "thread-local storage" |
118 | * GDTs for each CPU, then we copy across the entries each time we want to run | 124 | * entries. */ |
119 | * a different Guest on that CPU. */ | ||
120 | |||
121 | /* A partial GDT load, for the three "thead-local storage" entries. Otherwise | ||
122 | * it's just like load_guest_gdt(). So much, in fact, it would probably be | ||
123 | * neater to have a single hypercall to cover both. */ | ||
124 | void copy_gdt_tls(const struct lguest *lg, struct desc_struct *gdt) | 125 | void copy_gdt_tls(const struct lguest *lg, struct desc_struct *gdt) |
125 | { | 126 | { |
126 | unsigned int i; | 127 | unsigned int i; |
@@ -129,7 +130,9 @@ void copy_gdt_tls(const struct lguest *lg, struct desc_struct *gdt) | |||
129 | gdt[i] = lg->arch.gdt[i]; | 130 | gdt[i] = lg->arch.gdt[i]; |
130 | } | 131 | } |
131 | 132 | ||
132 | /* This is the full version */ | 133 | /*H:640 When the Guest is run on a different CPU, or the GDT entries have |
134 | * changed, copy_gdt() is called to copy the Guest's GDT entries across to this | ||
135 | * CPU's GDT. */ | ||
133 | void copy_gdt(const struct lguest *lg, struct desc_struct *gdt) | 136 | void copy_gdt(const struct lguest *lg, struct desc_struct *gdt) |
134 | { | 137 | { |
135 | unsigned int i; | 138 | unsigned int i; |
@@ -141,7 +144,8 @@ void copy_gdt(const struct lguest *lg, struct desc_struct *gdt) | |||
141 | gdt[i] = lg->arch.gdt[i]; | 144 | gdt[i] = lg->arch.gdt[i]; |
142 | } | 145 | } |
143 | 146 | ||
144 | /* This is where the Guest asks us to load a new GDT (LHCALL_LOAD_GDT). */ | 147 | /*H:620 This is where the Guest asks us to load a new GDT (LHCALL_LOAD_GDT). |
148 | * We copy it from the Guest and tweak the entries. */ | ||
145 | void load_guest_gdt(struct lguest *lg, unsigned long table, u32 num) | 149 | void load_guest_gdt(struct lguest *lg, unsigned long table, u32 num) |
146 | { | 150 | { |
147 | /* We assume the Guest has the same number of GDT entries as the | 151 | /* We assume the Guest has the same number of GDT entries as the |
@@ -157,16 +161,22 @@ void load_guest_gdt(struct lguest *lg, unsigned long table, u32 num) | |||
157 | lg->changed |= CHANGED_GDT; | 161 | lg->changed |= CHANGED_GDT; |
158 | } | 162 | } |
159 | 163 | ||
164 | /* This is the fast-track version for just changing the three TLS entries. | ||
165 | * Remember that this happens on every context switch, so it's worth | ||
166 | * optimizing. But wouldn't it be neater to have a single hypercall to cover | ||
167 | * both cases? */ | ||
160 | void guest_load_tls(struct lguest *lg, unsigned long gtls) | 168 | void guest_load_tls(struct lguest *lg, unsigned long gtls) |
161 | { | 169 | { |
162 | struct desc_struct *tls = &lg->arch.gdt[GDT_ENTRY_TLS_MIN]; | 170 | struct desc_struct *tls = &lg->arch.gdt[GDT_ENTRY_TLS_MIN]; |
163 | 171 | ||
164 | __lgread(lg, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES); | 172 | __lgread(lg, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES); |
165 | fixup_gdt_table(lg, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1); | 173 | fixup_gdt_table(lg, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1); |
174 | /* Note that just the TLS entries have changed. */ | ||
166 | lg->changed |= CHANGED_GDT_TLS; | 175 | lg->changed |= CHANGED_GDT_TLS; |
167 | } | 176 | } |
177 | /*:*/ | ||
168 | 178 | ||
169 | /* | 179 | /*H:660 |
170 | * With this, we have finished the Host. | 180 | * With this, we have finished the Host. |
171 | * | 181 | * |
172 | * Five of the seven parts of our task are complete. You have made it through | 182 | * Five of the seven parts of our task are complete. You have made it through |
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index 9eed12d5a395..482aec2a9631 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c | |||
@@ -63,7 +63,7 @@ static struct lguest_pages *lguest_pages(unsigned int cpu) | |||
63 | static DEFINE_PER_CPU(struct lguest *, last_guest); | 63 | static DEFINE_PER_CPU(struct lguest *, last_guest); |
64 | 64 | ||
65 | /*S:010 | 65 | /*S:010 |
66 | * We are getting close to the Switcher. | 66 | * We approach the Switcher. |
67 | * | 67 | * |
68 | * Remember that each CPU has two pages which are visible to the Guest when it | 68 | * Remember that each CPU has two pages which are visible to the Guest when it |
69 | * runs on that CPU. This has to contain the state for that Guest: we copy the | 69 | * runs on that CPU. This has to contain the state for that Guest: we copy the |
@@ -134,7 +134,7 @@ static void run_guest_once(struct lguest *lg, struct lguest_pages *pages) | |||
134 | * | 134 | * |
135 | * The lcall also pushes the old code segment (KERNEL_CS) onto the | 135 | * The lcall also pushes the old code segment (KERNEL_CS) onto the |
136 | * stack, then the address of this call. This stack layout happens to | 136 | * stack, then the address of this call. This stack layout happens to |
137 | * exactly match the stack of an interrupt... */ | 137 | * exactly match the stack layout created by an interrupt... */ |
138 | asm volatile("pushf; lcall *lguest_entry" | 138 | asm volatile("pushf; lcall *lguest_entry" |
139 | /* This is how we tell GCC that %eax ("a") and %ebx ("b") | 139 | /* This is how we tell GCC that %eax ("a") and %ebx ("b") |
140 | * are changed by this routine. The "=" means output. */ | 140 | * are changed by this routine. The "=" means output. */ |
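Concretely, "matches the stack layout created by an interrupt" means the three words an iret will pop are already in place. A descriptive sketch (not kernel code) of what pushf plus lcall leaves on the stack, lowest address first:

    /* The Host stack after "pushf; lcall *lguest_entry": */
    struct iret_style_frame {
            unsigned long eip;     /* pushed last, by lcall: where to resume */
            unsigned long cs;      /* old code segment, also pushed by lcall */
            unsigned long eflags;  /* pushed first, by pushf                 */
    };      /* ...exactly what a hardware interrupt pushes, so the Switcher
             * can later return here with a plain "iret". */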
@@ -151,40 +151,46 @@ static void run_guest_once(struct lguest *lg, struct lguest_pages *pages) | |||
151 | } | 151 | } |
152 | /*:*/ | 152 | /*:*/ |
153 | 153 | ||
154 | /*M:002 There are hooks in the scheduler which we can register to tell when we | ||
155 | * get kicked off the CPU (preempt_notifier_register()). This would allow us | ||
156 | * to lazily disable SYSENTER which would regain some performance, and should | ||
157 | * also simplify copy_in_guest_info(). Note that we'd still need to restore | ||
158 | * things when we exit to Launcher userspace, but that's fairly easy. | ||
159 | * | ||
160 | * The hooks were designed for KVM, but we can also put them to good use. :*/ | ||
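For context, the scheduler hooks that comment refers to look roughly like this; the API names are the preempt-notifier interface merged for KVM, while the callback bodies are placeholders:

    #include <linux/preempt.h>

    /* sched_out fires when this task is preempted off the CPU, sched_in when
     * it gets the CPU back: the natural points for lazy SYSENTER handling. */
    static void lg_sched_in(struct preempt_notifier *pn, int cpu)
    {
            /* e.g. disable SYSENTER again before resuming the Guest */
    }

    static void lg_sched_out(struct preempt_notifier *pn, struct task_struct *next)
    {
            /* e.g. restore the Host's SYSENTER MSR */
    }

    static struct preempt_ops lg_preempt_ops = {
            .sched_in  = lg_sched_in,
            .sched_out = lg_sched_out,
    };

    static struct preempt_notifier lg_notifier;

    static void lg_register_preempt_hooks(void)
    {
            preempt_notifier_init(&lg_notifier, &lg_preempt_ops);
            preempt_notifier_register(&lg_notifier);
    }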
161 | |||
154 | /*H:040 This is the i386-specific code to setup and run the Guest. Interrupts | 162 | /*H:040 This is the i386-specific code to setup and run the Guest. Interrupts |
155 | * are disabled: we own the CPU. */ | 163 | * are disabled: we own the CPU. */ |
156 | void lguest_arch_run_guest(struct lguest *lg) | 164 | void lguest_arch_run_guest(struct lguest *lg) |
157 | { | 165 | { |
158 | /* Remember the awfully-named TS bit? If the Guest has asked | 166 | /* Remember the awfully-named TS bit? If the Guest has asked to set it |
159 | * to set it we set it now, so we can trap and pass that trap | 167 | * we set it now, so we can trap and pass that trap to the Guest if it |
160 | * to the Guest if it uses the FPU. */ | 168 | * uses the FPU. */ |
161 | if (lg->ts) | 169 | if (lg->ts) |
162 | lguest_set_ts(); | 170 | lguest_set_ts(); |
163 | 171 | ||
164 | /* SYSENTER is an optimized way of doing system calls. We | 172 | /* SYSENTER is an optimized way of doing system calls. We can't allow |
165 | * can't allow it because it always jumps to privilege level 0. | 173 | * it because it always jumps to privilege level 0. A normal Guest |
166 | * A normal Guest won't try it because we don't advertise it in | 174 | * won't try it because we don't advertise it in CPUID, but a malicious |
167 | * CPUID, but a malicious Guest (or malicious Guest userspace | 175 | * Guest (or malicious Guest userspace program) could, so we tell the |
168 | * program) could, so we tell the CPU to disable it before | 176 | * CPU to disable it before running the Guest. */ |
169 | * running the Guest. */ | ||
170 | if (boot_cpu_has(X86_FEATURE_SEP)) | 177 | if (boot_cpu_has(X86_FEATURE_SEP)) |
171 | wrmsr(MSR_IA32_SYSENTER_CS, 0, 0); | 178 | wrmsr(MSR_IA32_SYSENTER_CS, 0, 0); |
172 | 179 | ||
173 | /* Now we actually run the Guest. It will pop back out when | 180 | /* Now we actually run the Guest. It will return when something |
174 | * something interesting happens, and we can examine its | 181 | * interesting happens, and we can examine its registers to see what it |
175 | * registers to see what it was doing. */ | 182 | * was doing. */ |
176 | run_guest_once(lg, lguest_pages(raw_smp_processor_id())); | 183 | run_guest_once(lg, lguest_pages(raw_smp_processor_id())); |
177 | 184 | ||
178 | /* The "regs" pointer contains two extra entries which are not | 185 | /* Note that the "regs" pointer contains two extra entries which are |
179 | * really registers: a trap number which says what interrupt or | 186 | * not really registers: a trap number which says what interrupt or |
180 | * trap made the switcher code come back, and an error code | 187 | * trap made the switcher code come back, and an error code which some |
181 | * which some traps set. */ | 188 | * traps set. */ |
182 | 189 | ||
183 | /* If the Guest page faulted, then the cr2 register will tell | 190 | /* If the Guest page faulted, then the cr2 register will tell us the |
184 | * us the bad virtual address. We have to grab this now, | 191 | * bad virtual address. We have to grab this now, because once we |
185 | * because once we re-enable interrupts an interrupt could | 192 | * re-enable interrupts an interrupt could fault and thus overwrite |
186 | * fault and thus overwrite cr2, or we could even move off to a | 193 | * cr2, or we could even move off to a different CPU. */ |
187 | * different CPU. */ | ||
188 | if (lg->regs->trapnum == 14) | 194 | if (lg->regs->trapnum == 14) |
189 | lg->arch.last_pagefault = read_cr2(); | 195 | lg->arch.last_pagefault = read_cr2(); |
190 | /* Similarly, if we took a trap because the Guest used the FPU, | 196 | /* Similarly, if we took a trap because the Guest used the FPU, |
@@ -197,14 +203,15 @@ void lguest_arch_run_guest(struct lguest *lg) | |||
197 | wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0); | 203 | wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0); |
198 | } | 204 | } |
199 | 205 | ||
200 | /*H:130 Our Guest is usually so well behaved; it never tries to do things it | 206 | /*H:130 Now we've examined the hypercall code; our Guest can make requests. |
201 | * isn't allowed to. Unfortunately, Linux's paravirtual infrastructure isn't | 207 | * Our Guest is usually so well behaved; it never tries to do things it isn't |
202 | * quite complete, because it doesn't contain replacements for the Intel I/O | 208 | * allowed to, and uses hypercalls instead. Unfortunately, Linux's paravirtual |
203 | * instructions. As a result, the Guest sometimes fumbles across one during | 209 | * infrastructure isn't quite complete, because it doesn't contain replacements |
204 | * the boot process as it probes for various things which are usually attached | 210 | * for the Intel I/O instructions. As a result, the Guest sometimes fumbles |
205 | * to a PC. | 211 | * across one during the boot process as it probes for various things which are |
212 | * usually attached to a PC. | ||
206 | * | 213 | * |
207 | * When the Guest uses one of these instructions, we get trap #13 (General | 214 | * When the Guest uses one of these instructions, we get a trap (General |
208 | * Protection Fault) and come here. We see if it's one of those troublesome | 215 | * Protection Fault) and come here. We see if it's one of those troublesome |
209 | * instructions and skip over it. We return true if we did. */ | 216 | * instructions and skip over it. We return true if we did. */ |
210 | static int emulate_insn(struct lguest *lg) | 217 | static int emulate_insn(struct lguest *lg) |
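As a rough illustration of what "skip over it" means, here is a hedged fragment handling two single-byte I/O opcodes; the opcodes are standard x86 encodings chosen for the example, not a quote of emulate_insn():

    /* Illustrative only: fake a harmless result for IN, ignore OUT, and
     * step the Guest past the instruction so it never notices. */
    switch (opcode) {
    case 0xEC:                       /* in %dx,%al */
            lg->regs->eax |= 0xFF;   /* pretend the port read all-ones */
            lg->regs->eip += 1;
            return 1;
    case 0xEE:                       /* out %al,%dx */
            lg->regs->eip += 1;      /* nothing attached: just skip it */
            return 1;
    default:
            return 0;                /* not something we emulate */
    }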
@@ -275,43 +282,43 @@ static int emulate_insn(struct lguest *lg) | |||
275 | void lguest_arch_handle_trap(struct lguest *lg) | 282 | void lguest_arch_handle_trap(struct lguest *lg) |
276 | { | 283 | { |
277 | switch (lg->regs->trapnum) { | 284 | switch (lg->regs->trapnum) { |
278 | case 13: /* We've intercepted a GPF. */ | 285 | case 13: /* We've intercepted a General Protection Fault. */ |
279 | /* Check if this was one of those annoying IN or OUT | 286 | /* Check if this was one of those annoying IN or OUT |
280 | * instructions which we need to emulate. If so, we | 287 | * instructions which we need to emulate. If so, we just go |
281 | * just go back into the Guest after we've done it. */ | 288 | * back into the Guest after we've done it. */ |
282 | if (lg->regs->errcode == 0) { | 289 | if (lg->regs->errcode == 0) { |
283 | if (emulate_insn(lg)) | 290 | if (emulate_insn(lg)) |
284 | return; | 291 | return; |
285 | } | 292 | } |
286 | break; | 293 | break; |
287 | case 14: /* We've intercepted a page fault. */ | 294 | case 14: /* We've intercepted a Page Fault. */ |
288 | /* The Guest accessed a virtual address that wasn't | 295 | /* The Guest accessed a virtual address that wasn't mapped. |
289 | * mapped. This happens a lot: we don't actually set | 296 | * This happens a lot: we don't actually set up most of the |
290 | * up most of the page tables for the Guest at all when | 297 | * page tables for the Guest at all when we start: as it runs |
291 | * we start: as it runs it asks for more and more, and | 298 | * it asks for more and more, and we set them up as |
292 | * we set them up as required. In this case, we don't | 299 | * required. In this case, we don't even tell the Guest that |
293 | * even tell the Guest that the fault happened. | 300 | * the fault happened. |
294 | * | 301 | * |
295 | * The errcode tells whether this was a read or a | 302 | * The errcode tells whether this was a read or a write, and |
296 | * write, and whether kernel or userspace code. */ | 303 | * whether kernel or userspace code. */ |
297 | if (demand_page(lg, lg->arch.last_pagefault, lg->regs->errcode)) | 304 | if (demand_page(lg, lg->arch.last_pagefault, lg->regs->errcode)) |
298 | return; | 305 | return; |
299 | 306 | ||
300 | /* OK, it's really not there (or not OK): the Guest | 307 | /* OK, it's really not there (or not OK): the Guest needs to |
301 | * needs to know. We write out the cr2 value so it | 308 | * know. We write out the cr2 value so it knows where the |
302 | * knows where the fault occurred. | 309 | * fault occurred. |
303 | * | 310 | * |
304 | * Note that if the Guest were really messed up, this | 311 | * Note that if the Guest were really messed up, this could |
305 | * could happen before it's done the INITIALIZE | 312 | * happen before it's done the LHCALL_LGUEST_INIT hypercall, so |
306 | * hypercall, so lg->lguest_data will be NULL */ | 313 | * lg->lguest_data could be NULL */ |
307 | if (lg->lguest_data && | 314 | if (lg->lguest_data && |
308 | put_user(lg->arch.last_pagefault, &lg->lguest_data->cr2)) | 315 | put_user(lg->arch.last_pagefault, &lg->lguest_data->cr2)) |
309 | kill_guest(lg, "Writing cr2"); | 316 | kill_guest(lg, "Writing cr2"); |
310 | break; | 317 | break; |
311 | case 7: /* We've intercepted a Device Not Available fault. */ | 318 | case 7: /* We've intercepted a Device Not Available fault. */ |
312 | /* If the Guest doesn't want to know, we already | 319 | /* If the Guest doesn't want to know, we already restored the |
313 | * restored the Floating Point Unit, so we just | 320 | * Floating Point Unit, so we just continue without telling |
314 | * continue without telling it. */ | 321 | * it. */ |
315 | if (!lg->ts) | 322 | if (!lg->ts) |
316 | return; | 323 | return; |
317 | break; | 324 | break; |
@@ -536,9 +543,6 @@ int lguest_arch_init_hypercalls(struct lguest *lg) | |||
536 | 543 | ||
537 | return 0; | 544 | return 0; |
538 | } | 545 | } |
539 | /* Now we've examined the hypercall code; our Guest can make requests. There | ||
540 | * is one other way we can do things for the Guest, as we see in | ||
541 | * emulate_insn(). :*/ | ||
542 | 546 | ||
543 | /*L:030 lguest_arch_setup_regs() | 547 | /*L:030 lguest_arch_setup_regs() |
544 | * | 548 | * |
@@ -562,7 +566,7 @@ void lguest_arch_setup_regs(struct lguest *lg, unsigned long start) | |||
562 | * is supposed to always be "1". Bit 9 (0x200) controls whether | 566 | * is supposed to always be "1". Bit 9 (0x200) controls whether |
563 | * interrupts are enabled. We always leave interrupts enabled while | 567 | * interrupts are enabled. We always leave interrupts enabled while |
564 | * running the Guest. */ | 568 | * running the Guest. */ |
565 | regs->eflags = 0x202; | 569 | regs->eflags = X86_EFLAGS_IF | 0x2; |
566 | 570 | ||
567 | /* The "Extended Instruction Pointer" register says where the Guest is | 571 | /* The "Extended Instruction Pointer" register says where the Guest is |
568 | * running. */ | 572 | * running. */ |
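For reference, X86_EFLAGS_IF is bit 9 (0x200) and bit 1 of EFLAGS is a reserved, always-one bit, so the symbolic form set just above still evaluates to the old magic number:

    #define X86_EFLAGS_IF 0x00000200   /* interrupt enable flag, bit 9 */

    unsigned long initial_eflags = X86_EFLAGS_IF | 0x2;  /* == 0x202: IF set plus
                                                            the always-one bit 1 */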
@@ -570,8 +574,8 @@ void lguest_arch_setup_regs(struct lguest *lg, unsigned long start) | |||
570 | 574 | ||
571 | /* %esi points to our boot information, at physical address 0, so don't | 575 | /* %esi points to our boot information, at physical address 0, so don't |
572 | * touch it. */ | 576 | * touch it. */ |
577 | |||
573 | /* There are a couple of GDT entries the Guest expects when first | 578 | /* There are a couple of GDT entries the Guest expects when first |
574 | * booting. */ | 579 | * booting. */ |
575 | |||
576 | setup_guest_gdt(lg); | 580 | setup_guest_gdt(lg); |
577 | } | 581 | } |
diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S index 1010b90b11fc..0af8baaa0d4a 100644 --- a/drivers/lguest/x86/switcher_32.S +++ b/drivers/lguest/x86/switcher_32.S | |||
@@ -6,6 +6,37 @@ | |||
6 | * are feeling invigorated and refreshed then the next, more challenging stage | 6 | * are feeling invigorated and refreshed then the next, more challenging stage |
7 | * can be found in "make Guest". :*/ | 7 | * can be found in "make Guest". :*/ |
8 | 8 | ||
9 | /*M:012 Lguest is meant to be simple: my rule of thumb is that 1% more LOC must | ||
10 | * gain at least 1% more performance. Since neither LOC nor performance can be | ||
11 | * measured beforehand, it generally means implementing a feature then deciding | ||
12 | * if it's worth it. And once it's implemented, who can say no? | ||
13 | * | ||
14 | * This is why I haven't implemented this idea myself. I want to, but I | ||
15 | * haven't. You could, though. | ||
16 | * | ||
17 | * The main place where lguest performance sucks is Guest page faulting. When | ||
18 | * a Guest userspace process hits an unmapped page we switch back to the Host, | ||
19 | * walk the page tables, find it's not mapped, switch back to the Guest page | ||
20 | * fault handler, which calls a hypercall to set the page table entry, then | ||
21 | * finally returns to userspace. That's two round-trips. | ||
22 | * | ||
23 | * If we had a small walker in the Switcher, we could quickly check the Guest | ||
24 | * page table and if the page isn't mapped, immediately reflect the fault back | ||
25 | * into the Guest. This means the Switcher would have to know the top of the | ||
26 | * Guest page table and the page fault handler address. | ||
27 | * | ||
28 | * For simplicity, the Guest should only handle the case where the privilege | ||
29 | * level of the fault is 3 and probably only not present or write faults. It | ||
30 | * should also detect recursive faults, and hand the original fault to the | ||
31 | * Host (which is actually really easy). | ||
32 | * | ||
33 | * Two questions remain. Would the performance gain outweigh the complexity? | ||
34 | * And who would write the verse documenting it? :*/ | ||
35 | |||
36 | /*M:011 Lguest64 handles NMI. This gave me NMI envy (until I looked at their | ||
37 | * code). It's worth doing though, since it would let us use oprofile in the | ||
38 | * Host when a Guest is running. :*/ | ||
39 | |||
9 | /*S:100 | 40 | /*S:100 |
10 | * Welcome to the Switcher itself! | 41 | * Welcome to the Switcher itself! |
11 | * | 42 | * |
@@ -88,7 +119,7 @@ ENTRY(switch_to_guest) | |||
88 | 119 | ||
89 | // All saved and there's now five steps before us: | 120 | // All saved and there's now five steps before us: |
90 | // Stack, GDT, IDT, TSS | 121 | // Stack, GDT, IDT, TSS |
91 | // And last of all the page tables are flipped. | 122 | // Then last of all the page tables are flipped. |
92 | 123 | ||
93 | // Yet beware that our stack pointer must be | 124 | // Yet beware that our stack pointer must be |
94 | // Always valid lest an NMI hits | 125 | // Always valid lest an NMI hits |
@@ -103,25 +134,25 @@ ENTRY(switch_to_guest) | |||
103 | lgdt LGUEST_PAGES_guest_gdt_desc(%eax) | 134 | lgdt LGUEST_PAGES_guest_gdt_desc(%eax) |
104 | 135 | ||
105 | // The Guest's IDT we did partially | 136 | // The Guest's IDT we did partially |
106 | // Move to the "struct lguest_pages" as well. | 137 | // Copy to "struct lguest_pages" as well. |
107 | lidt LGUEST_PAGES_guest_idt_desc(%eax) | 138 | lidt LGUEST_PAGES_guest_idt_desc(%eax) |
108 | 139 | ||
109 | // The TSS entry which controls traps | 140 | // The TSS entry which controls traps |
110 | // Must be loaded up with "ltr" now: | 141 | // Must be loaded up with "ltr" now: |
142 | // The GDT entry that TSS uses | ||
143 | // Changes type when we load it: damn Intel! | ||
111 | // For after we switch over our page tables | 144 | // For after we switch over our page tables |
112 | // It (as the rest) will be writable no more. | 145 | // That entry will be read-only: we'd crash. |
113 | // (The GDT entry TSS needs | ||
114 | // Changes type when we load it: damn Intel!) | ||
115 | movl $(GDT_ENTRY_TSS*8), %edx | 146 | movl $(GDT_ENTRY_TSS*8), %edx |
116 | ltr %dx | 147 | ltr %dx |
117 | 148 | ||
118 | // Look back now, before we take this last step! | 149 | // Look back now, before we take this last step! |
119 | // The Host's TSS entry was also marked used; | 150 | // The Host's TSS entry was also marked used; |
120 | // Let's clear it again, ere we return. | 151 | // Let's clear it again for our return. |
121 | // The GDT descriptor of the Host | 152 | // The GDT descriptor of the Host |
122 | // Points to the table after two "size" bytes | 153 | // Points to the table after two "size" bytes |
123 | movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx | 154 | movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx |
124 | // Clear the type field of "used" (byte 5, bit 2) | 155 | // Clear "used" from type field (byte 5, bit 2) |
125 | andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx) | 156 | andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx) |
126 | 157 | ||
127 | // Once our page table's switched, the Guest is live! | 158 | // Once our page table's switched, the Guest is live! |
@@ -131,7 +162,7 @@ ENTRY(switch_to_guest) | |||
131 | 162 | ||
132 | // The page table change did one tricky thing: | 163 | // The page table change did one tricky thing: |
133 | // The Guest's register page has been mapped | 164 | // The Guest's register page has been mapped |
134 | // Writable onto our %esp (stack) -- | 165 | // Writable under our %esp (stack) -- |
135 | // We can simply pop off all Guest regs. | 166 | // We can simply pop off all Guest regs. |
136 | popl %eax | 167 | popl %eax |
137 | popl %ebx | 168 | popl %ebx |
@@ -152,16 +183,15 @@ ENTRY(switch_to_guest) | |||
152 | addl $8, %esp | 183 | addl $8, %esp |
153 | 184 | ||
154 | // The last five stack slots hold return address | 185 | // The last five stack slots hold return address |
155 | // And everything needed to change privilege | 186 | // And everything needed to switch privilege |
156 | // Into the Guest privilege level of 1, | 187 | // From Switcher's level 0 to Guest's 1, |
157 | // And the stack where the Guest had last left it. | 188 | // And the stack where the Guest had last left it. |
158 | // Interrupts are turned back on: we are Guest. | 189 | // Interrupts are turned back on: we are Guest. |
159 | iret | 190 | iret |
160 | 191 | ||
161 | // There are two paths where we switch to the Host | 192 | // We tread two paths to switch back to the Host |
193 | // Yet both must save Guest state and restore Host | ||
162 | // So we put the routine in a macro. | 194 | // So we put the routine in a macro. |
163 | // We are on our way home, back to the Host | ||
164 | // Interrupted out of the Guest, we come here. | ||
165 | #define SWITCH_TO_HOST \ | 195 | #define SWITCH_TO_HOST \ |
166 | /* We save the Guest state: all registers first \ | 196 | /* We save the Guest state: all registers first \ |
167 | * Laid out just as "struct lguest_regs" defines */ \ | 197 | * Laid out just as "struct lguest_regs" defines */ \ |
@@ -194,7 +224,7 @@ ENTRY(switch_to_guest) | |||
194 | movl %esp, %eax; \ | 224 | movl %esp, %eax; \ |
195 | andl $(~(1 << PAGE_SHIFT - 1)), %eax; \ | 225 | andl $(~(1 << PAGE_SHIFT - 1)), %eax; \ |
196 | /* Save our trap number: the switch will obscure it \ | 226 | /* Save our trap number: the switch will obscure it \ |
197 | * (The Guest regs are not mapped here in the Host) \ | 227 | * (In the Host the Guest regs are not mapped here) \ |
198 | * %ebx holds it safe for deliver_to_host */ \ | 228 | * %ebx holds it safe for deliver_to_host */ \ |
199 | movl LGUEST_PAGES_regs_trapnum(%eax), %ebx; \ | 229 | movl LGUEST_PAGES_regs_trapnum(%eax), %ebx; \ |
200 | /* The Host GDT, IDT and stack! \ | 230 | /* The Host GDT, IDT and stack! \ |
@@ -210,9 +240,9 @@ ENTRY(switch_to_guest) | |||
210 | /* Switch to Host's GDT, IDT. */ \ | 240 | /* Switch to Host's GDT, IDT. */ \ |
211 | lgdt LGUEST_PAGES_host_gdt_desc(%eax); \ | 241 | lgdt LGUEST_PAGES_host_gdt_desc(%eax); \ |
212 | lidt LGUEST_PAGES_host_idt_desc(%eax); \ | 242 | lidt LGUEST_PAGES_host_idt_desc(%eax); \ |
213 | /* Restore the Host's stack where it's saved regs lie */ \ | 243 | /* Restore the Host's stack where its saved regs lie */ \ |
214 | movl LGUEST_PAGES_host_sp(%eax), %esp; \ | 244 | movl LGUEST_PAGES_host_sp(%eax), %esp; \ |
215 | /* Last the TSS: our Host is complete */ \ | 245 | /* Last the TSS: our Host is returned */ \ |
216 | movl $(GDT_ENTRY_TSS*8), %edx; \ | 246 | movl $(GDT_ENTRY_TSS*8), %edx; \ |
217 | ltr %dx; \ | 247 | ltr %dx; \ |
218 | /* Restore now the regs saved right at the first. */ \ | 248 | /* Restore now the regs saved right at the first. */ \ |
@@ -222,14 +252,15 @@ ENTRY(switch_to_guest) | |||
222 | popl %ds; \ | 252 | popl %ds; \ |
223 | popl %es | 253 | popl %es |
224 | 254 | ||
225 | // Here's where we come when the Guest has just trapped: | 255 | // The first path is trod when the Guest has trapped: |
226 | // (Which trap we'll see has been pushed on the stack). | 256 | // (Which trap it was has been pushed on the stack). |
227 | // We need only switch back, and the Host will decode | 257 | // We need only switch back, and the Host will decode |
228 | // Why we came home, and what needs to be done. | 258 | // Why we came home, and what needs to be done. |
229 | return_to_host: | 259 | return_to_host: |
230 | SWITCH_TO_HOST | 260 | SWITCH_TO_HOST |
231 | iret | 261 | iret |
232 | 262 | ||
263 | // We are led to the second path like so: | ||
233 | // An interrupt, with some cause external | 264 | // An interrupt, with some cause external |
234 | // Has ajerked us rudely from the Guest's code | 265 | // Has ajerked us rudely from the Guest's code |
235 | // Again we must return home to the Host | 266 | // Again we must return home to the Host |
@@ -238,7 +269,7 @@ deliver_to_host: | |||
238 | // But now we must go home via that place | 269 | // But now we must go home via that place |
239 | // Where that interrupt was supposed to go | 270 | // Where that interrupt was supposed to go |
240 | // Had we not been ensconced, running the Guest. | 271 | // Had we not been ensconced, running the Guest. |
241 | // Here we see the cleverness of our stack: | 272 | // Here we see the trickiness of run_guest_once(): |
242 | // The Host stack is formed like an interrupt | 273 | // The Host stack is formed like an interrupt |
243 | // With EIP, CS and EFLAGS layered. | 274 | // With EIP, CS and EFLAGS layered. |
244 | // Interrupt handlers end with "iret" | 275 | // Interrupt handlers end with "iret" |
@@ -263,7 +294,7 @@ deliver_to_host: | |||
263 | xorw %ax, %ax | 294 | xorw %ax, %ax |
264 | orl %eax, %edx | 295 | orl %eax, %edx |
265 | // Now the address of the handler's in %edx | 296 | // Now the address of the handler's in %edx |
266 | // We call it now: its "iret" takes us home. | 297 | // We call it now: its "iret" drops us home. |
267 | jmp *%edx | 298 | jmp *%edx |
268 | 299 | ||
269 | // Every interrupt can come to us here | 300 | // Every interrupt can come to us here |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index ac54f697c508..28c6ae095c56 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -168,7 +168,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, | |||
168 | return -ENOMEM; | 168 | return -ENOMEM; |
169 | } | 169 | } |
170 | 170 | ||
171 | sg_set_buf(&sg, cc->key, cc->key_size); | 171 | sg_init_one(&sg, cc->key, cc->key_size); |
172 | desc.tfm = hash_tfm; | 172 | desc.tfm = hash_tfm; |
173 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | 173 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; |
174 | err = crypto_hash_digest(&desc, &sg, cc->key_size, salt); | 174 | err = crypto_hash_digest(&desc, &sg, cc->key_size, salt); |
@@ -351,14 +351,10 @@ static int crypt_convert(struct crypt_config *cc, | |||
351 | struct scatterlist sg_in, sg_out; | 351 | struct scatterlist sg_in, sg_out; |
352 | 352 | ||
353 | sg_init_table(&sg_in, 1); | 353 | sg_init_table(&sg_in, 1); |
354 | sg_set_page(&sg_in, bv_in->bv_page); | 354 | sg_set_page(&sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, bv_in->bv_offset + ctx->offset_in); |
355 | sg_in.offset = bv_in->bv_offset + ctx->offset_in; | ||
356 | sg_in.length = 1 << SECTOR_SHIFT; | ||
357 | 355 | ||
358 | sg_init_table(&sg_out, 1); | 356 | sg_init_table(&sg_out, 1); |
359 | sg_set_page(&sg_out, bv_out->bv_page); | 357 | sg_set_page(&sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, bv_out->bv_offset + ctx->offset_out); |
360 | sg_out.offset = bv_out->bv_offset + ctx->offset_out; | ||
361 | sg_out.length = 1 << SECTOR_SHIFT; | ||
362 | 358 | ||
363 | ctx->offset_in += sg_in.length; | 359 | ctx->offset_in += sg_in.length; |
364 | if (ctx->offset_in >= bv_in->bv_len) { | 360 | if (ctx->offset_in >= bv_in->bv_len) { |
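All the scatterlist churn in this and the following driver hunks follows one pattern: length and offset now travel through sg_set_page() (or sg_init_one() for a plain buffer) instead of being poked into the struct fields by hand. A minimal usage sketch of the updated helpers:

    #include <linux/scatterlist.h>

    struct scatterlist sg;

    /* One-entry table describing "len" bytes starting "offset" into "page": */
    sg_init_table(&sg, 1);
    sg_set_page(&sg, page, len, offset);

    /* Or, for an ordinary kernel buffer, let the helper fill in everything: */
    sg_init_one(&sg, buf, buflen);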
diff --git a/drivers/md/raid6algos.c b/drivers/md/raid6algos.c index 926576156578..77a6e4bf503d 100644 --- a/drivers/md/raid6algos.c +++ b/drivers/md/raid6algos.c | |||
@@ -52,7 +52,7 @@ const struct raid6_calls * const raid6_algos[] = { | |||
52 | &raid6_intx16, | 52 | &raid6_intx16, |
53 | &raid6_intx32, | 53 | &raid6_intx32, |
54 | #endif | 54 | #endif |
55 | #if defined(__i386__) | 55 | #if defined(__i386__) && !defined(__arch_um__) |
56 | &raid6_mmxx1, | 56 | &raid6_mmxx1, |
57 | &raid6_mmxx2, | 57 | &raid6_mmxx2, |
58 | &raid6_sse1x1, | 58 | &raid6_sse1x1, |
@@ -60,7 +60,7 @@ const struct raid6_calls * const raid6_algos[] = { | |||
60 | &raid6_sse2x1, | 60 | &raid6_sse2x1, |
61 | &raid6_sse2x2, | 61 | &raid6_sse2x2, |
62 | #endif | 62 | #endif |
63 | #if defined(__x86_64__) | 63 | #if defined(__x86_64__) && !defined(__arch_um__) |
64 | &raid6_sse2x1, | 64 | &raid6_sse2x1, |
65 | &raid6_sse2x2, | 65 | &raid6_sse2x2, |
66 | &raid6_sse2x4, | 66 | &raid6_sse2x4, |
diff --git a/drivers/md/raid6mmx.c b/drivers/md/raid6mmx.c index 6181a5a3365a..d4e4a1bd70ad 100644 --- a/drivers/md/raid6mmx.c +++ b/drivers/md/raid6mmx.c | |||
@@ -16,7 +16,7 @@ | |||
16 | * MMX implementation of RAID-6 syndrome functions | 16 | * MMX implementation of RAID-6 syndrome functions |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #if defined(__i386__) | 19 | #if defined(__i386__) && !defined(__arch_um__) |
20 | 20 | ||
21 | #include "raid6.h" | 21 | #include "raid6.h" |
22 | #include "raid6x86.h" | 22 | #include "raid6x86.h" |
diff --git a/drivers/md/raid6sse1.c b/drivers/md/raid6sse1.c index f0a1ba8f40ba..0666237276ff 100644 --- a/drivers/md/raid6sse1.c +++ b/drivers/md/raid6sse1.c | |||
@@ -21,7 +21,7 @@ | |||
21 | * worthwhile as a separate implementation. | 21 | * worthwhile as a separate implementation. |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #if defined(__i386__) | 24 | #if defined(__i386__) && !defined(__arch_um__) |
25 | 25 | ||
26 | #include "raid6.h" | 26 | #include "raid6.h" |
27 | #include "raid6x86.h" | 27 | #include "raid6x86.h" |
diff --git a/drivers/md/raid6sse2.c b/drivers/md/raid6sse2.c index 0f019762a7c3..b034ad868039 100644 --- a/drivers/md/raid6sse2.c +++ b/drivers/md/raid6sse2.c | |||
@@ -17,7 +17,7 @@ | |||
17 | * | 17 | * |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #if defined(__i386__) || defined(__x86_64__) | 20 | #if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__) |
21 | 21 | ||
22 | #include "raid6.h" | 22 | #include "raid6.h" |
23 | #include "raid6x86.h" | 23 | #include "raid6x86.h" |
@@ -161,7 +161,7 @@ const struct raid6_calls raid6_sse2x2 = { | |||
161 | 161 | ||
162 | #endif | 162 | #endif |
163 | 163 | ||
164 | #ifdef __x86_64__ | 164 | #if defined(__x86_64__) && !defined(__arch_um__) |
165 | 165 | ||
166 | /* | 166 | /* |
167 | * Unrolled-by-4 SSE2 implementation | 167 | * Unrolled-by-4 SSE2 implementation |
diff --git a/drivers/md/raid6x86.h b/drivers/md/raid6x86.h index 9111950414ff..99fea7a70ca7 100644 --- a/drivers/md/raid6x86.h +++ b/drivers/md/raid6x86.h | |||
@@ -19,7 +19,7 @@ | |||
19 | #ifndef LINUX_RAID_RAID6X86_H | 19 | #ifndef LINUX_RAID_RAID6X86_H |
20 | #define LINUX_RAID_RAID6X86_H | 20 | #define LINUX_RAID_RAID6X86_H |
21 | 21 | ||
22 | #if defined(__i386__) || defined(__x86_64__) | 22 | #if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__) |
23 | 23 | ||
24 | #ifdef __KERNEL__ /* Real code */ | 24 | #ifdef __KERNEL__ /* Real code */ |
25 | 25 | ||
diff --git a/drivers/media/common/saa7146_core.c b/drivers/media/common/saa7146_core.c index 2b1f8b4be00a..cb034ead95ab 100644 --- a/drivers/media/common/saa7146_core.c +++ b/drivers/media/common/saa7146_core.c | |||
@@ -118,8 +118,7 @@ static struct scatterlist* vmalloc_to_sg(unsigned char *virt, int nr_pages) | |||
118 | if (NULL == pg) | 118 | if (NULL == pg) |
119 | goto err; | 119 | goto err; |
120 | BUG_ON(PageHighMem(pg)); | 120 | BUG_ON(PageHighMem(pg)); |
121 | sg_set_page(&sglist[i], pg); | 121 | sg_set_page(&sglist[i], pg, PAGE_SIZE, 0); |
122 | sglist[i].length = PAGE_SIZE; | ||
123 | } | 122 | } |
124 | return sglist; | 123 | return sglist; |
125 | 124 | ||
diff --git a/drivers/media/video/ivtv/ivtv-udma.c b/drivers/media/video/ivtv/ivtv-udma.c index 912b424e5204..460db03b0ba0 100644 --- a/drivers/media/video/ivtv/ivtv-udma.c +++ b/drivers/media/video/ivtv/ivtv-udma.c | |||
@@ -49,8 +49,6 @@ int ivtv_udma_fill_sg_list (struct ivtv_user_dma *dma, struct ivtv_dma_page_info | |||
49 | unsigned int len = (i == dma_page->page_count - 1) ? | 49 | unsigned int len = (i == dma_page->page_count - 1) ? |
50 | dma_page->tail : PAGE_SIZE - offset; | 50 | dma_page->tail : PAGE_SIZE - offset; |
51 | 51 | ||
52 | dma->SGlist[map_offset].length = len; | ||
53 | dma->SGlist[map_offset].offset = offset; | ||
54 | if (PageHighMem(dma->map[map_offset])) { | 52 | if (PageHighMem(dma->map[map_offset])) { |
55 | void *src; | 53 | void *src; |
56 | 54 | ||
@@ -63,10 +61,10 @@ int ivtv_udma_fill_sg_list (struct ivtv_user_dma *dma, struct ivtv_dma_page_info | |||
63 | memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len); | 61 | memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len); |
64 | kunmap_atomic(src, KM_BOUNCE_READ); | 62 | kunmap_atomic(src, KM_BOUNCE_READ); |
65 | local_irq_restore(flags); | 63 | local_irq_restore(flags); |
66 | sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset]); | 64 | sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset], len, offset); |
67 | } | 65 | } |
68 | else { | 66 | else { |
69 | sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset]); | 67 | sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset], len, offset); |
70 | } | 68 | } |
71 | offset = 0; | 69 | offset = 0; |
72 | map_offset++; | 70 | map_offset++; |
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c index 9ab94a749d81..44ee408e145f 100644 --- a/drivers/media/video/videobuf-dma-sg.c +++ b/drivers/media/video/videobuf-dma-sg.c | |||
@@ -67,8 +67,7 @@ videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages) | |||
67 | if (NULL == pg) | 67 | if (NULL == pg) |
68 | goto err; | 68 | goto err; |
69 | BUG_ON(PageHighMem(pg)); | 69 | BUG_ON(PageHighMem(pg)); |
70 | sg_set_page(&sglist[i], pg); | 70 | sg_set_page(&sglist[i], pg, PAGE_SIZE, 0); |
71 | sglist[i].length = PAGE_SIZE; | ||
72 | } | 71 | } |
73 | return sglist; | 72 | return sglist; |
74 | 73 | ||
@@ -95,16 +94,13 @@ videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset) | |||
95 | if (PageHighMem(pages[0])) | 94 | if (PageHighMem(pages[0])) |
96 | /* DMA to highmem pages might not work */ | 95 | /* DMA to highmem pages might not work */ |
97 | goto highmem; | 96 | goto highmem; |
98 | sg_set_page(&sglist[0], pages[0]); | 97 | sg_set_page(&sglist[0], pages[0], PAGE_SIZE - offset, offset); |
99 | sglist[0].offset = offset; | ||
100 | sglist[0].length = PAGE_SIZE - offset; | ||
101 | for (i = 1; i < nr_pages; i++) { | 98 | for (i = 1; i < nr_pages; i++) { |
102 | if (NULL == pages[i]) | 99 | if (NULL == pages[i]) |
103 | goto nopage; | 100 | goto nopage; |
104 | if (PageHighMem(pages[i])) | 101 | if (PageHighMem(pages[i])) |
105 | goto highmem; | 102 | goto highmem; |
106 | sg_set_page(&sglist[i], pages[i]); | 103 | sg_set_page(&sglist[i], pages[i], PAGE_SIZE, 0); |
107 | sglist[i].length = PAGE_SIZE; | ||
108 | } | 104 | } |
109 | return sglist; | 105 | return sglist; |
110 | 106 | ||
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index d602ba6d5417..682406168de9 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c | |||
@@ -284,6 +284,7 @@ static inline struct i2o_block_request *i2o_block_request_alloc(void) | |||
284 | return ERR_PTR(-ENOMEM); | 284 | return ERR_PTR(-ENOMEM); |
285 | 285 | ||
286 | INIT_LIST_HEAD(&ireq->queue); | 286 | INIT_LIST_HEAD(&ireq->queue); |
287 | sg_init_table(ireq->sg_table, I2O_MAX_PHYS_SEGMENTS); | ||
287 | 288 | ||
288 | return ireq; | 289 | return ireq; |
289 | }; | 290 | }; |
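
The scatterlist hunks above (saa7146, ivtv, videobuf, i2o_block) all track the 2.6.24 sg-chaining API: tables are initialised once with sg_init_table() and each entry is filled through the four-argument sg_set_page(), which now carries the length and offset that drivers previously assigned by hand. A minimal sketch under that assumption, for a driver mapping a vmalloc'ed buffer page by page (function name is illustrative):

    #include <linux/mm.h>
    #include <linux/scatterlist.h>

    /* Sketch only: build an sg list covering nr_pages of a vmalloc'ed buffer. */
    static void fill_sg_from_vmalloc(struct scatterlist *sgl, unsigned char *virt,
    				 int nr_pages)
    {
    	int i;

    	sg_init_table(sgl, nr_pages);	/* clears entries, marks the last one */
    	for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE)
    		/* replaces: sg_set_page(sg, pg); sg->length = PAGE_SIZE; */
    		sg_set_page(&sgl[i], vmalloc_to_page(virt), PAGE_SIZE, 0);
    }
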
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index bf4bc6adcfef..7471d49909b2 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c | |||
@@ -267,15 +267,26 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, | |||
267 | 267 | ||
268 | int mmc_send_csd(struct mmc_card *card, u32 *csd) | 268 | int mmc_send_csd(struct mmc_card *card, u32 *csd) |
269 | { | 269 | { |
270 | int ret, i; | ||
271 | |||
270 | if (!mmc_host_is_spi(card->host)) | 272 | if (!mmc_host_is_spi(card->host)) |
271 | return mmc_send_cxd_native(card->host, card->rca << 16, | 273 | return mmc_send_cxd_native(card->host, card->rca << 16, |
272 | csd, MMC_SEND_CSD); | 274 | csd, MMC_SEND_CSD); |
273 | 275 | ||
274 | return mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd, 16); | 276 | ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd, 16); |
277 | if (ret) | ||
278 | return ret; | ||
279 | |||
280 | for (i = 0;i < 4;i++) | ||
281 | csd[i] = be32_to_cpu(csd[i]); | ||
282 | |||
283 | return 0; | ||
275 | } | 284 | } |
276 | 285 | ||
277 | int mmc_send_cid(struct mmc_host *host, u32 *cid) | 286 | int mmc_send_cid(struct mmc_host *host, u32 *cid) |
278 | { | 287 | { |
288 | int ret, i; | ||
289 | |||
279 | if (!mmc_host_is_spi(host)) { | 290 | if (!mmc_host_is_spi(host)) { |
280 | if (!host->card) | 291 | if (!host->card) |
281 | return -EINVAL; | 292 | return -EINVAL; |
@@ -283,7 +294,14 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid) | |||
283 | cid, MMC_SEND_CID); | 294 | cid, MMC_SEND_CID); |
284 | } | 295 | } |
285 | 296 | ||
286 | return mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid, 16); | 297 | ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid, 16); |
298 | if (ret) | ||
299 | return ret; | ||
300 | |||
301 | for (i = 0;i < 4;i++) | ||
302 | cid[i] = be32_to_cpu(cid[i]); | ||
303 | |||
304 | return 0; | ||
287 | } | 305 | } |
288 | 306 | ||
289 | int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd) | 307 | int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd) |
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c index ee4029a24efd..a6dafe62b992 100644 --- a/drivers/mmc/core/sd_ops.c +++ b/drivers/mmc/core/sd_ops.c | |||
@@ -294,8 +294,8 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr) | |||
294 | if (data.error) | 294 | if (data.error) |
295 | return data.error; | 295 | return data.error; |
296 | 296 | ||
297 | scr[0] = ntohl(scr[0]); | 297 | scr[0] = be32_to_cpu(scr[0]); |
298 | scr[1] = ntohl(scr[1]); | 298 | scr[1] = be32_to_cpu(scr[1]); |
299 | 299 | ||
300 | return 0; | 300 | return 0; |
301 | } | 301 | } |
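
For the MMC hunks just above: the CSD and CID registers read back through mmc_send_cxd_data() arrive as a big-endian 128-bit value, so each 32-bit word must be swapped on little-endian hosts before the parsing code can use it; be32_to_cpu() is the idiomatic kernel helper (ntohl() computes the same swap but is meant for network byte order). A small sketch of the conversion the patch adds (helper name is illustrative):

    /* Sketch: turn a raw 16-byte CSD/CID buffer into host-endian words. */
    static void cxd_to_cpu(u32 *reg)
    {
    	int i;

    	for (i = 0; i < 4; i++)
    		reg[i] = be32_to_cpu(reg[i]);	/* a no-op on big-endian hosts */
    }
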
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c index bcbb6d247bf7..c3926eb3bf43 100644 --- a/drivers/mmc/host/au1xmmc.c +++ b/drivers/mmc/host/au1xmmc.c | |||
@@ -40,13 +40,13 @@ | |||
40 | #include <linux/mm.h> | 40 | #include <linux/mm.h> |
41 | #include <linux/interrupt.h> | 41 | #include <linux/interrupt.h> |
42 | #include <linux/dma-mapping.h> | 42 | #include <linux/dma-mapping.h> |
43 | #include <linux/scatterlist.h> | ||
43 | 44 | ||
44 | #include <linux/mmc/host.h> | 45 | #include <linux/mmc/host.h> |
45 | #include <asm/io.h> | 46 | #include <asm/io.h> |
46 | #include <asm/mach-au1x00/au1000.h> | 47 | #include <asm/mach-au1x00/au1000.h> |
47 | #include <asm/mach-au1x00/au1xxx_dbdma.h> | 48 | #include <asm/mach-au1x00/au1xxx_dbdma.h> |
48 | #include <asm/mach-au1x00/au1100_mmc.h> | 49 | #include <asm/mach-au1x00/au1100_mmc.h> |
49 | #include <asm/scatterlist.h> | ||
50 | 50 | ||
51 | #include <au1xxx.h> | 51 | #include <au1xxx.h> |
52 | #include "au1xmmc.h" | 52 | #include "au1xmmc.h" |
@@ -212,12 +212,12 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait, | |||
212 | } | 212 | } |
213 | 213 | ||
214 | if (data) { | 214 | if (data) { |
215 | if (flags & MMC_DATA_READ) { | 215 | if (data->flags & MMC_DATA_READ) { |
216 | if (data->blocks > 1) | 216 | if (data->blocks > 1) |
217 | mmccmd |= SD_CMD_CT_4; | 217 | mmccmd |= SD_CMD_CT_4; |
218 | else | 218 | else |
219 | mmccmd |= SD_CMD_CT_2; | 219 | mmccmd |= SD_CMD_CT_2; |
220 | } else if (flags & MMC_DATA_WRITE) { | 220 | } else if (data->flags & MMC_DATA_WRITE) { |
221 | if (data->blocks > 1) | 221 | if (data->blocks > 1) |
222 | mmccmd |= SD_CMD_CT_3; | 222 | mmccmd |= SD_CMD_CT_3; |
223 | else | 223 | else |
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c index fc72e1fadb6a..f2070a19cfa7 100644 --- a/drivers/mmc/host/imxmmc.c +++ b/drivers/mmc/host/imxmmc.c | |||
@@ -262,7 +262,7 @@ static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data) | |||
262 | } | 262 | } |
263 | 263 | ||
264 | /* Convert back to virtual address */ | 264 | /* Convert back to virtual address */ |
265 | host->data_ptr = (u16*)sg_virt(sg); | 265 | host->data_ptr = (u16*)sg_virt(data->sg); |
266 | host->data_cnt = 0; | 266 | host->data_cnt = 0; |
267 | 267 | ||
268 | clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events); | 268 | clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events); |
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 12c2d807c145..a6469218f194 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c | |||
@@ -1165,6 +1165,23 @@ mmc_spi_detect_irq(int irq, void *mmc) | |||
1165 | return IRQ_HANDLED; | 1165 | return IRQ_HANDLED; |
1166 | } | 1166 | } |
1167 | 1167 | ||
1168 | struct count_children { | ||
1169 | unsigned n; | ||
1170 | struct bus_type *bus; | ||
1171 | }; | ||
1172 | |||
1173 | static int maybe_count_child(struct device *dev, void *c) | ||
1174 | { | ||
1175 | struct count_children *ccp = c; | ||
1176 | |||
1177 | if (dev->bus == ccp->bus) { | ||
1178 | if (ccp->n) | ||
1179 | return -EBUSY; | ||
1180 | ccp->n++; | ||
1181 | } | ||
1182 | return 0; | ||
1183 | } | ||
1184 | |||
1168 | static int mmc_spi_probe(struct spi_device *spi) | 1185 | static int mmc_spi_probe(struct spi_device *spi) |
1169 | { | 1186 | { |
1170 | void *ones; | 1187 | void *ones; |
@@ -1188,33 +1205,30 @@ static int mmc_spi_probe(struct spi_device *spi) | |||
1188 | return status; | 1205 | return status; |
1189 | } | 1206 | } |
1190 | 1207 | ||
1191 | /* We can use the bus safely iff nobody else will interfere with | 1208 | /* We can use the bus safely iff nobody else will interfere with us. |
1192 | * us. That is, either we have the experimental exclusive access | 1209 | * Most commands consist of one SPI message to issue a command, then |
1193 | * primitives ... or else there's nobody to share it with. | 1210 | * several more to collect its response, then possibly more for data |
1211 | * transfer. Clocking access to other devices during that period will | ||
1212 | * corrupt the command execution. | ||
1213 | * | ||
1214 | * Until we have software primitives which guarantee non-interference, | ||
1215 | * we'll aim for a hardware-level guarantee. | ||
1216 | * | ||
1217 | * REVISIT we can't guarantee another device won't be added later... | ||
1194 | */ | 1218 | */ |
1195 | if (spi->master->num_chipselect > 1) { | 1219 | if (spi->master->num_chipselect > 1) { |
1196 | struct device *parent = spi->dev.parent; | 1220 | struct count_children cc; |
1197 | 1221 | ||
1198 | /* If there are multiple devices on this bus, we | 1222 | cc.n = 0; |
1199 | * can't proceed. | 1223 | cc.bus = spi->dev.bus; |
1200 | */ | 1224 | status = device_for_each_child(spi->dev.parent, &cc, |
1201 | spin_lock(&parent->klist_children.k_lock); | 1225 | maybe_count_child); |
1202 | if (parent->klist_children.k_list.next | ||
1203 | != parent->klist_children.k_list.prev) | ||
1204 | status = -EMLINK; | ||
1205 | else | ||
1206 | status = 0; | ||
1207 | spin_unlock(&parent->klist_children.k_lock); | ||
1208 | if (status < 0) { | 1226 | if (status < 0) { |
1209 | dev_err(&spi->dev, "can't share SPI bus\n"); | 1227 | dev_err(&spi->dev, "can't share SPI bus\n"); |
1210 | return status; | 1228 | return status; |
1211 | } | 1229 | } |
1212 | 1230 | ||
1213 | /* REVISIT we can't guarantee another device won't | 1231 | dev_warn(&spi->dev, "ASSUMING SPI bus stays unshared!\n"); |
1214 | * be added later. It's uncommon though ... for now, | ||
1215 | * work as if this is safe. | ||
1216 | */ | ||
1217 | dev_warn(&spi->dev, "ASSUMING unshared SPI bus!\n"); | ||
1218 | } | 1232 | } |
1219 | 1233 | ||
1220 | /* We need a supply of ones to transmit. This is the only time | 1234 | /* We need a supply of ones to transmit. This is the only time |
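
The mmc_spi rewrite above replaces the raw klist peeking with device_for_each_child(), which walks a parent's children and stops as soon as the callback returns non-zero; returning -EBUSY from the callback on the second matching child is therefore enough to detect a shared bus. A rough usage sketch, assuming we only care about children on spi_bus_type (the patch itself carries the bus pointer in a small struct instead):

    /* Sketch only: fail if more than one device sits on this SPI bus. */
    static int count_spi_children(struct device *dev, void *data)
    {
    	unsigned int *n = data;

    	if (dev->bus != &spi_bus_type)
    		return 0;
    	if ((*n)++)
    		return -EBUSY;		/* second child found: stop iterating */
    	return 0;
    }

    	/* in probe(), roughly: */
    	unsigned int n = 0;
    	status = device_for_each_child(spi->dev.parent, &n, count_spi_children);
    	if (status < 0)
    		dev_err(&spi->dev, "can't share SPI bus\n");
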
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index d0eb0a2abf4d..95244a7e7353 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c | |||
@@ -20,11 +20,11 @@ | |||
20 | #include <linux/mmc/host.h> | 20 | #include <linux/mmc/host.h> |
21 | #include <linux/amba/bus.h> | 21 | #include <linux/amba/bus.h> |
22 | #include <linux/clk.h> | 22 | #include <linux/clk.h> |
23 | #include <linux/scatterlist.h> | ||
23 | 24 | ||
24 | #include <asm/cacheflush.h> | 25 | #include <asm/cacheflush.h> |
25 | #include <asm/div64.h> | 26 | #include <asm/div64.h> |
26 | #include <asm/io.h> | 27 | #include <asm/io.h> |
27 | #include <asm/scatterlist.h> | ||
28 | #include <asm/sizes.h> | 28 | #include <asm/sizes.h> |
29 | #include <asm/mach/mmc.h> | 29 | #include <asm/mach/mmc.h> |
30 | 30 | ||
@@ -167,7 +167,7 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data, | |||
167 | * partially written to a page is properly coherent. | 167 | * partially written to a page is properly coherent. |
168 | */ | 168 | */ |
169 | if (host->sg_len && data->flags & MMC_DATA_READ) | 169 | if (host->sg_len && data->flags & MMC_DATA_READ) |
170 | flush_dcache_page(host->sg_ptr->page); | 170 | flush_dcache_page(sg_page(host->sg_ptr)); |
171 | } | 171 | } |
172 | if (status & MCI_DATAEND) { | 172 | if (status & MCI_DATAEND) { |
173 | mmci_stop_data(host); | 173 | mmci_stop_data(host); |
@@ -319,7 +319,7 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id) | |||
319 | * page, ensure that the data cache is coherent. | 319 | * page, ensure that the data cache is coherent. |
320 | */ | 320 | */ |
321 | if (status & MCI_RXACTIVE) | 321 | if (status & MCI_RXACTIVE) |
322 | flush_dcache_page(host->sg_ptr->page); | 322 | flush_dcache_page(sg_page(host->sg_ptr)); |
323 | 323 | ||
324 | if (!mmci_next_sg(host)) | 324 | if (!mmci_next_sg(host)) |
325 | break; | 325 | break; |
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 0601e01aa2c2..a25ee71998a9 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c | |||
@@ -29,7 +29,6 @@ | |||
29 | 29 | ||
30 | #include <asm/dma.h> | 30 | #include <asm/dma.h> |
31 | #include <asm/io.h> | 31 | #include <asm/io.h> |
32 | #include <asm/scatterlist.h> | ||
33 | #include <asm/sizes.h> | 32 | #include <asm/sizes.h> |
34 | 33 | ||
35 | #include <asm/arch/pxa-regs.h> | 34 | #include <asm/arch/pxa-regs.h> |
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index d7c5b94d8c58..6b80bf77a4ef 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
@@ -17,8 +17,6 @@ | |||
17 | 17 | ||
18 | #include <linux/mmc/host.h> | 18 | #include <linux/mmc/host.h> |
19 | 19 | ||
20 | #include <asm/scatterlist.h> | ||
21 | |||
22 | #include "sdhci.h" | 20 | #include "sdhci.h" |
23 | 21 | ||
24 | #define DRIVER_NAME "sdhci" | 22 | #define DRIVER_NAME "sdhci" |
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index fa4c8c53cc7a..4d5f37421874 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c | |||
@@ -33,10 +33,10 @@ | |||
33 | #include <linux/pnp.h> | 33 | #include <linux/pnp.h> |
34 | #include <linux/highmem.h> | 34 | #include <linux/highmem.h> |
35 | #include <linux/mmc/host.h> | 35 | #include <linux/mmc/host.h> |
36 | #include <linux/scatterlist.h> | ||
36 | 37 | ||
37 | #include <asm/io.h> | 38 | #include <asm/io.h> |
38 | #include <asm/dma.h> | 39 | #include <asm/dma.h> |
39 | #include <asm/scatterlist.h> | ||
40 | 40 | ||
41 | #include "wbsd.h" | 41 | #include "wbsd.h" |
42 | 42 | ||
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 86b8641b4664..867cb7345b5f 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -166,13 +166,14 @@ config NET_SB1000 | |||
166 | If you don't have this card, of course say N. | 166 | If you don't have this card, of course say N. |
167 | 167 | ||
168 | config IP1000 | 168 | config IP1000 |
169 | tristate "IP1000 Gigabit Ethernet support" | 169 | tristate "IP1000 Gigabit Ethernet support" |
170 | depends on PCI && EXPERIMENTAL | 170 | depends on PCI && EXPERIMENTAL |
171 | ---help--- | 171 | select MII |
172 | This driver supports IP1000 gigabit Ethernet cards. | 172 | ---help--- |
173 | This driver supports IP1000 gigabit Ethernet cards. | ||
173 | 174 | ||
174 | To compile this driver as a module, choose M here: the module | 175 | To compile this driver as a module, choose M here: the module |
175 | will be called ipg. This is recommended. | 176 | will be called ipg. This is recommended. |
176 | 177 | ||
177 | source "drivers/net/arcnet/Kconfig" | 178 | source "drivers/net/arcnet/Kconfig" |
178 | 179 | ||
@@ -1880,6 +1881,30 @@ config FEC2 | |||
1880 | Say Y here if you want to use the second built-in 10/100 Fast | 1881 | Say Y here if you want to use the second built-in 10/100 Fast |
1881 | ethernet controller on some Motorola ColdFire processors. | 1882 | ethernet controller on some Motorola ColdFire processors. |
1882 | 1883 | ||
1884 | config FEC_MPC52xx | ||
1885 | tristate "MPC52xx FEC driver" | ||
1886 | depends on PPC_MPC52xx | ||
1887 | select PPC_BESTCOMM | ||
1888 | select PPC_BESTCOMM_FEC | ||
1889 | select CRC32 | ||
1890 | select PHYLIB | ||
1891 | ---help--- | ||
1892 | This option enables support for the MPC5200's on-chip | ||
1893 | Fast Ethernet Controller. | ||
1894 | If compiled as a module, it will be called 'fec_mpc52xx.ko'. | ||
1895 | |||
1896 | config FEC_MPC52xx_MDIO | ||
1897 | bool "MPC52xx FEC MDIO bus driver" | ||
1898 | depends on FEC_MPC52xx | ||
1899 | default y | ||
1900 | ---help--- | ||
1901 | The MPC5200's FEC can connect to the Ethernet either with | ||
1902 | an external MII PHY chip or a 10 Mbps 7-wire interface | ||
1903 | (Motorola's industry standard). | ||
1904 | If your board uses an external PHY connected to the FEC, enable this. | ||
1905 | If not sure, enable. | ||
1906 | If compiled as a module, it will be called 'fec_mpc52xx_phy.ko'. | ||
1907 | |||
1883 | config NE_H8300 | 1908 | config NE_H8300 |
1884 | tristate "NE2000 compatible support for H8/300" | 1909 | tristate "NE2000 compatible support for H8/300" |
1885 | depends on H8300 | 1910 | depends on H8300 |
diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 593262065c9b..0e5fde4a1b2c 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile | |||
@@ -96,6 +96,10 @@ obj-$(CONFIG_SHAPER) += shaper.o | |||
96 | obj-$(CONFIG_HP100) += hp100.o | 96 | obj-$(CONFIG_HP100) += hp100.o |
97 | obj-$(CONFIG_SMC9194) += smc9194.o | 97 | obj-$(CONFIG_SMC9194) += smc9194.o |
98 | obj-$(CONFIG_FEC) += fec.o | 98 | obj-$(CONFIG_FEC) += fec.o |
99 | obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o | ||
100 | ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y) | ||
101 | obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o | ||
102 | endif | ||
99 | obj-$(CONFIG_68360_ENET) += 68360enet.o | 103 | obj-$(CONFIG_68360_ENET) += 68360enet.o |
100 | obj-$(CONFIG_WD80x3) += wd.o 8390.o | 104 | obj-$(CONFIG_WD80x3) += wd.o 8390.o |
101 | obj-$(CONFIG_EL2) += 3c503.o 8390.o | 105 | obj-$(CONFIG_EL2) += 3c503.o 8390.o |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 6909becb10f6..6937ef0e7275 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -188,6 +188,7 @@ struct bond_parm_tbl arp_validate_tbl[] = { | |||
188 | /*-------------------------- Forward declarations ---------------------------*/ | 188 | /*-------------------------- Forward declarations ---------------------------*/ |
189 | 189 | ||
190 | static void bond_send_gratuitous_arp(struct bonding *bond); | 190 | static void bond_send_gratuitous_arp(struct bonding *bond); |
191 | static void bond_deinit(struct net_device *bond_dev); | ||
191 | 192 | ||
192 | /*---------------------------- General routines -----------------------------*/ | 193 | /*---------------------------- General routines -----------------------------*/ |
193 | 194 | ||
@@ -3681,7 +3682,7 @@ static int bond_open(struct net_device *bond_dev) | |||
3681 | } | 3682 | } |
3682 | 3683 | ||
3683 | if (bond->params.mode == BOND_MODE_8023AD) { | 3684 | if (bond->params.mode == BOND_MODE_8023AD) { |
3684 | INIT_DELAYED_WORK(&bond->ad_work, bond_alb_monitor); | 3685 | INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler); |
3685 | queue_delayed_work(bond->wq, &bond->ad_work, 0); | 3686 | queue_delayed_work(bond->wq, &bond->ad_work, 0); |
3686 | /* register to receive LACPDUs */ | 3687 | /* register to receive LACPDUs */ |
3687 | bond_register_lacpdu(bond); | 3688 | bond_register_lacpdu(bond); |
@@ -4449,7 +4450,7 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params) | |||
4449 | /* De-initialize device specific data. | 4450 | /* De-initialize device specific data. |
4450 | * Caller must hold rtnl_lock. | 4451 | * Caller must hold rtnl_lock. |
4451 | */ | 4452 | */ |
4452 | void bond_deinit(struct net_device *bond_dev) | 4453 | static void bond_deinit(struct net_device *bond_dev) |
4453 | { | 4454 | { |
4454 | struct bonding *bond = bond_dev->priv; | 4455 | struct bonding *bond = bond_dev->priv; |
4455 | 4456 | ||
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index d1ed14bf1ccb..61c1b4536d34 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h | |||
@@ -302,7 +302,6 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_de | |||
302 | int bond_create(char *name, struct bond_params *params, struct bonding **newbond); | 302 | int bond_create(char *name, struct bond_params *params, struct bonding **newbond); |
303 | void bond_destroy(struct bonding *bond); | 303 | void bond_destroy(struct bonding *bond); |
304 | int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev); | 304 | int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev); |
305 | void bond_deinit(struct net_device *bond_dev); | ||
306 | int bond_create_sysfs(void); | 305 | int bond_create_sysfs(void); |
307 | void bond_destroy_sysfs(void); | 306 | void bond_destroy_sysfs(void); |
308 | void bond_destroy_sysfs_entry(struct bonding *bond); | 307 | void bond_destroy_sysfs_entry(struct bonding *bond); |
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c index 57541d2d9e1e..6fd95a2c8cec 100644 --- a/drivers/net/cpmac.c +++ b/drivers/net/cpmac.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/skbuff.h> | 34 | #include <linux/skbuff.h> |
35 | #include <linux/mii.h> | 35 | #include <linux/mii.h> |
36 | #include <linux/phy.h> | 36 | #include <linux/phy.h> |
37 | #include <linux/phy_fixed.h> | ||
37 | #include <linux/platform_device.h> | 38 | #include <linux/platform_device.h> |
38 | #include <linux/dma-mapping.h> | 39 | #include <linux/dma-mapping.h> |
39 | #include <asm/gpio.h> | 40 | #include <asm/gpio.h> |
@@ -53,12 +54,6 @@ MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable"); | |||
53 | MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus"); | 54 | MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus"); |
54 | 55 | ||
55 | #define CPMAC_VERSION "0.5.0" | 56 | #define CPMAC_VERSION "0.5.0" |
56 | /* stolen from net/ieee80211.h */ | ||
57 | #ifndef MAC_FMT | ||
58 | #define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x" | ||
59 | #define MAC_ARG(x) ((u8*)(x))[0], ((u8*)(x))[1], ((u8*)(x))[2], \ | ||
60 | ((u8*)(x))[3], ((u8*)(x))[4], ((u8*)(x))[5] | ||
61 | #endif | ||
62 | /* frame size + 802.1q tag */ | 57 | /* frame size + 802.1q tag */ |
63 | #define CPMAC_SKB_SIZE (ETH_FRAME_LEN + 4) | 58 | #define CPMAC_SKB_SIZE (ETH_FRAME_LEN + 4) |
64 | #define CPMAC_QUEUES 8 | 59 | #define CPMAC_QUEUES 8 |
@@ -211,6 +206,7 @@ struct cpmac_priv { | |||
211 | struct net_device *dev; | 206 | struct net_device *dev; |
212 | struct work_struct reset_work; | 207 | struct work_struct reset_work; |
213 | struct platform_device *pdev; | 208 | struct platform_device *pdev; |
209 | struct napi_struct napi; | ||
214 | }; | 210 | }; |
215 | 211 | ||
216 | static irqreturn_t cpmac_irq(int, void *); | 212 | static irqreturn_t cpmac_irq(int, void *); |
@@ -362,47 +358,48 @@ static void cpmac_set_multicast_list(struct net_device *dev) | |||
362 | } | 358 | } |
363 | } | 359 | } |
364 | 360 | ||
365 | static struct sk_buff *cpmac_rx_one(struct net_device *dev, | 361 | static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv, |
366 | struct cpmac_priv *priv, | ||
367 | struct cpmac_desc *desc) | 362 | struct cpmac_desc *desc) |
368 | { | 363 | { |
369 | struct sk_buff *skb, *result = NULL; | 364 | struct sk_buff *skb, *result = NULL; |
370 | 365 | ||
371 | if (unlikely(netif_msg_hw(priv))) | 366 | if (unlikely(netif_msg_hw(priv))) |
372 | cpmac_dump_desc(dev, desc); | 367 | cpmac_dump_desc(priv->dev, desc); |
373 | cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping); | 368 | cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping); |
374 | if (unlikely(!desc->datalen)) { | 369 | if (unlikely(!desc->datalen)) { |
375 | if (netif_msg_rx_err(priv) && net_ratelimit()) | 370 | if (netif_msg_rx_err(priv) && net_ratelimit()) |
376 | printk(KERN_WARNING "%s: rx: spurious interrupt\n", | 371 | printk(KERN_WARNING "%s: rx: spurious interrupt\n", |
377 | dev->name); | 372 | priv->dev->name); |
378 | return NULL; | 373 | return NULL; |
379 | } | 374 | } |
380 | 375 | ||
381 | skb = netdev_alloc_skb(dev, CPMAC_SKB_SIZE); | 376 | skb = netdev_alloc_skb(priv->dev, CPMAC_SKB_SIZE); |
382 | if (likely(skb)) { | 377 | if (likely(skb)) { |
383 | skb_reserve(skb, 2); | 378 | skb_reserve(skb, 2); |
384 | skb_put(desc->skb, desc->datalen); | 379 | skb_put(desc->skb, desc->datalen); |
385 | desc->skb->protocol = eth_type_trans(desc->skb, dev); | 380 | desc->skb->protocol = eth_type_trans(desc->skb, priv->dev); |
386 | desc->skb->ip_summed = CHECKSUM_NONE; | 381 | desc->skb->ip_summed = CHECKSUM_NONE; |
387 | dev->stats.rx_packets++; | 382 | priv->dev->stats.rx_packets++; |
388 | dev->stats.rx_bytes += desc->datalen; | 383 | priv->dev->stats.rx_bytes += desc->datalen; |
389 | result = desc->skb; | 384 | result = desc->skb; |
390 | dma_unmap_single(&dev->dev, desc->data_mapping, CPMAC_SKB_SIZE, | 385 | dma_unmap_single(&priv->dev->dev, desc->data_mapping, |
391 | DMA_FROM_DEVICE); | 386 | CPMAC_SKB_SIZE, DMA_FROM_DEVICE); |
392 | desc->skb = skb; | 387 | desc->skb = skb; |
393 | desc->data_mapping = dma_map_single(&dev->dev, skb->data, | 388 | desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data, |
394 | CPMAC_SKB_SIZE, | 389 | CPMAC_SKB_SIZE, |
395 | DMA_FROM_DEVICE); | 390 | DMA_FROM_DEVICE); |
396 | desc->hw_data = (u32)desc->data_mapping; | 391 | desc->hw_data = (u32)desc->data_mapping; |
397 | if (unlikely(netif_msg_pktdata(priv))) { | 392 | if (unlikely(netif_msg_pktdata(priv))) { |
398 | printk(KERN_DEBUG "%s: received packet:\n", dev->name); | 393 | printk(KERN_DEBUG "%s: received packet:\n", |
399 | cpmac_dump_skb(dev, result); | 394 | priv->dev->name); |
395 | cpmac_dump_skb(priv->dev, result); | ||
400 | } | 396 | } |
401 | } else { | 397 | } else { |
402 | if (netif_msg_rx_err(priv) && net_ratelimit()) | 398 | if (netif_msg_rx_err(priv) && net_ratelimit()) |
403 | printk(KERN_WARNING | 399 | printk(KERN_WARNING |
404 | "%s: low on skbs, dropping packet\n", dev->name); | 400 | "%s: low on skbs, dropping packet\n", |
405 | dev->stats.rx_dropped++; | 401 | priv->dev->name); |
402 | priv->dev->stats.rx_dropped++; | ||
406 | } | 403 | } |
407 | 404 | ||
408 | desc->buflen = CPMAC_SKB_SIZE; | 405 | desc->buflen = CPMAC_SKB_SIZE; |
@@ -411,25 +408,25 @@ static struct sk_buff *cpmac_rx_one(struct net_device *dev, | |||
411 | return result; | 408 | return result; |
412 | } | 409 | } |
413 | 410 | ||
414 | static int cpmac_poll(struct net_device *dev, int *budget) | 411 | static int cpmac_poll(struct napi_struct *napi, int budget) |
415 | { | 412 | { |
416 | struct sk_buff *skb; | 413 | struct sk_buff *skb; |
417 | struct cpmac_desc *desc; | 414 | struct cpmac_desc *desc; |
418 | int received = 0, quota = min(dev->quota, *budget); | 415 | int received = 0; |
419 | struct cpmac_priv *priv = netdev_priv(dev); | 416 | struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi); |
420 | 417 | ||
421 | spin_lock(&priv->rx_lock); | 418 | spin_lock(&priv->rx_lock); |
422 | if (unlikely(!priv->rx_head)) { | 419 | if (unlikely(!priv->rx_head)) { |
423 | if (netif_msg_rx_err(priv) && net_ratelimit()) | 420 | if (netif_msg_rx_err(priv) && net_ratelimit()) |
424 | printk(KERN_WARNING "%s: rx: polling, but no queue\n", | 421 | printk(KERN_WARNING "%s: rx: polling, but no queue\n", |
425 | dev->name); | 422 | priv->dev->name); |
426 | netif_rx_complete(dev); | 423 | netif_rx_complete(priv->dev, napi); |
427 | return 0; | 424 | return 0; |
428 | } | 425 | } |
429 | 426 | ||
430 | desc = priv->rx_head; | 427 | desc = priv->rx_head; |
431 | while ((received < quota) && ((desc->dataflags & CPMAC_OWN) == 0)) { | 428 | while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) { |
432 | skb = cpmac_rx_one(dev, priv, desc); | 429 | skb = cpmac_rx_one(priv, desc); |
433 | if (likely(skb)) { | 430 | if (likely(skb)) { |
434 | netif_receive_skb(skb); | 431 | netif_receive_skb(skb); |
435 | received++; | 432 | received++; |
@@ -439,13 +436,11 @@ static int cpmac_poll(struct net_device *dev, int *budget) | |||
439 | 436 | ||
440 | priv->rx_head = desc; | 437 | priv->rx_head = desc; |
441 | spin_unlock(&priv->rx_lock); | 438 | spin_unlock(&priv->rx_lock); |
442 | *budget -= received; | ||
443 | dev->quota -= received; | ||
444 | if (unlikely(netif_msg_rx_status(priv))) | 439 | if (unlikely(netif_msg_rx_status(priv))) |
445 | printk(KERN_DEBUG "%s: poll processed %d packets\n", dev->name, | 440 | printk(KERN_DEBUG "%s: poll processed %d packets\n", |
446 | received); | 441 | priv->dev->name, received); |
447 | if (desc->dataflags & CPMAC_OWN) { | 442 | if (desc->dataflags & CPMAC_OWN) { |
448 | netif_rx_complete(dev); | 443 | netif_rx_complete(priv->dev, napi); |
449 | cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping); | 444 | cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping); |
450 | cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); | 445 | cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); |
451 | return 0; | 446 | return 0; |
@@ -655,6 +650,7 @@ static void cpmac_hw_error(struct work_struct *work) | |||
655 | spin_unlock(&priv->rx_lock); | 650 | spin_unlock(&priv->rx_lock); |
656 | cpmac_clear_tx(priv->dev); | 651 | cpmac_clear_tx(priv->dev); |
657 | cpmac_hw_start(priv->dev); | 652 | cpmac_hw_start(priv->dev); |
653 | napi_enable(&priv->napi); | ||
658 | netif_start_queue(priv->dev); | 654 | netif_start_queue(priv->dev); |
659 | } | 655 | } |
660 | 656 | ||
@@ -681,8 +677,10 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id) | |||
681 | 677 | ||
682 | if (status & MAC_INT_RX) { | 678 | if (status & MAC_INT_RX) { |
683 | queue = (status >> 8) & 7; | 679 | queue = (status >> 8) & 7; |
684 | netif_rx_schedule(dev); | 680 | if (netif_rx_schedule_prep(dev, &priv->napi)) { |
685 | cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue); | 681 | cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue); |
682 | __netif_rx_schedule(dev, &priv->napi); | ||
683 | } | ||
686 | } | 684 | } |
687 | 685 | ||
688 | cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); | 686 | cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); |
@@ -692,6 +690,7 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id) | |||
692 | printk(KERN_ERR "%s: hw error, resetting...\n", | 690 | printk(KERN_ERR "%s: hw error, resetting...\n", |
693 | dev->name); | 691 | dev->name); |
694 | netif_stop_queue(dev); | 692 | netif_stop_queue(dev); |
693 | napi_disable(&priv->napi); | ||
695 | cpmac_hw_stop(dev); | 694 | cpmac_hw_stop(dev); |
696 | schedule_work(&priv->reset_work); | 695 | schedule_work(&priv->reset_work); |
697 | if (unlikely(netif_msg_hw(priv))) | 696 | if (unlikely(netif_msg_hw(priv))) |
@@ -849,6 +848,15 @@ static void cpmac_adjust_link(struct net_device *dev) | |||
849 | spin_unlock(&priv->lock); | 848 | spin_unlock(&priv->lock); |
850 | } | 849 | } |
851 | 850 | ||
851 | static int cpmac_link_update(struct net_device *dev, | ||
852 | struct fixed_phy_status *status) | ||
853 | { | ||
854 | status->link = 1; | ||
855 | status->speed = 100; | ||
856 | status->duplex = 1; | ||
857 | return 0; | ||
858 | } | ||
859 | |||
852 | static int cpmac_open(struct net_device *dev) | 860 | static int cpmac_open(struct net_device *dev) |
853 | { | 861 | { |
854 | int i, size, res; | 862 | int i, size, res; |
@@ -857,15 +865,6 @@ static int cpmac_open(struct net_device *dev) | |||
857 | struct cpmac_desc *desc; | 865 | struct cpmac_desc *desc; |
858 | struct sk_buff *skb; | 866 | struct sk_buff *skb; |
859 | 867 | ||
860 | priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, | ||
861 | 0, PHY_INTERFACE_MODE_MII); | ||
862 | if (IS_ERR(priv->phy)) { | ||
863 | if (netif_msg_drv(priv)) | ||
864 | printk(KERN_ERR "%s: Could not attach to PHY\n", | ||
865 | dev->name); | ||
866 | return PTR_ERR(priv->phy); | ||
867 | } | ||
868 | |||
869 | mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); | 868 | mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); |
870 | if (!request_mem_region(mem->start, mem->end - mem->start, dev->name)) { | 869 | if (!request_mem_region(mem->start, mem->end - mem->start, dev->name)) { |
871 | if (netif_msg_drv(priv)) | 870 | if (netif_msg_drv(priv)) |
@@ -927,6 +926,7 @@ static int cpmac_open(struct net_device *dev) | |||
927 | INIT_WORK(&priv->reset_work, cpmac_hw_error); | 926 | INIT_WORK(&priv->reset_work, cpmac_hw_error); |
928 | cpmac_hw_start(dev); | 927 | cpmac_hw_start(dev); |
929 | 928 | ||
929 | napi_enable(&priv->napi); | ||
930 | priv->phy->state = PHY_CHANGELINK; | 930 | priv->phy->state = PHY_CHANGELINK; |
931 | phy_start(priv->phy); | 931 | phy_start(priv->phy); |
932 | 932 | ||
@@ -951,8 +951,6 @@ fail_remap: | |||
951 | release_mem_region(mem->start, mem->end - mem->start); | 951 | release_mem_region(mem->start, mem->end - mem->start); |
952 | 952 | ||
953 | fail_reserve: | 953 | fail_reserve: |
954 | phy_disconnect(priv->phy); | ||
955 | |||
956 | return res; | 954 | return res; |
957 | } | 955 | } |
958 | 956 | ||
@@ -965,9 +963,8 @@ static int cpmac_stop(struct net_device *dev) | |||
965 | netif_stop_queue(dev); | 963 | netif_stop_queue(dev); |
966 | 964 | ||
967 | cancel_work_sync(&priv->reset_work); | 965 | cancel_work_sync(&priv->reset_work); |
966 | napi_disable(&priv->napi); | ||
968 | phy_stop(priv->phy); | 967 | phy_stop(priv->phy); |
969 | phy_disconnect(priv->phy); | ||
970 | priv->phy = NULL; | ||
971 | 968 | ||
972 | cpmac_hw_stop(dev); | 969 | cpmac_hw_stop(dev); |
973 | 970 | ||
@@ -1001,11 +998,13 @@ static int external_switch; | |||
1001 | 998 | ||
1002 | static int __devinit cpmac_probe(struct platform_device *pdev) | 999 | static int __devinit cpmac_probe(struct platform_device *pdev) |
1003 | { | 1000 | { |
1004 | int rc, phy_id; | 1001 | int rc, phy_id, i; |
1005 | struct resource *mem; | 1002 | struct resource *mem; |
1006 | struct cpmac_priv *priv; | 1003 | struct cpmac_priv *priv; |
1007 | struct net_device *dev; | 1004 | struct net_device *dev; |
1008 | struct plat_cpmac_data *pdata; | 1005 | struct plat_cpmac_data *pdata; |
1006 | struct fixed_info *fixed_phy; | ||
1007 | DECLARE_MAC_BUF(mac); | ||
1009 | 1008 | ||
1010 | pdata = pdev->dev.platform_data; | 1009 | pdata = pdev->dev.platform_data; |
1011 | 1010 | ||
@@ -1053,21 +1052,51 @@ static int __devinit cpmac_probe(struct platform_device *pdev) | |||
1053 | dev->set_multicast_list = cpmac_set_multicast_list; | 1052 | dev->set_multicast_list = cpmac_set_multicast_list; |
1054 | dev->tx_timeout = cpmac_tx_timeout; | 1053 | dev->tx_timeout = cpmac_tx_timeout; |
1055 | dev->ethtool_ops = &cpmac_ethtool_ops; | 1054 | dev->ethtool_ops = &cpmac_ethtool_ops; |
1056 | dev->poll = cpmac_poll; | ||
1057 | dev->weight = 64; | ||
1058 | dev->features |= NETIF_F_MULTI_QUEUE; | 1055 | dev->features |= NETIF_F_MULTI_QUEUE; |
1059 | 1056 | ||
1057 | netif_napi_add(dev, &priv->napi, cpmac_poll, 64); | ||
1058 | |||
1060 | spin_lock_init(&priv->lock); | 1059 | spin_lock_init(&priv->lock); |
1061 | spin_lock_init(&priv->rx_lock); | 1060 | spin_lock_init(&priv->rx_lock); |
1062 | priv->dev = dev; | 1061 | priv->dev = dev; |
1063 | priv->ring_size = 64; | 1062 | priv->ring_size = 64; |
1064 | priv->msg_enable = netif_msg_init(debug_level, 0xff); | 1063 | priv->msg_enable = netif_msg_init(debug_level, 0xff); |
1065 | memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr)); | 1064 | memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr)); |
1065 | |||
1066 | if (phy_id == 31) { | 1066 | if (phy_id == 31) { |
1067 | snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, | 1067 | snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, cpmac_mii.id, |
1068 | cpmac_mii.id, phy_id); | 1068 | phy_id); |
1069 | } else | 1069 | } else { |
1070 | snprintf(priv->phy_name, BUS_ID_SIZE, "fixed@%d:%d", 100, 1); | 1070 | /* Let's try to get a free fixed phy... */ |
1071 | for (i = 0; i < MAX_PHY_AMNT; i++) { | ||
1072 | fixed_phy = fixed_mdio_get_phydev(i); | ||
1073 | if (!fixed_phy) | ||
1074 | continue; | ||
1075 | if (!fixed_phy->phydev->attached_dev) { | ||
1076 | strncpy(priv->phy_name, | ||
1077 | fixed_phy->phydev->dev.bus_id, | ||
1078 | BUS_ID_SIZE); | ||
1079 | fixed_mdio_set_link_update(fixed_phy->phydev, | ||
1080 | &cpmac_link_update); | ||
1081 | goto phy_found; | ||
1082 | } | ||
1083 | } | ||
1084 | if (netif_msg_drv(priv)) | ||
1085 | printk(KERN_ERR "%s: Could not find fixed PHY\n", | ||
1086 | dev->name); | ||
1087 | rc = -ENODEV; | ||
1088 | goto fail; | ||
1089 | } | ||
1090 | |||
1091 | phy_found: | ||
1092 | priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0, | ||
1093 | PHY_INTERFACE_MODE_MII); | ||
1094 | if (IS_ERR(priv->phy)) { | ||
1095 | if (netif_msg_drv(priv)) | ||
1096 | printk(KERN_ERR "%s: Could not attach to PHY\n", | ||
1097 | dev->name); | ||
1098 | return PTR_ERR(priv->phy); | ||
1099 | } | ||
1071 | 1100 | ||
1072 | if ((rc = register_netdev(dev))) { | 1101 | if ((rc = register_netdev(dev))) { |
1073 | printk(KERN_ERR "cpmac: error %i registering device %s\n", rc, | 1102 | printk(KERN_ERR "cpmac: error %i registering device %s\n", rc, |
@@ -1077,9 +1106,9 @@ static int __devinit cpmac_probe(struct platform_device *pdev) | |||
1077 | 1106 | ||
1078 | if (netif_msg_probe(priv)) { | 1107 | if (netif_msg_probe(priv)) { |
1079 | printk(KERN_INFO | 1108 | printk(KERN_INFO |
1080 | "cpmac: device %s (regs: %p, irq: %d, phy: %s, mac: " | 1109 | "cpmac: device %s (regs: %p, irq: %d, phy: %s, " |
1081 | MAC_FMT ")\n", dev->name, (void *)mem->start, dev->irq, | 1110 | "mac: %s)\n", dev->name, (void *)mem->start, dev->irq, |
1082 | priv->phy_name, MAC_ARG(dev->dev_addr)); | 1111 | priv->phy_name, print_mac(mac, dev->dev_addr)); |
1083 | } | 1112 | } |
1084 | return 0; | 1113 | return 0; |
1085 | 1114 | ||
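
The cpmac changes above are a conversion to the 2.6.24 NAPI interface: the poll routine takes a struct napi_struct embedded in the driver's private data instead of the net_device, the budget is passed in directly rather than read from dev->quota, and RX scheduling goes through netif_rx_schedule_prep()/__netif_rx_schedule(). A condensed sketch of the resulting shape (struct and function names are illustrative, not cpmac's):

    struct mypriv {
    	struct net_device *dev;
    	struct napi_struct napi;
    };

    static int my_poll(struct napi_struct *napi, int budget)
    {
    	struct mypriv *priv = container_of(napi, struct mypriv, napi);
    	int received = 0;

    	/* ... hand up to 'budget' packets to netif_receive_skb() ... */

    	if (received < budget) {
    		netif_rx_complete(priv->dev, napi);	/* done: leave polled mode */
    		/* re-enable the device's RX interrupt here */
    	}
    	return received;
    }

    	/* probe: */ netif_napi_add(dev, &priv->napi, my_poll, 64);
    	/* open:  */ napi_enable(&priv->napi);
    	/* irq:   */ if (netif_rx_schedule_prep(dev, &priv->napi))
    			__netif_rx_schedule(dev, &priv->napi);
    	/* stop:  */ napi_disable(&priv->napi);
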
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index d2499bb07c13..473f78de4be0 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h | |||
@@ -122,7 +122,8 @@ struct e1000_buffer { | |||
122 | u16 next_to_watch; | 122 | u16 next_to_watch; |
123 | }; | 123 | }; |
124 | /* RX */ | 124 | /* RX */ |
125 | struct page *page; | 125 | /* arrays of page information for packet split */ |
126 | struct e1000_ps_page *ps_pages; | ||
126 | }; | 127 | }; |
127 | 128 | ||
128 | }; | 129 | }; |
@@ -142,8 +143,6 @@ struct e1000_ring { | |||
142 | /* array of buffer information structs */ | 143 | /* array of buffer information structs */ |
143 | struct e1000_buffer *buffer_info; | 144 | struct e1000_buffer *buffer_info; |
144 | 145 | ||
145 | /* arrays of page information for packet split */ | ||
146 | struct e1000_ps_page *ps_pages; | ||
147 | struct sk_buff *rx_skb_top; | 146 | struct sk_buff *rx_skb_top; |
148 | 147 | ||
149 | struct e1000_queue_stats stats; | 148 | struct e1000_queue_stats stats; |
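
The e1000.h hunk above moves the packet-split page bookkeeping from one flat, ring-wide ps_pages[] array (indexed as i * PS_PAGE_BUFFERS + j) onto each struct e1000_buffer, which the netdev.c hunks below allocate per descriptor with kcalloc() and index with a plain [j]. A rough sketch of the new allocation, with error unwinding trimmed (function name is illustrative):

    /* Sketch: give every RX descriptor its own small page array. */
    static int alloc_ps_pages(struct e1000_ring *rx_ring)
    {
    	int i;

    	for (i = 0; i < rx_ring->count; i++) {
    		struct e1000_buffer *bi = &rx_ring->buffer_info[i];

    		bi->ps_pages = kcalloc(PS_PAGE_BUFFERS,
    				       sizeof(struct e1000_ps_page), GFP_KERNEL);
    		if (!bi->ps_pages)
    			return -ENOMEM;
    		/* old lookup: &rx_ring->ps_pages[i * PS_PAGE_BUFFERS + j]
    		 * new lookup: &bi->ps_pages[j] */
    	}
    	return 0;
    }
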
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 033e124d1c1f..4fd2e23720b6 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -245,37 +245,36 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
245 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); | 245 | rx_desc = E1000_RX_DESC_PS(*rx_ring, i); |
246 | 246 | ||
247 | for (j = 0; j < PS_PAGE_BUFFERS; j++) { | 247 | for (j = 0; j < PS_PAGE_BUFFERS; j++) { |
248 | ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS) | 248 | ps_page = &buffer_info->ps_pages[j]; |
249 | + j]; | 249 | if (j >= adapter->rx_ps_pages) { |
250 | if (j < adapter->rx_ps_pages) { | 250 | /* all unused desc entries get hw null ptr */ |
251 | rx_desc->read.buffer_addr[j+1] = ~0; | ||
252 | continue; | ||
253 | } | ||
254 | if (!ps_page->page) { | ||
255 | ps_page->page = alloc_page(GFP_ATOMIC); | ||
251 | if (!ps_page->page) { | 256 | if (!ps_page->page) { |
252 | ps_page->page = alloc_page(GFP_ATOMIC); | 257 | adapter->alloc_rx_buff_failed++; |
253 | if (!ps_page->page) { | 258 | goto no_buffers; |
254 | adapter->alloc_rx_buff_failed++; | 259 | } |
255 | goto no_buffers; | 260 | ps_page->dma = pci_map_page(pdev, |
256 | } | 261 | ps_page->page, |
257 | ps_page->dma = pci_map_page(pdev, | 262 | 0, PAGE_SIZE, |
258 | ps_page->page, | 263 | PCI_DMA_FROMDEVICE); |
259 | 0, PAGE_SIZE, | 264 | if (pci_dma_mapping_error(ps_page->dma)) { |
260 | PCI_DMA_FROMDEVICE); | 265 | dev_err(&adapter->pdev->dev, |
261 | if (pci_dma_mapping_error( | 266 | "RX DMA page map failed\n"); |
262 | ps_page->dma)) { | 267 | adapter->rx_dma_failed++; |
263 | dev_err(&adapter->pdev->dev, | 268 | goto no_buffers; |
264 | "RX DMA page map failed\n"); | ||
265 | adapter->rx_dma_failed++; | ||
266 | goto no_buffers; | ||
267 | } | ||
268 | } | 269 | } |
269 | /* | ||
270 | * Refresh the desc even if buffer_addrs | ||
271 | * didn't change because each write-back | ||
272 | * erases this info. | ||
273 | */ | ||
274 | rx_desc->read.buffer_addr[j+1] = | ||
275 | cpu_to_le64(ps_page->dma); | ||
276 | } else { | ||
277 | rx_desc->read.buffer_addr[j+1] = ~0; | ||
278 | } | 270 | } |
271 | /* | ||
272 | * Refresh the desc even if buffer_addrs | ||
273 | * didn't change because each write-back | ||
274 | * erases this info. | ||
275 | */ | ||
276 | rx_desc->read.buffer_addr[j+1] = | ||
277 | cpu_to_le64(ps_page->dma); | ||
279 | } | 278 | } |
280 | 279 | ||
281 | skb = netdev_alloc_skb(netdev, | 280 | skb = netdev_alloc_skb(netdev, |
@@ -334,94 +333,6 @@ no_buffers: | |||
334 | } | 333 | } |
335 | 334 | ||
336 | /** | 335 | /** |
337 | * e1000_alloc_rx_buffers_jumbo - Replace used jumbo receive buffers | ||
338 | * | ||
339 | * @adapter: address of board private structure | ||
340 | * @cleaned_count: number of buffers to allocate this pass | ||
341 | **/ | ||
342 | static void e1000_alloc_rx_buffers_jumbo(struct e1000_adapter *adapter, | ||
343 | int cleaned_count) | ||
344 | { | ||
345 | struct net_device *netdev = adapter->netdev; | ||
346 | struct pci_dev *pdev = adapter->pdev; | ||
347 | struct e1000_ring *rx_ring = adapter->rx_ring; | ||
348 | struct e1000_rx_desc *rx_desc; | ||
349 | struct e1000_buffer *buffer_info; | ||
350 | struct sk_buff *skb; | ||
351 | unsigned int i; | ||
352 | unsigned int bufsz = 256 - | ||
353 | 16 /*for skb_reserve */ - | ||
354 | NET_IP_ALIGN; | ||
355 | |||
356 | i = rx_ring->next_to_use; | ||
357 | buffer_info = &rx_ring->buffer_info[i]; | ||
358 | |||
359 | while (cleaned_count--) { | ||
360 | skb = buffer_info->skb; | ||
361 | if (skb) { | ||
362 | skb_trim(skb, 0); | ||
363 | goto check_page; | ||
364 | } | ||
365 | |||
366 | skb = netdev_alloc_skb(netdev, bufsz); | ||
367 | if (!skb) { | ||
368 | /* Better luck next round */ | ||
369 | adapter->alloc_rx_buff_failed++; | ||
370 | break; | ||
371 | } | ||
372 | |||
373 | /* Make buffer alignment 2 beyond a 16 byte boundary | ||
374 | * this will result in a 16 byte aligned IP header after | ||
375 | * the 14 byte MAC header is removed | ||
376 | */ | ||
377 | skb_reserve(skb, NET_IP_ALIGN); | ||
378 | |||
379 | buffer_info->skb = skb; | ||
380 | check_page: | ||
381 | /* allocate a new page if necessary */ | ||
382 | if (!buffer_info->page) { | ||
383 | buffer_info->page = alloc_page(GFP_ATOMIC); | ||
384 | if (!buffer_info->page) { | ||
385 | adapter->alloc_rx_buff_failed++; | ||
386 | break; | ||
387 | } | ||
388 | } | ||
389 | |||
390 | if (!buffer_info->dma) | ||
391 | buffer_info->dma = pci_map_page(pdev, | ||
392 | buffer_info->page, 0, | ||
393 | PAGE_SIZE, | ||
394 | PCI_DMA_FROMDEVICE); | ||
395 | if (pci_dma_mapping_error(buffer_info->dma)) { | ||
396 | dev_err(&adapter->pdev->dev, "RX DMA page map failed\n"); | ||
397 | adapter->rx_dma_failed++; | ||
398 | break; | ||
399 | } | ||
400 | |||
401 | rx_desc = E1000_RX_DESC(*rx_ring, i); | ||
402 | rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); | ||
403 | |||
404 | i++; | ||
405 | if (i == rx_ring->count) | ||
406 | i = 0; | ||
407 | buffer_info = &rx_ring->buffer_info[i]; | ||
408 | } | ||
409 | |||
410 | if (rx_ring->next_to_use != i) { | ||
411 | rx_ring->next_to_use = i; | ||
412 | if (i-- == 0) | ||
413 | i = (rx_ring->count - 1); | ||
414 | |||
415 | /* Force memory writes to complete before letting h/w | ||
416 | * know there are new descriptors to fetch. (Only | ||
417 | * applicable for weak-ordered memory model archs, | ||
418 | * such as IA-64). */ | ||
419 | wmb(); | ||
420 | writel(i, adapter->hw.hw_addr + rx_ring->tail); | ||
421 | } | ||
422 | } | ||
423 | |||
424 | /** | ||
425 | * e1000_clean_rx_irq - Send received data up the network stack; legacy | 336 | * e1000_clean_rx_irq - Send received data up the network stack; legacy |
426 | * @adapter: board private structure | 337 | * @adapter: board private structure |
427 | * | 338 | * |
@@ -495,10 +406,6 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
495 | goto next_desc; | 406 | goto next_desc; |
496 | } | 407 | } |
497 | 408 | ||
498 | /* adjust length to remove Ethernet CRC */ | ||
499 | length -= 4; | ||
500 | |||
501 | /* probably a little skewed due to removing CRC */ | ||
502 | total_rx_bytes += length; | 409 | total_rx_bytes += length; |
503 | total_rx_packets++; | 410 | total_rx_packets++; |
504 | 411 | ||
@@ -554,15 +461,6 @@ next_desc: | |||
554 | return cleaned; | 461 | return cleaned; |
555 | } | 462 | } |
556 | 463 | ||
557 | static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, | ||
558 | u16 length) | ||
559 | { | ||
560 | bi->page = NULL; | ||
561 | skb->len += length; | ||
562 | skb->data_len += length; | ||
563 | skb->truesize += length; | ||
564 | } | ||
565 | |||
566 | static void e1000_put_txbuf(struct e1000_adapter *adapter, | 464 | static void e1000_put_txbuf(struct e1000_adapter *adapter, |
567 | struct e1000_buffer *buffer_info) | 465 | struct e1000_buffer *buffer_info) |
568 | { | 466 | { |
@@ -699,174 +597,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) | |||
699 | } | 597 | } |
700 | 598 | ||
701 | /** | 599 | /** |
702 | * e1000_clean_rx_irq_jumbo - Send received data up the network stack; legacy | ||
703 | * @adapter: board private structure | ||
704 | * | ||
705 | * the return value indicates whether actual cleaning was done, there | ||
706 | * is no guarantee that everything was cleaned | ||
707 | **/ | ||
708 | static bool e1000_clean_rx_irq_jumbo(struct e1000_adapter *adapter, | ||
709 | int *work_done, int work_to_do) | ||
710 | { | ||
711 | struct net_device *netdev = adapter->netdev; | ||
712 | struct pci_dev *pdev = adapter->pdev; | ||
713 | struct e1000_ring *rx_ring = adapter->rx_ring; | ||
714 | struct e1000_rx_desc *rx_desc, *next_rxd; | ||
715 | struct e1000_buffer *buffer_info, *next_buffer; | ||
716 | u32 length; | ||
717 | unsigned int i; | ||
718 | int cleaned_count = 0; | ||
719 | bool cleaned = 0; | ||
720 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; | ||
721 | |||
722 | i = rx_ring->next_to_clean; | ||
723 | rx_desc = E1000_RX_DESC(*rx_ring, i); | ||
724 | buffer_info = &rx_ring->buffer_info[i]; | ||
725 | |||
726 | while (rx_desc->status & E1000_RXD_STAT_DD) { | ||
727 | struct sk_buff *skb; | ||
728 | u8 status; | ||
729 | |||
730 | if (*work_done >= work_to_do) | ||
731 | break; | ||
732 | (*work_done)++; | ||
733 | |||
734 | status = rx_desc->status; | ||
735 | skb = buffer_info->skb; | ||
736 | buffer_info->skb = NULL; | ||
737 | |||
738 | i++; | ||
739 | if (i == rx_ring->count) | ||
740 | i = 0; | ||
741 | next_rxd = E1000_RX_DESC(*rx_ring, i); | ||
742 | prefetch(next_rxd); | ||
743 | |||
744 | next_buffer = &rx_ring->buffer_info[i]; | ||
745 | |||
746 | cleaned = 1; | ||
747 | cleaned_count++; | ||
748 | pci_unmap_page(pdev, | ||
749 | buffer_info->dma, | ||
750 | PAGE_SIZE, | ||
751 | PCI_DMA_FROMDEVICE); | ||
752 | buffer_info->dma = 0; | ||
753 | |||
754 | length = le16_to_cpu(rx_desc->length); | ||
755 | |||
756 | /* errors is only valid for DD + EOP descriptors */ | ||
757 | if ((status & E1000_RXD_STAT_EOP) && | ||
758 | (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { | ||
759 | /* recycle both page and skb */ | ||
760 | buffer_info->skb = skb; | ||
761 | /* an error means any chain goes out the window too */ | ||
762 | if (rx_ring->rx_skb_top) | ||
763 | dev_kfree_skb(rx_ring->rx_skb_top); | ||
764 | rx_ring->rx_skb_top = NULL; | ||
765 | goto next_desc; | ||
766 | } | ||
767 | |||
768 | #define rxtop rx_ring->rx_skb_top | ||
769 | if (!(status & E1000_RXD_STAT_EOP)) { | ||
770 | /* this descriptor is only the beginning (or middle) */ | ||
771 | if (!rxtop) { | ||
772 | /* this is the beginning of a chain */ | ||
773 | rxtop = skb; | ||
774 | skb_fill_page_desc(rxtop, 0, buffer_info->page, | ||
775 | 0, length); | ||
776 | } else { | ||
777 | /* this is the middle of a chain */ | ||
778 | skb_fill_page_desc(rxtop, | ||
779 | skb_shinfo(rxtop)->nr_frags, | ||
780 | buffer_info->page, 0, | ||
781 | length); | ||
782 | /* re-use the skb, only consumed the page */ | ||
783 | buffer_info->skb = skb; | ||
784 | } | ||
785 | e1000_consume_page(buffer_info, rxtop, length); | ||
786 | goto next_desc; | ||
787 | } else { | ||
788 | if (rxtop) { | ||
789 | /* end of the chain */ | ||
790 | skb_fill_page_desc(rxtop, | ||
791 | skb_shinfo(rxtop)->nr_frags, | ||
792 | buffer_info->page, 0, length); | ||
793 | /* re-use the current skb, we only consumed the | ||
794 | * page */ | ||
795 | buffer_info->skb = skb; | ||
796 | skb = rxtop; | ||
797 | rxtop = NULL; | ||
798 | e1000_consume_page(buffer_info, skb, length); | ||
799 | } else { | ||
800 | /* no chain, got EOP, this buf is the packet | ||
801 | * copybreak to save the put_page/alloc_page */ | ||
802 | if (length <= copybreak && | ||
803 | skb_tailroom(skb) >= length) { | ||
804 | u8 *vaddr; | ||
805 | vaddr = kmap_atomic(buffer_info->page, | ||
806 | KM_SKB_DATA_SOFTIRQ); | ||
807 | memcpy(skb_tail_pointer(skb), | ||
808 | vaddr, length); | ||
809 | kunmap_atomic(vaddr, | ||
810 | KM_SKB_DATA_SOFTIRQ); | ||
811 | /* re-use the page, so don't erase | ||
812 | * buffer_info->page */ | ||
813 | skb_put(skb, length); | ||
814 | } else { | ||
815 | skb_fill_page_desc(skb, 0, | ||
816 | buffer_info->page, 0, | ||
817 | length); | ||
818 | e1000_consume_page(buffer_info, skb, | ||
819 | length); | ||
820 | } | ||
821 | } | ||
822 | } | ||
823 | |||
824 | /* Receive Checksum Offload XXX recompute due to CRC strip? */ | ||
825 | e1000_rx_checksum(adapter, | ||
826 | (u32)(status) | | ||
827 | ((u32)(rx_desc->errors) << 24), | ||
828 | le16_to_cpu(rx_desc->csum), skb); | ||
829 | |||
830 | pskb_trim(skb, skb->len - 4); | ||
831 | |||
832 | /* probably a little skewed due to removing CRC */ | ||
833 | total_rx_bytes += skb->len; | ||
834 | total_rx_packets++; | ||
835 | |||
836 | /* eth type trans needs skb->data to point to something */ | ||
837 | if (!pskb_may_pull(skb, ETH_HLEN)) { | ||
838 | ndev_err(netdev, "__pskb_pull_tail failed.\n"); | ||
839 | dev_kfree_skb(skb); | ||
840 | goto next_desc; | ||
841 | } | ||
842 | |||
843 | e1000_receive_skb(adapter, netdev, skb,status,rx_desc->special); | ||
844 | |||
845 | next_desc: | ||
846 | rx_desc->status = 0; | ||
847 | |||
848 | /* return some buffers to hardware, one at a time is too slow */ | ||
849 | if (cleaned_count >= E1000_RX_BUFFER_WRITE) { | ||
850 | adapter->alloc_rx_buf(adapter, cleaned_count); | ||
851 | cleaned_count = 0; | ||
852 | } | ||
853 | |||
854 | /* use prefetched values */ | ||
855 | rx_desc = next_rxd; | ||
856 | buffer_info = next_buffer; | ||
857 | } | ||
858 | rx_ring->next_to_clean = i; | ||
859 | |||
860 | cleaned_count = e1000_desc_unused(rx_ring); | ||
861 | if (cleaned_count) | ||
862 | adapter->alloc_rx_buf(adapter, cleaned_count); | ||
863 | |||
864 | adapter->total_rx_packets += total_rx_packets; | ||
865 | adapter->total_rx_bytes += total_rx_bytes; | ||
866 | return cleaned; | ||
867 | } | ||
868 | |||
869 | /** | ||
870 | * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split | 600 | * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split |
871 | * @adapter: board private structure | 601 | * @adapter: board private structure |
872 | * | 602 | * |
@@ -953,7 +683,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
953 | ((length + l1) <= adapter->rx_ps_bsize0)) { | 683 | ((length + l1) <= adapter->rx_ps_bsize0)) { |
954 | u8 *vaddr; | 684 | u8 *vaddr; |
955 | 685 | ||
956 | ps_page = &rx_ring->ps_pages[i * PS_PAGE_BUFFERS]; | 686 | ps_page = &buffer_info->ps_pages[0]; |
957 | 687 | ||
958 | /* there is no documentation about how to call | 688 | /* there is no documentation about how to call |
959 | * kmap_atomic, so we can't hold the mapping | 689 | * kmap_atomic, so we can't hold the mapping |
@@ -965,8 +695,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
965 | kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); | 695 | kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); |
966 | pci_dma_sync_single_for_device(pdev, ps_page->dma, | 696 | pci_dma_sync_single_for_device(pdev, ps_page->dma, |
967 | PAGE_SIZE, PCI_DMA_FROMDEVICE); | 697 | PAGE_SIZE, PCI_DMA_FROMDEVICE); |
968 | /* remove the CRC */ | 698 | |
969 | l1 -= 4; | ||
970 | skb_put(skb, l1); | 699 | skb_put(skb, l1); |
971 | goto copydone; | 700 | goto copydone; |
972 | } /* if */ | 701 | } /* if */ |
@@ -977,7 +706,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
977 | if (!length) | 706 | if (!length) |
978 | break; | 707 | break; |
979 | 708 | ||
980 | ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS) + j]; | 709 | ps_page = &buffer_info->ps_pages[j]; |
981 | pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE, | 710 | pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE, |
982 | PCI_DMA_FROMDEVICE); | 711 | PCI_DMA_FROMDEVICE); |
983 | ps_page->dma = 0; | 712 | ps_page->dma = 0; |
@@ -988,10 +717,6 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
988 | skb->truesize += length; | 717 | skb->truesize += length; |
989 | } | 718 | } |
990 | 719 | ||
991 | /* strip the ethernet crc, problem is we're using pages now so | ||
992 | * this whole operation can get a little cpu intensive */ | ||
993 | pskb_trim(skb, skb->len - 4); | ||
994 | |||
995 | copydone: | 720 | copydone: |
996 | total_rx_bytes += skb->len; | 721 | total_rx_bytes += skb->len; |
997 | total_rx_packets++; | 722 | total_rx_packets++; |
@@ -1043,7 +768,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter) | |||
1043 | struct e1000_buffer *buffer_info; | 768 | struct e1000_buffer *buffer_info; |
1044 | struct e1000_ps_page *ps_page; | 769 | struct e1000_ps_page *ps_page; |
1045 | struct pci_dev *pdev = adapter->pdev; | 770 | struct pci_dev *pdev = adapter->pdev; |
1046 | unsigned long size; | ||
1047 | unsigned int i, j; | 771 | unsigned int i, j; |
1048 | 772 | ||
1049 | /* Free all the Rx ring sk_buffs */ | 773 | /* Free all the Rx ring sk_buffs */ |
@@ -1054,9 +778,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter) | |||
1054 | pci_unmap_single(pdev, buffer_info->dma, | 778 | pci_unmap_single(pdev, buffer_info->dma, |
1055 | adapter->rx_buffer_len, | 779 | adapter->rx_buffer_len, |
1056 | PCI_DMA_FROMDEVICE); | 780 | PCI_DMA_FROMDEVICE); |
1057 | else if (adapter->clean_rx == e1000_clean_rx_irq_jumbo) | ||
1058 | pci_unmap_page(pdev, buffer_info->dma, | ||
1059 | PAGE_SIZE, PCI_DMA_FROMDEVICE); | ||
1060 | else if (adapter->clean_rx == e1000_clean_rx_irq_ps) | 781 | else if (adapter->clean_rx == e1000_clean_rx_irq_ps) |
1061 | pci_unmap_single(pdev, buffer_info->dma, | 782 | pci_unmap_single(pdev, buffer_info->dma, |
1062 | adapter->rx_ps_bsize0, | 783 | adapter->rx_ps_bsize0, |
@@ -1064,19 +785,13 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter) | |||
1064 | buffer_info->dma = 0; | 785 | buffer_info->dma = 0; |
1065 | } | 786 | } |
1066 | 787 | ||
1067 | if (buffer_info->page) { | ||
1068 | put_page(buffer_info->page); | ||
1069 | buffer_info->page = NULL; | ||
1070 | } | ||
1071 | |||
1072 | if (buffer_info->skb) { | 788 | if (buffer_info->skb) { |
1073 | dev_kfree_skb(buffer_info->skb); | 789 | dev_kfree_skb(buffer_info->skb); |
1074 | buffer_info->skb = NULL; | 790 | buffer_info->skb = NULL; |
1075 | } | 791 | } |
1076 | 792 | ||
1077 | for (j = 0; j < PS_PAGE_BUFFERS; j++) { | 793 | for (j = 0; j < PS_PAGE_BUFFERS; j++) { |
1078 | ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS) | 794 | ps_page = &buffer_info->ps_pages[j]; |
1079 | + j]; | ||
1080 | if (!ps_page->page) | 795 | if (!ps_page->page) |
1081 | break; | 796 | break; |
1082 | pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE, | 797 | pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE, |
@@ -1093,12 +808,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter) | |||
1093 | rx_ring->rx_skb_top = NULL; | 808 | rx_ring->rx_skb_top = NULL; |
1094 | } | 809 | } |
1095 | 810 | ||
1096 | size = sizeof(struct e1000_buffer) * rx_ring->count; | ||
1097 | memset(rx_ring->buffer_info, 0, size); | ||
1098 | size = sizeof(struct e1000_ps_page) | ||
1099 | * (rx_ring->count * PS_PAGE_BUFFERS); | ||
1100 | memset(rx_ring->ps_pages, 0, size); | ||
1101 | |||
1102 | /* Zero out the descriptor ring */ | 811 | /* Zero out the descriptor ring */ |
1103 | memset(rx_ring->desc, 0, rx_ring->size); | 812 | memset(rx_ring->desc, 0, rx_ring->size); |
1104 | 813 | ||
@@ -1421,7 +1130,8 @@ err: | |||
1421 | int e1000e_setup_rx_resources(struct e1000_adapter *adapter) | 1130 | int e1000e_setup_rx_resources(struct e1000_adapter *adapter) |
1422 | { | 1131 | { |
1423 | struct e1000_ring *rx_ring = adapter->rx_ring; | 1132 | struct e1000_ring *rx_ring = adapter->rx_ring; |
1424 | int size, desc_len, err = -ENOMEM; | 1133 | struct e1000_buffer *buffer_info; |
1134 | int i, size, desc_len, err = -ENOMEM; | ||
1425 | 1135 | ||
1426 | size = sizeof(struct e1000_buffer) * rx_ring->count; | 1136 | size = sizeof(struct e1000_buffer) * rx_ring->count; |
1427 | rx_ring->buffer_info = vmalloc(size); | 1137 | rx_ring->buffer_info = vmalloc(size); |
@@ -1429,11 +1139,14 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter) | |||
1429 | goto err; | 1139 | goto err; |
1430 | memset(rx_ring->buffer_info, 0, size); | 1140 | memset(rx_ring->buffer_info, 0, size); |
1431 | 1141 | ||
1432 | rx_ring->ps_pages = kcalloc(rx_ring->count * PS_PAGE_BUFFERS, | 1142 | for (i = 0; i < rx_ring->count; i++) { |
1433 | sizeof(struct e1000_ps_page), | 1143 | buffer_info = &rx_ring->buffer_info[i]; |
1434 | GFP_KERNEL); | 1144 | buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS, |
1435 | if (!rx_ring->ps_pages) | 1145 | sizeof(struct e1000_ps_page), |
1436 | goto err; | 1146 | GFP_KERNEL); |
1147 | if (!buffer_info->ps_pages) | ||
1148 | goto err_pages; | ||
1149 | } | ||
1437 | 1150 | ||
1438 | desc_len = sizeof(union e1000_rx_desc_packet_split); | 1151 | desc_len = sizeof(union e1000_rx_desc_packet_split); |
1439 | 1152 | ||
@@ -1443,16 +1156,21 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter) | |||
1443 | 1156 | ||
1444 | err = e1000_alloc_ring_dma(adapter, rx_ring); | 1157 | err = e1000_alloc_ring_dma(adapter, rx_ring); |
1445 | if (err) | 1158 | if (err) |
1446 | goto err; | 1159 | goto err_pages; |
1447 | 1160 | ||
1448 | rx_ring->next_to_clean = 0; | 1161 | rx_ring->next_to_clean = 0; |
1449 | rx_ring->next_to_use = 0; | 1162 | rx_ring->next_to_use = 0; |
1450 | rx_ring->rx_skb_top = NULL; | 1163 | rx_ring->rx_skb_top = NULL; |
1451 | 1164 | ||
1452 | return 0; | 1165 | return 0; |
1166 | |||
1167 | err_pages: | ||
1168 | for (i = 0; i < rx_ring->count; i++) { | ||
1169 | buffer_info = &rx_ring->buffer_info[i]; | ||
1170 | kfree(buffer_info->ps_pages); | ||
1171 | } | ||
1453 | err: | 1172 | err: |
1454 | vfree(rx_ring->buffer_info); | 1173 | vfree(rx_ring->buffer_info); |
1455 | kfree(rx_ring->ps_pages); | ||
1456 | ndev_err(adapter->netdev, | 1174 | ndev_err(adapter->netdev, |
1457 | "Unable to allocate memory for the transmit descriptor ring\n"); | 1175 | "Unable to allocate memory for the transmit descriptor ring\n"); |
1458 | return err; | 1176 | return err; |
@@ -1518,15 +1236,17 @@ void e1000e_free_rx_resources(struct e1000_adapter *adapter) | |||
1518 | { | 1236 | { |
1519 | struct pci_dev *pdev = adapter->pdev; | 1237 | struct pci_dev *pdev = adapter->pdev; |
1520 | struct e1000_ring *rx_ring = adapter->rx_ring; | 1238 | struct e1000_ring *rx_ring = adapter->rx_ring; |
1239 | int i; | ||
1521 | 1240 | ||
1522 | e1000_clean_rx_ring(adapter); | 1241 | e1000_clean_rx_ring(adapter); |
1523 | 1242 | ||
1243 | for (i = 0; i < rx_ring->count; i++) { | ||
1244 | kfree(rx_ring->buffer_info[i].ps_pages); | ||
1245 | } | ||
1246 | |||
1524 | vfree(rx_ring->buffer_info); | 1247 | vfree(rx_ring->buffer_info); |
1525 | rx_ring->buffer_info = NULL; | 1248 | rx_ring->buffer_info = NULL; |
1526 | 1249 | ||
1527 | kfree(rx_ring->ps_pages); | ||
1528 | rx_ring->ps_pages = NULL; | ||
1529 | |||
1530 | dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, | 1250 | dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, |
1531 | rx_ring->dma); | 1251 | rx_ring->dma); |
1532 | rx_ring->desc = NULL; | 1252 | rx_ring->desc = NULL; |
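
The two hunks above replace the ring-wide ps_pages array with a small kcalloc()'d array hung off each e1000_buffer, which means setup needs a rollback path that frees whatever was already allocated, and teardown needs a matching per-buffer kfree() before the buffer_info vfree(). A minimal, self-contained sketch of that allocate-with-rollback pattern, using illustrative demo_* names rather than the driver's own structures:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#define DEMO_PS_PAGE_BUFFERS 4	/* illustrative count, not the driver's PS_PAGE_BUFFERS */

struct demo_ps_page { struct page *page; dma_addr_t dma; };
struct demo_buffer { struct demo_ps_page *ps_pages; };
struct demo_ring {
	int count;
	struct demo_buffer *buffer_info;
};

/* Allocate one ps_pages array per buffer; unwind everything on failure. */
static int demo_setup_ring(struct demo_ring *ring)
{
	int i;

	ring->buffer_info = vmalloc(ring->count * sizeof(*ring->buffer_info));
	if (!ring->buffer_info)
		return -ENOMEM;
	memset(ring->buffer_info, 0, ring->count * sizeof(*ring->buffer_info));

	for (i = 0; i < ring->count; i++) {
		ring->buffer_info[i].ps_pages =
			kcalloc(DEMO_PS_PAGE_BUFFERS,
				sizeof(struct demo_ps_page), GFP_KERNEL);
		if (!ring->buffer_info[i].ps_pages)
			goto err_pages;
	}
	return 0;

err_pages:
	while (--i >= 0)	/* free only the arrays that were allocated */
		kfree(ring->buffer_info[i].ps_pages);
	vfree(ring->buffer_info);
	ring->buffer_info = NULL;
	return -ENOMEM;
}

/* Teardown mirrors setup: per-buffer kfree() first, then the vfree(). */
static void demo_free_ring(struct demo_ring *ring)
{
	int i;

	for (i = 0; i < ring->count; i++)
		kfree(ring->buffer_info[i].ps_pages);
	vfree(ring->buffer_info);
	ring->buffer_info = NULL;
}

The driver's own error path simply runs kfree() over all count entries instead of unwinding, which is equally valid since kfree(NULL) is a no-op.
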
@@ -2032,9 +1752,11 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) | |||
2032 | 1752 | ||
2033 | ew32(RFCTL, rfctl); | 1753 | ew32(RFCTL, rfctl); |
2034 | 1754 | ||
2035 | /* disable the stripping of CRC because it breaks | 1755 | /* Enable Packet split descriptors */ |
2036 | * BMC firmware connected over SMBUS */ | 1756 | rctl |= E1000_RCTL_DTYP_PS; |
2037 | rctl |= E1000_RCTL_DTYP_PS /* | E1000_RCTL_SECRC */; | 1757 | |
1758 | /* Enable hardware CRC frame stripping */ | ||
1759 | rctl |= E1000_RCTL_SECRC; | ||
2038 | 1760 | ||
2039 | psrctl |= adapter->rx_ps_bsize0 >> | 1761 | psrctl |= adapter->rx_ps_bsize0 >> |
2040 | E1000_PSRCTL_BSIZE0_SHIFT; | 1762 | E1000_PSRCTL_BSIZE0_SHIFT; |
@@ -2077,11 +1799,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
2077 | sizeof(union e1000_rx_desc_packet_split); | 1799 | sizeof(union e1000_rx_desc_packet_split); |
2078 | adapter->clean_rx = e1000_clean_rx_irq_ps; | 1800 | adapter->clean_rx = e1000_clean_rx_irq_ps; |
2079 | adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; | 1801 | adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; |
2080 | } else if (adapter->netdev->mtu > ETH_FRAME_LEN + VLAN_HLEN + 4) { | ||
2081 | rdlen = rx_ring->count * | ||
2082 | sizeof(struct e1000_rx_desc); | ||
2083 | adapter->clean_rx = e1000_clean_rx_irq_jumbo; | ||
2084 | adapter->alloc_rx_buf = e1000_alloc_rx_buffers_jumbo; | ||
2085 | } else { | 1802 | } else { |
2086 | rdlen = rx_ring->count * | 1803 | rdlen = rx_ring->count * |
2087 | sizeof(struct e1000_rx_desc); | 1804 | sizeof(struct e1000_rx_desc); |
@@ -2326,8 +2043,11 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
2326 | struct e1000_mac_info *mac = &adapter->hw.mac; | 2043 | struct e1000_mac_info *mac = &adapter->hw.mac; |
2327 | struct e1000_hw *hw = &adapter->hw; | 2044 | struct e1000_hw *hw = &adapter->hw; |
2328 | u32 tx_space, min_tx_space, min_rx_space; | 2045 | u32 tx_space, min_tx_space, min_rx_space; |
2046 | u32 pba; | ||
2329 | u16 hwm; | 2047 | u16 hwm; |
2330 | 2048 | ||
2049 | ew32(PBA, adapter->pba); | ||
2050 | |||
2331 | if (mac->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN ) { | 2051 | if (mac->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN ) { |
2332 | /* To maintain wire speed transmits, the Tx FIFO should be | 2052 | /* To maintain wire speed transmits, the Tx FIFO should be |
2333 | * large enough to accommodate two full transmit packets, | 2053 | * large enough to accommodate two full transmit packets, |
@@ -2335,11 +2055,11 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
2335 | * the Rx FIFO should be large enough to accommodate at least | 2055 | * the Rx FIFO should be large enough to accommodate at least |
2336 | * one full receive packet and is similarly rounded up and | 2056 | * one full receive packet and is similarly rounded up and |
2337 | * expressed in KB. */ | 2057 | * expressed in KB. */ |
2338 | adapter->pba = er32(PBA); | 2058 | pba = er32(PBA); |
2339 | /* upper 16 bits has Tx packet buffer allocation size in KB */ | 2059 | /* upper 16 bits has Tx packet buffer allocation size in KB */ |
2340 | tx_space = adapter->pba >> 16; | 2060 | tx_space = pba >> 16; |
2341 | /* lower 16 bits has Rx packet buffer allocation size in KB */ | 2061 | /* lower 16 bits has Rx packet buffer allocation size in KB */ |
2342 | adapter->pba &= 0xffff; | 2062 | pba &= 0xffff; |
2343 | /* the tx fifo also stores 16 bytes of information about the tx | 2063 | /* the tx fifo also stores 16 bytes of information about the tx |
2344 | * but don't include ethernet FCS because hardware appends it */ | 2064 | * but don't include ethernet FCS because hardware appends it */ |
2345 | min_tx_space = (mac->max_frame_size + | 2065 | min_tx_space = (mac->max_frame_size + |
@@ -2355,20 +2075,21 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
2355 | /* If current Tx allocation is less than the min Tx FIFO size, | 2075 | /* If current Tx allocation is less than the min Tx FIFO size, |
2356 | * and the min Tx FIFO size is less than the current Rx FIFO | 2076 | * and the min Tx FIFO size is less than the current Rx FIFO |
2357 | * allocation, take space away from current Rx allocation */ | 2077 | * allocation, take space away from current Rx allocation */ |
2358 | if (tx_space < min_tx_space && | 2078 | if ((tx_space < min_tx_space) && |
2359 | ((min_tx_space - tx_space) < adapter->pba)) { | 2079 | ((min_tx_space - tx_space) < pba)) { |
2360 | adapter->pba -= - (min_tx_space - tx_space); | 2080 | pba -= min_tx_space - tx_space; |
2361 | 2081 | ||
2362 | /* if short on rx space, rx wins and must trump tx | 2082 | /* if short on rx space, rx wins and must trump tx |
2363 | * adjustment or use Early Receive if available */ | 2083 | * adjustment or use Early Receive if available */ |
2364 | if ((adapter->pba < min_rx_space) && | 2084 | if ((pba < min_rx_space) && |
2365 | (!(adapter->flags & FLAG_HAS_ERT))) | 2085 | (!(adapter->flags & FLAG_HAS_ERT))) |
2366 | /* ERT enabled in e1000_configure_rx */ | 2086 | /* ERT enabled in e1000_configure_rx */ |
2367 | adapter->pba = min_rx_space; | 2087 | pba = min_rx_space; |
2368 | } | 2088 | } |
2089 | |||
2090 | ew32(PBA, pba); | ||
2369 | } | 2091 | } |
2370 | 2092 | ||
2371 | ew32(PBA, adapter->pba); | ||
2372 | 2093 | ||
2373 | /* flow control settings */ | 2094 | /* flow control settings */ |
2374 | /* The high water mark must be low enough to fit one full frame | 2095 | /* The high water mark must be low enough to fit one full frame |
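
The block above splits the adapter's packet buffer between Tx (upper 16 bits of PBA, in KB) and Rx (lower 16 bits, in KB), steals Rx space only when Tx falls short of its minimum, and never lets Rx drop below its own minimum before writing the result back to the PBA register. A standalone sketch of just that arithmetic, with the minimum sizes passed in as plain numbers instead of being derived from max_frame_size, and ignoring the FLAG_HAS_ERT exception:

#include <stdint.h>
#include <stdio.h>

/*
 * PBA register layout: upper 16 bits = Tx packet buffer in KB,
 * lower 16 bits = Rx packet buffer in KB.  Returns the Rx KB value
 * that would be written back to the register.
 */
static uint32_t demo_rebalance_pba(uint32_t pba, uint32_t min_tx_kb,
				   uint32_t min_rx_kb)
{
	uint32_t tx_kb = pba >> 16;
	uint32_t rx_kb = pba & 0xffff;

	if (tx_kb < min_tx_kb && (min_tx_kb - tx_kb) < rx_kb) {
		rx_kb -= min_tx_kb - tx_kb;	/* take the Tx shortfall from Rx */
		if (rx_kb < min_rx_kb)
			rx_kb = min_rx_kb;	/* but never below the Rx minimum */
	}
	return rx_kb;
}

int main(void)
{
	/* 20 KB Tx / 20 KB Rx, Tx wants 24 KB, Rx needs 12 KB -> Rx ends up with 16 KB */
	printf("rx = %u KB\n", demo_rebalance_pba((20u << 16) | 20u, 24, 12));
	return 0;
}
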
@@ -3624,9 +3345,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3624 | /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN | 3345 | /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN |
3625 | * means we reserve 2 more, this pushes us to allocate from the next | 3346 | * means we reserve 2 more, this pushes us to allocate from the next |
3626 | * larger slab size. | 3347 | * larger slab size. |
3627 | * i.e. RXBUFFER_2048 --> size-4096 slab | 3348 | * i.e. RXBUFFER_2048 --> size-4096 slab */ |
3628 | * however with the new *_jumbo* routines, jumbo receives will use | ||
3629 | * fragmented skbs */ | ||
3630 | 3349 | ||
3631 | if (max_frame <= 256) | 3350 | if (max_frame <= 256) |
3632 | adapter->rx_buffer_len = 256; | 3351 | adapter->rx_buffer_len = 256; |
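
The comment kept in this hunk is really about allocator geometry: netdev_alloc_skb() reserves 16 bytes up front and NET_IP_ALIGN typically adds 2 more, so asking for a 2048-byte receive buffer already spills the allocation into the next power-of-two size class. A tiny sketch of that arithmetic; the power-of-two rounding stands in for the kmalloc slab sizes and ignores skb_shared_info overhead:

#include <stdio.h>

/* Round up to the next power of two, as a stand-in for kmalloc size classes. */
static unsigned int next_pow2(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int rx_buffer_len = 2048;
	unsigned int skb_reserve = 16;	/* netdev_alloc_skb() reservation */
	unsigned int ip_align = 2;	/* typical NET_IP_ALIGN */
	unsigned int need = rx_buffer_len + skb_reserve + ip_align;

	/* 2048 + 16 + 2 = 2066 -> size-4096 slab */
	printf("need %u bytes -> %u-byte slab\n", need, next_pow2(need));
	return 0;
}
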
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index b557bb44a36f..f78e5bf7cb33 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h | |||
@@ -40,7 +40,7 @@ | |||
40 | #include <asm/io.h> | 40 | #include <asm/io.h> |
41 | 41 | ||
42 | #define DRV_NAME "ehea" | 42 | #define DRV_NAME "ehea" |
43 | #define DRV_VERSION "EHEA_0078" | 43 | #define DRV_VERSION "EHEA_0080" |
44 | 44 | ||
45 | /* eHEA capability flags */ | 45 | /* eHEA capability flags */ |
46 | #define DLPAR_PORT_ADD_REM 1 | 46 | #define DLPAR_PORT_ADD_REM 1 |
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 2809c99906e0..f0319f1e8e05 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
@@ -33,6 +33,9 @@ | |||
33 | #include <linux/if.h> | 33 | #include <linux/if.h> |
34 | #include <linux/list.h> | 34 | #include <linux/list.h> |
35 | #include <linux/if_ether.h> | 35 | #include <linux/if_ether.h> |
36 | #include <linux/notifier.h> | ||
37 | #include <linux/reboot.h> | ||
38 | |||
36 | #include <net/ip.h> | 39 | #include <net/ip.h> |
37 | 40 | ||
38 | #include "ehea.h" | 41 | #include "ehea.h" |
@@ -2329,7 +2332,7 @@ static void port_napi_disable(struct ehea_port *port) | |||
2329 | { | 2332 | { |
2330 | int i; | 2333 | int i; |
2331 | 2334 | ||
2332 | for (i = 0; i < port->num_def_qps; i++) | 2335 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) |
2333 | napi_disable(&port->port_res[i].napi); | 2336 | napi_disable(&port->port_res[i].napi); |
2334 | } | 2337 | } |
2335 | 2338 | ||
@@ -2337,7 +2340,7 @@ static void port_napi_enable(struct ehea_port *port) | |||
2337 | { | 2340 | { |
2338 | int i; | 2341 | int i; |
2339 | 2342 | ||
2340 | for (i = 0; i < port->num_def_qps; i++) | 2343 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) |
2341 | napi_enable(&port->port_res[i].napi); | 2344 | napi_enable(&port->port_res[i].napi); |
2342 | } | 2345 | } |
2343 | 2346 | ||
@@ -2373,8 +2376,6 @@ static int ehea_down(struct net_device *dev) | |||
2373 | ehea_drop_multicast_list(dev); | 2376 | ehea_drop_multicast_list(dev); |
2374 | ehea_free_interrupts(dev); | 2377 | ehea_free_interrupts(dev); |
2375 | 2378 | ||
2376 | port_napi_disable(port); | ||
2377 | |||
2378 | port->state = EHEA_PORT_DOWN; | 2379 | port->state = EHEA_PORT_DOWN; |
2379 | 2380 | ||
2380 | ret = ehea_clean_all_portres(port); | 2381 | ret = ehea_clean_all_portres(port); |
@@ -2396,6 +2397,7 @@ static int ehea_stop(struct net_device *dev) | |||
2396 | flush_scheduled_work(); | 2397 | flush_scheduled_work(); |
2397 | down(&port->port_lock); | 2398 | down(&port->port_lock); |
2398 | netif_stop_queue(dev); | 2399 | netif_stop_queue(dev); |
2400 | port_napi_disable(port); | ||
2399 | ret = ehea_down(dev); | 2401 | ret = ehea_down(dev); |
2400 | up(&port->port_lock); | 2402 | up(&port->port_lock); |
2401 | return ret; | 2403 | return ret; |
@@ -3296,6 +3298,20 @@ static int __devexit ehea_remove(struct of_device *dev) | |||
3296 | return 0; | 3298 | return 0; |
3297 | } | 3299 | } |
3298 | 3300 | ||
3301 | static int ehea_reboot_notifier(struct notifier_block *nb, | ||
3302 | unsigned long action, void *unused) | ||
3303 | { | ||
3304 | if (action == SYS_RESTART) { | ||
3305 | ehea_info("Reboot: freeing all eHEA resources"); | ||
3306 | ibmebus_unregister_driver(&ehea_driver); | ||
3307 | } | ||
3308 | return NOTIFY_DONE; | ||
3309 | } | ||
3310 | |||
3311 | static struct notifier_block ehea_reboot_nb = { | ||
3312 | .notifier_call = ehea_reboot_notifier, | ||
3313 | }; | ||
3314 | |||
3299 | static int check_module_parm(void) | 3315 | static int check_module_parm(void) |
3300 | { | 3316 | { |
3301 | int ret = 0; | 3317 | int ret = 0; |
@@ -3352,6 +3368,8 @@ int __init ehea_module_init(void) | |||
3352 | if (ret) | 3368 | if (ret) |
3353 | goto out; | 3369 | goto out; |
3354 | 3370 | ||
3371 | register_reboot_notifier(&ehea_reboot_nb); | ||
3372 | |||
3355 | ret = ibmebus_register_driver(&ehea_driver); | 3373 | ret = ibmebus_register_driver(&ehea_driver); |
3356 | if (ret) { | 3374 | if (ret) { |
3357 | ehea_error("failed registering eHEA device driver on ebus"); | 3375 | ehea_error("failed registering eHEA device driver on ebus"); |
@@ -3363,6 +3381,7 @@ int __init ehea_module_init(void) | |||
3363 | if (ret) { | 3381 | if (ret) { |
3364 | ehea_error("failed to register capabilities attribute, ret=%d", | 3382 | ehea_error("failed to register capabilities attribute, ret=%d", |
3365 | ret); | 3383 | ret); |
3384 | unregister_reboot_notifier(&ehea_reboot_nb); | ||
3366 | ibmebus_unregister_driver(&ehea_driver); | 3385 | ibmebus_unregister_driver(&ehea_driver); |
3367 | goto out; | 3386 | goto out; |
3368 | } | 3387 | } |
@@ -3376,6 +3395,7 @@ static void __exit ehea_module_exit(void) | |||
3376 | flush_scheduled_work(); | 3395 | flush_scheduled_work(); |
3377 | driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); | 3396 | driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); |
3378 | ibmebus_unregister_driver(&ehea_driver); | 3397 | ibmebus_unregister_driver(&ehea_driver); |
3398 | unregister_reboot_notifier(&ehea_reboot_nb); | ||
3379 | ehea_destroy_busmap(); | 3399 | ehea_destroy_busmap(); |
3380 | } | 3400 | } |
3381 | 3401 | ||
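
Taken together, the ehea hunks above register a reboot notifier during module init, unregister it again both on the init error path and in module exit, and have the callback release the adapter on SYS_RESTART. A condensed, self-contained sketch of that lifecycle; the demo_* names are placeholders, not ehea's symbols:

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int demo_reboot_notifier(struct notifier_block *nb,
				unsigned long action, void *unused)
{
	if (action == SYS_RESTART)
		printk(KERN_INFO "demo: reboot, releasing resources\n");
	return NOTIFY_DONE;
}

static struct notifier_block demo_reboot_nb = {
	.notifier_call = demo_reboot_notifier,
};

static int __init demo_init(void)
{
	int ret;

	register_reboot_notifier(&demo_reboot_nb);

	ret = 0;	/* ...register the real driver here... */
	if (ret)
		unregister_reboot_notifier(&demo_reboot_nb);	/* undo on failure */
	return ret;
}

static void __exit demo_exit(void)
{
	unregister_reboot_notifier(&demo_reboot_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
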
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c new file mode 100644 index 000000000000..fc1cf0b742b0 --- /dev/null +++ b/drivers/net/fec_mpc52xx.c | |||
@@ -0,0 +1,1112 @@ | |||
1 | /* | ||
2 | * Driver for the MPC5200 Fast Ethernet Controller | ||
3 | * | ||
4 | * Originally written by Dale Farnsworth <dfarnsworth@mvista.com> and | ||
5 | * now maintained by Sylvain Munaut <tnt@246tNt.com> | ||
6 | * | ||
7 | * Copyright (C) 2007 Domen Puncer, Telargo, Inc. | ||
8 | * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com> | ||
9 | * Copyright (C) 2003-2004 MontaVista, Software, Inc. | ||
10 | * | ||
11 | * This file is licensed under the terms of the GNU General Public License | ||
12 | * version 2. This program is licensed "as is" without any warranty of any | ||
13 | * kind, whether express or implied. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/module.h> | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/types.h> | ||
21 | #include <linux/spinlock.h> | ||
22 | #include <linux/errno.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/crc32.h> | ||
25 | #include <linux/hardirq.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/of_device.h> | ||
28 | #include <linux/of_platform.h> | ||
29 | |||
30 | #include <linux/netdevice.h> | ||
31 | #include <linux/etherdevice.h> | ||
32 | #include <linux/ethtool.h> | ||
33 | #include <linux/skbuff.h> | ||
34 | |||
35 | #include <asm/io.h> | ||
36 | #include <asm/delay.h> | ||
37 | #include <asm/mpc52xx.h> | ||
38 | |||
39 | #include <sysdev/bestcomm/bestcomm.h> | ||
40 | #include <sysdev/bestcomm/fec.h> | ||
41 | |||
42 | #include "fec_mpc52xx.h" | ||
43 | |||
44 | #define DRIVER_NAME "mpc52xx-fec" | ||
45 | |||
46 | static irqreturn_t mpc52xx_fec_interrupt(int, void *); | ||
47 | static irqreturn_t mpc52xx_fec_rx_interrupt(int, void *); | ||
48 | static irqreturn_t mpc52xx_fec_tx_interrupt(int, void *); | ||
49 | static void mpc52xx_fec_stop(struct net_device *dev); | ||
50 | static void mpc52xx_fec_start(struct net_device *dev); | ||
51 | static void mpc52xx_fec_reset(struct net_device *dev); | ||
52 | |||
53 | static u8 mpc52xx_fec_mac_addr[6]; | ||
54 | module_param_array_named(mac, mpc52xx_fec_mac_addr, byte, NULL, 0); | ||
55 | MODULE_PARM_DESC(mac, "six hex octets, i.e. 0x1,0x2,0xc0,0x01,0xba,0xbe"); | ||
56 | |||
57 | #define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \ | ||
58 | NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN ) | ||
59 | static int debug = -1; /* the above default */ | ||
60 | module_param(debug, int, 0); | ||
61 | MODULE_PARM_DESC(debug, "debugging messages level"); | ||
62 | |||
63 | static void mpc52xx_fec_tx_timeout(struct net_device *dev) | ||
64 | { | ||
65 | dev_warn(&dev->dev, "transmit timed out\n"); | ||
66 | |||
67 | mpc52xx_fec_reset(dev); | ||
68 | |||
69 | dev->stats.tx_errors++; | ||
70 | |||
71 | netif_wake_queue(dev); | ||
72 | } | ||
73 | |||
74 | static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac) | ||
75 | { | ||
76 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
77 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
78 | |||
79 | out_be32(&fec->paddr1, *(u32 *)(&mac[0])); | ||
80 | out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE); | ||
81 | } | ||
82 | |||
83 | static void mpc52xx_fec_get_paddr(struct net_device *dev, u8 *mac) | ||
84 | { | ||
85 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
86 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
87 | |||
88 | *(u32 *)(&mac[0]) = in_be32(&fec->paddr1); | ||
89 | *(u16 *)(&mac[4]) = in_be32(&fec->paddr2) >> 16; | ||
90 | } | ||
91 | |||
92 | static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr) | ||
93 | { | ||
94 | struct sockaddr *sock = addr; | ||
95 | |||
96 | memcpy(dev->dev_addr, sock->sa_data, dev->addr_len); | ||
97 | |||
98 | mpc52xx_fec_set_paddr(dev, sock->sa_data); | ||
99 | return 0; | ||
100 | } | ||
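
mpc52xx_fec_set_paddr() above packs the six MAC bytes into two registers: the first four bytes fill PADDR1, the last two sit in the top half of PADDR2, and FEC_PADDR2_TYPE occupies the low half. On the big-endian MPC5200 the pointer casts used above yield the same register values a portable shift-based version would; a standalone sketch of that packing, with the type constant left as a placeholder since its real value lives in fec_mpc52xx.h:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PADDR2_TYPE 0x0000u	/* placeholder for FEC_PADDR2_TYPE */

/* Pack a 6-byte MAC into the two 32-bit station-address register values. */
static void demo_pack_paddr(const uint8_t mac[6], uint32_t *paddr1, uint32_t *paddr2)
{
	*paddr1 = ((uint32_t)mac[0] << 24) | ((uint32_t)mac[1] << 16) |
		  ((uint32_t)mac[2] << 8)  |  (uint32_t)mac[3];
	*paddr2 = ((uint32_t)mac[4] << 24) | ((uint32_t)mac[5] << 16) |
		  DEMO_PADDR2_TYPE;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x01, 0xc0, 0x01, 0xba, 0xbe };
	uint32_t p1, p2;

	demo_pack_paddr(mac, &p1, &p2);
	printf("PADDR1=%08x PADDR2=%08x\n", p1, p2);	/* 0001c001 babe0000 */
	return 0;
}
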
101 | |||
102 | static void mpc52xx_fec_free_rx_buffers(struct net_device *dev, struct bcom_task *s) | ||
103 | { | ||
104 | while (!bcom_queue_empty(s)) { | ||
105 | struct bcom_fec_bd *bd; | ||
106 | struct sk_buff *skb; | ||
107 | |||
108 | skb = bcom_retrieve_buffer(s, NULL, (struct bcom_bd **)&bd); | ||
109 | dma_unmap_single(&dev->dev, bd->skb_pa, skb->len, DMA_FROM_DEVICE); | ||
110 | kfree_skb(skb); | ||
111 | } | ||
112 | } | ||
113 | |||
114 | static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task *rxtsk) | ||
115 | { | ||
116 | while (!bcom_queue_full(rxtsk)) { | ||
117 | struct sk_buff *skb; | ||
118 | struct bcom_fec_bd *bd; | ||
119 | |||
120 | skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE); | ||
121 | if (skb == NULL) | ||
122 | return -EAGAIN; | ||
123 | |||
124 | /* zero out the initial receive buffers to aid debugging */ | ||
125 | memset(skb->data, 0, FEC_RX_BUFFER_SIZE); | ||
126 | |||
127 | bd = (struct bcom_fec_bd *)bcom_prepare_next_buffer(rxtsk); | ||
128 | |||
129 | bd->status = FEC_RX_BUFFER_SIZE; | ||
130 | bd->skb_pa = dma_map_single(&dev->dev, skb->data, | ||
131 | FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE); | ||
132 | |||
133 | bcom_submit_next_buffer(rxtsk, skb); | ||
134 | } | ||
135 | |||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | /* based on generic_adjust_link from fs_enet-main.c */ | ||
140 | static void mpc52xx_fec_adjust_link(struct net_device *dev) | ||
141 | { | ||
142 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
143 | struct phy_device *phydev = priv->phydev; | ||
144 | int new_state = 0; | ||
145 | |||
146 | if (phydev->link != PHY_DOWN) { | ||
147 | if (phydev->duplex != priv->duplex) { | ||
148 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
149 | u32 rcntrl; | ||
150 | u32 tcntrl; | ||
151 | |||
152 | new_state = 1; | ||
153 | priv->duplex = phydev->duplex; | ||
154 | |||
155 | rcntrl = in_be32(&fec->r_cntrl); | ||
156 | tcntrl = in_be32(&fec->x_cntrl); | ||
157 | |||
158 | rcntrl &= ~FEC_RCNTRL_DRT; | ||
159 | tcntrl &= ~FEC_TCNTRL_FDEN; | ||
160 | if (phydev->duplex == DUPLEX_FULL) | ||
161 | tcntrl |= FEC_TCNTRL_FDEN; /* FD enable */ | ||
162 | else | ||
163 | rcntrl |= FEC_RCNTRL_DRT; /* disable Rx on Tx (HD) */ | ||
164 | |||
165 | out_be32(&fec->r_cntrl, rcntrl); | ||
166 | out_be32(&fec->x_cntrl, tcntrl); | ||
167 | } | ||
168 | |||
169 | if (phydev->speed != priv->speed) { | ||
170 | new_state = 1; | ||
171 | priv->speed = phydev->speed; | ||
172 | } | ||
173 | |||
174 | if (priv->link == PHY_DOWN) { | ||
175 | new_state = 1; | ||
176 | priv->link = phydev->link; | ||
177 | netif_schedule(dev); | ||
178 | netif_carrier_on(dev); | ||
179 | netif_start_queue(dev); | ||
180 | } | ||
181 | |||
182 | } else if (priv->link) { | ||
183 | new_state = 1; | ||
184 | priv->link = PHY_DOWN; | ||
185 | priv->speed = 0; | ||
186 | priv->duplex = -1; | ||
187 | netif_stop_queue(dev); | ||
188 | netif_carrier_off(dev); | ||
189 | } | ||
190 | |||
191 | if (new_state && netif_msg_link(priv)) | ||
192 | phy_print_status(phydev); | ||
193 | } | ||
194 | |||
195 | static int mpc52xx_fec_init_phy(struct net_device *dev) | ||
196 | { | ||
197 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
198 | struct phy_device *phydev; | ||
199 | char phy_id[BUS_ID_SIZE]; | ||
200 | |||
201 | snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, | ||
202 | (unsigned int)dev->base_addr, priv->phy_addr); | ||
203 | |||
204 | priv->link = PHY_DOWN; | ||
205 | priv->speed = 0; | ||
206 | priv->duplex = -1; | ||
207 | |||
208 | phydev = phy_connect(dev, phy_id, &mpc52xx_fec_adjust_link, 0, PHY_INTERFACE_MODE_MII); | ||
209 | if (IS_ERR(phydev)) { | ||
210 | dev_err(&dev->dev, "phy_connect failed\n"); | ||
211 | return PTR_ERR(phydev); | ||
212 | } | ||
213 | dev_info(&dev->dev, "attached phy %i to driver %s\n", | ||
214 | phydev->addr, phydev->drv->name); | ||
215 | |||
216 | priv->phydev = phydev; | ||
217 | |||
218 | return 0; | ||
219 | } | ||
220 | |||
221 | static int mpc52xx_fec_phy_start(struct net_device *dev) | ||
222 | { | ||
223 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
224 | int err; | ||
225 | |||
226 | if (!priv->has_phy) | ||
227 | return 0; | ||
228 | |||
229 | err = mpc52xx_fec_init_phy(dev); | ||
230 | if (err) { | ||
231 | dev_err(&dev->dev, "mpc52xx_fec_init_phy failed\n"); | ||
232 | return err; | ||
233 | } | ||
234 | |||
235 | /* reset phy - this also wakes it from PDOWN */ | ||
236 | phy_write(priv->phydev, MII_BMCR, BMCR_RESET); | ||
237 | phy_start(priv->phydev); | ||
238 | |||
239 | return 0; | ||
240 | } | ||
241 | |||
242 | static void mpc52xx_fec_phy_stop(struct net_device *dev) | ||
243 | { | ||
244 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
245 | |||
246 | if (!priv->has_phy) | ||
247 | return; | ||
248 | |||
249 | phy_disconnect(priv->phydev); | ||
250 | /* power down phy */ | ||
251 | phy_stop(priv->phydev); | ||
252 | phy_write(priv->phydev, MII_BMCR, BMCR_PDOWN); | ||
253 | } | ||
254 | |||
255 | static int mpc52xx_fec_phy_mii_ioctl(struct mpc52xx_fec_priv *priv, | ||
256 | struct mii_ioctl_data *mii_data, int cmd) | ||
257 | { | ||
258 | if (!priv->has_phy) | ||
259 | return -ENOTSUPP; | ||
260 | |||
261 | return phy_mii_ioctl(priv->phydev, mii_data, cmd); | ||
262 | } | ||
263 | |||
264 | static void mpc52xx_fec_phy_hw_init(struct mpc52xx_fec_priv *priv) | ||
265 | { | ||
266 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
267 | |||
268 | if (!priv->has_phy) | ||
269 | return; | ||
270 | |||
271 | out_be32(&fec->mii_speed, priv->phy_speed); | ||
272 | } | ||
273 | |||
274 | static int mpc52xx_fec_open(struct net_device *dev) | ||
275 | { | ||
276 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
277 | int err = -EBUSY; | ||
278 | |||
279 | if (request_irq(dev->irq, &mpc52xx_fec_interrupt, IRQF_SHARED, | ||
280 | DRIVER_NAME "_ctrl", dev)) { | ||
281 | dev_err(&dev->dev, "ctrl interrupt request failed\n"); | ||
282 | goto out; | ||
283 | } | ||
284 | if (request_irq(priv->r_irq, &mpc52xx_fec_rx_interrupt, 0, | ||
285 | DRIVER_NAME "_rx", dev)) { | ||
286 | dev_err(&dev->dev, "rx interrupt request failed\n"); | ||
287 | goto free_ctrl_irq; | ||
288 | } | ||
289 | if (request_irq(priv->t_irq, &mpc52xx_fec_tx_interrupt, 0, | ||
290 | DRIVER_NAME "_tx", dev)) { | ||
291 | dev_err(&dev->dev, "tx interrupt request failed\n"); | ||
292 | goto free_2irqs; | ||
293 | } | ||
294 | |||
295 | bcom_fec_rx_reset(priv->rx_dmatsk); | ||
296 | bcom_fec_tx_reset(priv->tx_dmatsk); | ||
297 | |||
298 | err = mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk); | ||
299 | if (err) { | ||
300 | dev_err(&dev->dev, "mpc52xx_fec_alloc_rx_buffers failed\n"); | ||
301 | goto free_irqs; | ||
302 | } | ||
303 | |||
304 | err = mpc52xx_fec_phy_start(dev); | ||
305 | if (err) | ||
306 | goto free_skbs; | ||
307 | |||
308 | bcom_enable(priv->rx_dmatsk); | ||
309 | bcom_enable(priv->tx_dmatsk); | ||
310 | |||
311 | mpc52xx_fec_start(dev); | ||
312 | |||
313 | netif_start_queue(dev); | ||
314 | |||
315 | return 0; | ||
316 | |||
317 | free_skbs: | ||
318 | mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk); | ||
319 | |||
320 | free_irqs: | ||
321 | free_irq(priv->t_irq, dev); | ||
322 | free_2irqs: | ||
323 | free_irq(priv->r_irq, dev); | ||
324 | free_ctrl_irq: | ||
325 | free_irq(dev->irq, dev); | ||
326 | out: | ||
327 | |||
328 | return err; | ||
329 | } | ||
330 | |||
331 | static int mpc52xx_fec_close(struct net_device *dev) | ||
332 | { | ||
333 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
334 | |||
335 | netif_stop_queue(dev); | ||
336 | |||
337 | mpc52xx_fec_stop(dev); | ||
338 | |||
339 | mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk); | ||
340 | |||
341 | free_irq(dev->irq, dev); | ||
342 | free_irq(priv->r_irq, dev); | ||
343 | free_irq(priv->t_irq, dev); | ||
344 | |||
345 | mpc52xx_fec_phy_stop(dev); | ||
346 | |||
347 | return 0; | ||
348 | } | ||
349 | |||
350 | /* This will only be invoked if your driver is _not_ in XOFF state. | ||
351 | * What this means is that you need not check it, and that this | ||
352 | * invariant will hold if you make sure that the netif_*_queue() | ||
353 | * calls are done at the proper times. | ||
354 | */ | ||
355 | static int mpc52xx_fec_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
356 | { | ||
357 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
358 | struct bcom_fec_bd *bd; | ||
359 | |||
360 | if (bcom_queue_full(priv->tx_dmatsk)) { | ||
361 | if (net_ratelimit()) | ||
362 | dev_err(&dev->dev, "transmit queue overrun\n"); | ||
363 | return 1; | ||
364 | } | ||
365 | |||
366 | spin_lock_irq(&priv->lock); | ||
367 | dev->trans_start = jiffies; | ||
368 | |||
369 | bd = (struct bcom_fec_bd *) | ||
370 | bcom_prepare_next_buffer(priv->tx_dmatsk); | ||
371 | |||
372 | bd->status = skb->len | BCOM_FEC_TX_BD_TFD | BCOM_FEC_TX_BD_TC; | ||
373 | bd->skb_pa = dma_map_single(&dev->dev, skb->data, skb->len, DMA_TO_DEVICE); | ||
374 | |||
375 | bcom_submit_next_buffer(priv->tx_dmatsk, skb); | ||
376 | |||
377 | if (bcom_queue_full(priv->tx_dmatsk)) { | ||
378 | netif_stop_queue(dev); | ||
379 | } | ||
380 | |||
381 | spin_unlock_irq(&priv->lock); | ||
382 | |||
383 | return 0; | ||
384 | } | ||
385 | |||
386 | /* This handles BestComm transmit task interrupts | ||
387 | */ | ||
388 | static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id) | ||
389 | { | ||
390 | struct net_device *dev = dev_id; | ||
391 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
392 | |||
393 | spin_lock(&priv->lock); | ||
394 | |||
395 | while (bcom_buffer_done(priv->tx_dmatsk)) { | ||
396 | struct sk_buff *skb; | ||
397 | struct bcom_fec_bd *bd; | ||
398 | skb = bcom_retrieve_buffer(priv->tx_dmatsk, NULL, | ||
399 | (struct bcom_bd **)&bd); | ||
400 | dma_unmap_single(&dev->dev, bd->skb_pa, skb->len, DMA_TO_DEVICE); | ||
401 | |||
402 | dev_kfree_skb_irq(skb); | ||
403 | } | ||
404 | |||
405 | netif_wake_queue(dev); | ||
406 | |||
407 | spin_unlock(&priv->lock); | ||
408 | |||
409 | return IRQ_HANDLED; | ||
410 | } | ||
411 | |||
412 | static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id) | ||
413 | { | ||
414 | struct net_device *dev = dev_id; | ||
415 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
416 | |||
417 | while (bcom_buffer_done(priv->rx_dmatsk)) { | ||
418 | struct sk_buff *skb; | ||
419 | struct sk_buff *rskb; | ||
420 | struct bcom_fec_bd *bd; | ||
421 | u32 status; | ||
422 | |||
423 | rskb = bcom_retrieve_buffer(priv->rx_dmatsk, &status, | ||
424 | (struct bcom_bd **)&bd); | ||
425 | dma_unmap_single(&dev->dev, bd->skb_pa, FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE); | ||
426 | |||
427 | /* Test for errors in received frame */ | ||
428 | if (status & BCOM_FEC_RX_BD_ERRORS) { | ||
429 | /* Drop packet and reuse the buffer */ | ||
430 | bd = (struct bcom_fec_bd *) | ||
431 | bcom_prepare_next_buffer(priv->rx_dmatsk); | ||
432 | |||
433 | bd->status = FEC_RX_BUFFER_SIZE; | ||
434 | bd->skb_pa = dma_map_single(&dev->dev, rskb->data, | ||
435 | FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE); | ||
436 | |||
437 | bcom_submit_next_buffer(priv->rx_dmatsk, rskb); | ||
438 | |||
439 | dev->stats.rx_dropped++; | ||
440 | |||
441 | continue; | ||
442 | } | ||
443 | |||
444 | /* skbs are allocated on open, so now we allocate a new one, | ||
445 | * and remove the old (with the packet) */ | ||
446 | skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE); | ||
447 | if (skb) { | ||
448 | /* Process the received skb */ | ||
449 | int length = status & BCOM_FEC_RX_BD_LEN_MASK; | ||
450 | |||
451 | skb_put(rskb, length - 4); /* length without CRC32 */ | ||
452 | |||
453 | rskb->dev = dev; | ||
454 | rskb->protocol = eth_type_trans(rskb, dev); | ||
455 | |||
456 | netif_rx(rskb); | ||
457 | dev->last_rx = jiffies; | ||
458 | } else { | ||
459 | /* Can't get a new one: reuse the same & drop pkt */ | ||
460 | dev_notice(&dev->dev, "Memory squeeze, dropping packet.\n"); | ||
461 | dev->stats.rx_dropped++; | ||
462 | |||
463 | skb = rskb; | ||
464 | } | ||
465 | |||
466 | bd = (struct bcom_fec_bd *) | ||
467 | bcom_prepare_next_buffer(priv->rx_dmatsk); | ||
468 | |||
469 | bd->status = FEC_RX_BUFFER_SIZE; | ||
470 | bd->skb_pa = dma_map_single(&dev->dev, rskb->data, | ||
471 | FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE); | ||
472 | |||
473 | bcom_submit_next_buffer(priv->rx_dmatsk, skb); | ||
474 | } | ||
475 | |||
476 | return IRQ_HANDLED; | ||
477 | } | ||
478 | |||
479 | static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id) | ||
480 | { | ||
481 | struct net_device *dev = dev_id; | ||
482 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
483 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
484 | u32 ievent; | ||
485 | |||
486 | ievent = in_be32(&fec->ievent); | ||
487 | |||
488 | ievent &= ~FEC_IEVENT_MII; /* mii is handled separately */ | ||
489 | if (!ievent) | ||
490 | return IRQ_NONE; | ||
491 | |||
492 | out_be32(&fec->ievent, ievent); /* clear pending events */ | ||
493 | |||
494 | if (ievent & ~(FEC_IEVENT_RFIFO_ERROR | FEC_IEVENT_XFIFO_ERROR)) { | ||
495 | if (ievent & ~FEC_IEVENT_TFINT) | ||
496 | dev_dbg(&dev->dev, "ievent: %08x\n", ievent); | ||
497 | return IRQ_HANDLED; | ||
498 | } | ||
499 | |||
500 | if (net_ratelimit() && (ievent & FEC_IEVENT_RFIFO_ERROR)) | ||
501 | dev_warn(&dev->dev, "FEC_IEVENT_RFIFO_ERROR\n"); | ||
502 | if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR)) | ||
503 | dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n"); | ||
504 | |||
505 | mpc52xx_fec_reset(dev); | ||
506 | |||
507 | netif_wake_queue(dev); | ||
508 | return IRQ_HANDLED; | ||
509 | } | ||
510 | |||
511 | /* | ||
512 | * Get the current statistics. | ||
513 | * This may be called with the card open or closed. | ||
514 | */ | ||
515 | static struct net_device_stats *mpc52xx_fec_get_stats(struct net_device *dev) | ||
516 | { | ||
517 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
518 | struct net_device_stats *stats = &dev->stats; | ||
519 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
520 | |||
521 | stats->rx_bytes = in_be32(&fec->rmon_r_octets); | ||
522 | stats->rx_packets = in_be32(&fec->rmon_r_packets); | ||
523 | stats->rx_errors = in_be32(&fec->rmon_r_crc_align) + | ||
524 | in_be32(&fec->rmon_r_undersize) + | ||
525 | in_be32(&fec->rmon_r_oversize) + | ||
526 | in_be32(&fec->rmon_r_frag) + | ||
527 | in_be32(&fec->rmon_r_jab); | ||
528 | |||
529 | stats->tx_bytes = in_be32(&fec->rmon_t_octets); | ||
530 | stats->tx_packets = in_be32(&fec->rmon_t_packets); | ||
531 | stats->tx_errors = in_be32(&fec->rmon_t_crc_align) + | ||
532 | in_be32(&fec->rmon_t_undersize) + | ||
533 | in_be32(&fec->rmon_t_oversize) + | ||
534 | in_be32(&fec->rmon_t_frag) + | ||
535 | in_be32(&fec->rmon_t_jab); | ||
536 | |||
537 | stats->multicast = in_be32(&fec->rmon_r_mc_pkt); | ||
538 | stats->collisions = in_be32(&fec->rmon_t_col); | ||
539 | |||
540 | /* detailed rx_errors: */ | ||
541 | stats->rx_length_errors = in_be32(&fec->rmon_r_undersize) | ||
542 | + in_be32(&fec->rmon_r_oversize) | ||
543 | + in_be32(&fec->rmon_r_frag) | ||
544 | + in_be32(&fec->rmon_r_jab); | ||
545 | stats->rx_over_errors = in_be32(&fec->r_macerr); | ||
546 | stats->rx_crc_errors = in_be32(&fec->ieee_r_crc); | ||
547 | stats->rx_frame_errors = in_be32(&fec->ieee_r_align); | ||
548 | stats->rx_fifo_errors = in_be32(&fec->rmon_r_drop); | ||
549 | stats->rx_missed_errors = in_be32(&fec->rmon_r_drop); | ||
550 | |||
551 | /* detailed tx_errors: */ | ||
552 | stats->tx_aborted_errors = 0; | ||
553 | stats->tx_carrier_errors = in_be32(&fec->ieee_t_cserr); | ||
554 | stats->tx_fifo_errors = in_be32(&fec->rmon_t_drop); | ||
555 | stats->tx_heartbeat_errors = in_be32(&fec->ieee_t_sqe); | ||
556 | stats->tx_window_errors = in_be32(&fec->ieee_t_lcol); | ||
557 | |||
558 | return stats; | ||
559 | } | ||
560 | |||
561 | /* | ||
562 | * Read MIB counters in order to reset them, | ||
563 | * then zero all the stats fields in memory | ||
564 | */ | ||
565 | static void mpc52xx_fec_reset_stats(struct net_device *dev) | ||
566 | { | ||
567 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
568 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
569 | |||
570 | out_be32(&fec->mib_control, FEC_MIB_DISABLE); | ||
571 | memset_io(&fec->rmon_t_drop, 0, (__force u32)&fec->reserved10 - | ||
572 | (__force u32)&fec->rmon_t_drop); | ||
573 | out_be32(&fec->mib_control, 0); | ||
574 | |||
575 | memset(&dev->stats, 0, sizeof(dev->stats)); | ||
576 | } | ||
577 | |||
578 | /* | ||
579 | * Set or clear the multicast filter for this adaptor. | ||
580 | */ | ||
581 | static void mpc52xx_fec_set_multicast_list(struct net_device *dev) | ||
582 | { | ||
583 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
584 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
585 | u32 rx_control; | ||
586 | |||
587 | rx_control = in_be32(&fec->r_cntrl); | ||
588 | |||
589 | if (dev->flags & IFF_PROMISC) { | ||
590 | rx_control |= FEC_RCNTRL_PROM; | ||
591 | out_be32(&fec->r_cntrl, rx_control); | ||
592 | } else { | ||
593 | rx_control &= ~FEC_RCNTRL_PROM; | ||
594 | out_be32(&fec->r_cntrl, rx_control); | ||
595 | |||
596 | if (dev->flags & IFF_ALLMULTI) { | ||
597 | out_be32(&fec->gaddr1, 0xffffffff); | ||
598 | out_be32(&fec->gaddr2, 0xffffffff); | ||
599 | } else { | ||
600 | u32 crc; | ||
601 | int i; | ||
602 | struct dev_mc_list *dmi; | ||
603 | u32 gaddr1 = 0x00000000; | ||
604 | u32 gaddr2 = 0x00000000; | ||
605 | |||
606 | dmi = dev->mc_list; | ||
607 | for (i=0; i<dev->mc_count; i++) { | ||
608 | crc = ether_crc_le(6, dmi->dmi_addr) >> 26; | ||
609 | if (crc >= 32) | ||
610 | gaddr1 |= 1 << (crc-32); | ||
611 | else | ||
612 | gaddr2 |= 1 << crc; | ||
613 | dmi = dmi->next; | ||
614 | } | ||
615 | out_be32(&fec->gaddr1, gaddr1); | ||
616 | out_be32(&fec->gaddr2, gaddr2); | ||
617 | } | ||
618 | } | ||
619 | } | ||
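
In the non-ALLMULTI branch above each multicast address is hashed with ether_crc_le() and the top six bits of the CRC pick one bit out of the 64-bit GADDR1/GADDR2 filter pair: indices 32..63 set a bit in GADDR1, indices 0..31 in GADDR2. A small kernel-style helper isolating just that bit selection (the demo_ name is illustrative; ether_crc_le() comes from <linux/crc32.h>, which this driver already includes):

#include <linux/crc32.h>	/* ether_crc_le() */
#include <linux/types.h>

/* Set the group-address filter bit that one multicast address hashes to. */
static void demo_set_hash_bit(const u8 *addr, u32 *gaddr1, u32 *gaddr2)
{
	u32 crc = ether_crc_le(6, addr) >> 26;	/* keep the top 6 bits: 0..63 */

	if (crc >= 32)
		*gaddr1 |= 1u << (crc - 32);
	else
		*gaddr2 |= 1u << crc;
}
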
620 | |||
621 | /** | ||
622 | * mpc52xx_fec_hw_init | ||
623 | * @dev: network device | ||
624 | * | ||
625 | * Setup various hardware setting, only needed once on start | ||
626 | */ | ||
627 | static void mpc52xx_fec_hw_init(struct net_device *dev) | ||
628 | { | ||
629 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
630 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
631 | int i; | ||
632 | |||
633 | /* Whack a reset. We should wait for this. */ | ||
634 | out_be32(&fec->ecntrl, FEC_ECNTRL_RESET); | ||
635 | for (i = 0; i < FEC_RESET_DELAY; ++i) { | ||
636 | if ((in_be32(&fec->ecntrl) & FEC_ECNTRL_RESET) == 0) | ||
637 | break; | ||
638 | udelay(1); | ||
639 | } | ||
640 | if (i == FEC_RESET_DELAY) | ||
641 | dev_err(&dev->dev, "FEC Reset timeout!\n"); | ||
642 | |||
643 | /* set pause to 0x20 frames */ | ||
644 | out_be32(&fec->op_pause, FEC_OP_PAUSE_OPCODE | 0x20); | ||
645 | |||
646 | /* high service request will be deasserted when there's < 7 bytes in fifo | ||
647 | * low service request will be deasserted when there's < 4*7 bytes in fifo | ||
648 | */ | ||
649 | out_be32(&fec->rfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7); | ||
650 | out_be32(&fec->tfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7); | ||
651 | |||
652 | /* alarm when <= x bytes in FIFO */ | ||
653 | out_be32(&fec->rfifo_alarm, 0x0000030c); | ||
654 | out_be32(&fec->tfifo_alarm, 0x00000100); | ||
655 | |||
656 | /* begin transmission when 256 bytes are in FIFO (or EOF or FIFO full) */ | ||
657 | out_be32(&fec->x_wmrk, FEC_FIFO_WMRK_256B); | ||
658 | |||
659 | /* enable crc generation */ | ||
660 | out_be32(&fec->xmit_fsm, FEC_XMIT_FSM_APPEND_CRC | FEC_XMIT_FSM_ENABLE_CRC); | ||
661 | out_be32(&fec->iaddr1, 0x00000000); /* No individual filter */ | ||
662 | out_be32(&fec->iaddr2, 0x00000000); /* No individual filter */ | ||
663 | |||
664 | /* set phy speed. | ||
665 | * this can't be done in phy driver, since it needs to be called | ||
666 | * before fec stuff (even on resume) */ | ||
667 | mpc52xx_fec_phy_hw_init(priv); | ||
668 | } | ||
669 | |||
670 | /** | ||
671 | * mpc52xx_fec_start | ||
672 | * @dev: network device | ||
673 | * | ||
674 | * This function is called to start or restart the FEC during a link | ||
675 | * change. This happens on fifo errors or when switching between half | ||
676 | * and full duplex. | ||
677 | */ | ||
678 | static void mpc52xx_fec_start(struct net_device *dev) | ||
679 | { | ||
680 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
681 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
682 | u32 rcntrl; | ||
683 | u32 tcntrl; | ||
684 | u32 tmp; | ||
685 | |||
686 | /* clear sticky error bits */ | ||
687 | tmp = FEC_FIFO_STATUS_ERR | FEC_FIFO_STATUS_UF | FEC_FIFO_STATUS_OF; | ||
688 | out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status) & tmp); | ||
689 | out_be32(&fec->tfifo_status, in_be32(&fec->tfifo_status) & tmp); | ||
690 | |||
691 | /* FIFOs will reset on mpc52xx_fec_enable */ | ||
692 | out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_ENABLE_IS_RESET); | ||
693 | |||
694 | /* Set station address. */ | ||
695 | mpc52xx_fec_set_paddr(dev, dev->dev_addr); | ||
696 | |||
697 | mpc52xx_fec_set_multicast_list(dev); | ||
698 | |||
699 | /* set max frame len, enable flow control, select mii mode */ | ||
700 | rcntrl = FEC_RX_BUFFER_SIZE << 16; /* max frame length */ | ||
701 | rcntrl |= FEC_RCNTRL_FCE; | ||
702 | |||
703 | if (priv->has_phy) | ||
704 | rcntrl |= FEC_RCNTRL_MII_MODE; | ||
705 | |||
706 | if (priv->duplex == DUPLEX_FULL) | ||
707 | tcntrl = FEC_TCNTRL_FDEN; /* FD enable */ | ||
708 | else { | ||
709 | rcntrl |= FEC_RCNTRL_DRT; /* disable Rx on Tx (HD) */ | ||
710 | tcntrl = 0; | ||
711 | } | ||
712 | out_be32(&fec->r_cntrl, rcntrl); | ||
713 | out_be32(&fec->x_cntrl, tcntrl); | ||
714 | |||
715 | /* Clear any outstanding interrupt. */ | ||
716 | out_be32(&fec->ievent, 0xffffffff); | ||
717 | |||
718 | /* Enable interrupts we wish to service. */ | ||
719 | out_be32(&fec->imask, FEC_IMASK_ENABLE); | ||
720 | |||
721 | /* And last, enable the transmit and receive processing. */ | ||
722 | out_be32(&fec->ecntrl, FEC_ECNTRL_ETHER_EN); | ||
723 | out_be32(&fec->r_des_active, 0x01000000); | ||
724 | } | ||
725 | |||
726 | /** | ||
727 | * mpc52xx_fec_stop | ||
728 | * @dev: network device | ||
729 | * | ||
730 | * stop all activity on fec and empty dma buffers | ||
731 | */ | ||
732 | static void mpc52xx_fec_stop(struct net_device *dev) | ||
733 | { | ||
734 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
735 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
736 | unsigned long timeout; | ||
737 | |||
738 | /* disable all interrupts */ | ||
739 | out_be32(&fec->imask, 0); | ||
740 | |||
741 | /* Disable the rx task. */ | ||
742 | bcom_disable(priv->rx_dmatsk); | ||
743 | |||
744 | /* Wait for tx queue to drain, but only if we're in process context */ | ||
745 | if (!in_interrupt()) { | ||
746 | timeout = jiffies + msecs_to_jiffies(2000); | ||
747 | while (time_before(jiffies, timeout) && | ||
748 | !bcom_queue_empty(priv->tx_dmatsk)) | ||
749 | msleep(100); | ||
750 | |||
751 | if (time_after_eq(jiffies, timeout)) | ||
752 | dev_err(&dev->dev, "queues didn't drain\n"); | ||
753 | #if 1 | ||
754 | if (time_after_eq(jiffies, timeout)) { | ||
755 | dev_err(&dev->dev, " tx: index: %i, outdex: %i\n", | ||
756 | priv->tx_dmatsk->index, | ||
757 | priv->tx_dmatsk->outdex); | ||
758 | dev_err(&dev->dev, " rx: index: %i, outdex: %i\n", | ||
759 | priv->rx_dmatsk->index, | ||
760 | priv->rx_dmatsk->outdex); | ||
761 | } | ||
762 | #endif | ||
763 | } | ||
764 | |||
765 | bcom_disable(priv->tx_dmatsk); | ||
766 | |||
767 | /* Stop FEC */ | ||
768 | out_be32(&fec->ecntrl, in_be32(&fec->ecntrl) & ~FEC_ECNTRL_ETHER_EN); | ||
769 | |||
770 | return; | ||
771 | } | ||
772 | |||
773 | /* reset fec and bestcomm tasks */ | ||
774 | static void mpc52xx_fec_reset(struct net_device *dev) | ||
775 | { | ||
776 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
777 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
778 | |||
779 | mpc52xx_fec_stop(dev); | ||
780 | |||
781 | out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status)); | ||
782 | out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_RESET_FIFO); | ||
783 | |||
784 | mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk); | ||
785 | |||
786 | mpc52xx_fec_hw_init(dev); | ||
787 | |||
788 | phy_stop(priv->phydev); | ||
789 | phy_write(priv->phydev, MII_BMCR, BMCR_RESET); | ||
790 | phy_start(priv->phydev); | ||
791 | |||
792 | bcom_fec_rx_reset(priv->rx_dmatsk); | ||
793 | bcom_fec_tx_reset(priv->tx_dmatsk); | ||
794 | |||
795 | mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk); | ||
796 | |||
797 | bcom_enable(priv->rx_dmatsk); | ||
798 | bcom_enable(priv->tx_dmatsk); | ||
799 | |||
800 | mpc52xx_fec_start(dev); | ||
801 | } | ||
802 | |||
803 | |||
804 | /* ethtool interface */ | ||
805 | static void mpc52xx_fec_get_drvinfo(struct net_device *dev, | ||
806 | struct ethtool_drvinfo *info) | ||
807 | { | ||
808 | strcpy(info->driver, DRIVER_NAME); | ||
809 | } | ||
810 | |||
811 | static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
812 | { | ||
813 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
814 | return phy_ethtool_gset(priv->phydev, cmd); | ||
815 | } | ||
816 | |||
817 | static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
818 | { | ||
819 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
820 | return phy_ethtool_sset(priv->phydev, cmd); | ||
821 | } | ||
822 | |||
823 | static u32 mpc52xx_fec_get_msglevel(struct net_device *dev) | ||
824 | { | ||
825 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
826 | return priv->msg_enable; | ||
827 | } | ||
828 | |||
829 | static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level) | ||
830 | { | ||
831 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
832 | priv->msg_enable = level; | ||
833 | } | ||
834 | |||
835 | static const struct ethtool_ops mpc52xx_fec_ethtool_ops = { | ||
836 | .get_drvinfo = mpc52xx_fec_get_drvinfo, | ||
837 | .get_settings = mpc52xx_fec_get_settings, | ||
838 | .set_settings = mpc52xx_fec_set_settings, | ||
839 | .get_link = ethtool_op_get_link, | ||
840 | .get_msglevel = mpc52xx_fec_get_msglevel, | ||
841 | .set_msglevel = mpc52xx_fec_set_msglevel, | ||
842 | }; | ||
843 | |||
844 | |||
845 | static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
846 | { | ||
847 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
848 | |||
849 | return mpc52xx_fec_phy_mii_ioctl(priv, if_mii(rq), cmd); | ||
850 | } | ||
851 | |||
852 | /* ======================================================================== */ | ||
853 | /* OF Driver */ | ||
854 | /* ======================================================================== */ | ||
855 | |||
856 | static int __devinit | ||
857 | mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match) | ||
858 | { | ||
859 | int rv; | ||
860 | struct net_device *ndev; | ||
861 | struct mpc52xx_fec_priv *priv = NULL; | ||
862 | struct resource mem; | ||
863 | const phandle *ph; | ||
864 | |||
865 | phys_addr_t rx_fifo; | ||
866 | phys_addr_t tx_fifo; | ||
867 | |||
868 | /* Get the ether ndev & its private zone */ | ||
869 | ndev = alloc_etherdev(sizeof(struct mpc52xx_fec_priv)); | ||
870 | if (!ndev) | ||
871 | return -ENOMEM; | ||
872 | |||
873 | priv = netdev_priv(ndev); | ||
874 | |||
875 | /* Reserve FEC control zone */ | ||
876 | rv = of_address_to_resource(op->node, 0, &mem); | ||
877 | if (rv) { | ||
878 | printk(KERN_ERR DRIVER_NAME ": " | ||
879 | "Error while parsing device node resource\n" ); | ||
880 | return rv; | ||
881 | } | ||
882 | if ((mem.end - mem.start + 1) != sizeof(struct mpc52xx_fec)) { | ||
883 | printk(KERN_ERR DRIVER_NAME | ||
884 | " - invalid resource size (%lx != %x), check mpc52xx_devices.c\n", | ||
885 | (unsigned long)(mem.end - mem.start + 1), sizeof(struct mpc52xx_fec)); | ||
886 | return -EINVAL; | ||
887 | } | ||
888 | |||
889 | if (!request_mem_region(mem.start, sizeof(struct mpc52xx_fec), DRIVER_NAME)) | ||
890 | return -EBUSY; | ||
891 | |||
892 | /* Init ether ndev with what we have */ | ||
893 | ndev->open = mpc52xx_fec_open; | ||
894 | ndev->stop = mpc52xx_fec_close; | ||
895 | ndev->hard_start_xmit = mpc52xx_fec_hard_start_xmit; | ||
896 | ndev->do_ioctl = mpc52xx_fec_ioctl; | ||
897 | ndev->ethtool_ops = &mpc52xx_fec_ethtool_ops; | ||
898 | ndev->get_stats = mpc52xx_fec_get_stats; | ||
899 | ndev->set_mac_address = mpc52xx_fec_set_mac_address; | ||
900 | ndev->set_multicast_list = mpc52xx_fec_set_multicast_list; | ||
901 | ndev->tx_timeout = mpc52xx_fec_tx_timeout; | ||
902 | ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT; | ||
903 | ndev->base_addr = mem.start; | ||
904 | |||
905 | priv->t_irq = priv->r_irq = ndev->irq = NO_IRQ; /* IRQs are free for now */ | ||
906 | |||
907 | spin_lock_init(&priv->lock); | ||
908 | |||
909 | /* ioremap the zones */ | ||
910 | priv->fec = ioremap(mem.start, sizeof(struct mpc52xx_fec)); | ||
911 | |||
912 | if (!priv->fec) { | ||
913 | rv = -ENOMEM; | ||
914 | goto probe_error; | ||
915 | } | ||
916 | |||
917 | /* Bestcomm init */ | ||
918 | rx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, rfifo_data); | ||
919 | tx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, tfifo_data); | ||
920 | |||
921 | priv->rx_dmatsk = bcom_fec_rx_init(FEC_RX_NUM_BD, rx_fifo, FEC_RX_BUFFER_SIZE); | ||
922 | priv->tx_dmatsk = bcom_fec_tx_init(FEC_TX_NUM_BD, tx_fifo); | ||
923 | |||
924 | if (!priv->rx_dmatsk || !priv->tx_dmatsk) { | ||
925 | printk(KERN_ERR DRIVER_NAME ": Can not init SDMA tasks\n" ); | ||
926 | rv = -ENOMEM; | ||
927 | goto probe_error; | ||
928 | } | ||
929 | |||
930 | /* Get the IRQ we need one by one */ | ||
931 | /* Control */ | ||
932 | ndev->irq = irq_of_parse_and_map(op->node, 0); | ||
933 | |||
934 | /* RX */ | ||
935 | priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk); | ||
936 | |||
937 | /* TX */ | ||
938 | priv->t_irq = bcom_get_task_irq(priv->tx_dmatsk); | ||
939 | |||
940 | /* MAC address init */ | ||
941 | if (!is_zero_ether_addr(mpc52xx_fec_mac_addr)) | ||
942 | memcpy(ndev->dev_addr, mpc52xx_fec_mac_addr, 6); | ||
943 | else | ||
944 | mpc52xx_fec_get_paddr(ndev, ndev->dev_addr); | ||
945 | |||
946 | priv->msg_enable = netif_msg_init(debug, MPC52xx_MESSAGES_DEFAULT); | ||
947 | priv->duplex = DUPLEX_FULL; | ||
948 | |||
949 | /* is the phy present in device tree? */ | ||
950 | ph = of_get_property(op->node, "phy-handle", NULL); | ||
951 | if (ph) { | ||
952 | const unsigned int *prop; | ||
953 | struct device_node *phy_dn; | ||
954 | priv->has_phy = 1; | ||
955 | |||
956 | phy_dn = of_find_node_by_phandle(*ph); | ||
957 | prop = of_get_property(phy_dn, "reg", NULL); | ||
958 | priv->phy_addr = *prop; | ||
959 | |||
960 | of_node_put(phy_dn); | ||
961 | |||
962 | /* Phy speed */ | ||
963 | priv->phy_speed = ((mpc52xx_find_ipb_freq(op->node) >> 20) / 5) << 1; | ||
964 | } else { | ||
965 | dev_info(&ndev->dev, "can't find \"phy-handle\" in device" | ||
966 | " tree, using 7-wire mode\n"); | ||
967 | } | ||
968 | |||
969 | /* Hardware init */ | ||
970 | mpc52xx_fec_hw_init(ndev); | ||
971 | |||
972 | mpc52xx_fec_reset_stats(ndev); | ||
973 | |||
974 | /* Register the new network device */ | ||
975 | rv = register_netdev(ndev); | ||
976 | if (rv < 0) | ||
977 | goto probe_error; | ||
978 | |||
979 | /* We're done ! */ | ||
980 | dev_set_drvdata(&op->dev, ndev); | ||
981 | |||
982 | return 0; | ||
983 | |||
984 | |||
985 | /* Error handling - free everything that might be allocated */ | ||
986 | probe_error: | ||
987 | |||
988 | irq_dispose_mapping(ndev->irq); | ||
989 | |||
990 | if (priv->rx_dmatsk) | ||
991 | bcom_fec_rx_release(priv->rx_dmatsk); | ||
992 | if (priv->tx_dmatsk) | ||
993 | bcom_fec_tx_release(priv->tx_dmatsk); | ||
994 | |||
995 | if (priv->fec) | ||
996 | iounmap(priv->fec); | ||
997 | |||
998 | release_mem_region(mem.start, sizeof(struct mpc52xx_fec)); | ||
999 | |||
1000 | free_netdev(ndev); | ||
1001 | |||
1002 | return rv; | ||
1003 | } | ||
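
One line in the probe above deserves a worked example: priv->phy_speed = ((ipb_freq >> 20) / 5) << 1 turns the IPB bus frequency into the value later written to fec->mii_speed by mpc52xx_fec_phy_hw_init(). Assuming a 66 MHz IPB clock purely for illustration (the precise divider semantics are defined by the MPC5200 manual, not by this patch), the integer arithmetic works out as follows:

#include <stdio.h>

int main(void)
{
	unsigned int ipb_freq = 66000000;	/* assumed 66 MHz IPB clock */
	unsigned int phy_speed = ((ipb_freq >> 20) / 5) << 1;

	/* 66000000 >> 20 = 62, 62 / 5 = 12, 12 << 1 = 24 */
	printf("mii_speed register value: %u\n", phy_speed);
	return 0;
}
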
1004 | |||
1005 | static int | ||
1006 | mpc52xx_fec_remove(struct of_device *op) | ||
1007 | { | ||
1008 | struct net_device *ndev; | ||
1009 | struct mpc52xx_fec_priv *priv; | ||
1010 | |||
1011 | ndev = dev_get_drvdata(&op->dev); | ||
1012 | priv = netdev_priv(ndev); | ||
1013 | |||
1014 | unregister_netdev(ndev); | ||
1015 | |||
1016 | irq_dispose_mapping(ndev->irq); | ||
1017 | |||
1018 | bcom_fec_rx_release(priv->rx_dmatsk); | ||
1019 | bcom_fec_tx_release(priv->tx_dmatsk); | ||
1020 | |||
1021 | iounmap(priv->fec); | ||
1022 | |||
1023 | release_mem_region(ndev->base_addr, sizeof(struct mpc52xx_fec)); | ||
1024 | |||
1025 | free_netdev(ndev); | ||
1026 | |||
1027 | dev_set_drvdata(&op->dev, NULL); | ||
1028 | return 0; | ||
1029 | } | ||
1030 | |||
1031 | #ifdef CONFIG_PM | ||
1032 | static int mpc52xx_fec_of_suspend(struct of_device *op, pm_message_t state) | ||
1033 | { | ||
1034 | struct net_device *dev = dev_get_drvdata(&op->dev); | ||
1035 | |||
1036 | if (netif_running(dev)) | ||
1037 | mpc52xx_fec_close(dev); | ||
1038 | |||
1039 | return 0; | ||
1040 | } | ||
1041 | |||
1042 | static int mpc52xx_fec_of_resume(struct of_device *op) | ||
1043 | { | ||
1044 | struct net_device *dev = dev_get_drvdata(&op->dev); | ||
1045 | |||
1046 | mpc52xx_fec_hw_init(dev); | ||
1047 | mpc52xx_fec_reset_stats(dev); | ||
1048 | |||
1049 | if (netif_running(dev)) | ||
1050 | mpc52xx_fec_open(dev); | ||
1051 | |||
1052 | return 0; | ||
1053 | } | ||
1054 | #endif | ||
1055 | |||
1056 | static struct of_device_id mpc52xx_fec_match[] = { | ||
1057 | { | ||
1058 | .type = "network", | ||
1059 | .compatible = "mpc5200-fec", | ||
1060 | }, | ||
1061 | { } | ||
1062 | }; | ||
1063 | |||
1064 | MODULE_DEVICE_TABLE(of, mpc52xx_fec_match); | ||
1065 | |||
1066 | static struct of_platform_driver mpc52xx_fec_driver = { | ||
1067 | .owner = THIS_MODULE, | ||
1068 | .name = DRIVER_NAME, | ||
1069 | .match_table = mpc52xx_fec_match, | ||
1070 | .probe = mpc52xx_fec_probe, | ||
1071 | .remove = mpc52xx_fec_remove, | ||
1072 | #ifdef CONFIG_PM | ||
1073 | .suspend = mpc52xx_fec_of_suspend, | ||
1074 | .resume = mpc52xx_fec_of_resume, | ||
1075 | #endif | ||
1076 | }; | ||
1077 | |||
1078 | |||
1079 | /* ======================================================================== */ | ||
1080 | /* Module */ | ||
1081 | /* ======================================================================== */ | ||
1082 | |||
1083 | static int __init | ||
1084 | mpc52xx_fec_init(void) | ||
1085 | { | ||
1086 | #ifdef CONFIG_FEC_MPC52xx_MDIO | ||
1087 | int ret; | ||
1088 | ret = of_register_platform_driver(&mpc52xx_fec_mdio_driver); | ||
1089 | if (ret) { | ||
1090 | printk(KERN_ERR DRIVER_NAME ": failed to register mdio driver\n"); | ||
1091 | return ret; | ||
1092 | } | ||
1093 | #endif | ||
1094 | return of_register_platform_driver(&mpc52xx_fec_driver); | ||
1095 | } | ||
1096 | |||
1097 | static void __exit | ||
1098 | mpc52xx_fec_exit(void) | ||
1099 | { | ||
1100 | of_unregister_platform_driver(&mpc52xx_fec_driver); | ||
1101 | #ifdef CONFIG_FEC_MPC52xx_MDIO | ||
1102 | of_unregister_platform_driver(&mpc52xx_fec_mdio_driver); | ||
1103 | #endif | ||
1104 | } | ||
1105 | |||
1106 | |||
1107 | module_init(mpc52xx_fec_init); | ||
1108 | module_exit(mpc52xx_fec_exit); | ||
1109 | |||
1110 | MODULE_LICENSE("GPL"); | ||
1111 | MODULE_AUTHOR("Dale Farnsworth"); | ||
1112 | MODULE_DESCRIPTION("Ethernet driver for the Freescale MPC52xx FEC"); | ||
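The probe_error: path above is the usual kernel unwind idiom: release everything that may have been allocated, in reverse order, guarding each release against the not-yet-allocated case (which is why priv->rx_dmatsk, priv->tx_dmatsk and priv->fec are tested before being freed). As a side note, here is a minimal sketch of the same idiom written with one label per step; all names and addresses in it are placeholders, not symbols from this driver.

#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/io.h>

#define EXAMPLE_REG_BASE	0xf0003000	/* placeholder address */
#define EXAMPLE_REG_SIZE	0x800		/* placeholder size */

struct example_priv {
	void __iomem *regs;
};

static int example_probe(void)
{
	struct example_priv *priv;
	int err;

	if (!request_mem_region(EXAMPLE_REG_BASE, EXAMPLE_REG_SIZE, "example"))
		return -EBUSY;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		err = -ENOMEM;
		goto err_release;
	}

	priv->regs = ioremap(EXAMPLE_REG_BASE, EXAMPLE_REG_SIZE);
	if (!priv->regs) {
		err = -ENOMEM;
		goto err_free;
	}

	/* a real probe would stash priv somewhere before returning */
	return 0;

err_free:
	kfree(priv);
err_release:			/* undo in reverse order of setup */
	release_mem_region(EXAMPLE_REG_BASE, EXAMPLE_REG_SIZE);
	return err;
}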
diff --git a/drivers/net/fec_mpc52xx.h b/drivers/net/fec_mpc52xx.h new file mode 100644 index 000000000000..8b1f75397b9a --- /dev/null +++ b/drivers/net/fec_mpc52xx.h | |||
@@ -0,0 +1,313 @@ | |||
1 | /* | ||
2 | * drivers/net/fec_mpc52xx.h | ||
3 | * | ||
4 | * Driver for the MPC5200 Fast Ethernet Controller | ||
5 | * | ||
6 | * Author: Dale Farnsworth <dfarnsworth@mvista.com> | ||
7 | * | ||
8 | * 2003-2004 (c) MontaVista Software, Inc. This file is licensed under | ||
9 | * the terms of the GNU General Public License version 2. This program | ||
10 | * is licensed "as is" without any warranty of any kind, whether express | ||
11 | * or implied. | ||
12 | */ | ||
13 | |||
14 | #ifndef __DRIVERS_NET_MPC52XX_FEC_H__ | ||
15 | #define __DRIVERS_NET_MPC52XX_FEC_H__ | ||
16 | |||
17 | #include <linux/phy.h> | ||
18 | |||
19 | /* Tunable constants */ | ||
20 | /* FEC_RX_BUFFER_SIZE includes 4 bytes for CRC32 */ | ||
21 | #define FEC_RX_BUFFER_SIZE 1522 /* max receive packet size */ | ||
22 | #define FEC_RX_NUM_BD 256 | ||
23 | #define FEC_TX_NUM_BD 64 | ||
24 | |||
25 | #define FEC_RESET_DELAY 50 /* us */ | ||
26 | |||
27 | #define FEC_WATCHDOG_TIMEOUT ((400*HZ)/1000) | ||
28 | |||
29 | struct mpc52xx_fec_priv { | ||
30 | int duplex; | ||
31 | int r_irq; | ||
32 | int t_irq; | ||
33 | struct mpc52xx_fec __iomem *fec; | ||
34 | struct bcom_task *rx_dmatsk; | ||
35 | struct bcom_task *tx_dmatsk; | ||
36 | spinlock_t lock; | ||
37 | int msg_enable; | ||
38 | |||
39 | int has_phy; | ||
40 | unsigned int phy_speed; | ||
41 | unsigned int phy_addr; | ||
42 | struct phy_device *phydev; | ||
43 | enum phy_state link; | ||
44 | int speed; | ||
45 | }; | ||
46 | |||
47 | |||
48 | /* ======================================================================== */ | ||
49 | /* Hardware register sets & bits */ | ||
50 | /* ======================================================================== */ | ||
51 | |||
52 | struct mpc52xx_fec { | ||
53 | u32 fec_id; /* FEC + 0x000 */ | ||
54 | u32 ievent; /* FEC + 0x004 */ | ||
55 | u32 imask; /* FEC + 0x008 */ | ||
56 | |||
57 | u32 reserved0[1]; /* FEC + 0x00C */ | ||
58 | u32 r_des_active; /* FEC + 0x010 */ | ||
59 | u32 x_des_active; /* FEC + 0x014 */ | ||
60 | u32 r_des_active_cl; /* FEC + 0x018 */ | ||
61 | u32 x_des_active_cl; /* FEC + 0x01C */ | ||
62 | u32 ivent_set; /* FEC + 0x020 */ | ||
63 | u32 ecntrl; /* FEC + 0x024 */ | ||
64 | |||
65 | u32 reserved1[6]; /* FEC + 0x028-03C */ | ||
66 | u32 mii_data; /* FEC + 0x040 */ | ||
67 | u32 mii_speed; /* FEC + 0x044 */ | ||
68 | u32 mii_status; /* FEC + 0x048 */ | ||
69 | |||
70 | u32 reserved2[5]; /* FEC + 0x04C-05C */ | ||
71 | u32 mib_data; /* FEC + 0x060 */ | ||
72 | u32 mib_control; /* FEC + 0x064 */ | ||
73 | |||
74 | u32 reserved3[6]; /* FEC + 0x068-7C */ | ||
75 | u32 r_activate; /* FEC + 0x080 */ | ||
76 | u32 r_cntrl; /* FEC + 0x084 */ | ||
77 | u32 r_hash; /* FEC + 0x088 */ | ||
78 | u32 r_data; /* FEC + 0x08C */ | ||
79 | u32 ar_done; /* FEC + 0x090 */ | ||
80 | u32 r_test; /* FEC + 0x094 */ | ||
81 | u32 r_mib; /* FEC + 0x098 */ | ||
82 | u32 r_da_low; /* FEC + 0x09C */ | ||
83 | u32 r_da_high; /* FEC + 0x0A0 */ | ||
84 | |||
85 | u32 reserved4[7]; /* FEC + 0x0A4-0BC */ | ||
86 | u32 x_activate; /* FEC + 0x0C0 */ | ||
87 | u32 x_cntrl; /* FEC + 0x0C4 */ | ||
88 | u32 backoff; /* FEC + 0x0C8 */ | ||
89 | u32 x_data; /* FEC + 0x0CC */ | ||
90 | u32 x_status; /* FEC + 0x0D0 */ | ||
91 | u32 x_mib; /* FEC + 0x0D4 */ | ||
92 | u32 x_test; /* FEC + 0x0D8 */ | ||
93 | u32 fdxfc_da1; /* FEC + 0x0DC */ | ||
94 | u32 fdxfc_da2; /* FEC + 0x0E0 */ | ||
95 | u32 paddr1; /* FEC + 0x0E4 */ | ||
96 | u32 paddr2; /* FEC + 0x0E8 */ | ||
97 | u32 op_pause; /* FEC + 0x0EC */ | ||
98 | |||
99 | u32 reserved5[4]; /* FEC + 0x0F0-0FC */ | ||
100 | u32 instr_reg; /* FEC + 0x100 */ | ||
101 | u32 context_reg; /* FEC + 0x104 */ | ||
102 | u32 test_cntrl; /* FEC + 0x108 */ | ||
103 | u32 acc_reg; /* FEC + 0x10C */ | ||
104 | u32 ones; /* FEC + 0x110 */ | ||
105 | u32 zeros; /* FEC + 0x114 */ | ||
106 | u32 iaddr1; /* FEC + 0x118 */ | ||
107 | u32 iaddr2; /* FEC + 0x11C */ | ||
108 | u32 gaddr1; /* FEC + 0x120 */ | ||
109 | u32 gaddr2; /* FEC + 0x124 */ | ||
110 | u32 random; /* FEC + 0x128 */ | ||
111 | u32 rand1; /* FEC + 0x12C */ | ||
112 | u32 tmp; /* FEC + 0x130 */ | ||
113 | |||
114 | u32 reserved6[3]; /* FEC + 0x134-13C */ | ||
115 | u32 fifo_id; /* FEC + 0x140 */ | ||
116 | u32 x_wmrk; /* FEC + 0x144 */ | ||
117 | u32 fcntrl; /* FEC + 0x148 */ | ||
118 | u32 r_bound; /* FEC + 0x14C */ | ||
119 | u32 r_fstart; /* FEC + 0x150 */ | ||
120 | u32 r_count; /* FEC + 0x154 */ | ||
121 | u32 r_lag; /* FEC + 0x158 */ | ||
122 | u32 r_read; /* FEC + 0x15C */ | ||
123 | u32 r_write; /* FEC + 0x160 */ | ||
124 | u32 x_count; /* FEC + 0x164 */ | ||
125 | u32 x_lag; /* FEC + 0x168 */ | ||
126 | u32 x_retry; /* FEC + 0x16C */ | ||
127 | u32 x_write; /* FEC + 0x170 */ | ||
128 | u32 x_read; /* FEC + 0x174 */ | ||
129 | |||
130 | u32 reserved7[2]; /* FEC + 0x178-17C */ | ||
131 | u32 fm_cntrl; /* FEC + 0x180 */ | ||
132 | u32 rfifo_data; /* FEC + 0x184 */ | ||
133 | u32 rfifo_status; /* FEC + 0x188 */ | ||
134 | u32 rfifo_cntrl; /* FEC + 0x18C */ | ||
135 | u32 rfifo_lrf_ptr; /* FEC + 0x190 */ | ||
136 | u32 rfifo_lwf_ptr; /* FEC + 0x194 */ | ||
137 | u32 rfifo_alarm; /* FEC + 0x198 */ | ||
138 | u32 rfifo_rdptr; /* FEC + 0x19C */ | ||
139 | u32 rfifo_wrptr; /* FEC + 0x1A0 */ | ||
140 | u32 tfifo_data; /* FEC + 0x1A4 */ | ||
141 | u32 tfifo_status; /* FEC + 0x1A8 */ | ||
142 | u32 tfifo_cntrl; /* FEC + 0x1AC */ | ||
143 | u32 tfifo_lrf_ptr; /* FEC + 0x1B0 */ | ||
144 | u32 tfifo_lwf_ptr; /* FEC + 0x1B4 */ | ||
145 | u32 tfifo_alarm; /* FEC + 0x1B8 */ | ||
146 | u32 tfifo_rdptr; /* FEC + 0x1BC */ | ||
147 | u32 tfifo_wrptr; /* FEC + 0x1C0 */ | ||
148 | |||
149 | u32 reset_cntrl; /* FEC + 0x1C4 */ | ||
150 | u32 xmit_fsm; /* FEC + 0x1C8 */ | ||
151 | |||
152 | u32 reserved8[3]; /* FEC + 0x1CC-1D4 */ | ||
153 | u32 rdes_data0; /* FEC + 0x1D8 */ | ||
154 | u32 rdes_data1; /* FEC + 0x1DC */ | ||
155 | u32 r_length; /* FEC + 0x1E0 */ | ||
156 | u32 x_length; /* FEC + 0x1E4 */ | ||
157 | u32 x_addr; /* FEC + 0x1E8 */ | ||
158 | u32 cdes_data; /* FEC + 0x1EC */ | ||
159 | u32 status; /* FEC + 0x1F0 */ | ||
160 | u32 dma_control; /* FEC + 0x1F4 */ | ||
161 | u32 des_cmnd; /* FEC + 0x1F8 */ | ||
162 | u32 data; /* FEC + 0x1FC */ | ||
163 | |||
164 | u32 rmon_t_drop; /* FEC + 0x200 */ | ||
165 | u32 rmon_t_packets; /* FEC + 0x204 */ | ||
166 | u32 rmon_t_bc_pkt; /* FEC + 0x208 */ | ||
167 | u32 rmon_t_mc_pkt; /* FEC + 0x20C */ | ||
168 | u32 rmon_t_crc_align; /* FEC + 0x210 */ | ||
169 | u32 rmon_t_undersize; /* FEC + 0x214 */ | ||
170 | u32 rmon_t_oversize; /* FEC + 0x218 */ | ||
171 | u32 rmon_t_frag; /* FEC + 0x21C */ | ||
172 | u32 rmon_t_jab; /* FEC + 0x220 */ | ||
173 | u32 rmon_t_col; /* FEC + 0x224 */ | ||
174 | u32 rmon_t_p64; /* FEC + 0x228 */ | ||
175 | u32 rmon_t_p65to127; /* FEC + 0x22C */ | ||
176 | u32 rmon_t_p128to255; /* FEC + 0x230 */ | ||
177 | u32 rmon_t_p256to511; /* FEC + 0x234 */ | ||
178 | u32 rmon_t_p512to1023; /* FEC + 0x238 */ | ||
179 | u32 rmon_t_p1024to2047; /* FEC + 0x23C */ | ||
180 | u32 rmon_t_p_gte2048; /* FEC + 0x240 */ | ||
181 | u32 rmon_t_octets; /* FEC + 0x244 */ | ||
182 | u32 ieee_t_drop; /* FEC + 0x248 */ | ||
183 | u32 ieee_t_frame_ok; /* FEC + 0x24C */ | ||
184 | u32 ieee_t_1col; /* FEC + 0x250 */ | ||
185 | u32 ieee_t_mcol; /* FEC + 0x254 */ | ||
186 | u32 ieee_t_def; /* FEC + 0x258 */ | ||
187 | u32 ieee_t_lcol; /* FEC + 0x25C */ | ||
188 | u32 ieee_t_excol; /* FEC + 0x260 */ | ||
189 | u32 ieee_t_macerr; /* FEC + 0x264 */ | ||
190 | u32 ieee_t_cserr; /* FEC + 0x268 */ | ||
191 | u32 ieee_t_sqe; /* FEC + 0x26C */ | ||
192 | u32 t_fdxfc; /* FEC + 0x270 */ | ||
193 | u32 ieee_t_octets_ok; /* FEC + 0x274 */ | ||
194 | |||
195 | u32 reserved9[2]; /* FEC + 0x278-27C */ | ||
196 | u32 rmon_r_drop; /* FEC + 0x280 */ | ||
197 | u32 rmon_r_packets; /* FEC + 0x284 */ | ||
198 | u32 rmon_r_bc_pkt; /* FEC + 0x288 */ | ||
199 | u32 rmon_r_mc_pkt; /* FEC + 0x28C */ | ||
200 | u32 rmon_r_crc_align; /* FEC + 0x290 */ | ||
201 | u32 rmon_r_undersize; /* FEC + 0x294 */ | ||
202 | u32 rmon_r_oversize; /* FEC + 0x298 */ | ||
203 | u32 rmon_r_frag; /* FEC + 0x29C */ | ||
204 | u32 rmon_r_jab; /* FEC + 0x2A0 */ | ||
205 | |||
206 | u32 rmon_r_resvd_0; /* FEC + 0x2A4 */ | ||
207 | |||
208 | u32 rmon_r_p64; /* FEC + 0x2A8 */ | ||
209 | u32 rmon_r_p65to127; /* FEC + 0x2AC */ | ||
210 | u32 rmon_r_p128to255; /* FEC + 0x2B0 */ | ||
211 | u32 rmon_r_p256to511; /* FEC + 0x2B4 */ | ||
212 | u32 rmon_r_p512to1023; /* FEC + 0x2B8 */ | ||
213 | u32 rmon_r_p1024to2047; /* FEC + 0x2BC */ | ||
214 | u32 rmon_r_p_gte2048; /* FEC + 0x2C0 */ | ||
215 | u32 rmon_r_octets; /* FEC + 0x2C4 */ | ||
216 | u32 ieee_r_drop; /* FEC + 0x2C8 */ | ||
217 | u32 ieee_r_frame_ok; /* FEC + 0x2CC */ | ||
218 | u32 ieee_r_crc; /* FEC + 0x2D0 */ | ||
219 | u32 ieee_r_align; /* FEC + 0x2D4 */ | ||
220 | u32 r_macerr; /* FEC + 0x2D8 */ | ||
221 | u32 r_fdxfc; /* FEC + 0x2DC */ | ||
222 | u32 ieee_r_octets_ok; /* FEC + 0x2E0 */ | ||
223 | |||
224 | u32 reserved10[7]; /* FEC + 0x2E4-2FC */ | ||
225 | |||
226 | u32 reserved11[64]; /* FEC + 0x300-3FF */ | ||
227 | }; | ||
228 | |||
229 | #define FEC_MIB_DISABLE 0x80000000 | ||
230 | |||
231 | #define FEC_IEVENT_HBERR 0x80000000 | ||
232 | #define FEC_IEVENT_BABR 0x40000000 | ||
233 | #define FEC_IEVENT_BABT 0x20000000 | ||
234 | #define FEC_IEVENT_GRA 0x10000000 | ||
235 | #define FEC_IEVENT_TFINT 0x08000000 | ||
236 | #define FEC_IEVENT_MII 0x00800000 | ||
237 | #define FEC_IEVENT_LATE_COL 0x00200000 | ||
238 | #define FEC_IEVENT_COL_RETRY_LIM 0x00100000 | ||
239 | #define FEC_IEVENT_XFIFO_UN 0x00080000 | ||
240 | #define FEC_IEVENT_XFIFO_ERROR 0x00040000 | ||
241 | #define FEC_IEVENT_RFIFO_ERROR 0x00020000 | ||
242 | |||
243 | #define FEC_IMASK_HBERR 0x80000000 | ||
244 | #define FEC_IMASK_BABR 0x40000000 | ||
245 | #define FEC_IMASK_BABT 0x20000000 | ||
246 | #define FEC_IMASK_GRA 0x10000000 | ||
247 | #define FEC_IMASK_MII 0x00800000 | ||
248 | #define FEC_IMASK_LATE_COL 0x00200000 | ||
249 | #define FEC_IMASK_COL_RETRY_LIM 0x00100000 | ||
250 | #define FEC_IMASK_XFIFO_UN 0x00080000 | ||
251 | #define FEC_IMASK_XFIFO_ERROR 0x00040000 | ||
252 | #define FEC_IMASK_RFIFO_ERROR 0x00020000 | ||
253 | |||
254 | /* all but MII, which is enabled separately */ | ||
255 | #define FEC_IMASK_ENABLE (FEC_IMASK_HBERR | FEC_IMASK_BABR | \ | ||
256 | FEC_IMASK_BABT | FEC_IMASK_GRA | FEC_IMASK_LATE_COL | \ | ||
257 | FEC_IMASK_COL_RETRY_LIM | FEC_IMASK_XFIFO_UN | \ | ||
258 | FEC_IMASK_XFIFO_ERROR | FEC_IMASK_RFIFO_ERROR) | ||
259 | |||
260 | #define FEC_RCNTRL_MAX_FL_SHIFT 16 | ||
261 | #define FEC_RCNTRL_LOOP 0x01 | ||
262 | #define FEC_RCNTRL_DRT 0x02 | ||
263 | #define FEC_RCNTRL_MII_MODE 0x04 | ||
264 | #define FEC_RCNTRL_PROM 0x08 | ||
265 | #define FEC_RCNTRL_BC_REJ 0x10 | ||
266 | #define FEC_RCNTRL_FCE 0x20 | ||
267 | |||
268 | #define FEC_TCNTRL_GTS 0x00000001 | ||
269 | #define FEC_TCNTRL_HBC 0x00000002 | ||
270 | #define FEC_TCNTRL_FDEN 0x00000004 | ||
271 | #define FEC_TCNTRL_TFC_PAUSE 0x00000008 | ||
272 | #define FEC_TCNTRL_RFC_PAUSE 0x00000010 | ||
273 | |||
274 | #define FEC_ECNTRL_RESET 0x00000001 | ||
275 | #define FEC_ECNTRL_ETHER_EN 0x00000002 | ||
276 | |||
277 | #define FEC_MII_DATA_ST 0x40000000 /* Start frame */ | ||
278 | #define FEC_MII_DATA_OP_RD 0x20000000 /* Perform read */ | ||
279 | #define FEC_MII_DATA_OP_WR 0x10000000 /* Perform write */ | ||
280 | #define FEC_MII_DATA_PA_MSK 0x0f800000 /* PHY Address mask */ | ||
281 | #define FEC_MII_DATA_RA_MSK 0x007c0000 /* PHY Register mask */ | ||
282 | #define FEC_MII_DATA_TA 0x00020000 /* Turnaround */ | ||
283 | #define FEC_MII_DATA_DATAMSK 0x0000ffff /* PHY data mask */ | ||
284 | |||
285 | #define FEC_MII_READ_FRAME (FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA) | ||
286 | #define FEC_MII_WRITE_FRAME (FEC_MII_DATA_ST | FEC_MII_DATA_OP_WR | FEC_MII_DATA_TA) | ||
287 | |||
288 | #define FEC_MII_DATA_RA_SHIFT 0x12 /* MII reg addr bits */ | ||
289 | #define FEC_MII_DATA_PA_SHIFT 0x17 /* MII PHY addr bits */ | ||
290 | |||
291 | #define FEC_PADDR2_TYPE 0x8808 | ||
292 | |||
293 | #define FEC_OP_PAUSE_OPCODE 0x00010000 | ||
294 | |||
295 | #define FEC_FIFO_WMRK_256B 0x3 | ||
296 | |||
297 | #define FEC_FIFO_STATUS_ERR 0x00400000 | ||
298 | #define FEC_FIFO_STATUS_UF 0x00200000 | ||
299 | #define FEC_FIFO_STATUS_OF 0x00100000 | ||
300 | |||
301 | #define FEC_FIFO_CNTRL_FRAME 0x08000000 | ||
302 | #define FEC_FIFO_CNTRL_LTG_7 0x07000000 | ||
303 | |||
304 | #define FEC_RESET_CNTRL_RESET_FIFO 0x02000000 | ||
305 | #define FEC_RESET_CNTRL_ENABLE_IS_RESET 0x01000000 | ||
306 | |||
307 | #define FEC_XMIT_FSM_APPEND_CRC 0x02000000 | ||
308 | #define FEC_XMIT_FSM_ENABLE_CRC 0x01000000 | ||
309 | |||
310 | |||
311 | extern struct of_platform_driver mpc52xx_fec_mdio_driver; | ||
312 | |||
313 | #endif /* __DRIVERS_NET_MPC52XX_FEC_H__ */ | ||
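Tying the FEC_MII_* definitions above together: a management frame is built in mii_data by starting from FEC_MII_READ_FRAME or FEC_MII_WRITE_FRAME (start, opcode and turnaround bits) and OR-ing in the PHY and register addresses, each shifted to its field and clipped by its mask. A condensed sketch of a read, matching what fec_mpc52xx_phy.c below actually does (fec, phy_id, reg and val are assumed to be in scope, as they are in the MDIO read path):

/* sketch: issue an MII read of register 'reg' on PHY 'phy_id' */
u32 request = FEC_MII_READ_FRAME;

request |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
request |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;

out_be32(&fec->mii_data, request);

/* the FEC raises FEC_IEVENT_MII when the turnaround completes; the PHY's
 * 16-bit reply is then in the low half of mii_data */
val = in_be32(&fec->mii_data) & FEC_MII_DATA_DATAMSK;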
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c new file mode 100644 index 000000000000..ba6e8b218e0a --- /dev/null +++ b/drivers/net/fec_mpc52xx_phy.c | |||
@@ -0,0 +1,198 @@ | |||
1 | /* | ||
2 | * Driver for the MPC5200 Fast Ethernet Controller - MDIO bus driver | ||
3 | * | ||
4 | * Copyright (C) 2007 Domen Puncer, Telargo, Inc. | ||
5 | * | ||
6 | * This file is licensed under the terms of the GNU General Public License | ||
7 | * version 2. This program is licensed "as is" without any warranty of any | ||
8 | * kind, whether express or implied. | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/netdevice.h> | ||
14 | #include <linux/phy.h> | ||
15 | #include <linux/of_platform.h> | ||
16 | #include <asm/io.h> | ||
17 | #include <asm/mpc52xx.h> | ||
18 | #include "fec_mpc52xx.h" | ||
19 | |||
20 | struct mpc52xx_fec_mdio_priv { | ||
21 | struct mpc52xx_fec __iomem *regs; | ||
22 | }; | ||
23 | |||
24 | static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg) | ||
25 | { | ||
26 | struct mpc52xx_fec_mdio_priv *priv = bus->priv; | ||
27 | struct mpc52xx_fec __iomem *fec; | ||
28 | int tries = 100; | ||
29 | u32 request = FEC_MII_READ_FRAME; | ||
30 | |||
31 | fec = priv->regs; | ||
32 | out_be32(&fec->ievent, FEC_IEVENT_MII); | ||
33 | |||
34 | request |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK; | ||
35 | request |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK; | ||
36 | |||
37 | out_be32(&priv->regs->mii_data, request); | ||
38 | |||
39 | /* wait for it to finish, this takes about 23 us on lite5200b */ | ||
40 | while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries) | ||
41 | udelay(5); | ||
42 | |||
43 | if (tries == 0) | ||
44 | return -ETIMEDOUT; | ||
45 | |||
46 | return in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK; | ||
47 | } | ||
48 | |||
49 | static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 data) | ||
50 | { | ||
51 | struct mpc52xx_fec_mdio_priv *priv = bus->priv; | ||
52 | struct mpc52xx_fec __iomem *fec; | ||
53 | u32 value = data; | ||
54 | int tries = 100; | ||
55 | |||
56 | fec = priv->regs; | ||
57 | out_be32(&fec->ievent, FEC_IEVENT_MII); | ||
58 | |||
59 | value |= FEC_MII_WRITE_FRAME; | ||
60 | value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK; | ||
61 | value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK; | ||
62 | |||
63 | out_be32(&priv->regs->mii_data, value); | ||
64 | |||
65 | /* wait for request to finish */ | ||
66 | while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries) | ||
67 | udelay(5); | ||
68 | |||
69 | if (tries == 0) | ||
70 | return -ETIMEDOUT; | ||
71 | |||
72 | return 0; | ||
73 | } | ||
74 | |||
75 | static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_id *match) | ||
76 | { | ||
77 | struct device *dev = &of->dev; | ||
78 | struct device_node *np = of->node; | ||
79 | struct device_node *child = NULL; | ||
80 | struct mii_bus *bus; | ||
81 | struct mpc52xx_fec_mdio_priv *priv; | ||
82 | struct resource res = {}; | ||
83 | int err; | ||
84 | int i; | ||
85 | |||
86 | bus = kzalloc(sizeof(*bus), GFP_KERNEL); | ||
87 | if (bus == NULL) | ||
88 | return -ENOMEM; | ||
89 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
90 | if (priv == NULL) { | ||
91 | err = -ENOMEM; | ||
92 | goto out_free; | ||
93 | } | ||
94 | |||
95 | bus->name = "mpc52xx MII bus"; | ||
96 | bus->read = mpc52xx_fec_mdio_read; | ||
97 | bus->write = mpc52xx_fec_mdio_write; | ||
98 | |||
99 | /* setup irqs */ | ||
100 | bus->irq = kmalloc(sizeof(bus->irq[0]) * PHY_MAX_ADDR, GFP_KERNEL); | ||
101 | if (bus->irq == NULL) { | ||
102 | err = -ENOMEM; | ||
103 | goto out_free; | ||
104 | } | ||
105 | for (i=0; i<PHY_MAX_ADDR; i++) | ||
106 | bus->irq[i] = PHY_POLL; | ||
107 | |||
108 | while ((child = of_get_next_child(np, child)) != NULL) { | ||
109 | int irq = irq_of_parse_and_map(child, 0); | ||
110 | if (irq != NO_IRQ) { | ||
111 | const u32 *id = of_get_property(child, "reg", NULL); | ||
112 | bus->irq[*id] = irq; | ||
113 | } | ||
114 | } | ||
115 | |||
116 | /* setup registers */ | ||
117 | err = of_address_to_resource(np, 0, &res); | ||
118 | if (err) | ||
119 | goto out_free; | ||
120 | priv->regs = ioremap(res.start, res.end - res.start + 1); | ||
121 | if (priv->regs == NULL) { | ||
122 | err = -ENOMEM; | ||
123 | goto out_free; | ||
124 | } | ||
125 | |||
126 | bus->id = res.start; | ||
127 | bus->priv = priv; | ||
128 | |||
129 | bus->dev = dev; | ||
130 | dev_set_drvdata(dev, bus); | ||
131 | |||
132 | /* set MII speed */ | ||
133 | out_be32(&priv->regs->mii_speed, ((mpc52xx_find_ipb_freq(of->node) >> 20) / 5) << 1); | ||
134 | |||
135 | /* enable MII interrupt */ | ||
136 | out_be32(&priv->regs->imask, in_be32(&priv->regs->imask) | FEC_IMASK_MII); | ||
137 | |||
138 | err = mdiobus_register(bus); | ||
139 | if (err) | ||
140 | goto out_unmap; | ||
141 | |||
142 | return 0; | ||
143 | |||
144 | out_unmap: | ||
145 | iounmap(priv->regs); | ||
146 | out_free: | ||
147 | for (i=0; i<PHY_MAX_ADDR; i++) | ||
148 | if (bus->irq[i] != PHY_POLL) | ||
149 | irq_dispose_mapping(bus->irq[i]); | ||
150 | kfree(bus->irq); | ||
151 | kfree(priv); | ||
152 | kfree(bus); | ||
153 | |||
154 | return err; | ||
155 | } | ||
156 | |||
157 | static int mpc52xx_fec_mdio_remove(struct of_device *of) | ||
158 | { | ||
159 | struct device *dev = &of->dev; | ||
160 | struct mii_bus *bus = dev_get_drvdata(dev); | ||
161 | struct mpc52xx_fec_mdio_priv *priv = bus->priv; | ||
162 | int i; | ||
163 | |||
164 | mdiobus_unregister(bus); | ||
165 | dev_set_drvdata(dev, NULL); | ||
166 | |||
167 | iounmap(priv->regs); | ||
168 | for (i=0; i<PHY_MAX_ADDR; i++) | ||
169 | if (bus->irq[i]) | ||
170 | irq_dispose_mapping(bus->irq[i]); | ||
171 | kfree(priv); | ||
172 | kfree(bus->irq); | ||
173 | kfree(bus); | ||
174 | |||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | |||
179 | static struct of_device_id mpc52xx_fec_mdio_match[] = { | ||
180 | { | ||
181 | .type = "mdio", | ||
182 | .compatible = "mpc5200b-fec-phy", | ||
183 | }, | ||
184 | {}, | ||
185 | }; | ||
186 | |||
187 | struct of_platform_driver mpc52xx_fec_mdio_driver = { | ||
188 | .name = "mpc5200b-fec-phy", | ||
189 | .probe = mpc52xx_fec_mdio_probe, | ||
190 | .remove = mpc52xx_fec_mdio_remove, | ||
191 | .match_table = mpc52xx_fec_mdio_match, | ||
192 | }; | ||
193 | |||
194 | /* exported so the fec driver can register this bus driver, which must be registered before the fec driver itself */ | ||
195 | EXPORT_SYMBOL_GPL(mpc52xx_fec_mdio_driver); | ||
196 | |||
197 | |||
198 | MODULE_LICENSE("Dual BSD/GPL"); | ||
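The mii_speed setting in mpc52xx_fec_mdio_probe above is a small piece of arithmetic worth unpacking; the reading below is an interpretation, not something the patch states. The >>20 is a cheap approximation of dividing the IPB bus frequency by 10^6, the /5 picks a divider aimed at an MDC clock in the low-MHz range the MII management interface allows, and the <<1 places the value in the register field, which starts at bit 1. Worked through with an illustrative 132 MHz IPB clock:

/* illustrative numbers only; priv->regs is assumed in scope as in the probe */
u32 ipb_freq = 132000000;
u32 mhz    = ipb_freq >> 20;	/* 125: roughly the frequency in MHz */
u32 div    = mhz / 5;		/* 25 */
u32 regval = div << 1;		/* 50: the divider shifted into the field above bit 0 */

out_be32(&priv->regs->mii_speed, regval);

Assuming the usual FEC behaviour of MDC = ipb_clk / (2 * field value) — which should be checked against the MPC5200 reference manual — a field value of 25 at 132 MHz gives an MDC of about 2.6 MHz, close to the conventional 2.5 MHz limit.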
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 70ddf1acfd88..92ce2e38f0d5 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -5597,6 +5597,22 @@ static struct pci_device_id pci_tbl[] = { | |||
5597 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31), | 5597 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31), |
5598 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, | 5598 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, |
5599 | }, | 5599 | }, |
5600 | { /* MCP77 Ethernet Controller */ | ||
5601 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32), | ||
5602 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | ||
5603 | }, | ||
5604 | { /* MCP77 Ethernet Controller */ | ||
5605 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33), | ||
5606 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | ||
5607 | }, | ||
5608 | { /* MCP77 Ethernet Controller */ | ||
5609 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34), | ||
5610 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | ||
5611 | }, | ||
5612 | { /* MCP77 Ethernet Controller */ | ||
5613 | PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), | ||
5614 | .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, | ||
5615 | }, | ||
5600 | {0,}, | 5616 | {0,}, |
5601 | }; | 5617 | }; |
5602 | 5618 | ||
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c index 68887235d7e9..dbd23bb65d1e 100644 --- a/drivers/net/ipg.c +++ b/drivers/net/ipg.c | |||
@@ -55,6 +55,26 @@ MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver " | |||
55 | DrvVer); | 55 | DrvVer); |
56 | MODULE_LICENSE("GPL"); | 56 | MODULE_LICENSE("GPL"); |
57 | 57 | ||
58 | //variable record -- index by leading revision/length | ||
59 | //Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN | ||
60 | static unsigned short DefaultPhyParam[] = { | ||
61 | // 11/12/03 IP1000A v1-3 rev=0x40 | ||
62 | /*-------------------------------------------------------------------------- | ||
63 | (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2, | ||
64 | 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6, | ||
65 | 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700, | ||
66 | --------------------------------------------------------------------------*/ | ||
67 | // 12/17/03 IP1000A v1-4 rev=0x40 | ||
68 | (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, | ||
69 | 0x0000, | ||
70 | 30, 0x005e, 9, 0x0700, | ||
71 | // 01/09/04 IP1000A v1-5 rev=0x41 | ||
72 | (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, | ||
73 | 0x0000, | ||
74 | 30, 0x005e, 9, 0x0700, | ||
75 | 0x0000 | ||
76 | }; | ||
77 | |||
58 | static const char *ipg_brand_name[] = { | 78 | static const char *ipg_brand_name[] = { |
59 | "IC PLUS IP1000 1000/100/10 based NIC", | 79 | "IC PLUS IP1000 1000/100/10 based NIC", |
60 | "Sundance Technology ST2021 based NIC", | 80 | "Sundance Technology ST2021 based NIC", |
@@ -990,7 +1010,7 @@ static void ipg_nic_txcleanup(struct net_device *dev) | |||
990 | } | 1010 | } |
991 | 1011 | ||
992 | /* Provides statistical information about the IPG NIC. */ | 1012 | /* Provides statistical information about the IPG NIC. */ |
993 | struct net_device_stats *ipg_nic_get_stats(struct net_device *dev) | 1013 | static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev) |
994 | { | 1014 | { |
995 | struct ipg_nic_private *sp = netdev_priv(dev); | 1015 | struct ipg_nic_private *sp = netdev_priv(dev); |
996 | void __iomem *ioaddr = sp->ioaddr; | 1016 | void __iomem *ioaddr = sp->ioaddr; |
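The DefaultPhyParam table moved into ipg.c above is a packed record: per its comment, the leading halfword carries the PHY revision in the high byte and the record length (N*4 bytes, i.e. N address/data pairs of two 16-bit words each) in the low byte, the pairs follow, and a 0x0000 header terminates the list. On that reading — an interpretation of the comment rather than anything stated elsewhere in the driver — a walker over the table could look like the sketch below; write_reg() is a placeholder for whatever MII write helper the driver actually uses.

/* sketch: apply the entries whose revision matches, stop at the 0x0000 header */
static void apply_default_phy_params(const unsigned short *p, unsigned char rev,
				     void (*write_reg)(int addr, unsigned short data))
{
	while (*p) {					/* 0x0000 header ends the table */
		unsigned char rec_rev = *p >> 8;	/* revision in the high byte */
		unsigned int npairs = (*p & 0xff) / 4;	/* length = N*4 bytes -> N pairs */
		unsigned int i;

		p++;
		for (i = 0; i < npairs; i++, p += 2)
			if (rec_rev == rev)
				write_reg(p[0], p[1]);	/* address, data */
	}
}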
diff --git a/drivers/net/ipg.h b/drivers/net/ipg.h index e418b9035cac..d5d092c9d0af 100644 --- a/drivers/net/ipg.h +++ b/drivers/net/ipg.h | |||
@@ -833,24 +833,4 @@ struct ipg_nic_private { | |||
833 | struct delayed_work task; | 833 | struct delayed_work task; |
834 | }; | 834 | }; |
835 | 835 | ||
836 | //variable record -- index by leading revision/length | ||
837 | //Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN | ||
838 | unsigned short DefaultPhyParam[] = { | ||
839 | // 11/12/03 IP1000A v1-3 rev=0x40 | ||
840 | /*-------------------------------------------------------------------------- | ||
841 | (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2, | ||
842 | 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6, | ||
843 | 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700, | ||
844 | --------------------------------------------------------------------------*/ | ||
845 | // 12/17/03 IP1000A v1-4 rev=0x40 | ||
846 | (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, | ||
847 | 0x0000, | ||
848 | 30, 0x005e, 9, 0x0700, | ||
849 | // 01/09/04 IP1000A v1-5 rev=0x41 | ||
850 | (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, | ||
851 | 0x0000, | ||
852 | 30, 0x005e, 9, 0x0700, | ||
853 | 0x0000 | ||
854 | }; | ||
855 | |||
856 | #endif /* __LINUX_IPG_H */ | 836 | #endif /* __LINUX_IPG_H */ |
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c index 4dbdfaaf37bf..a1e4508717c8 100644 --- a/drivers/net/irda/au1k_ir.c +++ b/drivers/net/irda/au1k_ir.c | |||
@@ -627,19 +627,16 @@ static int au1k_irda_rx(struct net_device *dev) | |||
627 | } | 627 | } |
628 | 628 | ||
629 | 629 | ||
630 | void au1k_irda_interrupt(int irq, void *dev_id) | 630 | static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id) |
631 | { | 631 | { |
632 | struct net_device *dev = (struct net_device *) dev_id; | 632 | struct net_device *dev = dev_id; |
633 | |||
634 | if (dev == NULL) { | ||
635 | printk(KERN_ERR "%s: isr: null dev ptr\n", dev->name); | ||
636 | return; | ||
637 | } | ||
638 | 633 | ||
639 | writel(0, IR_INT_CLEAR); /* ack irda interrupts */ | 634 | writel(0, IR_INT_CLEAR); /* ack irda interrupts */ |
640 | 635 | ||
641 | au1k_irda_rx(dev); | 636 | au1k_irda_rx(dev); |
642 | au1k_tx_ack(dev); | 637 | au1k_tx_ack(dev); |
638 | |||
639 | return IRQ_HANDLED; | ||
643 | } | 640 | } |
644 | 641 | ||
645 | 642 | ||
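The au1k_ir change above brings the handler in line with the prototype the IRQ core expects: handlers take (int irq, void *dev_id), return an irqreturn_t, and receive the dev_id cookie they themselves passed to request_irq(), which is why the defensive NULL check could go. A minimal sketch of the pattern, with hypothetical names and the body elided:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

static irqreturn_t mydev_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;	/* the cookie given to request_irq() */

	/* acknowledge the hardware and do the per-device work here */
	(void)dev;

	return IRQ_HANDLED;			/* IRQ_NONE if the line was not ours */
}

/* registration: the last argument is what comes back as dev_id above, e.g.
 *	err = request_irq(dev->irq, mydev_interrupt, IRQF_SHARED, dev->name, dev);
 */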
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 662b8d16803c..45f30a2974b8 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c | |||
@@ -284,7 +284,7 @@ static __net_exit void loopback_net_exit(struct net *net) | |||
284 | unregister_netdev(dev); | 284 | unregister_netdev(dev); |
285 | } | 285 | } |
286 | 286 | ||
287 | static struct pernet_operations __net_initdata loopback_net_ops = { | 287 | static struct pernet_operations loopback_net_ops = { |
288 | .init = loopback_net_init, | 288 | .init = loopback_net_init, |
289 | .exit = loopback_net_exit, | 289 | .exit = loopback_net_exit, |
290 | }; | 290 | }; |
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c index 887633b207d9..2a5bef6388fe 100644 --- a/drivers/net/mlx4/icm.c +++ b/drivers/net/mlx4/icm.c | |||
@@ -101,9 +101,7 @@ static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_ma | |||
101 | if (!page) | 101 | if (!page) |
102 | return -ENOMEM; | 102 | return -ENOMEM; |
103 | 103 | ||
104 | sg_set_page(mem, page); | 104 | sg_set_page(mem, page, PAGE_SIZE << order, 0); |
105 | mem->length = PAGE_SIZE << order; | ||
106 | mem->offset = 0; | ||
107 | return 0; | 105 | return 0; |
108 | } | 106 | } |
109 | 107 | ||
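The mlx4 hunk tracks the scatterlist API after the sg-chaining rework: sg_set_page() now records page, length and offset in one call, so the open-coded assignments to mem->length and mem->offset go away. A hedged sketch of the allocation path as it now reads (the name, gfp_mask and order simply mirror the surrounding function):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

static int alloc_icm_pages_sketch(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
	struct page *page = alloc_pages(gfp_mask, order);

	if (!page)
		return -ENOMEM;

	/* one call fills in page, length and offset for this entry */
	sg_set_page(mem, page, PAGE_SIZE << order, 0);
	return 0;
}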
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index 953117152bbd..87cde062fd63 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c | |||
@@ -864,6 +864,7 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, | |||
864 | 864 | ||
865 | np = netdev_priv(dev); | 865 | np = netdev_priv(dev); |
866 | netif_napi_add(dev, &np->napi, natsemi_poll, 64); | 866 | netif_napi_add(dev, &np->napi, natsemi_poll, 64); |
867 | np->dev = dev; | ||
867 | 868 | ||
868 | np->pci_dev = pdev; | 869 | np->pci_dev = pdev; |
869 | pci_set_drvdata(pdev, dev); | 870 | pci_set_drvdata(pdev, dev); |
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c index 73dcbb7296da..ad134a61302a 100644 --- a/drivers/net/pcmcia/3c574_cs.c +++ b/drivers/net/pcmcia/3c574_cs.c | |||
@@ -274,7 +274,7 @@ static int tc574_probe(struct pcmcia_device *link) | |||
274 | spin_lock_init(&lp->window_lock); | 274 | spin_lock_init(&lp->window_lock); |
275 | link->io.NumPorts1 = 32; | 275 | link->io.NumPorts1 = 32; |
276 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; | 276 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; |
277 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; | 277 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; |
278 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | 278 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; |
279 | link->irq.Handler = &el3_interrupt; | 279 | link->irq.Handler = &el3_interrupt; |
280 | link->irq.Instance = dev; | 280 | link->irq.Instance = dev; |
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c index 32076ca6a9e1..a98fe07cce70 100644 --- a/drivers/net/pcmcia/3c589_cs.c +++ b/drivers/net/pcmcia/3c589_cs.c | |||
@@ -188,7 +188,7 @@ static int tc589_probe(struct pcmcia_device *link) | |||
188 | spin_lock_init(&lp->lock); | 188 | spin_lock_init(&lp->lock); |
189 | link->io.NumPorts1 = 16; | 189 | link->io.NumPorts1 = 16; |
190 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; | 190 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; |
191 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; | 191 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; |
192 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | 192 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; |
193 | link->irq.Handler = &el3_interrupt; | 193 | link->irq.Handler = &el3_interrupt; |
194 | link->irq.Instance = dev; | 194 | link->irq.Instance = dev; |
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index a95a2cae6b23..8d910a372f89 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c | |||
@@ -158,7 +158,7 @@ static int axnet_probe(struct pcmcia_device *link) | |||
158 | info = PRIV(dev); | 158 | info = PRIV(dev); |
159 | info->p_dev = link; | 159 | info->p_dev = link; |
160 | link->priv = dev; | 160 | link->priv = dev; |
161 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; | 161 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
162 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | 162 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; |
163 | link->conf.Attributes = CONF_ENABLE_IRQ; | 163 | link->conf.Attributes = CONF_ENABLE_IRQ; |
164 | link->conf.IntType = INT_MEMORY_AND_IO; | 164 | link->conf.IntType = INT_MEMORY_AND_IO; |
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 62844677c784..8c719b4df544 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c | |||
@@ -249,7 +249,7 @@ static int fmvj18x_probe(struct pcmcia_device *link) | |||
249 | link->io.IOAddrLines = 5; | 249 | link->io.IOAddrLines = 5; |
250 | 250 | ||
251 | /* Interrupt setup */ | 251 | /* Interrupt setup */ |
252 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; | 252 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; |
253 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | 253 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; |
254 | link->irq.Handler = &fjn_interrupt; | 254 | link->irq.Handler = &fjn_interrupt; |
255 | link->irq.Instance = dev; | 255 | link->irq.Instance = dev; |
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 9d45e9696e16..db6a97d1d7b1 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c | |||
@@ -254,7 +254,7 @@ static int pcnet_probe(struct pcmcia_device *link) | |||
254 | info->p_dev = link; | 254 | info->p_dev = link; |
255 | link->priv = dev; | 255 | link->priv = dev; |
256 | 256 | ||
257 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; | 257 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
258 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | 258 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; |
259 | link->conf.Attributes = CONF_ENABLE_IRQ; | 259 | link->conf.Attributes = CONF_ENABLE_IRQ; |
260 | link->conf.IntType = INT_MEMORY_AND_IO; | 260 | link->conf.IntType = INT_MEMORY_AND_IO; |
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index 58d716fd17cf..c9868e9dac4c 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c | |||
@@ -328,7 +328,7 @@ static int smc91c92_probe(struct pcmcia_device *link) | |||
328 | link->io.NumPorts1 = 16; | 328 | link->io.NumPorts1 = 16; |
329 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | 329 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; |
330 | link->io.IOAddrLines = 4; | 330 | link->io.IOAddrLines = 4; |
331 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; | 331 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; |
332 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | 332 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; |
333 | link->irq.Handler = &smc_interrupt; | 333 | link->irq.Handler = &smc_interrupt; |
334 | link->irq.Instance = dev; | 334 | link->irq.Instance = dev; |
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index c3b69602e275..1f09bea6db5a 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c | |||
@@ -886,7 +886,7 @@ xirc2ps_config(struct pcmcia_device * link) | |||
886 | } | 886 | } |
887 | printk(KNOT_XIRC "no ports available\n"); | 887 | printk(KNOT_XIRC "no ports available\n"); |
888 | } else { | 888 | } else { |
889 | link->irq.Attributes |= IRQ_TYPE_EXCLUSIVE; | 889 | link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING; |
890 | link->io.NumPorts1 = 16; | 890 | link->io.NumPorts1 = 16; |
891 | for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) { | 891 | for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) { |
892 | link->io.BasePort1 = ioaddr; | 892 | link->io.BasePort1 = ioaddr; |
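The preceding PCMCIA hunks (3c574_cs, 3c589_cs, axnet_cs, fmvj18x_cs, pcnet_cs, smc91c92_cs, xirc2ps_cs) all make the same swap in link->irq.Attributes. One reading, offered as interpretation rather than something the patch states, is that IRQ_TYPE_DYNAMIC_SHARING lets the PCMCIA core hand the card an interrupt line that is already shared instead of insisting on an exclusive one, which matters on machines where the socket's IRQ is shared. The rest of the IRQ setup stays as it was, e.g.:

link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
link->irq.IRQInfo1   = IRQ_LEVEL_ID;
link->irq.Handler    = &el3_interrupt;	/* per-driver handler, as before */
link->irq.Instance   = dev;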
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c index bcb0885011c8..b35d79449500 100644 --- a/drivers/net/ppp_mppe.c +++ b/drivers/net/ppp_mppe.c | |||
@@ -68,7 +68,7 @@ MODULE_VERSION("1.0.2"); | |||
68 | static unsigned int | 68 | static unsigned int |
69 | setup_sg(struct scatterlist *sg, const void *address, unsigned int length) | 69 | setup_sg(struct scatterlist *sg, const void *address, unsigned int length) |
70 | { | 70 | { |
71 | sg_init_one(sg, address, length); | 71 | sg_set_buf(sg, address, length); |
72 | return length; | 72 | return length; |
73 | } | 73 | } |
74 | 74 | ||
@@ -140,6 +140,8 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state) | |||
140 | struct scatterlist sg[4]; | 140 | struct scatterlist sg[4]; |
141 | unsigned int nbytes; | 141 | unsigned int nbytes; |
142 | 142 | ||
143 | sg_init_table(sg, 4); | ||
144 | |||
143 | nbytes = setup_sg(&sg[0], state->master_key, state->keylen); | 145 | nbytes = setup_sg(&sg[0], state->master_key, state->keylen); |
144 | nbytes += setup_sg(&sg[1], sha_pad->sha_pad1, | 146 | nbytes += setup_sg(&sg[1], sha_pad->sha_pad1, |
145 | sizeof(sha_pad->sha_pad1)); | 147 | sizeof(sha_pad->sha_pad1)); |
@@ -166,6 +168,8 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) | |||
166 | if (!initial_key) { | 168 | if (!initial_key) { |
167 | crypto_blkcipher_setkey(state->arc4, state->sha1_digest, | 169 | crypto_blkcipher_setkey(state->arc4, state->sha1_digest, |
168 | state->keylen); | 170 | state->keylen); |
171 | sg_init_table(sg_in, 1); | ||
172 | sg_init_table(sg_out, 1); | ||
169 | setup_sg(sg_in, state->sha1_digest, state->keylen); | 173 | setup_sg(sg_in, state->sha1_digest, state->keylen); |
170 | setup_sg(sg_out, state->session_key, state->keylen); | 174 | setup_sg(sg_out, state->session_key, state->keylen); |
171 | if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, | 175 | if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, |
@@ -421,6 +425,8 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf, | |||
421 | isize -= 2; | 425 | isize -= 2; |
422 | 426 | ||
423 | /* Encrypt packet */ | 427 | /* Encrypt packet */ |
428 | sg_init_table(sg_in, 1); | ||
429 | sg_init_table(sg_out, 1); | ||
424 | setup_sg(sg_in, ibuf, isize); | 430 | setup_sg(sg_in, ibuf, isize); |
425 | setup_sg(sg_out, obuf, osize); | 431 | setup_sg(sg_out, obuf, osize); |
426 | if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) { | 432 | if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) { |
@@ -608,6 +614,8 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, | |||
608 | * Decrypt the first byte in order to check if it is | 614 | * Decrypt the first byte in order to check if it is |
609 | * a compressed or uncompressed protocol field. | 615 | * a compressed or uncompressed protocol field. |
610 | */ | 616 | */ |
617 | sg_init_table(sg_in, 1); | ||
618 | sg_init_table(sg_out, 1); | ||
611 | setup_sg(sg_in, ibuf, 1); | 619 | setup_sg(sg_in, ibuf, 1); |
612 | setup_sg(sg_out, obuf, 1); | 620 | setup_sg(sg_out, obuf, 1); |
613 | if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) { | 621 | if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) { |
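The ppp_mppe hunks add sg_init_table() before every use of the on-stack scatterlists. After the chained-scatterlist rework the entries must be initialised, and the last entry marked as the end of the list, before the crypto layer walks it. A sketch of the pattern for a stack-allocated list; the buffer names mirror the driver but stand in for any buffer/length pairs:

#include <linux/kernel.h>
#include <linux/scatterlist.h>

struct scatterlist sg[4];

sg_init_table(sg, ARRAY_SIZE(sg));	/* zero the entries and mark sg[3] as the last one */
sg_set_buf(&sg[0], master_key, keylen);
sg_set_buf(&sg[1], sha_pad1, sizeof(sha_pad1));
/* ...remaining entries, then hand sg to the crypto API as before... */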
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index e8960f294a6e..b94fa7ef1955 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -392,7 +392,9 @@ struct rtl8169_private { | |||
392 | void __iomem *mmio_addr; /* memory map physical address */ | 392 | void __iomem *mmio_addr; /* memory map physical address */ |
393 | struct pci_dev *pci_dev; /* Index of PCI device */ | 393 | struct pci_dev *pci_dev; /* Index of PCI device */ |
394 | struct net_device *dev; | 394 | struct net_device *dev; |
395 | #ifdef CONFIG_R8169_NAPI | ||
395 | struct napi_struct napi; | 396 | struct napi_struct napi; |
397 | #endif | ||
396 | spinlock_t lock; /* spin lock flag */ | 398 | spinlock_t lock; /* spin lock flag */ |
397 | u32 msg_enable; | 399 | u32 msg_enable; |
398 | int chipset; | 400 | int chipset; |
@@ -2989,13 +2991,16 @@ static void rtl8169_down(struct net_device *dev) | |||
2989 | { | 2991 | { |
2990 | struct rtl8169_private *tp = netdev_priv(dev); | 2992 | struct rtl8169_private *tp = netdev_priv(dev); |
2991 | void __iomem *ioaddr = tp->mmio_addr; | 2993 | void __iomem *ioaddr = tp->mmio_addr; |
2992 | unsigned int poll_locked = 0; | ||
2993 | unsigned int intrmask; | 2994 | unsigned int intrmask; |
2994 | 2995 | ||
2995 | rtl8169_delete_timer(dev); | 2996 | rtl8169_delete_timer(dev); |
2996 | 2997 | ||
2997 | netif_stop_queue(dev); | 2998 | netif_stop_queue(dev); |
2998 | 2999 | ||
3000 | #ifdef CONFIG_R8169_NAPI | ||
3001 | napi_disable(&tp->napi); | ||
3002 | #endif | ||
3003 | |||
2999 | core_down: | 3004 | core_down: |
3000 | spin_lock_irq(&tp->lock); | 3005 | spin_lock_irq(&tp->lock); |
3001 | 3006 | ||
@@ -3009,11 +3014,6 @@ core_down: | |||
3009 | 3014 | ||
3010 | synchronize_irq(dev->irq); | 3015 | synchronize_irq(dev->irq); |
3011 | 3016 | ||
3012 | if (!poll_locked) { | ||
3013 | napi_disable(&tp->napi); | ||
3014 | poll_locked++; | ||
3015 | } | ||
3016 | |||
3017 | /* Give a racing hard_start_xmit a few cycles to complete. */ | 3017 | /* Give a racing hard_start_xmit a few cycles to complete. */ |
3018 | synchronize_sched(); /* FIXME: should this be synchronize_irq()? */ | 3018 | synchronize_sched(); /* FIXME: should this be synchronize_irq()? */ |
3019 | 3019 | ||
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c index 19152f54ef2b..b822859c8de3 100644 --- a/drivers/net/rrunner.c +++ b/drivers/net/rrunner.c | |||
@@ -79,12 +79,10 @@ static char version[] __devinitdata = "rrunner.c: v0.50 11/11/2002 Jes Sorensen | |||
79 | */ | 79 | */ |
80 | 80 | ||
81 | /* | 81 | /* |
82 | * These are checked at init time to see if they are at least 256KB | 82 | * sysctl_[wr]mem_max are checked at init time to see if they are at |
83 | * and increased to 256KB if they are not. This is done to avoid ending | 83 | * least 256KB and increased to 256KB if they are not. This is done to |
84 | * up with socket buffers smaller than the MTU size, | 84 | * avoid ending up with socket buffers smaller than the MTU size, |
85 | */ | 85 | */ |
86 | extern __u32 sysctl_wmem_max; | ||
87 | extern __u32 sysctl_rmem_max; | ||
88 | 86 | ||
89 | static int __devinit rr_init_one(struct pci_dev *pdev, | 87 | static int __devinit rr_init_one(struct pci_dev *pdev, |
90 | const struct pci_device_id *ent) | 88 | const struct pci_device_id *ent) |
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 9741d613ba6f..a3ff270593f1 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
@@ -2214,9 +2214,7 @@ static void ucc_geth_set_multi(struct net_device *dev) | |||
2214 | struct dev_mc_list *dmi; | 2214 | struct dev_mc_list *dmi; |
2215 | struct ucc_fast *uf_regs; | 2215 | struct ucc_fast *uf_regs; |
2216 | struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; | 2216 | struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; |
2217 | u8 tempaddr[6]; | 2217 | int i; |
2218 | u8 *mcptr, *tdptr; | ||
2219 | int i, j; | ||
2220 | 2218 | ||
2221 | ugeth = netdev_priv(dev); | 2219 | ugeth = netdev_priv(dev); |
2222 | 2220 | ||
@@ -2255,19 +2253,10 @@ static void ucc_geth_set_multi(struct net_device *dev) | |||
2255 | if (!(dmi->dmi_addr[0] & 1)) | 2253 | if (!(dmi->dmi_addr[0] & 1)) |
2256 | continue; | 2254 | continue; |
2257 | 2255 | ||
2258 | /* The address in dmi_addr is LSB first, | ||
2259 | * and taddr is MSB first. We have to | ||
2260 | * copy bytes MSB first from dmi_addr. | ||
2261 | */ | ||
2262 | mcptr = (u8 *) dmi->dmi_addr + 5; | ||
2263 | tdptr = (u8 *) tempaddr; | ||
2264 | for (j = 0; j < 6; j++) | ||
2265 | *tdptr++ = *mcptr--; | ||
2266 | |||
2267 | /* Ask CPM to run CRC and set bit in | 2256 | /* Ask CPM to run CRC and set bit in |
2268 | * filter mask. | 2257 | * filter mask. |
2269 | */ | 2258 | */ |
2270 | hw_add_addr_in_hash(ugeth, tempaddr); | 2259 | hw_add_addr_in_hash(ugeth, dmi->dmi_addr); |
2271 | } | 2260 | } |
2272 | } | 2261 | } |
2273 | } | 2262 | } |
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index cd991a0f75bb..1ebe3259be0d 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c | |||
@@ -512,11 +512,19 @@ static int rndis_bind(struct usbnet *dev, struct usb_interface *intf) | |||
512 | } | 512 | } |
513 | tmp = le32_to_cpu(u.init_c->max_transfer_size); | 513 | tmp = le32_to_cpu(u.init_c->max_transfer_size); |
514 | if (tmp < dev->hard_mtu) { | 514 | if (tmp < dev->hard_mtu) { |
515 | dev_err(&intf->dev, | 515 | if (tmp <= net->hard_header_len) { |
516 | "dev can't take %u byte packets (max %u)\n", | 516 | dev_err(&intf->dev, |
517 | dev->hard_mtu, tmp); | 517 | "dev can't take %u byte packets (max %u)\n", |
518 | retval = -EINVAL; | 518 | dev->hard_mtu, tmp); |
519 | goto fail_and_release; | 519 | retval = -EINVAL; |
520 | goto fail_and_release; | ||
521 | } | ||
522 | dev->hard_mtu = tmp; | ||
523 | net->mtu = dev->hard_mtu - net->hard_header_len; | ||
524 | dev_warn(&intf->dev, | ||
525 | "dev can't take %u byte packets (max %u), " | ||
526 | "adjusting MTU to %u\n", | ||
527 | dev->hard_mtu, tmp, net->mtu); | ||
520 | } | 528 | } |
521 | 529 | ||
522 | /* REVISIT: peripheral "alignment" request is ignored ... */ | 530 | /* REVISIT: peripheral "alignment" request is ignored ... */ |
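The rndis_host change converts a hard probe failure into an MTU adjustment: if the device's advertised max_transfer_size is smaller than the MTU the host assumed, the driver now shrinks hard_mtu and the network MTU to fit, and only still bails out when the limit cannot even hold a link-layer header. With purely illustrative numbers (real values depend on the device and on hard_header_len at this point in bind):

/* illustrative only */
tmp = 1536;			/* le32_to_cpu(u.init_c->max_transfer_size) */
if (tmp < dev->hard_mtu) {
	dev->hard_mtu = tmp;				/* accept the device's limit */
	net->mtu = dev->hard_mtu - net->hard_header_len;/* shrink the IP MTU to match */
}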
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 9d9ff76a9bc6..5058e60e5703 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c | |||
@@ -2391,7 +2391,7 @@ out_requeue: | |||
2391 | if (b43_debug(dev, B43_DBG_PWORK_FAST)) | 2391 | if (b43_debug(dev, B43_DBG_PWORK_FAST)) |
2392 | delay = msecs_to_jiffies(50); | 2392 | delay = msecs_to_jiffies(50); |
2393 | else | 2393 | else |
2394 | delay = round_jiffies(HZ * 15); | 2394 | delay = round_jiffies_relative(HZ * 15); |
2395 | queue_delayed_work(wl->hw->workqueue, &dev->periodic_work, delay); | 2395 | queue_delayed_work(wl->hw->workqueue, &dev->periodic_work, delay); |
2396 | out: | 2396 | out: |
2397 | mutex_unlock(&wl->mutex); | 2397 | mutex_unlock(&wl->mutex); |
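The b43 hunk above (and the b43legacy, ipw2100 and ipw2200 hunks that follow) all make the same correction: queue_delayed_work() takes a relative timeout, while round_jiffies() rounds an absolute jiffies value, so rounding a relative delay with it produces a meaningless number; round_jiffies_relative() is the variant meant for relative timeouts. The shape of the fix, with a placeholder workqueue:

/* the delay is relative (15 seconds from now), so use the _relative variant */
unsigned long delay = round_jiffies_relative(15 * HZ);

queue_delayed_work(wq, &dev->periodic_work, delay);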
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index d09479e816cd..f0e56dfc9ecf 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c | |||
@@ -2260,7 +2260,7 @@ out_requeue: | |||
2260 | if (b43legacy_debug(dev, B43legacy_DBG_PWORK_FAST)) | 2260 | if (b43legacy_debug(dev, B43legacy_DBG_PWORK_FAST)) |
2261 | delay = msecs_to_jiffies(50); | 2261 | delay = msecs_to_jiffies(50); |
2262 | else | 2262 | else |
2263 | delay = round_jiffies(HZ); | 2263 | delay = round_jiffies_relative(HZ); |
2264 | queue_delayed_work(dev->wl->hw->workqueue, | 2264 | queue_delayed_work(dev->wl->hw->workqueue, |
2265 | &dev->periodic_work, delay); | 2265 | &dev->periodic_work, delay); |
2266 | out: | 2266 | out: |
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c index a6c7904de282..8d53d08b9691 100644 --- a/drivers/net/wireless/ipw2100.c +++ b/drivers/net/wireless/ipw2100.c | |||
@@ -1769,7 +1769,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred) | |||
1769 | if (priv->stop_rf_kill) { | 1769 | if (priv->stop_rf_kill) { |
1770 | priv->stop_rf_kill = 0; | 1770 | priv->stop_rf_kill = 0; |
1771 | queue_delayed_work(priv->workqueue, &priv->rf_kill, | 1771 | queue_delayed_work(priv->workqueue, &priv->rf_kill, |
1772 | round_jiffies(HZ)); | 1772 | round_jiffies_relative(HZ)); |
1773 | } | 1773 | } |
1774 | 1774 | ||
1775 | deferred = 1; | 1775 | deferred = 1; |
@@ -2086,7 +2086,8 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status) | |||
2086 | /* Make sure the RF Kill check timer is running */ | 2086 | /* Make sure the RF Kill check timer is running */ |
2087 | priv->stop_rf_kill = 0; | 2087 | priv->stop_rf_kill = 0; |
2088 | cancel_delayed_work(&priv->rf_kill); | 2088 | cancel_delayed_work(&priv->rf_kill); |
2089 | queue_delayed_work(priv->workqueue, &priv->rf_kill, round_jiffies(HZ)); | 2089 | queue_delayed_work(priv->workqueue, &priv->rf_kill, |
2090 | round_jiffies_relative(HZ)); | ||
2090 | } | 2091 | } |
2091 | 2092 | ||
2092 | static void send_scan_event(void *data) | 2093 | static void send_scan_event(void *data) |
@@ -2123,7 +2124,7 @@ static void isr_scan_complete(struct ipw2100_priv *priv, u32 status) | |||
2123 | if (!delayed_work_pending(&priv->scan_event_later)) | 2124 | if (!delayed_work_pending(&priv->scan_event_later)) |
2124 | queue_delayed_work(priv->workqueue, | 2125 | queue_delayed_work(priv->workqueue, |
2125 | &priv->scan_event_later, | 2126 | &priv->scan_event_later, |
2126 | round_jiffies(msecs_to_jiffies(4000))); | 2127 | round_jiffies_relative(msecs_to_jiffies(4000))); |
2127 | } else { | 2128 | } else { |
2128 | priv->user_requested_scan = 0; | 2129 | priv->user_requested_scan = 0; |
2129 | cancel_delayed_work(&priv->scan_event_later); | 2130 | cancel_delayed_work(&priv->scan_event_later); |
@@ -4242,7 +4243,7 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio) | |||
4242 | priv->stop_rf_kill = 0; | 4243 | priv->stop_rf_kill = 0; |
4243 | cancel_delayed_work(&priv->rf_kill); | 4244 | cancel_delayed_work(&priv->rf_kill); |
4244 | queue_delayed_work(priv->workqueue, &priv->rf_kill, | 4245 | queue_delayed_work(priv->workqueue, &priv->rf_kill, |
4245 | round_jiffies(HZ)); | 4246 | round_jiffies_relative(HZ)); |
4246 | } else | 4247 | } else |
4247 | schedule_reset(priv); | 4248 | schedule_reset(priv); |
4248 | } | 4249 | } |
@@ -5981,7 +5982,7 @@ static void ipw2100_rf_kill(struct work_struct *work) | |||
5981 | IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n"); | 5982 | IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n"); |
5982 | if (!priv->stop_rf_kill) | 5983 | if (!priv->stop_rf_kill) |
5983 | queue_delayed_work(priv->workqueue, &priv->rf_kill, | 5984 | queue_delayed_work(priv->workqueue, &priv->rf_kill, |
5984 | round_jiffies(HZ)); | 5985 | round_jiffies_relative(HZ)); |
5985 | goto exit_unlock; | 5986 | goto exit_unlock; |
5986 | } | 5987 | } |
5987 | 5988 | ||
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index e3c828401b9a..54f44e5473c0 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c | |||
@@ -1753,7 +1753,7 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio) | |||
1753 | /* Make sure the RF_KILL check timer is running */ | 1753 | /* Make sure the RF_KILL check timer is running */ |
1754 | cancel_delayed_work(&priv->rf_kill); | 1754 | cancel_delayed_work(&priv->rf_kill); |
1755 | queue_delayed_work(priv->workqueue, &priv->rf_kill, | 1755 | queue_delayed_work(priv->workqueue, &priv->rf_kill, |
1756 | round_jiffies(2 * HZ)); | 1756 | round_jiffies_relative(2 * HZ)); |
1757 | } else | 1757 | } else |
1758 | queue_work(priv->workqueue, &priv->up); | 1758 | queue_work(priv->workqueue, &priv->up); |
1759 | } | 1759 | } |
@@ -4364,7 +4364,7 @@ static void handle_scan_event(struct ipw_priv *priv) | |||
4364 | if (!priv->user_requested_scan) { | 4364 | if (!priv->user_requested_scan) { |
4365 | if (!delayed_work_pending(&priv->scan_event)) | 4365 | if (!delayed_work_pending(&priv->scan_event)) |
4366 | queue_delayed_work(priv->workqueue, &priv->scan_event, | 4366 | queue_delayed_work(priv->workqueue, &priv->scan_event, |
4367 | round_jiffies(msecs_to_jiffies(4000))); | 4367 | round_jiffies_relative(msecs_to_jiffies(4000))); |
4368 | } else { | 4368 | } else { |
4369 | union iwreq_data wrqu; | 4369 | union iwreq_data wrqu; |
4370 | 4370 | ||
@@ -4728,7 +4728,7 @@ static void ipw_rx_notification(struct ipw_priv *priv, | |||
4728 | && priv->status & STATUS_ASSOCIATED) | 4728 | && priv->status & STATUS_ASSOCIATED) |
4729 | queue_delayed_work(priv->workqueue, | 4729 | queue_delayed_work(priv->workqueue, |
4730 | &priv->request_scan, | 4730 | &priv->request_scan, |
4731 | round_jiffies(HZ)); | 4731 | round_jiffies_relative(HZ)); |
4732 | 4732 | ||
4733 | /* Send an empty event to user space. | 4733 | /* Send an empty event to user space. |
4734 | * We don't send the received data on the event because | 4734 | * We don't send the received data on the event because |
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c index 557deebca1b9..891f90d2f019 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c | |||
@@ -3232,9 +3232,7 @@ int iwl4965_tx_cmd(struct iwl_priv *priv, struct iwl_cmd *out_cmd, | |||
3232 | tx->rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[rate_index].plcp, | 3232 | tx->rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[rate_index].plcp, |
3233 | rate_flags); | 3233 | rate_flags); |
3234 | 3234 | ||
3235 | if (ieee80211_is_probe_request(fc)) | 3235 | if (ieee80211_is_back_request(fc)) |
3236 | tx->tx_flags |= TX_CMD_FLG_TSF_MSK; | ||
3237 | else if (ieee80211_is_back_request(fc)) | ||
3238 | tx->tx_flags |= TX_CMD_FLG_ACK_MSK | | 3236 | tx->tx_flags |= TX_CMD_FLG_ACK_MSK | |
3239 | TX_CMD_FLG_IMM_BA_RSP_MASK; | 3237 | TX_CMD_FLG_IMM_BA_RSP_MASK; |
3240 | #ifdef CONFIG_IWLWIFI_HT | 3238 | #ifdef CONFIG_IWLWIFI_HT |
@@ -3872,7 +3870,7 @@ static void iwl4965_rx_reply_rx(struct iwl_priv *priv, | |||
3872 | */ | 3870 | */ |
3873 | case IEEE80211_STYPE_ASSOC_RESP: | 3871 | case IEEE80211_STYPE_ASSOC_RESP: |
3874 | case IEEE80211_STYPE_REASSOC_RESP: | 3872 | case IEEE80211_STYPE_REASSOC_RESP: |
3875 | if (network_packet && iwl_is_associated(priv)) { | 3873 | if (network_packet) { |
3876 | #ifdef CONFIG_IWLWIFI_HT | 3874 | #ifdef CONFIG_IWLWIFI_HT |
3877 | u8 *pos = NULL; | 3875 | u8 *pos = NULL; |
3878 | struct ieee802_11_elems elems; | 3876 | struct ieee802_11_elems elems; |
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 83019d1d7ccc..4f22a7174caf 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c | |||
@@ -6478,8 +6478,9 @@ static void iwl_bg_scan_check(struct work_struct *data) | |||
6478 | IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, | 6478 | IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, |
6479 | "Scan completion watchdog resetting adapter (%dms)\n", | 6479 | "Scan completion watchdog resetting adapter (%dms)\n", |
6480 | jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG)); | 6480 | jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG)); |
6481 | |||
6481 | if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) | 6482 | if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) |
6482 | queue_work(priv->workqueue, &priv->restart); | 6483 | iwl_send_scan_abort(priv); |
6483 | } | 6484 | } |
6484 | mutex_unlock(&priv->mutex); | 6485 | mutex_unlock(&priv->mutex); |
6485 | } | 6486 | } |
@@ -6575,7 +6576,7 @@ static void iwl_bg_request_scan(struct work_struct *data) | |||
6575 | spin_unlock_irqrestore(&priv->lock, flags); | 6576 | spin_unlock_irqrestore(&priv->lock, flags); |
6576 | 6577 | ||
6577 | scan->suspend_time = 0; | 6578 | scan->suspend_time = 0; |
6578 | scan->max_out_time = cpu_to_le32(600 * 1024); | 6579 | scan->max_out_time = cpu_to_le32(200 * 1024); |
6579 | if (!interval) | 6580 | if (!interval) |
6580 | interval = suspend_time; | 6581 | interval = suspend_time; |
6581 | /* | 6582 | /* |
@@ -6605,7 +6606,7 @@ static void iwl_bg_request_scan(struct work_struct *data) | |||
6605 | memcpy(scan->direct_scan[0].ssid, | 6606 | memcpy(scan->direct_scan[0].ssid, |
6606 | priv->direct_ssid, priv->direct_ssid_len); | 6607 | priv->direct_ssid, priv->direct_ssid_len); |
6607 | direct_mask = 1; | 6608 | direct_mask = 1; |
6608 | } else if (!iwl_is_associated(priv)) { | 6609 | } else if (!iwl_is_associated(priv) && priv->essid_len) { |
6609 | scan->direct_scan[0].id = WLAN_EID_SSID; | 6610 | scan->direct_scan[0].id = WLAN_EID_SSID; |
6610 | scan->direct_scan[0].len = priv->essid_len; | 6611 | scan->direct_scan[0].len = priv->essid_len; |
6611 | memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len); | 6612 | memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len); |
@@ -6744,6 +6745,12 @@ static void iwl_bg_post_associate(struct work_struct *data) | |||
6744 | 6745 | ||
6745 | mutex_lock(&priv->mutex); | 6746 | mutex_lock(&priv->mutex); |
6746 | 6747 | ||
6748 | if (!priv->interface_id || !priv->is_open) { | ||
6749 | mutex_unlock(&priv->mutex); | ||
6750 | return; | ||
6751 | } | ||
6752 | iwl_scan_cancel_timeout(priv, 200); | ||
6753 | |||
6747 | conf = ieee80211_get_hw_conf(priv->hw); | 6754 | conf = ieee80211_get_hw_conf(priv->hw); |
6748 | 6755 | ||
6749 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | 6756 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; |
@@ -6882,9 +6889,19 @@ static void iwl_mac_stop(struct ieee80211_hw *hw) | |||
6882 | struct iwl_priv *priv = hw->priv; | 6889 | struct iwl_priv *priv = hw->priv; |
6883 | 6890 | ||
6884 | IWL_DEBUG_MAC80211("enter\n"); | 6891 | IWL_DEBUG_MAC80211("enter\n"); |
6892 | |||
6893 | |||
6894 | mutex_lock(&priv->mutex); | ||
6895 | /* stop mac, cancel any scan request and clear | ||
6896 | * RXON_FILTER_ASSOC_MSK BIT | ||
6897 | */ | ||
6885 | priv->is_open = 0; | 6898 | priv->is_open = 0; |
6886 | /*netif_stop_queue(dev); */ | 6899 | iwl_scan_cancel_timeout(priv, 100); |
6887 | flush_workqueue(priv->workqueue); | 6900 | cancel_delayed_work(&priv->post_associate); |
6901 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
6902 | iwl_commit_rxon(priv); | ||
6903 | mutex_unlock(&priv->mutex); | ||
6904 | |||
6888 | IWL_DEBUG_MAC80211("leave\n"); | 6905 | IWL_DEBUG_MAC80211("leave\n"); |
6889 | } | 6906 | } |
6890 | 6907 | ||
@@ -7169,8 +7186,6 @@ static int iwl_mac_config_interface(struct ieee80211_hw *hw, int if_id, | |||
7169 | if (priv->iw_mode == IEEE80211_IF_TYPE_AP) | 7186 | if (priv->iw_mode == IEEE80211_IF_TYPE_AP) |
7170 | iwl_config_ap(priv); | 7187 | iwl_config_ap(priv); |
7171 | else { | 7188 | else { |
7172 | priv->staging_rxon.filter_flags |= | ||
7173 | RXON_FILTER_ASSOC_MSK; | ||
7174 | rc = iwl_commit_rxon(priv); | 7189 | rc = iwl_commit_rxon(priv); |
7175 | if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc) | 7190 | if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc) |
7176 | iwl_add_station(priv, | 7191 | iwl_add_station(priv, |
@@ -7178,6 +7193,7 @@ static int iwl_mac_config_interface(struct ieee80211_hw *hw, int if_id, | |||
7178 | } | 7193 | } |
7179 | 7194 | ||
7180 | } else { | 7195 | } else { |
7196 | iwl_scan_cancel_timeout(priv, 100); | ||
7181 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | 7197 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; |
7182 | iwl_commit_rxon(priv); | 7198 | iwl_commit_rxon(priv); |
7183 | } | 7199 | } |
@@ -7217,6 +7233,12 @@ static void iwl_mac_remove_interface(struct ieee80211_hw *hw, | |||
7217 | IWL_DEBUG_MAC80211("enter\n"); | 7233 | IWL_DEBUG_MAC80211("enter\n"); |
7218 | 7234 | ||
7219 | mutex_lock(&priv->mutex); | 7235 | mutex_lock(&priv->mutex); |
7236 | |||
7237 | iwl_scan_cancel_timeout(priv, 100); | ||
7238 | cancel_delayed_work(&priv->post_associate); | ||
7239 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
7240 | iwl_commit_rxon(priv); | ||
7241 | |||
7220 | if (priv->interface_id == conf->if_id) { | 7242 | if (priv->interface_id == conf->if_id) { |
7221 | priv->interface_id = 0; | 7243 | priv->interface_id = 0; |
7222 | memset(priv->bssid, 0, ETH_ALEN); | 7244 | memset(priv->bssid, 0, ETH_ALEN); |
@@ -7238,6 +7260,7 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) | |||
7238 | 7260 | ||
7239 | IWL_DEBUG_MAC80211("enter\n"); | 7261 | IWL_DEBUG_MAC80211("enter\n"); |
7240 | 7262 | ||
7263 | mutex_lock(&priv->mutex); | ||
7241 | spin_lock_irqsave(&priv->lock, flags); | 7264 | spin_lock_irqsave(&priv->lock, flags); |
7242 | 7265 | ||
7243 | if (!iwl_is_ready_rf(priv)) { | 7266 | if (!iwl_is_ready_rf(priv)) { |
@@ -7268,7 +7291,8 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) | |||
7268 | priv->direct_ssid_len = (u8) | 7291 | priv->direct_ssid_len = (u8) |
7269 | min((u8) len, (u8) IW_ESSID_MAX_SIZE); | 7292 | min((u8) len, (u8) IW_ESSID_MAX_SIZE); |
7270 | memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len); | 7293 | memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len); |
7271 | } | 7294 | } else |
7295 | priv->one_direct_scan = 0; | ||
7272 | 7296 | ||
7273 | rc = iwl_scan_initiate(priv); | 7297 | rc = iwl_scan_initiate(priv); |
7274 | 7298 | ||
@@ -7276,6 +7300,7 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) | |||
7276 | 7300 | ||
7277 | out_unlock: | 7301 | out_unlock: |
7278 | spin_unlock_irqrestore(&priv->lock, flags); | 7302 | spin_unlock_irqrestore(&priv->lock, flags); |
7303 | mutex_unlock(&priv->mutex); | ||
7279 | 7304 | ||
7280 | return rc; | 7305 | return rc; |
7281 | } | 7306 | } |
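Besides clearing one_direct_scan when no SSID is supplied, the hw_scan hunks wrap the existing spinlock-protected section in priv->mutex. A sketch of the resulting lock nesting, with the scan setup itself elided; take the sleeping mutex first, the spinlock second, and release in reverse order:

/* Sketch of the lock nesting added to iwl_mac_hw_scan. */
static int hw_scan_locking_sketch(struct iwl_priv *priv)
{
	unsigned long flags;
	int rc = 0;

	mutex_lock(&priv->mutex);		/* serialise against other mac80211 callbacks */
	spin_lock_irqsave(&priv->lock, flags);	/* protect the state checks and SSID copy */

	/* ... validate state, record the requested SSID, start the scan ... */

	spin_unlock_irqrestore(&priv->lock, flags);
	mutex_unlock(&priv->mutex);		/* unlock added by this patch */
	return rc;
}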
@@ -7310,6 +7335,8 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, | |||
7310 | 7335 | ||
7311 | mutex_lock(&priv->mutex); | 7336 | mutex_lock(&priv->mutex); |
7312 | 7337 | ||
7338 | iwl_scan_cancel_timeout(priv, 100); | ||
7339 | |||
7313 | switch (cmd) { | 7340 | switch (cmd) { |
7314 | case SET_KEY: | 7341 | case SET_KEY: |
7315 | rc = iwl_update_sta_key_info(priv, key, sta_id); | 7342 | rc = iwl_update_sta_key_info(priv, key, sta_id); |
@@ -7479,8 +7506,18 @@ static void iwl_mac_reset_tsf(struct ieee80211_hw *hw) | |||
7479 | 7506 | ||
7480 | spin_unlock_irqrestore(&priv->lock, flags); | 7507 | spin_unlock_irqrestore(&priv->lock, flags); |
7481 | 7508 | ||
7509 | /* we are restarting association process | ||
7510 | * clear RXON_FILTER_ASSOC_MSK bit | ||
7511 | */ | ||
7512 | if (priv->iw_mode != IEEE80211_IF_TYPE_AP) { | ||
7513 | iwl_scan_cancel_timeout(priv, 100); | ||
7514 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
7515 | iwl_commit_rxon(priv); | ||
7516 | } | ||
7517 | |||
7482 | /* Per mac80211.h: This is only used in IBSS mode... */ | 7518 | /* Per mac80211.h: This is only used in IBSS mode... */ |
7483 | if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { | 7519 | if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { |
7520 | |||
7484 | IWL_DEBUG_MAC80211("leave - not in IBSS\n"); | 7521 | IWL_DEBUG_MAC80211("leave - not in IBSS\n"); |
7485 | mutex_unlock(&priv->mutex); | 7522 | mutex_unlock(&priv->mutex); |
7486 | return; | 7523 | return; |
@@ -8558,6 +8595,9 @@ static void iwl_pci_remove(struct pci_dev *pdev) | |||
8558 | iwl_rate_control_unregister(priv->hw); | 8595 | iwl_rate_control_unregister(priv->hw); |
8559 | } | 8596 | } |
8560 | 8597 | ||
8598 | /*netif_stop_queue(dev); */ | ||
8599 | flush_workqueue(priv->workqueue); | ||
8600 | |||
8561 | /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes | 8601 | /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes |
8562 | * priv->workqueue... so we can't take down the workqueue | 8602 | * priv->workqueue... so we can't take down the workqueue |
8563 | * until now... */ | 8603 | * until now... */ |
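The same three-step sequence — cancel any pending scan with a bounded wait, clear RXON_FILTER_ASSOC_MSK in the staging RXON, and commit it — is now open-coded in iwl_mac_stop, iwl_mac_config_interface, iwl_mac_remove_interface and iwl_mac_reset_tsf above (and mirrored in the 4965 driver below). A hedged sketch of what a shared helper for that sequence would look like; the helper name is invented here, the three calls are exactly the ones the patch uses:

/* Illustrative only: the patch repeats this sequence in several mac80211
 * callbacks.  Caller must hold priv->mutex. */
static void iwl_sketch_disassociate(struct iwl_priv *priv, int scan_wait_ms)
{
	/* Wait up to scan_wait_ms for a pending scan to cancel... */
	iwl_scan_cancel_timeout(priv, scan_wait_ms);
	/* ...then drop the association bit and push the new RXON to firmware. */
	priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	iwl_commit_rxon(priv);
}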
diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl4965-base.c index 5e1279263b22..d60adcb9bd4a 100644 --- a/drivers/net/wireless/iwlwifi/iwl4965-base.c +++ b/drivers/net/wireless/iwlwifi/iwl4965-base.c | |||
@@ -6845,8 +6845,9 @@ static void iwl_bg_scan_check(struct work_struct *data) | |||
6845 | IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, | 6845 | IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, |
6846 | "Scan completion watchdog resetting adapter (%dms)\n", | 6846 | "Scan completion watchdog resetting adapter (%dms)\n", |
6847 | jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG)); | 6847 | jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG)); |
6848 | |||
6848 | if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) | 6849 | if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) |
6849 | queue_work(priv->workqueue, &priv->restart); | 6850 | iwl_send_scan_abort(priv); |
6850 | } | 6851 | } |
6851 | mutex_unlock(&priv->mutex); | 6852 | mutex_unlock(&priv->mutex); |
6852 | } | 6853 | } |
@@ -6942,7 +6943,7 @@ static void iwl_bg_request_scan(struct work_struct *data) | |||
6942 | spin_unlock_irqrestore(&priv->lock, flags); | 6943 | spin_unlock_irqrestore(&priv->lock, flags); |
6943 | 6944 | ||
6944 | scan->suspend_time = 0; | 6945 | scan->suspend_time = 0; |
6945 | scan->max_out_time = cpu_to_le32(600 * 1024); | 6946 | scan->max_out_time = cpu_to_le32(200 * 1024); |
6946 | if (!interval) | 6947 | if (!interval) |
6947 | interval = suspend_time; | 6948 | interval = suspend_time; |
6948 | 6949 | ||
@@ -6965,7 +6966,7 @@ static void iwl_bg_request_scan(struct work_struct *data) | |||
6965 | memcpy(scan->direct_scan[0].ssid, | 6966 | memcpy(scan->direct_scan[0].ssid, |
6966 | priv->direct_ssid, priv->direct_ssid_len); | 6967 | priv->direct_ssid, priv->direct_ssid_len); |
6967 | direct_mask = 1; | 6968 | direct_mask = 1; |
6968 | } else if (!iwl_is_associated(priv)) { | 6969 | } else if (!iwl_is_associated(priv) && priv->essid_len) { |
6969 | scan->direct_scan[0].id = WLAN_EID_SSID; | 6970 | scan->direct_scan[0].id = WLAN_EID_SSID; |
6970 | scan->direct_scan[0].len = priv->essid_len; | 6971 | scan->direct_scan[0].len = priv->essid_len; |
6971 | memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len); | 6972 | memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len); |
@@ -7118,6 +7119,12 @@ static void iwl_bg_post_associate(struct work_struct *data) | |||
7118 | 7119 | ||
7119 | mutex_lock(&priv->mutex); | 7120 | mutex_lock(&priv->mutex); |
7120 | 7121 | ||
7122 | if (!priv->interface_id || !priv->is_open) { | ||
7123 | mutex_unlock(&priv->mutex); | ||
7124 | return; | ||
7125 | } | ||
7126 | iwl_scan_cancel_timeout(priv, 200); | ||
7127 | |||
7121 | conf = ieee80211_get_hw_conf(priv->hw); | 7128 | conf = ieee80211_get_hw_conf(priv->hw); |
7122 | 7129 | ||
7123 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | 7130 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; |
@@ -7271,9 +7278,19 @@ static void iwl_mac_stop(struct ieee80211_hw *hw) | |||
7271 | struct iwl_priv *priv = hw->priv; | 7278 | struct iwl_priv *priv = hw->priv; |
7272 | 7279 | ||
7273 | IWL_DEBUG_MAC80211("enter\n"); | 7280 | IWL_DEBUG_MAC80211("enter\n"); |
7281 | |||
7282 | |||
7283 | mutex_lock(&priv->mutex); | ||
7284 | /* stop mac, cancel any scan request and clear | ||
7285 | * RXON_FILTER_ASSOC_MSK BIT | ||
7286 | */ | ||
7274 | priv->is_open = 0; | 7287 | priv->is_open = 0; |
7275 | /*netif_stop_queue(dev); */ | 7288 | iwl_scan_cancel_timeout(priv, 100); |
7276 | flush_workqueue(priv->workqueue); | 7289 | cancel_delayed_work(&priv->post_associate); |
7290 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
7291 | iwl_commit_rxon(priv); | ||
7292 | mutex_unlock(&priv->mutex); | ||
7293 | |||
7277 | IWL_DEBUG_MAC80211("leave\n"); | 7294 | IWL_DEBUG_MAC80211("leave\n"); |
7278 | } | 7295 | } |
7279 | 7296 | ||
@@ -7573,8 +7590,6 @@ static int iwl_mac_config_interface(struct ieee80211_hw *hw, int if_id, | |||
7573 | if (priv->iw_mode == IEEE80211_IF_TYPE_AP) | 7590 | if (priv->iw_mode == IEEE80211_IF_TYPE_AP) |
7574 | iwl_config_ap(priv); | 7591 | iwl_config_ap(priv); |
7575 | else { | 7592 | else { |
7576 | priv->staging_rxon.filter_flags |= | ||
7577 | RXON_FILTER_ASSOC_MSK; | ||
7578 | rc = iwl_commit_rxon(priv); | 7593 | rc = iwl_commit_rxon(priv); |
7579 | if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc) | 7594 | if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc) |
7580 | iwl_rxon_add_station( | 7595 | iwl_rxon_add_station( |
@@ -7582,6 +7597,7 @@ static int iwl_mac_config_interface(struct ieee80211_hw *hw, int if_id, | |||
7582 | } | 7597 | } |
7583 | 7598 | ||
7584 | } else { | 7599 | } else { |
7600 | iwl_scan_cancel_timeout(priv, 100); | ||
7585 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | 7601 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; |
7586 | iwl_commit_rxon(priv); | 7602 | iwl_commit_rxon(priv); |
7587 | } | 7603 | } |
@@ -7621,6 +7637,12 @@ static void iwl_mac_remove_interface(struct ieee80211_hw *hw, | |||
7621 | IWL_DEBUG_MAC80211("enter\n"); | 7637 | IWL_DEBUG_MAC80211("enter\n"); |
7622 | 7638 | ||
7623 | mutex_lock(&priv->mutex); | 7639 | mutex_lock(&priv->mutex); |
7640 | |||
7641 | iwl_scan_cancel_timeout(priv, 100); | ||
7642 | cancel_delayed_work(&priv->post_associate); | ||
7643 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
7644 | iwl_commit_rxon(priv); | ||
7645 | |||
7624 | if (priv->interface_id == conf->if_id) { | 7646 | if (priv->interface_id == conf->if_id) { |
7625 | priv->interface_id = 0; | 7647 | priv->interface_id = 0; |
7626 | memset(priv->bssid, 0, ETH_ALEN); | 7648 | memset(priv->bssid, 0, ETH_ALEN); |
@@ -7642,6 +7664,7 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) | |||
7642 | 7664 | ||
7643 | IWL_DEBUG_MAC80211("enter\n"); | 7665 | IWL_DEBUG_MAC80211("enter\n"); |
7644 | 7666 | ||
7667 | mutex_lock(&priv->mutex); | ||
7645 | spin_lock_irqsave(&priv->lock, flags); | 7668 | spin_lock_irqsave(&priv->lock, flags); |
7646 | 7669 | ||
7647 | if (!iwl_is_ready_rf(priv)) { | 7670 | if (!iwl_is_ready_rf(priv)) { |
@@ -7672,7 +7695,8 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) | |||
7672 | priv->direct_ssid_len = (u8) | 7695 | priv->direct_ssid_len = (u8) |
7673 | min((u8) len, (u8) IW_ESSID_MAX_SIZE); | 7696 | min((u8) len, (u8) IW_ESSID_MAX_SIZE); |
7674 | memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len); | 7697 | memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len); |
7675 | } | 7698 | } else |
7699 | priv->one_direct_scan = 0; | ||
7676 | 7700 | ||
7677 | rc = iwl_scan_initiate(priv); | 7701 | rc = iwl_scan_initiate(priv); |
7678 | 7702 | ||
@@ -7680,6 +7704,7 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) | |||
7680 | 7704 | ||
7681 | out_unlock: | 7705 | out_unlock: |
7682 | spin_unlock_irqrestore(&priv->lock, flags); | 7706 | spin_unlock_irqrestore(&priv->lock, flags); |
7707 | mutex_unlock(&priv->mutex); | ||
7683 | 7708 | ||
7684 | return rc; | 7709 | return rc; |
7685 | } | 7710 | } |
@@ -7713,6 +7738,8 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, | |||
7713 | 7738 | ||
7714 | mutex_lock(&priv->mutex); | 7739 | mutex_lock(&priv->mutex); |
7715 | 7740 | ||
7741 | iwl_scan_cancel_timeout(priv, 100); | ||
7742 | |||
7716 | switch (cmd) { | 7743 | switch (cmd) { |
7717 | case SET_KEY: | 7744 | case SET_KEY: |
7718 | rc = iwl_update_sta_key_info(priv, key, sta_id); | 7745 | rc = iwl_update_sta_key_info(priv, key, sta_id); |
@@ -7903,8 +7930,18 @@ static void iwl_mac_reset_tsf(struct ieee80211_hw *hw) | |||
7903 | 7930 | ||
7904 | spin_unlock_irqrestore(&priv->lock, flags); | 7931 | spin_unlock_irqrestore(&priv->lock, flags); |
7905 | 7932 | ||
7933 | /* we are restarting association process | ||
7934 | * clear RXON_FILTER_ASSOC_MSK bit | ||
7935 | */ | ||
7936 | if (priv->iw_mode != IEEE80211_IF_TYPE_AP) { | ||
7937 | iwl_scan_cancel_timeout(priv, 100); | ||
7938 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
7939 | iwl_commit_rxon(priv); | ||
7940 | } | ||
7941 | |||
7906 | /* Per mac80211.h: This is only used in IBSS mode... */ | 7942 | /* Per mac80211.h: This is only used in IBSS mode... */ |
7907 | if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { | 7943 | if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { |
7944 | |||
7908 | IWL_DEBUG_MAC80211("leave - not in IBSS\n"); | 7945 | IWL_DEBUG_MAC80211("leave - not in IBSS\n"); |
7909 | mutex_unlock(&priv->mutex); | 7946 | mutex_unlock(&priv->mutex); |
7910 | return; | 7947 | return; |
@@ -9152,6 +9189,9 @@ static void iwl_pci_remove(struct pci_dev *pdev) | |||
9152 | iwl_rate_control_unregister(priv->hw); | 9189 | iwl_rate_control_unregister(priv->hw); |
9153 | } | 9190 | } |
9154 | 9191 | ||
9192 | /*netif_stop_queue(dev); */ | ||
9193 | flush_workqueue(priv->workqueue); | ||
9194 | |||
9155 | /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes | 9195 | /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes |
9156 | * priv->workqueue... so we can't take down the workqueue | 9196 | * priv->workqueue... so we can't take down the workqueue |
9157 | * until now... */ | 9197 | * until now... */ |
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h index 298faa9d3f61..06d9bc0015c0 100644 --- a/drivers/net/wireless/rt2x00/rt2x00lib.h +++ b/drivers/net/wireless/rt2x00/rt2x00lib.h | |||
@@ -30,7 +30,7 @@ | |||
30 | * Interval defines | 30 | * Interval defines |
31 | * Both the link tuner as the rfkill will be called once per second. | 31 | * Both the link tuner as the rfkill will be called once per second. |
32 | */ | 32 | */ |
33 | #define LINK_TUNE_INTERVAL ( round_jiffies(HZ) ) | 33 | #define LINK_TUNE_INTERVAL ( round_jiffies_relative(HZ) ) |
34 | #define RFKILL_POLL_INTERVAL ( 1000 ) | 34 | #define RFKILL_POLL_INTERVAL ( 1000 ) |
35 | 35 | ||
36 | /* | 36 | /* |
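round_jiffies(HZ) rounds the absolute time "now + HZ" to a whole second, which is the wrong thing to hand to an API expecting a *relative* delay; round_jiffies_relative(HZ) rounds the one-second interval itself. A short sketch of how the constant is consumed, assuming the usual delayed-work pattern (the work item and workqueue names here are made up):

/* Sketch: LINK_TUNE_INTERVAL is passed as a relative delay, so it must come
 * from round_jiffies_relative(); round_jiffies() would return an absolute
 * jiffies value and produce a huge bogus delay. */
#define LINK_TUNE_INTERVAL	( round_jiffies_relative(HZ) )

static void example_schedule_link_tuner(struct workqueue_struct *wq,
					struct delayed_work *link_work)
{
	/* Re-arm the tuner roughly once per second, batched with other
	 * rounded timers to reduce wakeups. */
	queue_delayed_work(wq, link_work, LINK_TUNE_INTERVAL);
}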
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c index de61c8fe6492..e454ae83e97a 100644 --- a/drivers/net/wireless/rtl8187_dev.c +++ b/drivers/net/wireless/rtl8187_dev.c | |||
@@ -433,6 +433,9 @@ static int rtl8187_start(struct ieee80211_hw *dev) | |||
433 | 433 | ||
434 | rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF); | 434 | rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF); |
435 | 435 | ||
436 | rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0); | ||
437 | rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0); | ||
438 | |||
436 | rtl8187_init_urbs(dev); | 439 | rtl8187_init_urbs(dev); |
437 | 440 | ||
438 | reg = RTL818X_RX_CONF_ONLYERLPKT | | 441 | reg = RTL818X_RX_CONF_ONLYERLPKT | |
@@ -582,32 +585,31 @@ static int rtl8187_config_interface(struct ieee80211_hw *dev, int if_id, | |||
582 | static void rtl8187_configure_filter(struct ieee80211_hw *dev, | 585 | static void rtl8187_configure_filter(struct ieee80211_hw *dev, |
583 | unsigned int changed_flags, | 586 | unsigned int changed_flags, |
584 | unsigned int *total_flags, | 587 | unsigned int *total_flags, |
585 | int mc_count, struct dev_addr_list *mc_list) | 588 | int mc_count, struct dev_addr_list *mclist) |
586 | { | 589 | { |
587 | struct rtl8187_priv *priv = dev->priv; | 590 | struct rtl8187_priv *priv = dev->priv; |
588 | 591 | ||
589 | *total_flags = 0; | ||
590 | |||
591 | if (changed_flags & FIF_ALLMULTI) | ||
592 | priv->rx_conf ^= RTL818X_RX_CONF_MULTICAST; | ||
593 | if (changed_flags & FIF_FCSFAIL) | 592 | if (changed_flags & FIF_FCSFAIL) |
594 | priv->rx_conf ^= RTL818X_RX_CONF_FCS; | 593 | priv->rx_conf ^= RTL818X_RX_CONF_FCS; |
595 | if (changed_flags & FIF_CONTROL) | 594 | if (changed_flags & FIF_CONTROL) |
596 | priv->rx_conf ^= RTL818X_RX_CONF_CTRL; | 595 | priv->rx_conf ^= RTL818X_RX_CONF_CTRL; |
597 | if (changed_flags & FIF_OTHER_BSS) | 596 | if (changed_flags & FIF_OTHER_BSS) |
598 | priv->rx_conf ^= RTL818X_RX_CONF_MONITOR; | 597 | priv->rx_conf ^= RTL818X_RX_CONF_MONITOR; |
599 | 598 | if (*total_flags & FIF_ALLMULTI || mc_count > 0) | |
600 | if (mc_count > 0) | ||
601 | priv->rx_conf |= RTL818X_RX_CONF_MULTICAST; | 599 | priv->rx_conf |= RTL818X_RX_CONF_MULTICAST; |
600 | else | ||
601 | priv->rx_conf &= ~RTL818X_RX_CONF_MULTICAST; | ||
602 | |||
603 | *total_flags = 0; | ||
602 | 604 | ||
603 | if (priv->rx_conf & RTL818X_RX_CONF_MULTICAST) | ||
604 | *total_flags |= FIF_ALLMULTI; | ||
605 | if (priv->rx_conf & RTL818X_RX_CONF_FCS) | 605 | if (priv->rx_conf & RTL818X_RX_CONF_FCS) |
606 | *total_flags |= FIF_FCSFAIL; | 606 | *total_flags |= FIF_FCSFAIL; |
607 | if (priv->rx_conf & RTL818X_RX_CONF_CTRL) | 607 | if (priv->rx_conf & RTL818X_RX_CONF_CTRL) |
608 | *total_flags |= FIF_CONTROL; | 608 | *total_flags |= FIF_CONTROL; |
609 | if (priv->rx_conf & RTL818X_RX_CONF_MONITOR) | 609 | if (priv->rx_conf & RTL818X_RX_CONF_MONITOR) |
610 | *total_flags |= FIF_OTHER_BSS; | 610 | *total_flags |= FIF_OTHER_BSS; |
611 | if (priv->rx_conf & RTL818X_RX_CONF_MULTICAST) | ||
612 | *total_flags |= FIF_ALLMULTI; | ||
611 | 613 | ||
612 | rtl818x_iowrite32_async(priv, &priv->map->RX_CONF, priv->rx_conf); | 614 | rtl818x_iowrite32_async(priv, &priv->map->RX_CONF, priv->rx_conf); |
613 | } | 615 | } |
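The old configure_filter toggled RTL818X_RX_CONF_MULTICAST with ^= whenever FIF_ALLMULTI changed and then set it again for mc_count > 0, so the bit could drift out of sync with what mac80211 actually requested; the rewrite derives the bit explicitly and reports the result back through *total_flags. A small self-contained demo of why explicit set/clear is idempotent where an XOR toggle is not (flag value and function name invented):

#include <stdio.h>

#define RX_CONF_MULTICAST 0x0004	/* stand-in for RTL818X_RX_CONF_MULTICAST */

/* Derive the bit from the requested state instead of toggling it: the result
 * is correct no matter how often the function is called. */
static unsigned int apply_filter(unsigned int rx_conf, int want_allmulti, int mc_count)
{
	if (want_allmulti || mc_count > 0)
		rx_conf |= RX_CONF_MULTICAST;
	else
		rx_conf &= ~RX_CONF_MULTICAST;
	return rx_conf;
}

int main(void)
{
	unsigned int rx_conf = 0;

	/* Calling twice with the same request is idempotent... */
	rx_conf = apply_filter(rx_conf, 1, 0);
	rx_conf = apply_filter(rx_conf, 1, 0);
	printf("multicast bit: %s\n", rx_conf & RX_CONF_MULTICAST ? "set" : "clear");

	/* ...whereas an XOR toggle would have cleared it again on the second call. */
	rx_conf = apply_filter(rx_conf, 0, 0);
	printf("multicast bit: %s\n", rx_conf & RX_CONF_MULTICAST ? "set" : "clear");
	return 0;
}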
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 0c4ab3b07274..9b35259eecfa 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -745,7 +745,7 @@ static char *fault_reason_strings[] = | |||
745 | "non-zero reserved fields in PTE", | 745 | "non-zero reserved fields in PTE", |
746 | "Unknown" | 746 | "Unknown" |
747 | }; | 747 | }; |
748 | #define MAX_FAULT_REASON_IDX ARRAY_SIZE(fault_reason_strings) | 748 | #define MAX_FAULT_REASON_IDX ARRAY_SIZE(fault_reason_strings) - 1 |
749 | 749 | ||
750 | char *dmar_get_fault_reason(u8 fault_reason) | 750 | char *dmar_get_fault_reason(u8 fault_reason) |
751 | { | 751 | { |
@@ -995,7 +995,6 @@ static struct intel_iommu *alloc_iommu(struct dmar_drhd_unit *drhd) | |||
995 | return iommu; | 995 | return iommu; |
996 | error_unmap: | 996 | error_unmap: |
997 | iounmap(iommu->reg); | 997 | iounmap(iommu->reg); |
998 | iommu->reg = 0; | ||
999 | error: | 998 | error: |
1000 | kfree(iommu); | 999 | kfree(iommu); |
1001 | return NULL; | 1000 | return NULL; |
@@ -1808,7 +1807,7 @@ get_valid_domain_for_dev(struct pci_dev *pdev) | |||
1808 | if (!domain) { | 1807 | if (!domain) { |
1809 | printk(KERN_ERR | 1808 | printk(KERN_ERR |
1810 | "Allocating domain for %s failed", pci_name(pdev)); | 1809 | "Allocating domain for %s failed", pci_name(pdev)); |
1811 | return 0; | 1810 | return NULL; |
1812 | } | 1811 | } |
1813 | 1812 | ||
1814 | /* make sure context mapping is ok */ | 1813 | /* make sure context mapping is ok */ |
@@ -1818,7 +1817,7 @@ get_valid_domain_for_dev(struct pci_dev *pdev) | |||
1818 | printk(KERN_ERR | 1817 | printk(KERN_ERR |
1819 | "Domain context map for %s failed", | 1818 | "Domain context map for %s failed", |
1820 | pci_name(pdev)); | 1819 | pci_name(pdev)); |
1821 | return 0; | 1820 | return NULL; |
1822 | } | 1821 | } |
1823 | } | 1822 | } |
1824 | 1823 | ||
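Two small correctness fixes here: MAX_FAULT_REASON_IDX must be the index of the last entry (ARRAY_SIZE - 1), not the element count, or the "Unknown" fallback is off by one; and functions that return a pointer should return NULL rather than 0. A self-contained lookup in the same style (table contents abridged):

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *fault_reason_strings[] = {
	"no fault",				/* abridged example table */
	"non-zero reserved fields in PTE",
	"Unknown",
};
/* Index of the last ("Unknown") entry, not the element count. */
#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(fault_reason_strings) - 1)

static const char *get_fault_reason(unsigned int fault_reason)
{
	/* Anything past the known reasons maps to the "Unknown" sentinel. */
	if (fault_reason > MAX_FAULT_REASON_IDX)
		return fault_reason_strings[MAX_FAULT_REASON_IDX];
	return fault_reason_strings[fault_reason];
}

int main(void)
{
	printf("%s\n", get_fault_reason(1));	/* known reason */
	printf("%s\n", get_fault_reason(42));	/* out of range -> "Unknown" */
	return 0;
}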
diff --git a/drivers/pci/intel-iommu.h b/drivers/pci/intel-iommu.h index ee88dd2400cb..459ad1f9dc54 100644 --- a/drivers/pci/intel-iommu.h +++ b/drivers/pci/intel-iommu.h | |||
@@ -58,7 +58,7 @@ | |||
58 | hi = readl(dmar + reg + 4); \ | 58 | hi = readl(dmar + reg + 4); \ |
59 | (((u64) hi) << 32) + lo; }) | 59 | (((u64) hi) << 32) + lo; }) |
60 | */ | 60 | */ |
61 | static inline u64 dmar_readq(void *addr) | 61 | static inline u64 dmar_readq(void __iomem *addr) |
62 | { | 62 | { |
63 | u32 lo, hi; | 63 | u32 lo, hi; |
64 | lo = readl(addr); | 64 | lo = readl(addr); |
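Marking the argument __iomem keeps sparse happy about readl() being applied to MMIO space and documents that the helper assembles a 64-bit register value from two 32-bit reads. A sketch of what the complete helper presumably looks like after the change (only the first line of the body is visible in the hunk; the high-word read is assumed):

/* Sketch of the annotated helper: __iomem marks MMIO space, and the 64-bit
 * value is composed from two 32-bit reads, low word first. */
static inline u64 dmar_readq(void __iomem *addr)
{
	u32 lo, hi;

	lo = readl(addr);
	hi = readl(addr + 4);
	return ((u64)hi << 32) + lo;
}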
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index fd5d0c1570df..00118499018b 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c | |||
@@ -562,8 +562,6 @@ zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size) | |||
562 | sg_init_table(sg_list->sg, sg_list->count); | 562 | sg_init_table(sg_list->sg, sg_list->count); |
563 | 563 | ||
564 | for (i = 0, sg = sg_list->sg; i < sg_list->count; i++, sg++) { | 564 | for (i = 0, sg = sg_list->sg; i < sg_list->count; i++, sg++) { |
565 | sg->length = min(size, PAGE_SIZE); | ||
566 | sg->offset = 0; | ||
567 | address = (void *) get_zeroed_page(GFP_KERNEL); | 565 | address = (void *) get_zeroed_page(GFP_KERNEL); |
568 | if (address == NULL) { | 566 | if (address == NULL) { |
569 | sg_list->count = i; | 567 | sg_list->count = i; |
@@ -571,7 +569,7 @@ zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size) | |||
571 | retval = -ENOMEM; | 569 | retval = -ENOMEM; |
572 | goto out; | 570 | goto out; |
573 | } | 571 | } |
574 | zfcp_address_to_sg(address, sg); | 572 | zfcp_address_to_sg(address, sg, min(size, PAGE_SIZE)); |
575 | size -= sg->length; | 573 | size -= sg->length; |
576 | } | 574 | } |
577 | 575 | ||
@@ -1518,13 +1516,13 @@ zfcp_gid_pn_buffers_alloc(struct zfcp_gid_pn_data **gid_pn, mempool_t *pool) | |||
1518 | return -ENOMEM; | 1516 | return -ENOMEM; |
1519 | 1517 | ||
1520 | memset(data, 0, sizeof(*data)); | 1518 | memset(data, 0, sizeof(*data)); |
1519 | sg_init_table(&data->req , 1); | ||
1520 | sg_init_table(&data->resp , 1); | ||
1521 | data->ct.req = &data->req; | 1521 | data->ct.req = &data->req; |
1522 | data->ct.resp = &data->resp; | 1522 | data->ct.resp = &data->resp; |
1523 | data->ct.req_count = data->ct.resp_count = 1; | 1523 | data->ct.req_count = data->ct.resp_count = 1; |
1524 | zfcp_address_to_sg(&data->ct_iu_req, &data->req); | 1524 | zfcp_address_to_sg(&data->ct_iu_req, &data->req, sizeof(struct ct_iu_gid_pn_req)); |
1525 | zfcp_address_to_sg(&data->ct_iu_resp, &data->resp); | 1525 | zfcp_address_to_sg(&data->ct_iu_resp, &data->resp, sizeof(struct ct_iu_gid_pn_resp)); |
1526 | data->req.length = sizeof(struct ct_iu_gid_pn_req); | ||
1527 | data->resp.length = sizeof(struct ct_iu_gid_pn_resp); | ||
1528 | 1526 | ||
1529 | *gid_pn = data; | 1527 | *gid_pn = data; |
1530 | return 0; | 1528 | return 0; |
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 326e7ee232cb..e268f79bdbd2 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h | |||
@@ -70,12 +70,12 @@ zfcp_sg_to_address(struct scatterlist *list) | |||
70 | * zfcp_address_to_sg - set up struct scatterlist from kernel address | 70 | * zfcp_address_to_sg - set up struct scatterlist from kernel address |
71 | * @address: kernel address | 71 | * @address: kernel address |
72 | * @list: struct scatterlist | 72 | * @list: struct scatterlist |
73 | * @size: buffer size | ||
73 | */ | 74 | */ |
74 | static inline void | 75 | static inline void |
75 | zfcp_address_to_sg(void *address, struct scatterlist *list) | 76 | zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size) |
76 | { | 77 | { |
77 | sg_set_page(list, virt_to_page(address)); | 78 | sg_set_buf(list, address, size); |
78 | list->offset = ((unsigned long) address) & (PAGE_SIZE - 1); | ||
79 | } | 79 | } |
80 | 80 | ||
81 | #define REQUEST_LIST_SIZE 128 | 81 | #define REQUEST_LIST_SIZE 128 |
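With scatterlist chaining, an sg entry's page pointer also carries flag bits, so callers should no longer poke sg->length and sg->offset by hand; sg_set_buf() fills page, offset and length together. That is why the reworked helper takes the buffer size and the call sites above drop their separate length assignments. A short usage sketch (function and parameter names invented):

/* Sketch: initialise a one-entry scatterlist over a kernel buffer.
 * sg_init_table() clears the entry and sets the end marker; sg_set_buf()
 * fills page, offset and length in one call. */
static void example_map_buffer(struct scatterlist *sg, void *buf, unsigned int len)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, len);
}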
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 9438d0b28799..5552b755c08a 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
@@ -322,9 +322,9 @@ zfcp_erp_adisc(struct zfcp_port *port) | |||
322 | if (address == NULL) | 322 | if (address == NULL) |
323 | goto nomem; | 323 | goto nomem; |
324 | 324 | ||
325 | zfcp_address_to_sg(address, send_els->req); | 325 | zfcp_address_to_sg(address, send_els->req, sizeof(struct zfcp_ls_adisc)); |
326 | address += PAGE_SIZE >> 1; | 326 | address += PAGE_SIZE >> 1; |
327 | zfcp_address_to_sg(address, send_els->resp); | 327 | zfcp_address_to_sg(address, send_els->resp, sizeof(struct zfcp_ls_adisc_acc)); |
328 | send_els->req_count = send_els->resp_count = 1; | 328 | send_els->req_count = send_els->resp_count = 1; |
329 | 329 | ||
330 | send_els->adapter = adapter; | 330 | send_els->adapter = adapter; |
@@ -336,9 +336,6 @@ zfcp_erp_adisc(struct zfcp_port *port) | |||
336 | adisc = zfcp_sg_to_address(send_els->req); | 336 | adisc = zfcp_sg_to_address(send_els->req); |
337 | send_els->ls_code = adisc->code = ZFCP_LS_ADISC; | 337 | send_els->ls_code = adisc->code = ZFCP_LS_ADISC; |
338 | 338 | ||
339 | send_els->req->length = sizeof(struct zfcp_ls_adisc); | ||
340 | send_els->resp->length = sizeof(struct zfcp_ls_adisc_acc); | ||
341 | |||
342 | /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports | 339 | /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports |
343 | without FC-AL-2 capability, so we don't set it */ | 340 | without FC-AL-2 capability, so we don't set it */ |
344 | adisc->wwpn = fc_host_port_name(adapter->scsi_host); | 341 | adisc->wwpn = fc_host_port_name(adapter->scsi_host); |
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 72b0393b4596..1e6d7a9c75bf 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c | |||
@@ -391,7 +391,7 @@ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg) | |||
391 | /* | 391 | /* |
392 | * Extract the fibctx from the input parameters | 392 | * Extract the fibctx from the input parameters |
393 | */ | 393 | */ |
394 | if (fibctx->unique == (u32)(ptrdiff_t)arg) /* We found a winner */ | 394 | if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */ |
395 | break; | 395 | break; |
396 | entry = entry->next; | 396 | entry = entry->next; |
397 | fibctx = NULL; | 397 | fibctx = NULL; |
@@ -590,7 +590,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
590 | } | 590 | } |
591 | addr = (u64)upsg->sg[i].addr[0]; | 591 | addr = (u64)upsg->sg[i].addr[0]; |
592 | addr += ((u64)upsg->sg[i].addr[1]) << 32; | 592 | addr += ((u64)upsg->sg[i].addr[1]) << 32; |
593 | sg_user[i] = (void __user *)(ptrdiff_t)addr; | 593 | sg_user[i] = (void __user *)(uintptr_t)addr; |
594 | sg_list[i] = p; // save so we can clean up later | 594 | sg_list[i] = p; // save so we can clean up later |
595 | sg_indx = i; | 595 | sg_indx = i; |
596 | 596 | ||
@@ -633,7 +633,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
633 | rcode = -ENOMEM; | 633 | rcode = -ENOMEM; |
634 | goto cleanup; | 634 | goto cleanup; |
635 | } | 635 | } |
636 | sg_user[i] = (void __user *)(ptrdiff_t)usg->sg[i].addr; | 636 | sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr; |
637 | sg_list[i] = p; // save so we can clean up later | 637 | sg_list[i] = p; // save so we can clean up later |
638 | sg_indx = i; | 638 | sg_indx = i; |
639 | 639 | ||
@@ -664,7 +664,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
664 | if (actual_fibsize64 == fibsize) { | 664 | if (actual_fibsize64 == fibsize) { |
665 | struct user_sgmap64* usg = (struct user_sgmap64 *)upsg; | 665 | struct user_sgmap64* usg = (struct user_sgmap64 *)upsg; |
666 | for (i = 0; i < upsg->count; i++) { | 666 | for (i = 0; i < upsg->count; i++) { |
667 | u64 addr; | 667 | uintptr_t addr; |
668 | void* p; | 668 | void* p; |
669 | /* Does this really need to be GFP_DMA? */ | 669 | /* Does this really need to be GFP_DMA? */ |
670 | p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); | 670 | p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); |
@@ -676,7 +676,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
676 | } | 676 | } |
677 | addr = (u64)usg->sg[i].addr[0]; | 677 | addr = (u64)usg->sg[i].addr[0]; |
678 | addr += ((u64)usg->sg[i].addr[1]) << 32; | 678 | addr += ((u64)usg->sg[i].addr[1]) << 32; |
679 | sg_user[i] = (void __user *)(ptrdiff_t)addr; | 679 | sg_user[i] = (void __user *)addr; |
680 | sg_list[i] = p; // save so we can clean up later | 680 | sg_list[i] = p; // save so we can clean up later |
681 | sg_indx = i; | 681 | sg_indx = i; |
682 | 682 | ||
@@ -704,7 +704,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
704 | rcode = -ENOMEM; | 704 | rcode = -ENOMEM; |
705 | goto cleanup; | 705 | goto cleanup; |
706 | } | 706 | } |
707 | sg_user[i] = (void __user *)(ptrdiff_t)upsg->sg[i].addr; | 707 | sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr; |
708 | sg_list[i] = p; // save so we can clean up later | 708 | sg_list[i] = p; // save so we can clean up later |
709 | sg_indx = i; | 709 | sg_indx = i; |
710 | 710 | ||
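ptrdiff_t is the type of a pointer *difference*; casting addresses through it only happens to work on some ABIs. uintptr_t is the integer type defined to round-trip a pointer value, which is why the ioctl paths that rebuild user pointers from 32+32-bit halves now use it. A self-contained demonstration of the round trip (variable names invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int object = 42;
	void *p = &object;

	/* uintptr_t is specified to hold a pointer value without loss... */
	uintptr_t as_int = (uintptr_t)p;

	/* ...split and reassemble it the way the ioctl path does with the
	 * 32-bit address halves coming from user space. */
	uint32_t lo = (uint32_t)as_int;
	uint32_t hi = (uint32_t)((uint64_t)as_int >> 32);
	uintptr_t rebuilt = (uintptr_t)(((uint64_t)hi << 32) | lo);

	printf("round trip %s\n", (void *)rebuilt == p ? "ok" : "broken");
	return 0;
}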
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 3009ad8c4073..8736813a0296 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c | |||
@@ -110,7 +110,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co | |||
110 | /* | 110 | /* |
111 | * Align the beginning of Headers to commalign | 111 | * Align the beginning of Headers to commalign |
112 | */ | 112 | */ |
113 | align = (commalign - ((ptrdiff_t)(base) & (commalign - 1))); | 113 | align = (commalign - ((uintptr_t)(base) & (commalign - 1))); |
114 | base = base + align; | 114 | base = base + align; |
115 | phys = phys + align; | 115 | phys = phys + align; |
116 | /* | 116 | /* |
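The same uintptr_t reasoning applies to the comm-area alignment code: the padding needed to reach a commalign boundary is computed from the pointer's integer value. A small self-contained version of that arithmetic; note that the driver's expression advances by a full commalign unit when the pointer is already aligned, whereas the variant below (with the extra mask) leaves an aligned pointer untouched:

#include <stdio.h>
#include <stdint.h>

/* Round a pointer up to the next 'align' boundary; align must be a power of
 * two, as commalign is in aac_alloc_comm(). */
static void *align_up(void *base, uintptr_t align)
{
	uintptr_t pad = (align - ((uintptr_t)base & (align - 1))) & (align - 1);
	return (char *)base + pad;
}

int main(void)
{
	char buffer[256];
	void *aligned = align_up(buffer + 1, 64);

	printf("aligned to 64: %s\n",
	       ((uintptr_t)aligned & 63) == 0 ? "yes" : "no");
	return 0;
}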
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c index fcd25f7d0bc6..e6032ffc66a6 100644 --- a/drivers/scsi/aacraid/dpcsup.c +++ b/drivers/scsi/aacraid/dpcsup.c | |||
@@ -254,7 +254,7 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index) | |||
254 | kfree (fib); | 254 | kfree (fib); |
255 | return 1; | 255 | return 1; |
256 | } | 256 | } |
257 | memcpy(hw_fib, (struct hw_fib *)(((ptrdiff_t)(dev->regs.sa)) + | 257 | memcpy(hw_fib, (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) + |
258 | (index & ~0x00000002L)), sizeof(struct hw_fib)); | 258 | (index & ~0x00000002L)), sizeof(struct hw_fib)); |
259 | INIT_LIST_HEAD(&fib->fiblink); | 259 | INIT_LIST_HEAD(&fib->fiblink); |
260 | fib->type = FSAFS_NTC_FIB_CONTEXT; | 260 | fib->type = FSAFS_NTC_FIB_CONTEXT; |
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h index ace7a15b413e..a67e29f83ae5 100644 --- a/drivers/scsi/arcmsr/arcmsr.h +++ b/drivers/scsi/arcmsr/arcmsr.h | |||
@@ -141,14 +141,14 @@ struct CMD_MESSAGE_FIELD | |||
141 | #define IS_SG64_ADDR 0x01000000 /* bit24 */ | 141 | #define IS_SG64_ADDR 0x01000000 /* bit24 */ |
142 | struct SG32ENTRY | 142 | struct SG32ENTRY |
143 | { | 143 | { |
144 | uint32_t length; | 144 | __le32 length; |
145 | uint32_t address; | 145 | __le32 address; |
146 | }; | 146 | }; |
147 | struct SG64ENTRY | 147 | struct SG64ENTRY |
148 | { | 148 | { |
149 | uint32_t length; | 149 | __le32 length; |
150 | uint32_t address; | 150 | __le32 address; |
151 | uint32_t addresshigh; | 151 | __le32 addresshigh; |
152 | }; | 152 | }; |
153 | struct SGENTRY_UNION | 153 | struct SGENTRY_UNION |
154 | { | 154 | { |
@@ -339,23 +339,15 @@ struct MessageUnit_B | |||
339 | uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE]; | 339 | uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE]; |
340 | uint32_t postq_index; | 340 | uint32_t postq_index; |
341 | uint32_t doneq_index; | 341 | uint32_t doneq_index; |
342 | uint32_t *drv2iop_doorbell_reg; | 342 | uint32_t __iomem *drv2iop_doorbell_reg; |
343 | uint32_t *drv2iop_doorbell_mask_reg; | 343 | uint32_t __iomem *drv2iop_doorbell_mask_reg; |
344 | uint32_t *iop2drv_doorbell_reg; | 344 | uint32_t __iomem *iop2drv_doorbell_reg; |
345 | uint32_t *iop2drv_doorbell_mask_reg; | 345 | uint32_t __iomem *iop2drv_doorbell_mask_reg; |
346 | uint32_t *msgcode_rwbuffer_reg; | 346 | uint32_t __iomem *msgcode_rwbuffer_reg; |
347 | uint32_t *ioctl_wbuffer_reg; | 347 | uint32_t __iomem *ioctl_wbuffer_reg; |
348 | uint32_t *ioctl_rbuffer_reg; | 348 | uint32_t __iomem *ioctl_rbuffer_reg; |
349 | }; | 349 | }; |
350 | 350 | ||
351 | struct MessageUnit | ||
352 | { | ||
353 | union | ||
354 | { | ||
355 | struct MessageUnit_A pmu_A; | ||
356 | struct MessageUnit_B pmu_B; | ||
357 | } u; | ||
358 | }; | ||
359 | /* | 351 | /* |
360 | ******************************************************************************* | 352 | ******************************************************************************* |
361 | ** Adapter Control Block | 353 | ** Adapter Control Block |
@@ -374,7 +366,10 @@ struct AdapterControlBlock | |||
374 | /* Offset is used in making arc cdb physical to virtual calculations */ | 366 | /* Offset is used in making arc cdb physical to virtual calculations */ |
375 | uint32_t outbound_int_enable; | 367 | uint32_t outbound_int_enable; |
376 | 368 | ||
377 | struct MessageUnit * pmu; | 369 | union { |
370 | struct MessageUnit_A __iomem * pmuA; | ||
371 | struct MessageUnit_B * pmuB; | ||
372 | }; | ||
378 | /* message unit ATU inbound base address0 */ | 373 | /* message unit ATU inbound base address0 */ |
379 | 374 | ||
380 | uint32_t acb_flags; | 375 | uint32_t acb_flags; |
@@ -558,7 +553,7 @@ struct SENSE_DATA | |||
558 | 553 | ||
559 | extern void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *); | 554 | extern void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *); |
560 | extern void arcmsr_iop_message_read(struct AdapterControlBlock *); | 555 | extern void arcmsr_iop_message_read(struct AdapterControlBlock *); |
561 | extern struct QBUFFER *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *); | 556 | extern struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *); |
562 | extern struct class_device_attribute *arcmsr_host_attrs[]; | 557 | extern struct class_device_attribute *arcmsr_host_attrs[]; |
563 | extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *); | 558 | extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *); |
564 | void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb); | 559 | void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb); |
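The SG entry structures describe memory the adapter itself parses, so their fields are byte-order sensitive; typing them __le32 lets sparse flag missing conversions, which is also why the CDB build path below wraps IS_SG64_ADDR in cpu_to_le32(). A hedged sketch of filling one 64-bit SG entry (fields as declared above, helper name invented):

/* Sketch: populate a single SG64ENTRY for the adapter.  Every field is
 * little-endian on the wire, so host values go through cpu_to_le32(). */
static void example_fill_sg64(struct SG64ENTRY *e, dma_addr_t addr, u32 len)
{
	e->address     = cpu_to_le32((u32)addr);
	e->addresshigh = cpu_to_le32((u32)((u64)addr >> 32));
	e->length      = cpu_to_le32(len | IS_SG64_ADDR);
}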
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c index d04d1aa28fa4..7d7b0a554276 100644 --- a/drivers/scsi/arcmsr/arcmsr_attr.c +++ b/drivers/scsi/arcmsr/arcmsr_attr.c | |||
@@ -85,13 +85,13 @@ static ssize_t arcmsr_sysfs_iop_message_read(struct kobject *kobj, | |||
85 | allxfer_len++; | 85 | allxfer_len++; |
86 | } | 86 | } |
87 | if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { | 87 | if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { |
88 | struct QBUFFER *prbuffer; | 88 | struct QBUFFER __iomem *prbuffer; |
89 | uint8_t *iop_data; | 89 | uint8_t __iomem *iop_data; |
90 | int32_t iop_len; | 90 | int32_t iop_len; |
91 | 91 | ||
92 | acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; | 92 | acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; |
93 | prbuffer = arcmsr_get_iop_rqbuffer(acb); | 93 | prbuffer = arcmsr_get_iop_rqbuffer(acb); |
94 | iop_data = (uint8_t *)prbuffer->data; | 94 | iop_data = prbuffer->data; |
95 | iop_len = readl(&prbuffer->data_len); | 95 | iop_len = readl(&prbuffer->data_len); |
96 | while (iop_len > 0) { | 96 | while (iop_len > 0) { |
97 | acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data); | 97 | acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data); |
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index f7a252885a5c..d466a2dac1db 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c | |||
@@ -236,18 +236,22 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) | |||
236 | uint32_t intmask_org; | 236 | uint32_t intmask_org; |
237 | int i, j; | 237 | int i, j; |
238 | 238 | ||
239 | acb->pmu = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); | 239 | acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); |
240 | if (!acb->pmu) { | 240 | if (!acb->pmuA) { |
241 | printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", | 241 | printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", |
242 | acb->host->host_no); | 242 | acb->host->host_no); |
243 | return -ENOMEM; | ||
243 | } | 244 | } |
244 | 245 | ||
245 | dma_coherent = dma_alloc_coherent(&pdev->dev, | 246 | dma_coherent = dma_alloc_coherent(&pdev->dev, |
246 | ARCMSR_MAX_FREECCB_NUM * | 247 | ARCMSR_MAX_FREECCB_NUM * |
247 | sizeof (struct CommandControlBlock) + 0x20, | 248 | sizeof (struct CommandControlBlock) + 0x20, |
248 | &dma_coherent_handle, GFP_KERNEL); | 249 | &dma_coherent_handle, GFP_KERNEL); |
249 | if (!dma_coherent) | 250 | |
251 | if (!dma_coherent) { | ||
252 | iounmap(acb->pmuA); | ||
250 | return -ENOMEM; | 253 | return -ENOMEM; |
254 | } | ||
251 | 255 | ||
252 | acb->dma_coherent = dma_coherent; | 256 | acb->dma_coherent = dma_coherent; |
253 | acb->dma_coherent_handle = dma_coherent_handle; | 257 | acb->dma_coherent_handle = dma_coherent_handle; |
@@ -287,7 +291,7 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) | |||
287 | 291 | ||
288 | struct pci_dev *pdev = acb->pdev; | 292 | struct pci_dev *pdev = acb->pdev; |
289 | struct MessageUnit_B *reg; | 293 | struct MessageUnit_B *reg; |
290 | void *mem_base0, *mem_base1; | 294 | void __iomem *mem_base0, *mem_base1; |
291 | void *dma_coherent; | 295 | void *dma_coherent; |
292 | dma_addr_t dma_coherent_handle, dma_addr; | 296 | dma_addr_t dma_coherent_handle, dma_addr; |
293 | uint32_t intmask_org; | 297 | uint32_t intmask_org; |
@@ -328,25 +332,28 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) | |||
328 | 332 | ||
329 | reg = (struct MessageUnit_B *)(dma_coherent + | 333 | reg = (struct MessageUnit_B *)(dma_coherent + |
330 | ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock)); | 334 | ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock)); |
331 | acb->pmu = (struct MessageUnit *)reg; | 335 | acb->pmuB = reg; |
332 | mem_base0 = ioremap(pci_resource_start(pdev, 0), | 336 | mem_base0 = ioremap(pci_resource_start(pdev, 0), |
333 | pci_resource_len(pdev, 0)); | 337 | pci_resource_len(pdev, 0)); |
338 | if (!mem_base0) | ||
339 | goto out; | ||
340 | |||
334 | mem_base1 = ioremap(pci_resource_start(pdev, 2), | 341 | mem_base1 = ioremap(pci_resource_start(pdev, 2), |
335 | pci_resource_len(pdev, 2)); | 342 | pci_resource_len(pdev, 2)); |
336 | reg->drv2iop_doorbell_reg = (uint32_t *)((char *)mem_base0 + | 343 | if (!mem_base1) { |
337 | ARCMSR_DRV2IOP_DOORBELL); | 344 | iounmap(mem_base0); |
338 | reg->drv2iop_doorbell_mask_reg = (uint32_t *)((char *)mem_base0 + | 345 | goto out; |
339 | ARCMSR_DRV2IOP_DOORBELL_MASK); | 346 | } |
340 | reg->iop2drv_doorbell_reg = (uint32_t *)((char *)mem_base0 + | 347 | |
341 | ARCMSR_IOP2DRV_DOORBELL); | 348 | reg->drv2iop_doorbell_reg = mem_base0 + ARCMSR_DRV2IOP_DOORBELL; |
342 | reg->iop2drv_doorbell_mask_reg = (uint32_t *)((char *)mem_base0 + | 349 | reg->drv2iop_doorbell_mask_reg = mem_base0 + |
343 | ARCMSR_IOP2DRV_DOORBELL_MASK); | 350 | ARCMSR_DRV2IOP_DOORBELL_MASK; |
344 | reg->ioctl_wbuffer_reg = (uint32_t *)((char *)mem_base1 + | 351 | reg->iop2drv_doorbell_reg = mem_base0 + ARCMSR_IOP2DRV_DOORBELL; |
345 | ARCMSR_IOCTL_WBUFFER); | 352 | reg->iop2drv_doorbell_mask_reg = mem_base0 + |
346 | reg->ioctl_rbuffer_reg = (uint32_t *)((char *)mem_base1 + | 353 | ARCMSR_IOP2DRV_DOORBELL_MASK; |
347 | ARCMSR_IOCTL_RBUFFER); | 354 | reg->ioctl_wbuffer_reg = mem_base1 + ARCMSR_IOCTL_WBUFFER; |
348 | reg->msgcode_rwbuffer_reg = (uint32_t *)((char *)mem_base1 + | 355 | reg->ioctl_rbuffer_reg = mem_base1 + ARCMSR_IOCTL_RBUFFER; |
349 | ARCMSR_MSGCODE_RWBUFFER); | 356 | reg->msgcode_rwbuffer_reg = mem_base1 + ARCMSR_MSGCODE_RWBUFFER; |
350 | 357 | ||
351 | acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr; | 358 | acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr; |
352 | for (i = 0; i < ARCMSR_MAX_TARGETID; i++) | 359 | for (i = 0; i < ARCMSR_MAX_TARGETID; i++) |
@@ -362,6 +369,12 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) | |||
362 | break; | 369 | break; |
363 | } | 370 | } |
364 | return 0; | 371 | return 0; |
372 | |||
373 | out: | ||
374 | dma_free_coherent(&acb->pdev->dev, | ||
375 | ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20, | ||
376 | acb->dma_coherent, acb->dma_coherent_handle); | ||
377 | return -ENOMEM; | ||
365 | } | 378 | } |
366 | 379 | ||
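The type-B allocation path above now checks both ioremap() calls and unwinds everything already set up (the partially mapped BAR and the coherent DMA buffer) before failing, and the type-A path frees its mapping when the DMA allocation fails. A self-contained illustration of the same goto-based unwind idiom using plain malloc (resource names invented):

#include <stdio.h>
#include <stdlib.h>

/* Acquire two resources; on any failure release whatever was already
 * acquired, in reverse order, via cascading goto labels. */
static int setup(void)
{
	void *dma_area = NULL, *mmio_a = NULL;

	dma_area = malloc(4096);		/* stands in for dma_alloc_coherent() */
	if (!dma_area)
		goto err;

	mmio_a = malloc(64);			/* stands in for ioremap() of BAR 0 */
	if (!mmio_a)
		goto err_free_dma;

	printf("setup complete\n");
	free(mmio_a);
	free(dma_area);
	return 0;

err_free_dma:
	free(dma_area);
err:
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}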
367 | static int arcmsr_probe(struct pci_dev *pdev, | 380 | static int arcmsr_probe(struct pci_dev *pdev, |
@@ -454,7 +467,6 @@ static int arcmsr_probe(struct pci_dev *pdev, | |||
454 | free_irq(pdev->irq, acb); | 467 | free_irq(pdev->irq, acb); |
455 | out_free_ccb_pool: | 468 | out_free_ccb_pool: |
456 | arcmsr_free_ccb_pool(acb); | 469 | arcmsr_free_ccb_pool(acb); |
457 | iounmap(acb->pmu); | ||
458 | out_release_regions: | 470 | out_release_regions: |
459 | pci_release_regions(pdev); | 471 | pci_release_regions(pdev); |
460 | out_host_put: | 472 | out_host_put: |
@@ -467,7 +479,7 @@ static int arcmsr_probe(struct pci_dev *pdev, | |||
467 | 479 | ||
468 | static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb) | 480 | static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb) |
469 | { | 481 | { |
470 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 482 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
471 | uint32_t Index; | 483 | uint32_t Index; |
472 | uint8_t Retries = 0x00; | 484 | uint8_t Retries = 0x00; |
473 | 485 | ||
@@ -488,7 +500,7 @@ static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb) | |||
488 | 500 | ||
489 | static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb) | 501 | static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb) |
490 | { | 502 | { |
491 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 503 | struct MessageUnit_B *reg = acb->pmuB; |
492 | uint32_t Index; | 504 | uint32_t Index; |
493 | uint8_t Retries = 0x00; | 505 | uint8_t Retries = 0x00; |
494 | 506 | ||
@@ -509,7 +521,7 @@ static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb) | |||
509 | 521 | ||
510 | static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) | 522 | static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) |
511 | { | 523 | { |
512 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 524 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
513 | 525 | ||
514 | writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0); | 526 | writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0); |
515 | if (arcmsr_hba_wait_msgint_ready(acb)) | 527 | if (arcmsr_hba_wait_msgint_ready(acb)) |
@@ -520,7 +532,7 @@ static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) | |||
520 | 532 | ||
521 | static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) | 533 | static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) |
522 | { | 534 | { |
523 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 535 | struct MessageUnit_B *reg = acb->pmuB; |
524 | 536 | ||
525 | writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg); | 537 | writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg); |
526 | if (arcmsr_hbb_wait_msgint_ready(acb)) | 538 | if (arcmsr_hbb_wait_msgint_ready(acb)) |
@@ -566,7 +578,7 @@ static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag) | |||
566 | 578 | ||
567 | static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb) | 579 | static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb) |
568 | { | 580 | { |
569 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 581 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
570 | int retry_count = 30; | 582 | int retry_count = 30; |
571 | 583 | ||
572 | writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0); | 584 | writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0); |
@@ -583,7 +595,7 @@ static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb) | |||
583 | 595 | ||
584 | static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb) | 596 | static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb) |
585 | { | 597 | { |
586 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 598 | struct MessageUnit_B *reg = acb->pmuB; |
587 | int retry_count = 30; | 599 | int retry_count = 30; |
588 | 600 | ||
589 | writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell_reg); | 601 | writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell_reg); |
@@ -637,7 +649,7 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb) | |||
637 | switch (acb->adapter_type) { | 649 | switch (acb->adapter_type) { |
638 | 650 | ||
639 | case ACB_ADAPTER_TYPE_A : { | 651 | case ACB_ADAPTER_TYPE_A : { |
640 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 652 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
641 | orig_mask = readl(®->outbound_intmask)|\ | 653 | orig_mask = readl(®->outbound_intmask)|\ |
642 | ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE; | 654 | ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE; |
643 | writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \ | 655 | writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \ |
@@ -646,7 +658,7 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb) | |||
646 | break; | 658 | break; |
647 | 659 | ||
648 | case ACB_ADAPTER_TYPE_B : { | 660 | case ACB_ADAPTER_TYPE_B : { |
649 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 661 | struct MessageUnit_B *reg = acb->pmuB; |
650 | orig_mask = readl(reg->iop2drv_doorbell_mask_reg) & \ | 662 | orig_mask = readl(reg->iop2drv_doorbell_mask_reg) & \ |
651 | (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); | 663 | (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); |
652 | writel(0, reg->iop2drv_doorbell_mask_reg); | 664 | writel(0, reg->iop2drv_doorbell_mask_reg); |
@@ -748,14 +760,13 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) | |||
748 | switch (acb->adapter_type) { | 760 | switch (acb->adapter_type) { |
749 | 761 | ||
750 | case ACB_ADAPTER_TYPE_A: { | 762 | case ACB_ADAPTER_TYPE_A: { |
751 | struct MessageUnit_A __iomem *reg = \ | 763 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
752 | (struct MessageUnit_A *)acb->pmu; | ||
753 | uint32_t outbound_intstatus; | 764 | uint32_t outbound_intstatus; |
754 | outbound_intstatus = readl(®->outbound_intstatus) & \ | 765 | outbound_intstatus = readl(®->outbound_intstatus) & |
755 | acb->outbound_int_enable; | 766 | acb->outbound_int_enable; |
756 | /*clear and abort all outbound posted Q*/ | 767 | /*clear and abort all outbound posted Q*/ |
757 | writel(outbound_intstatus, ®->outbound_intstatus);/*clear interrupt*/ | 768 | writel(outbound_intstatus, ®->outbound_intstatus);/*clear interrupt*/ |
758 | while (((flag_ccb = readl(®->outbound_queueport)) != 0xFFFFFFFF) \ | 769 | while (((flag_ccb = readl(®->outbound_queueport)) != 0xFFFFFFFF) |
759 | && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) { | 770 | && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) { |
760 | arcmsr_drain_donequeue(acb, flag_ccb); | 771 | arcmsr_drain_donequeue(acb, flag_ccb); |
761 | } | 772 | } |
@@ -763,7 +774,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) | |||
763 | break; | 774 | break; |
764 | 775 | ||
765 | case ACB_ADAPTER_TYPE_B: { | 776 | case ACB_ADAPTER_TYPE_B: { |
766 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 777 | struct MessageUnit_B *reg = acb->pmuB; |
767 | /*clear all outbound posted Q*/ | 778 | /*clear all outbound posted Q*/ |
768 | for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) { | 779 | for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) { |
769 | if ((flag_ccb = readl(®->done_qbuffer[i])) != 0) { | 780 | if ((flag_ccb = readl(®->done_qbuffer[i])) != 0) { |
@@ -816,7 +827,6 @@ static void arcmsr_remove(struct pci_dev *pdev) | |||
816 | } | 827 | } |
817 | 828 | ||
818 | free_irq(pdev->irq, acb); | 829 | free_irq(pdev->irq, acb); |
819 | iounmap(acb->pmu); | ||
820 | arcmsr_free_ccb_pool(acb); | 830 | arcmsr_free_ccb_pool(acb); |
821 | pci_release_regions(pdev); | 831 | pci_release_regions(pdev); |
822 | 832 | ||
@@ -859,7 +869,7 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \ | |||
859 | switch (acb->adapter_type) { | 869 | switch (acb->adapter_type) { |
860 | 870 | ||
861 | case ACB_ADAPTER_TYPE_A : { | 871 | case ACB_ADAPTER_TYPE_A : { |
862 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 872 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
863 | mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE | | 873 | mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE | |
864 | ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE); | 874 | ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE); |
865 | writel(mask, ®->outbound_intmask); | 875 | writel(mask, ®->outbound_intmask); |
@@ -868,7 +878,7 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \ | |||
868 | break; | 878 | break; |
869 | 879 | ||
870 | case ACB_ADAPTER_TYPE_B : { | 880 | case ACB_ADAPTER_TYPE_B : { |
871 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 881 | struct MessageUnit_B *reg = acb->pmuB; |
872 | mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | \ | 882 | mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | \ |
873 | ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE); | 883 | ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE); |
874 | writel(mask, reg->iop2drv_doorbell_mask_reg); | 884 | writel(mask, reg->iop2drv_doorbell_mask_reg); |
@@ -882,7 +892,7 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb, | |||
882 | { | 892 | { |
883 | struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb; | 893 | struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb; |
884 | int8_t *psge = (int8_t *)&arcmsr_cdb->u; | 894 | int8_t *psge = (int8_t *)&arcmsr_cdb->u; |
885 | uint32_t address_lo, address_hi; | 895 | __le32 address_lo, address_hi; |
886 | int arccdbsize = 0x30; | 896 | int arccdbsize = 0x30; |
887 | int nseg; | 897 | int nseg; |
888 | 898 | ||
@@ -900,7 +910,8 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb, | |||
900 | BUG_ON(nseg < 0); | 910 | BUG_ON(nseg < 0); |
901 | 911 | ||
902 | if (nseg) { | 912 | if (nseg) { |
903 | int length, i, cdb_sgcount = 0; | 913 | __le32 length; |
914 | int i, cdb_sgcount = 0; | ||
904 | struct scatterlist *sg; | 915 | struct scatterlist *sg; |
905 | 916 | ||
906 | /* map stor port SG list to our iop SG List. */ | 917 | /* map stor port SG list to our iop SG List. */ |
@@ -921,7 +932,7 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb, | |||
921 | 932 | ||
922 | pdma_sg->addresshigh = address_hi; | 933 | pdma_sg->addresshigh = address_hi; |
923 | pdma_sg->address = address_lo; | 934 | pdma_sg->address = address_lo; |
924 | pdma_sg->length = length|IS_SG64_ADDR; | 935 | pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR); |
925 | psge += sizeof (struct SG64ENTRY); | 936 | psge += sizeof (struct SG64ENTRY); |
926 | arccdbsize += sizeof (struct SG64ENTRY); | 937 | arccdbsize += sizeof (struct SG64ENTRY); |
927 | } | 938 | } |
@@ -947,7 +958,7 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr | |||
947 | 958 | ||
948 | switch (acb->adapter_type) { | 959 | switch (acb->adapter_type) { |
949 | case ACB_ADAPTER_TYPE_A: { | 960 | case ACB_ADAPTER_TYPE_A: { |
950 | struct MessageUnit_A *reg = (struct MessageUnit_A *)acb->pmu; | 961 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
951 | 962 | ||
952 | if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) | 963 | if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) |
953 | writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE, | 964 | writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE, |
@@ -959,7 +970,7 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr | |||
959 | break; | 970 | break; |
960 | 971 | ||
961 | case ACB_ADAPTER_TYPE_B: { | 972 | case ACB_ADAPTER_TYPE_B: { |
962 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 973 | struct MessageUnit_B *reg = acb->pmuB; |
963 | uint32_t ending_index, index = reg->postq_index; | 974 | uint32_t ending_index, index = reg->postq_index; |
964 | 975 | ||
965 | ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE); | 976 | ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE); |
@@ -982,7 +993,7 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr | |||
982 | 993 | ||
983 | static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb) | 994 | static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb) |
984 | { | 995 | { |
985 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 996 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
986 | acb->acb_flags &= ~ACB_F_MSG_START_BGRB; | 997 | acb->acb_flags &= ~ACB_F_MSG_START_BGRB; |
987 | writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0); | 998 | writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0); |
988 | 999 | ||
@@ -995,7 +1006,7 @@ static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb) | |||
995 | 1006 | ||
996 | static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb) | 1007 | static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb) |
997 | { | 1008 | { |
998 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 1009 | struct MessageUnit_B *reg = acb->pmuB; |
999 | acb->acb_flags &= ~ACB_F_MSG_START_BGRB; | 1010 | acb->acb_flags &= ~ACB_F_MSG_START_BGRB; |
1000 | writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell_reg); | 1011 | writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell_reg); |
1001 | 1012 | ||
@@ -1023,6 +1034,17 @@ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) | |||
1023 | 1034 | ||
1024 | static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb) | 1035 | static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb) |
1025 | { | 1036 | { |
1037 | switch (acb->adapter_type) { | ||
1038 | case ACB_ADAPTER_TYPE_A: { | ||
1039 | iounmap(acb->pmuA); | ||
1040 | break; | ||
1041 | } | ||
1042 | case ACB_ADAPTER_TYPE_B: { | ||
1043 | struct MessageUnit_B *reg = acb->pmuB; | ||
1044 | iounmap(reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL); | ||
1045 | iounmap(reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER); | ||
1046 | } | ||
1047 | } | ||
1026 | dma_free_coherent(&acb->pdev->dev, | 1048 | dma_free_coherent(&acb->pdev->dev, |
1027 | ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20, | 1049 | ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20, |
1028 | acb->dma_coherent, | 1050 | acb->dma_coherent, |
@@ -1033,13 +1055,13 @@ void arcmsr_iop_message_read(struct AdapterControlBlock *acb) | |||
1033 | { | 1055 | { |
1034 | switch (acb->adapter_type) { | 1056 | switch (acb->adapter_type) { |
1035 | case ACB_ADAPTER_TYPE_A: { | 1057 | case ACB_ADAPTER_TYPE_A: { |
1036 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 1058 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1037 | writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell); | 1059 | writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell); |
1038 | } | 1060 | } |
1039 | break; | 1061 | break; |
1040 | 1062 | ||
1041 | case ACB_ADAPTER_TYPE_B: { | 1063 | case ACB_ADAPTER_TYPE_B: { |
1042 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 1064 | struct MessageUnit_B *reg = acb->pmuB; |
1043 | writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg); | 1065 | writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg); |
1044 | } | 1066 | } |
1045 | break; | 1067 | break; |
@@ -1050,7 +1072,7 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb) | |||
1050 | { | 1072 | { |
1051 | switch (acb->adapter_type) { | 1073 | switch (acb->adapter_type) { |
1052 | case ACB_ADAPTER_TYPE_A: { | 1074 | case ACB_ADAPTER_TYPE_A: { |
1053 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 1075 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1054 | /* | 1076 | /* |
1055 | ** push inbound doorbell tell iop, driver data write ok | 1077 | ** push inbound doorbell tell iop, driver data write ok |
1056 | ** and wait reply on next hwinterrupt for next Qbuffer post | 1078 | ** and wait reply on next hwinterrupt for next Qbuffer post |
@@ -1060,7 +1082,7 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb) | |||
1060 | break; | 1082 | break; |
1061 | 1083 | ||
1062 | case ACB_ADAPTER_TYPE_B: { | 1084 | case ACB_ADAPTER_TYPE_B: { |
1063 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 1085 | struct MessageUnit_B *reg = acb->pmuB; |
1064 | /* | 1086 | /* |
1065 | ** push inbound doorbell tell iop, driver data write ok | 1087 | ** push inbound doorbell tell iop, driver data write ok |
1066 | ** and wait reply on next hwinterrupt for next Qbuffer post | 1088 | ** and wait reply on next hwinterrupt for next Qbuffer post |
@@ -1071,41 +1093,41 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb) | |||
1071 | } | 1093 | } |
1072 | } | 1094 | } |
1073 | 1095 | ||
1074 | struct QBUFFER *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb) | 1096 | struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb) |
1075 | { | 1097 | { |
1076 | static struct QBUFFER *qbuffer; | 1098 | struct QBUFFER __iomem *qbuffer = NULL; |
1077 | 1099 | ||
1078 | switch (acb->adapter_type) { | 1100 | switch (acb->adapter_type) { |
1079 | 1101 | ||
1080 | case ACB_ADAPTER_TYPE_A: { | 1102 | case ACB_ADAPTER_TYPE_A: { |
1081 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 1103 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1082 | qbuffer = (struct QBUFFER __iomem *) &reg->message_rbuffer; | 1104 | qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer; |
1083 | } | 1105 | } |
1084 | break; | 1106 | break; |
1085 | 1107 | ||
1086 | case ACB_ADAPTER_TYPE_B: { | 1108 | case ACB_ADAPTER_TYPE_B: { |
1087 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 1109 | struct MessageUnit_B *reg = acb->pmuB; |
1088 | qbuffer = (struct QBUFFER __iomem *) reg->ioctl_rbuffer_reg; | 1110 | qbuffer = (struct QBUFFER __iomem *)reg->ioctl_rbuffer_reg; |
1089 | } | 1111 | } |
1090 | break; | 1112 | break; |
1091 | } | 1113 | } |
1092 | return qbuffer; | 1114 | return qbuffer; |
1093 | } | 1115 | } |
1094 | 1116 | ||
1095 | static struct QBUFFER *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb) | 1117 | static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb) |
1096 | { | 1118 | { |
1097 | static struct QBUFFER *pqbuffer; | 1119 | struct QBUFFER __iomem *pqbuffer = NULL; |
1098 | 1120 | ||
1099 | switch (acb->adapter_type) { | 1121 | switch (acb->adapter_type) { |
1100 | 1122 | ||
1101 | case ACB_ADAPTER_TYPE_A: { | 1123 | case ACB_ADAPTER_TYPE_A: { |
1102 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 1124 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1103 | pqbuffer = (struct QBUFFER *) &reg->message_wbuffer; | 1125 | pqbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer; |
1104 | } | 1126 | } |
1105 | break; | 1127 | break; |
1106 | 1128 | ||
1107 | case ACB_ADAPTER_TYPE_B: { | 1129 | case ACB_ADAPTER_TYPE_B: { |
1108 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 1130 | struct MessageUnit_B *reg = acb->pmuB; |
1109 | pqbuffer = (struct QBUFFER __iomem *)reg->ioctl_wbuffer_reg; | 1131 | pqbuffer = (struct QBUFFER __iomem *)reg->ioctl_wbuffer_reg; |
1110 | } | 1132 | } |
1111 | break; | 1133 | break; |
@@ -1115,15 +1137,15 @@ static struct QBUFFER *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb) | |||
1115 | 1137 | ||
1116 | static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb) | 1138 | static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb) |
1117 | { | 1139 | { |
1118 | struct QBUFFER *prbuffer; | 1140 | struct QBUFFER __iomem *prbuffer; |
1119 | struct QBUFFER *pQbuffer; | 1141 | struct QBUFFER *pQbuffer; |
1120 | uint8_t *iop_data; | 1142 | uint8_t __iomem *iop_data; |
1121 | int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex; | 1143 | int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex; |
1122 | 1144 | ||
1123 | rqbuf_lastindex = acb->rqbuf_lastindex; | 1145 | rqbuf_lastindex = acb->rqbuf_lastindex; |
1124 | rqbuf_firstindex = acb->rqbuf_firstindex; | 1146 | rqbuf_firstindex = acb->rqbuf_firstindex; |
1125 | prbuffer = arcmsr_get_iop_rqbuffer(acb); | 1147 | prbuffer = arcmsr_get_iop_rqbuffer(acb); |
1126 | iop_data = (uint8_t *)prbuffer->data; | 1148 | iop_data = (uint8_t __iomem *)prbuffer->data; |
1127 | iop_len = prbuffer->data_len; | 1149 | iop_len = prbuffer->data_len; |
1128 | my_empty_len = (rqbuf_firstindex - rqbuf_lastindex -1)&(ARCMSR_MAX_QBUFFER -1); | 1150 | my_empty_len = (rqbuf_firstindex - rqbuf_lastindex -1)&(ARCMSR_MAX_QBUFFER -1); |
1129 | 1151 | ||
@@ -1151,8 +1173,8 @@ static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb) | |||
1151 | acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED; | 1173 | acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED; |
1152 | if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) { | 1174 | if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) { |
1153 | uint8_t *pQbuffer; | 1175 | uint8_t *pQbuffer; |
1154 | struct QBUFFER *pwbuffer; | 1176 | struct QBUFFER __iomem *pwbuffer; |
1155 | uint8_t *iop_data; | 1177 | uint8_t __iomem *iop_data; |
1156 | int32_t allxfer_len = 0; | 1178 | int32_t allxfer_len = 0; |
1157 | 1179 | ||
1158 | acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); | 1180 | acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); |
@@ -1181,7 +1203,7 @@ static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb) | |||
1181 | static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb) | 1203 | static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb) |
1182 | { | 1204 | { |
1183 | uint32_t outbound_doorbell; | 1205 | uint32_t outbound_doorbell; |
1184 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 1206 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1185 | 1207 | ||
1186 | outbound_doorbell = readl(&reg->outbound_doorbell); | 1208 | outbound_doorbell = readl(&reg->outbound_doorbell); |
1187 | writel(outbound_doorbell, &reg->outbound_doorbell); | 1209 | writel(outbound_doorbell, &reg->outbound_doorbell); |
@@ -1197,7 +1219,7 @@ static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb) | |||
1197 | static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb) | 1219 | static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb) |
1198 | { | 1220 | { |
1199 | uint32_t flag_ccb; | 1221 | uint32_t flag_ccb; |
1200 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 1222 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1201 | 1223 | ||
1202 | while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) { | 1224 | while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) { |
1203 | arcmsr_drain_donequeue(acb, flag_ccb); | 1225 | arcmsr_drain_donequeue(acb, flag_ccb); |
@@ -1208,7 +1230,7 @@ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb) | |||
1208 | { | 1230 | { |
1209 | uint32_t index; | 1231 | uint32_t index; |
1210 | uint32_t flag_ccb; | 1232 | uint32_t flag_ccb; |
1211 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 1233 | struct MessageUnit_B *reg = acb->pmuB; |
1212 | 1234 | ||
1213 | index = reg->doneq_index; | 1235 | index = reg->doneq_index; |
1214 | 1236 | ||
@@ -1224,7 +1246,7 @@ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb) | |||
1224 | static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) | 1246 | static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) |
1225 | { | 1247 | { |
1226 | uint32_t outbound_intstatus; | 1248 | uint32_t outbound_intstatus; |
1227 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 1249 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1228 | 1250 | ||
1229 | outbound_intstatus = readl(&reg->outbound_intstatus) & \ | 1251 | outbound_intstatus = readl(&reg->outbound_intstatus) & \ |
1230 | acb->outbound_int_enable; | 1252 | acb->outbound_int_enable; |
@@ -1244,7 +1266,7 @@ static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) | |||
1244 | static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb) | 1266 | static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb) |
1245 | { | 1267 | { |
1246 | uint32_t outbound_doorbell; | 1268 | uint32_t outbound_doorbell; |
1247 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 1269 | struct MessageUnit_B *reg = acb->pmuB; |
1248 | 1270 | ||
1249 | outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & \ | 1271 | outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & \ |
1250 | acb->outbound_int_enable; | 1272 | acb->outbound_int_enable; |
@@ -1305,8 +1327,8 @@ void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb) | |||
1305 | { | 1327 | { |
1306 | int32_t wqbuf_firstindex, wqbuf_lastindex; | 1328 | int32_t wqbuf_firstindex, wqbuf_lastindex; |
1307 | uint8_t *pQbuffer; | 1329 | uint8_t *pQbuffer; |
1308 | struct QBUFFER *pwbuffer; | 1330 | struct QBUFFER __iomem *pwbuffer; |
1309 | uint8_t *iop_data; | 1331 | uint8_t __iomem *iop_data; |
1310 | int32_t allxfer_len = 0; | 1332 | int32_t allxfer_len = 0; |
1311 | 1333 | ||
1312 | pwbuffer = arcmsr_get_iop_wqbuffer(acb); | 1334 | pwbuffer = arcmsr_get_iop_wqbuffer(acb); |
@@ -1380,13 +1402,13 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ | |||
1380 | } | 1402 | } |
1381 | if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { | 1403 | if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { |
1382 | 1404 | ||
1383 | struct QBUFFER *prbuffer; | 1405 | struct QBUFFER __iomem *prbuffer; |
1384 | uint8_t *iop_data; | 1406 | uint8_t __iomem *iop_data; |
1385 | int32_t iop_len; | 1407 | int32_t iop_len; |
1386 | 1408 | ||
1387 | acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; | 1409 | acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; |
1388 | prbuffer = arcmsr_get_iop_rqbuffer(acb); | 1410 | prbuffer = arcmsr_get_iop_rqbuffer(acb); |
1389 | iop_data = (uint8_t *)prbuffer->data; | 1411 | iop_data = prbuffer->data; |
1390 | iop_len = readl(&prbuffer->data_len); | 1412 | iop_len = readl(&prbuffer->data_len); |
1391 | while (iop_len > 0) { | 1413 | while (iop_len > 0) { |
1392 | acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data); | 1414 | acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data); |
@@ -1669,11 +1691,11 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd, | |||
1669 | 1691 | ||
1670 | static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) | 1692 | static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) |
1671 | { | 1693 | { |
1672 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 1694 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1673 | char *acb_firm_model = acb->firm_model; | 1695 | char *acb_firm_model = acb->firm_model; |
1674 | char *acb_firm_version = acb->firm_version; | 1696 | char *acb_firm_version = acb->firm_version; |
1675 | char *iop_firm_model = (char *) (&reg->message_rwbuffer[15]); | 1697 | char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]); |
1676 | char *iop_firm_version = (char *) (&reg->message_rwbuffer[17]); | 1698 | char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]); |
1677 | int count; | 1699 | int count; |
1678 | 1700 | ||
1679 | writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); | 1701 | writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); |
@@ -1710,13 +1732,13 @@ static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) | |||
1710 | 1732 | ||
1711 | static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) | 1733 | static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) |
1712 | { | 1734 | { |
1713 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 1735 | struct MessageUnit_B *reg = acb->pmuB; |
1714 | uint32_t *lrwbuffer = reg->msgcode_rwbuffer_reg; | 1736 | uint32_t __iomem *lrwbuffer = reg->msgcode_rwbuffer_reg; |
1715 | char *acb_firm_model = acb->firm_model; | 1737 | char *acb_firm_model = acb->firm_model; |
1716 | char *acb_firm_version = acb->firm_version; | 1738 | char *acb_firm_version = acb->firm_version; |
1717 | char *iop_firm_model = (char *) (&lrwbuffer[15]); | 1739 | char __iomem *iop_firm_model = (char __iomem *)(&lrwbuffer[15]); |
1718 | /*firm_model,15,60-67*/ | 1740 | /*firm_model,15,60-67*/ |
1719 | char *iop_firm_version = (char *) (&lrwbuffer[17]); | 1741 | char __iomem *iop_firm_version = (char __iomem *)(&lrwbuffer[17]); |
1720 | /*firm_version,17,68-83*/ | 1742 | /*firm_version,17,68-83*/ |
1721 | int count; | 1743 | int count; |
1722 | 1744 | ||
@@ -1777,7 +1799,7 @@ static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) | |||
1777 | static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb, | 1799 | static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb, |
1778 | struct CommandControlBlock *poll_ccb) | 1800 | struct CommandControlBlock *poll_ccb) |
1779 | { | 1801 | { |
1780 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 1802 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1781 | struct CommandControlBlock *ccb; | 1803 | struct CommandControlBlock *ccb; |
1782 | uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0; | 1804 | uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0; |
1783 | 1805 | ||
@@ -1826,7 +1848,7 @@ static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb, | |||
1826 | static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \ | 1848 | static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \ |
1827 | struct CommandControlBlock *poll_ccb) | 1849 | struct CommandControlBlock *poll_ccb) |
1828 | { | 1850 | { |
1829 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 1851 | struct MessageUnit_B *reg = acb->pmuB; |
1830 | struct CommandControlBlock *ccb; | 1852 | struct CommandControlBlock *ccb; |
1831 | uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0; | 1853 | uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0; |
1832 | int index; | 1854 | int index; |
@@ -1918,8 +1940,7 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) | |||
1918 | 1940 | ||
1919 | case ACB_ADAPTER_TYPE_A: { | 1941 | case ACB_ADAPTER_TYPE_A: { |
1920 | if (ccb_phyaddr_hi32 != 0) { | 1942 | if (ccb_phyaddr_hi32 != 0) { |
1921 | struct MessageUnit_A __iomem *reg = \ | 1943 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1922 | (struct MessageUnit_A *)acb->pmu; | ||
1923 | uint32_t intmask_org; | 1944 | uint32_t intmask_org; |
1924 | intmask_org = arcmsr_disable_outbound_ints(acb); | 1945 | intmask_org = arcmsr_disable_outbound_ints(acb); |
1925 | writel(ARCMSR_SIGNATURE_SET_CONFIG, \ | 1946 | writel(ARCMSR_SIGNATURE_SET_CONFIG, \ |
@@ -1940,9 +1961,9 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) | |||
1940 | 1961 | ||
1941 | case ACB_ADAPTER_TYPE_B: { | 1962 | case ACB_ADAPTER_TYPE_B: { |
1942 | unsigned long post_queue_phyaddr; | 1963 | unsigned long post_queue_phyaddr; |
1943 | uint32_t *rwbuffer; | 1964 | uint32_t __iomem *rwbuffer; |
1944 | 1965 | ||
1945 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 1966 | struct MessageUnit_B *reg = acb->pmuB; |
1946 | uint32_t intmask_org; | 1967 | uint32_t intmask_org; |
1947 | intmask_org = arcmsr_disable_outbound_ints(acb); | 1968 | intmask_org = arcmsr_disable_outbound_ints(acb); |
1948 | reg->postq_index = 0; | 1969 | reg->postq_index = 0; |
@@ -1994,7 +2015,7 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb) | |||
1994 | switch (acb->adapter_type) { | 2015 | switch (acb->adapter_type) { |
1995 | 2016 | ||
1996 | case ACB_ADAPTER_TYPE_A: { | 2017 | case ACB_ADAPTER_TYPE_A: { |
1997 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 2018 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1998 | do { | 2019 | do { |
1999 | firmware_state = readl(®->outbound_msgaddr1); | 2020 | firmware_state = readl(®->outbound_msgaddr1); |
2000 | } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0); | 2021 | } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0); |
@@ -2002,7 +2023,7 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb) | |||
2002 | break; | 2023 | break; |
2003 | 2024 | ||
2004 | case ACB_ADAPTER_TYPE_B: { | 2025 | case ACB_ADAPTER_TYPE_B: { |
2005 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 2026 | struct MessageUnit_B *reg = acb->pmuB; |
2006 | do { | 2027 | do { |
2007 | firmware_state = readl(reg->iop2drv_doorbell_reg); | 2028 | firmware_state = readl(reg->iop2drv_doorbell_reg); |
2008 | } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0); | 2029 | } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0); |
@@ -2013,7 +2034,7 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb) | |||
2013 | 2034 | ||
2014 | static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) | 2035 | static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) |
2015 | { | 2036 | { |
2016 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 2037 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
2017 | acb->acb_flags |= ACB_F_MSG_START_BGRB; | 2038 | acb->acb_flags |= ACB_F_MSG_START_BGRB; |
2018 | writel(ARCMSR_INBOUND_MESG0_START_BGRB, ®->inbound_msgaddr0); | 2039 | writel(ARCMSR_INBOUND_MESG0_START_BGRB, ®->inbound_msgaddr0); |
2019 | if (arcmsr_hba_wait_msgint_ready(acb)) { | 2040 | if (arcmsr_hba_wait_msgint_ready(acb)) { |
@@ -2024,7 +2045,7 @@ static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) | |||
2024 | 2045 | ||
2025 | static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb) | 2046 | static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb) |
2026 | { | 2047 | { |
2027 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 2048 | struct MessageUnit_B *reg = acb->pmuB; |
2028 | acb->acb_flags |= ACB_F_MSG_START_BGRB; | 2049 | acb->acb_flags |= ACB_F_MSG_START_BGRB; |
2029 | writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell_reg); | 2050 | writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell_reg); |
2030 | if (arcmsr_hbb_wait_msgint_ready(acb)) { | 2051 | if (arcmsr_hbb_wait_msgint_ready(acb)) { |
@@ -2049,7 +2070,7 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb) | |||
2049 | { | 2070 | { |
2050 | switch (acb->adapter_type) { | 2071 | switch (acb->adapter_type) { |
2051 | case ACB_ADAPTER_TYPE_A: { | 2072 | case ACB_ADAPTER_TYPE_A: { |
2052 | struct MessageUnit_A *reg = (struct MessageUnit_A *)acb->pmu; | 2073 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
2053 | uint32_t outbound_doorbell; | 2074 | uint32_t outbound_doorbell; |
2054 | /* empty doorbell Qbuffer if door bell ringed */ | 2075 | /* empty doorbell Qbuffer if door bell ringed */ |
2055 | outbound_doorbell = readl(&reg->outbound_doorbell); | 2076 | outbound_doorbell = readl(&reg->outbound_doorbell); |
@@ -2060,7 +2081,7 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb) | |||
2060 | break; | 2081 | break; |
2061 | 2082 | ||
2062 | case ACB_ADAPTER_TYPE_B: { | 2083 | case ACB_ADAPTER_TYPE_B: { |
2063 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 2084 | struct MessageUnit_B *reg = acb->pmuB; |
2064 | /*clear interrupt and message state*/ | 2085 | /*clear interrupt and message state*/ |
2065 | writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg); | 2086 | writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg); |
2066 | writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg); | 2087 | writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg); |
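Every arcmsr hunk above is the same conversion: the old code kept one untyped acb->pmu pointer and cast it at each use (quietly discarding the __iomem qualifier for type A adapters), while the new code carries separate, correctly typed pmuA/pmuB fields. A minimal sketch of the idea; the struct layout, register name and command value below are illustrative, not the real driver's:

    #include <linux/types.h>
    #include <linux/io.h>

    #define DEMO_START_BGRB_CMD 0x08          /* placeholder value */

    struct MessageUnit_A {
            u32 inbound_msgaddr0;             /* MMIO register */
    };

    struct MessageUnit_B;                     /* sits in DMA-coherent memory */

    struct AdapterControlBlock {
            int adapter_type;
            struct MessageUnit_A __iomem *pmuA;   /* keeps the __iomem annotation */
            struct MessageUnit_B *pmuB;
    };

    static void demo_start_bgrb(struct AdapterControlBlock *acb)
    {
            /* No cast needed, and sparse can now check the address space. */
            struct MessageUnit_A __iomem *reg = acb->pmuA;

            writel(DEMO_START_BGRB_CMD, &reg->inbound_msgaddr0);
    }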
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c index d1780980fb20..a9680b5e8ac6 100644 --- a/drivers/scsi/atari_NCR5380.c +++ b/drivers/scsi/atari_NCR5380.c | |||
@@ -477,10 +477,9 @@ static void merge_contiguous_buffers(Scsi_Cmnd *cmd) | |||
477 | 477 | ||
478 | for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1; | 478 | for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1; |
479 | cmd->SCp.buffers_residual && | 479 | cmd->SCp.buffers_residual && |
480 | virt_to_phys(page_address(cmd->SCp.buffer[1].page) + | 480 | virt_to_phys(sg_virt(&cmd->SCp.buffer[1])) == endaddr;) { |
481 | cmd->SCp.buffer[1].offset) == endaddr;) { | ||
482 | MER_PRINTK("VTOP(%p) == %08lx -> merging\n", | 481 | MER_PRINTK("VTOP(%p) == %08lx -> merging\n", |
483 | page_address(cmd->SCp.buffer[1].page), endaddr); | 482 | page_address(sg_page(&cmd->SCp.buffer[1])), endaddr); |
484 | #if (NDEBUG & NDEBUG_MERGING) | 483 | #if (NDEBUG & NDEBUG_MERGING) |
485 | ++cnt; | 484 | ++cnt; |
486 | #endif | 485 | #endif |
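The NCR5380 change swaps the open-coded page_address(...) + offset for the sg_virt() helper that came in with the scatterlist rework. Assuming a lowmem page (so page_address() is valid), the helper amounts to the following; this is an illustrative re-derivation, the real one lives in <linux/scatterlist.h>:

    #include <linux/mm.h>
    #include <linux/scatterlist.h>

    /* What sg_virt(sg) boils down to for a lowmem page. */
    static inline void *demo_sg_virt(struct scatterlist *sg)
    {
            return page_address(sg_page(sg)) + sg->offset;
    }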
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 439b97a6a269..0841df01bc19 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
@@ -2890,7 +2890,7 @@ static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len) | |||
2890 | return NULL; | 2890 | return NULL; |
2891 | } | 2891 | } |
2892 | 2892 | ||
2893 | sg_set_page(&scatterlist[i], page); | 2893 | sg_set_page(&scatterlist[i], page, 0, 0); |
2894 | } | 2894 | } |
2895 | 2895 | ||
2896 | return sglist; | 2896 | return sglist; |
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 6ce4109efdf3..4bcf916c21a7 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c | |||
@@ -79,9 +79,7 @@ static inline void | |||
79 | iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg) | 79 | iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg) |
80 | { | 80 | { |
81 | sg_init_table(&ibuf->sg, 1); | 81 | sg_init_table(&ibuf->sg, 1); |
82 | sg_set_page(&ibuf->sg, sg_page(sg)); | 82 | sg_set_page(&ibuf->sg, sg_page(sg), sg->length, sg->offset); |
83 | ibuf->sg.offset = sg->offset; | ||
84 | ibuf->sg.length = sg->length; | ||
85 | /* | 83 | /* |
86 | * Fastpath: sg element fits into single page | 84 | * Fastpath: sg element fits into single page |
87 | */ | 85 | */ |
@@ -676,9 +674,8 @@ partial_sg_digest_update(struct hash_desc *desc, struct scatterlist *sg, | |||
676 | { | 674 | { |
677 | struct scatterlist temp; | 675 | struct scatterlist temp; |
678 | 676 | ||
679 | memcpy(&temp, sg, sizeof(struct scatterlist)); | 677 | sg_init_table(&temp, 1); |
680 | temp.offset = offset; | 678 | sg_set_page(&temp, sg_page(sg), length, offset); |
681 | temp.length = length; | ||
682 | crypto_hash_update(desc, &temp, length); | 679 | crypto_hash_update(desc, &temp, length); |
683 | } | 680 | } |
684 | 681 | ||
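The ipr and iscsi_tcp hunks (and the osst, sg and st ones that follow) are one mechanical conversion: instead of poking page, offset and length into a scatterlist entry by hand, or memcpy'ing a whole entry, the entry is initialised with sg_init_table() and filled through the four-argument sg_set_page(). A hedged before/after sketch:

    #include <linux/scatterlist.h>

    static void demo_fill_one_entry(struct scatterlist *sg, struct page *page,
                                    unsigned int len, unsigned int off)
    {
            /*
             * Old style - direct field assignment, which breaks once the
             * scatterlist entry carries chaining state:
             *
             *      sg->page   = page;
             *      sg->offset = off;
             *      sg->length = len;
             */

            /* New style - let the API keep the entry consistent. */
            sg_init_table(sg, 1);                 /* zero it and mark the end */
            sg_set_page(sg, page, len, off);
    }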
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index 1c5c4b68f20f..4652ad22516b 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c | |||
@@ -5256,8 +5256,7 @@ static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma) | |||
5256 | 5256 | ||
5257 | STbuffer->sg[0].offset = 0; | 5257 | STbuffer->sg[0].offset = 0; |
5258 | if (page != NULL) { | 5258 | if (page != NULL) { |
5259 | sg_set_page(&STbuffer->sg[0], page); | 5259 | sg_set_page(&STbuffer->sg[0], page, b_size, 0); |
5260 | STbuffer->sg[0].length = b_size; | ||
5261 | STbuffer->b_data = page_address(page); | 5260 | STbuffer->b_data = page_address(page); |
5262 | break; | 5261 | break; |
5263 | } | 5262 | } |
@@ -5285,8 +5284,7 @@ static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma) | |||
5285 | normalize_buffer(STbuffer); | 5284 | normalize_buffer(STbuffer); |
5286 | return 0; | 5285 | return 0; |
5287 | } | 5286 | } |
5288 | sg_set_page(&STbuffer->sg[segs], page); | 5287 | sg_set_page(&STbuffer->sg[segs], page, (OS_FRAME_SIZE - got <= PAGE_SIZE / 2) ? (OS_FRAME_SIZE - got) : b_size, 0); |
5289 | STbuffer->sg[segs].length = (OS_FRAME_SIZE - got <= PAGE_SIZE / 2) ? (OS_FRAME_SIZE - got) : b_size; | ||
5290 | got += STbuffer->sg[segs].length; | 5288 | got += STbuffer->sg[segs].length; |
5291 | STbuffer->buffer_size = got; | 5289 | STbuffer->buffer_size = got; |
5292 | STbuffer->sg_segs = ++segs; | 5290 | STbuffer->sg_segs = ++segs; |
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index cc1971002846..f1871ea04045 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -1652,6 +1652,7 @@ sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize) | |||
1652 | schp->buffer = kzalloc(sg_bufflen, gfp_flags); | 1652 | schp->buffer = kzalloc(sg_bufflen, gfp_flags); |
1653 | if (!schp->buffer) | 1653 | if (!schp->buffer) |
1654 | return -ENOMEM; | 1654 | return -ENOMEM; |
1655 | sg_init_table(schp->buffer, tablesize); | ||
1655 | schp->sglist_len = sg_bufflen; | 1656 | schp->sglist_len = sg_bufflen; |
1656 | return tablesize; /* number of scat_gath elements allocated */ | 1657 | return tablesize; /* number of scat_gath elements allocated */ |
1657 | } | 1658 | } |
@@ -1717,16 +1718,12 @@ st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages, | |||
1717 | goto out_unlock; */ | 1718 | goto out_unlock; */ |
1718 | } | 1719 | } |
1719 | 1720 | ||
1720 | sg_set_page(sgl, pages[0]); | 1721 | sg_set_page(sgl, pages[0], 0, uaddr & ~PAGE_MASK); |
1721 | sgl[0].offset = uaddr & ~PAGE_MASK; | ||
1722 | if (nr_pages > 1) { | 1722 | if (nr_pages > 1) { |
1723 | sgl[0].length = PAGE_SIZE - sgl[0].offset; | 1723 | sgl[0].length = PAGE_SIZE - sgl[0].offset; |
1724 | count -= sgl[0].length; | 1724 | count -= sgl[0].length; |
1725 | for (i=1; i < nr_pages ; i++) { | 1725 | for (i=1; i < nr_pages ; i++) |
1726 | sg_set_page(&sgl[i], pages[i]); | 1726 | sg_set_page(&sgl[i], pages[i], count < PAGE_SIZE ? count : PAGE_SIZE, 0); |
1727 | sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE; | ||
1728 | count -= PAGE_SIZE; | ||
1729 | } | ||
1730 | } | 1727 | } |
1731 | else { | 1728 | else { |
1732 | sgl[0].length = count; | 1729 | sgl[0].length = count; |
@@ -1854,8 +1851,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) | |||
1854 | scatter_elem_sz_prev = ret_sz; | 1851 | scatter_elem_sz_prev = ret_sz; |
1855 | } | 1852 | } |
1856 | } | 1853 | } |
1857 | sg_set_page(sg, p); | 1854 | sg_set_page(sg, p, (ret_sz > num) ? num : ret_sz, 0); |
1858 | sg->length = (ret_sz > num) ? num : ret_sz; | ||
1859 | 1855 | ||
1860 | SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, " | 1856 | SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, " |
1861 | "ret_sz=%d\n", k, num, ret_sz)); | 1857 | "ret_sz=%d\n", k, num, ret_sz)); |
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index ce69b9efc102..98dfd6ea209c 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
@@ -3797,13 +3797,11 @@ static void buf_to_sg(struct st_buffer *STbp, unsigned int length) | |||
3797 | sg = &(STbp->sg[0]); | 3797 | sg = &(STbp->sg[0]); |
3798 | frp = STbp->frp; | 3798 | frp = STbp->frp; |
3799 | for (i=count=0; count < length; i++) { | 3799 | for (i=count=0; count < length; i++) { |
3800 | sg_set_page(&sg[i], frp[i].page); | ||
3801 | if (length - count > frp[i].length) | 3800 | if (length - count > frp[i].length) |
3802 | sg[i].length = frp[i].length; | 3801 | sg_set_page(&sg[i], frp[i].page, frp[i].length, 0); |
3803 | else | 3802 | else |
3804 | sg[i].length = length - count; | 3803 | sg_set_page(&sg[i], frp[i].page, length - count, 0); |
3805 | count += sg[i].length; | 3804 | count += sg[i].length; |
3806 | sg[i].offset = 0; | ||
3807 | } | 3805 | } |
3808 | STbp->sg_segs = i; | 3806 | STbp->sg_segs = i; |
3809 | STbp->frp_sg_current = length; | 3807 | STbp->frp_sg_current = length; |
@@ -4446,15 +4444,13 @@ static int sgl_map_user_pages(struct scatterlist *sgl, const unsigned int max_pa | |||
4446 | } | 4444 | } |
4447 | 4445 | ||
4448 | /* Populate the scatter/gather list */ | 4446 | /* Populate the scatter/gather list */ |
4449 | sg_set_page(&sgl[0], pages[0]); | 4447 | sg_set_page(&sgl[0], pages[0], 0, uaddr & ~PAGE_MASK); |
4450 | sgl[0].offset = uaddr & ~PAGE_MASK; | ||
4451 | if (nr_pages > 1) { | 4448 | if (nr_pages > 1) { |
4452 | sgl[0].length = PAGE_SIZE - sgl[0].offset; | 4449 | sgl[0].length = PAGE_SIZE - sgl[0].offset; |
4453 | count -= sgl[0].length; | 4450 | count -= sgl[0].length; |
4454 | for (i=1; i < nr_pages ; i++) { | 4451 | for (i=1; i < nr_pages ; i++) { |
4455 | sg_set_page(&sgl[i], pages[i]);; | 4452 | sg_set_page(&sgl[i], pages[i], |
4456 | sgl[i].offset = 0; | 4453 | count < PAGE_SIZE ? count : PAGE_SIZE, 0);; |
4457 | sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE; | ||
4458 | count -= PAGE_SIZE; | 4454 | count -= PAGE_SIZE; |
4459 | } | 4455 | } |
4460 | } | 4456 | } |
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c index 80fb3f88af2e..1bc41907a038 100644 --- a/drivers/scsi/sun3x_esp.c +++ b/drivers/scsi/sun3x_esp.c | |||
@@ -332,8 +332,8 @@ static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp) | |||
332 | struct scatterlist *sg = sp->SCp.buffer; | 332 | struct scatterlist *sg = sp->SCp.buffer; |
333 | 333 | ||
334 | while (sz >= 0) { | 334 | while (sz >= 0) { |
335 | sg[sz].dma_address = dvma_map((unsigned long)page_address(sg[sz].page) + | 335 | sg[sz].dma_address = dvma_map((unsigned long)sg_virt(&sg[sz]), |
336 | sg[sz].offset, sg[sz].length); | 336 | sg[sz].length); |
337 | sz--; | 337 | sz--; |
338 | } | 338 | } |
339 | sp->SCp.ptr=(char *)((unsigned long)sp->SCp.buffer->dma_address); | 339 | sp->SCp.ptr=(char *)((unsigned long)sp->SCp.buffer->dma_address); |
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index 103189095c80..3bb5d241dd40 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c | |||
@@ -1875,6 +1875,7 @@ uart_set_options(struct uart_port *port, struct console *co, | |||
1875 | int baud, int parity, int bits, int flow) | 1875 | int baud, int parity, int bits, int flow) |
1876 | { | 1876 | { |
1877 | struct ktermios termios; | 1877 | struct ktermios termios; |
1878 | static struct ktermios dummy; | ||
1878 | int i; | 1879 | int i; |
1879 | 1880 | ||
1880 | /* | 1881 | /* |
@@ -1920,7 +1921,7 @@ uart_set_options(struct uart_port *port, struct console *co, | |||
1920 | */ | 1921 | */ |
1921 | port->mctrl |= TIOCM_DTR; | 1922 | port->mctrl |= TIOCM_DTR; |
1922 | 1923 | ||
1923 | port->ops->set_termios(port, &termios, NULL); | 1924 | port->ops->set_termios(port, &termios, &dummy); |
1924 | co->cflag = termios.c_cflag; | 1925 | co->cflag = termios.c_cflag; |
1925 | 1926 | ||
1926 | return 0; | 1927 | return 0; |
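The serial_core fix stops handing a NULL old termios to a driver's set_termios() method; some drivers dereference it unconditionally, so a static zeroed ktermios stands in instead. Sketch of the call site, with a made-up console-setup function name:

    #include <linux/serial_core.h>

    /* Hypothetical console setup path. 'dummy' is zero-initialised in .bss,
     * so drivers that read the old termios see harmless zeroes instead of
     * dereferencing NULL. */
    static void demo_console_setup(struct uart_port *port, struct ktermios *termios)
    {
            static struct ktermios dummy;

            port->ops->set_termios(port, termios, &dummy);
    }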
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c index 5afcb2fa7cd3..d8b660061c13 100644 --- a/drivers/serial/serial_cs.c +++ b/drivers/serial/serial_cs.c | |||
@@ -345,7 +345,7 @@ static int serial_probe(struct pcmcia_device *link) | |||
345 | 345 | ||
346 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; | 346 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; |
347 | link->io.NumPorts1 = 8; | 347 | link->io.NumPorts1 = 8; |
348 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; | 348 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
349 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | 349 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; |
350 | link->conf.Attributes = CONF_ENABLE_IRQ; | 350 | link->conf.Attributes = CONF_ENABLE_IRQ; |
351 | if (do_sound) { | 351 | if (do_sound) { |
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index c55459c592b8..b3518ca9f04e 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c | |||
@@ -184,14 +184,14 @@ static int spidev_message(struct spidev_data *spidev, | |||
184 | if (u_tmp->rx_buf) { | 184 | if (u_tmp->rx_buf) { |
185 | k_tmp->rx_buf = buf; | 185 | k_tmp->rx_buf = buf; |
186 | if (!access_ok(VERIFY_WRITE, (u8 __user *) | 186 | if (!access_ok(VERIFY_WRITE, (u8 __user *) |
187 | (ptrdiff_t) u_tmp->rx_buf, | 187 | (uintptr_t) u_tmp->rx_buf, |
188 | u_tmp->len)) | 188 | u_tmp->len)) |
189 | goto done; | 189 | goto done; |
190 | } | 190 | } |
191 | if (u_tmp->tx_buf) { | 191 | if (u_tmp->tx_buf) { |
192 | k_tmp->tx_buf = buf; | 192 | k_tmp->tx_buf = buf; |
193 | if (copy_from_user(buf, (const u8 __user *) | 193 | if (copy_from_user(buf, (const u8 __user *) |
194 | (ptrdiff_t) u_tmp->tx_buf, | 194 | (uintptr_t) u_tmp->tx_buf, |
195 | u_tmp->len)) | 195 | u_tmp->len)) |
196 | goto done; | 196 | goto done; |
197 | } | 197 | } |
@@ -224,7 +224,7 @@ static int spidev_message(struct spidev_data *spidev, | |||
224 | for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) { | 224 | for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) { |
225 | if (u_tmp->rx_buf) { | 225 | if (u_tmp->rx_buf) { |
226 | if (__copy_to_user((u8 __user *) | 226 | if (__copy_to_user((u8 __user *) |
227 | (ptrdiff_t) u_tmp->rx_buf, buf, | 227 | (uintptr_t) u_tmp->rx_buf, buf, |
228 | u_tmp->len)) { | 228 | u_tmp->len)) { |
229 | status = -EFAULT; | 229 | status = -EFAULT; |
230 | goto done; | 230 | goto done; |
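spidev carries user buffer addresses as integers in its ioctl ABI, so getting back to a __user pointer is an integer-to-pointer cast. uintptr_t is the integer type defined to round-trip a pointer; ptrdiff_t merely happens to be wide enough on some architectures. A standalone sketch of the pattern, with an illustrative struct:

    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct demo_xfer {
            __u64 rx_buf;                     /* user-space address as an integer */
            __u32 len;
    };

    static int demo_copy_out(const struct demo_xfer *x, const void *kbuf)
    {
            void __user *dst = (void __user *)(uintptr_t)x->rx_buf;

            if (copy_to_user(dst, kbuf, x->len))
                    return -EFAULT;
            return 0;
    }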
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 8bdaa157ffe7..eb4ac47612a5 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c | |||
@@ -1641,7 +1641,13 @@ free_interfaces: | |||
1641 | intf->dev.bus_id, ret); | 1641 | intf->dev.bus_id, ret); |
1642 | continue; | 1642 | continue; |
1643 | } | 1643 | } |
1644 | usb_create_sysfs_intf_files (intf); | 1644 | |
1645 | /* The driver's probe method can call usb_set_interface(), | ||
1646 | * which would mean the interface's sysfs files are already | ||
1647 | * created. Just in case, we'll remove them first. | ||
1648 | */ | ||
1649 | usb_remove_sysfs_intf_files(intf); | ||
1650 | usb_create_sysfs_intf_files(intf); | ||
1645 | } | 1651 | } |
1646 | 1652 | ||
1647 | usb_autosuspend_device(dev); | 1653 | usb_autosuspend_device(dev); |
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index c20c03aaf012..d05ead20081c 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c | |||
@@ -372,7 +372,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) | |||
372 | 372 | ||
373 | /* enforce simple/standard policy */ | 373 | /* enforce simple/standard policy */ |
374 | allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP | | 374 | allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP | |
375 | URB_NO_INTERRUPT | URB_DIR_MASK); | 375 | URB_NO_INTERRUPT | URB_DIR_MASK | URB_FREE_BUFFER); |
376 | switch (xfertype) { | 376 | switch (xfertype) { |
377 | case USB_ENDPOINT_XFER_BULK: | 377 | case USB_ENDPOINT_XFER_BULK: |
378 | if (is_out) | 378 | if (is_out) |
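usb_submit_urb() sanitises caller-supplied transfer flags against a whitelist; the one-liner adds URB_FREE_BUFFER to that whitelist so the flag survives submission rather than being masked away (which would leave the transfer buffer unfreed when the URB is released). The masking pattern on its own, as a sketch:

    #include <linux/usb.h>

    static void demo_sanitize_flags(struct urb *urb)
    {
            unsigned int allowed = URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP |
                                   URB_NO_INTERRUPT | URB_DIR_MASK | URB_FREE_BUFFER;

            /* Flags outside the whitelist are silently dropped here. */
            urb->transfer_flags &= allowed;
    }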
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c index 1c8040602525..c72e9620bf8d 100644 --- a/drivers/usb/gadget/amd5536udc.c +++ b/drivers/usb/gadget/amd5536udc.c | |||
@@ -3289,7 +3289,7 @@ static int udc_pci_probe( | |||
3289 | dev->chiprev = pdev->revision; | 3289 | dev->chiprev = pdev->revision; |
3290 | 3290 | ||
3291 | pci_set_master(pdev); | 3291 | pci_set_master(pdev); |
3292 | pci_set_mwi(pdev); | 3292 | pci_try_set_mwi(pdev); |
3293 | 3293 | ||
3294 | /* init dma pools */ | 3294 | /* init dma pools */ |
3295 | if (use_dma) { | 3295 | if (use_dma) { |
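pci_set_mwi() must have its return value checked, but Memory-Write-Invalidate is only a performance hint for this UDC; pci_try_set_mwi() is the best-effort variant meant for exactly that case. Sketch:

    #include <linux/pci.h>

    static void demo_enable_bus_features(struct pci_dev *pdev)
    {
            pci_set_master(pdev);

            /* MWI is an optimisation; if the device or chipset cannot do it,
             * just carry on without it. */
            pci_try_set_mwi(pdev);
    }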
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index c978d622fa8a..177e78ed241b 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig | |||
@@ -156,7 +156,7 @@ config USB_OHCI_HCD_PCI | |||
156 | 156 | ||
157 | config USB_OHCI_HCD_SSB | 157 | config USB_OHCI_HCD_SSB |
158 | bool "OHCI support for Broadcom SSB OHCI core" | 158 | bool "OHCI support for Broadcom SSB OHCI core" |
159 | depends on USB_OHCI_HCD && SSB && EXPERIMENTAL | 159 | depends on USB_OHCI_HCD && (SSB = y || SSB = CONFIG_USB_OHCI_HCD) && EXPERIMENTAL |
160 | default n | 160 | default n |
161 | ---help--- | 161 | ---help--- |
162 | Support for the Sonics Silicon Backplane (SSB) attached | 162 | Support for the Sonics Silicon Backplane (SSB) attached |
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 240c7f507541..704f33fdd2f1 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c | |||
@@ -80,7 +80,10 @@ static const char hcd_name [] = "ohci_hcd"; | |||
80 | static void ohci_dump (struct ohci_hcd *ohci, int verbose); | 80 | static void ohci_dump (struct ohci_hcd *ohci, int verbose); |
81 | static int ohci_init (struct ohci_hcd *ohci); | 81 | static int ohci_init (struct ohci_hcd *ohci); |
82 | static void ohci_stop (struct usb_hcd *hcd); | 82 | static void ohci_stop (struct usb_hcd *hcd); |
83 | |||
84 | #if defined(CONFIG_PM) || defined(CONFIG_PCI) | ||
83 | static int ohci_restart (struct ohci_hcd *ohci); | 85 | static int ohci_restart (struct ohci_hcd *ohci); |
86 | #endif | ||
84 | 87 | ||
85 | #include "ohci-hub.c" | 88 | #include "ohci-hub.c" |
86 | #include "ohci-dbg.c" | 89 | #include "ohci-dbg.c" |
@@ -396,7 +399,7 @@ static int check_ed(struct ohci_hcd *ohci, struct ed *ed) | |||
396 | */ | 399 | */ |
397 | static void unlink_watchdog_func(unsigned long _ohci) | 400 | static void unlink_watchdog_func(unsigned long _ohci) |
398 | { | 401 | { |
399 | long flags; | 402 | unsigned long flags; |
400 | unsigned max; | 403 | unsigned max; |
401 | unsigned seen_count = 0; | 404 | unsigned seen_count = 0; |
402 | unsigned i; | 405 | unsigned i; |
@@ -893,6 +896,8 @@ static void ohci_stop (struct usb_hcd *hcd) | |||
893 | 896 | ||
894 | /*-------------------------------------------------------------------------*/ | 897 | /*-------------------------------------------------------------------------*/ |
895 | 898 | ||
899 | #if defined(CONFIG_PM) || defined(CONFIG_PCI) | ||
900 | |||
896 | /* must not be called from interrupt context */ | 901 | /* must not be called from interrupt context */ |
897 | static int ohci_restart (struct ohci_hcd *ohci) | 902 | static int ohci_restart (struct ohci_hcd *ohci) |
898 | { | 903 | { |
@@ -954,6 +959,8 @@ static int ohci_restart (struct ohci_hcd *ohci) | |||
954 | return 0; | 959 | return 0; |
955 | } | 960 | } |
956 | 961 | ||
962 | #endif | ||
963 | |||
957 | /*-------------------------------------------------------------------------*/ | 964 | /*-------------------------------------------------------------------------*/ |
958 | 965 | ||
959 | #define DRIVER_INFO DRIVER_VERSION " " DRIVER_DESC | 966 | #define DRIVER_INFO DRIVER_VERSION " " DRIVER_DESC |
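ohci_restart() is reachable only from the PM-resume and PCI paths, so configurations with neither would warn about a defined-but-unused static function; the fix wraps the forward declaration and the definition in the same preprocessor condition. (The same file also corrects long flags to unsigned long flags, the type spin_lock_irqsave() expects.) The guard pattern in general, with illustrative names:

    struct demo_hcd;

    #if defined(CONFIG_PM) || defined(CONFIG_PCI)
    static int demo_restart(struct demo_hcd *hcd);
    #endif

    /* ... callers of demo_restart() exist only under CONFIG_PM / CONFIG_PCI ... */

    #if defined(CONFIG_PM) || defined(CONFIG_PCI)
    static int demo_restart(struct demo_hcd *hcd)
    {
            /* reinitialise the controller here */
            return 0;
    }
    #endif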
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c index e5d60d5b105a..60379b17bbc1 100644 --- a/drivers/usb/host/uhci-q.c +++ b/drivers/usb/host/uhci-q.c | |||
@@ -1271,7 +1271,8 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb, | |||
1271 | } else if (qh->period != urb->interval) { | 1271 | } else if (qh->period != urb->interval) { |
1272 | return -EINVAL; /* Can't change the period */ | 1272 | return -EINVAL; /* Can't change the period */ |
1273 | 1273 | ||
1274 | } else { /* Pick up where the last URB leaves off */ | 1274 | } else { |
1275 | /* Find the next unused frame */ | ||
1275 | if (list_empty(&qh->queue)) { | 1276 | if (list_empty(&qh->queue)) { |
1276 | frame = qh->iso_frame; | 1277 | frame = qh->iso_frame; |
1277 | } else { | 1278 | } else { |
@@ -1283,10 +1284,18 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb, | |||
1283 | lurb->number_of_packets * | 1284 | lurb->number_of_packets * |
1284 | lurb->interval; | 1285 | lurb->interval; |
1285 | } | 1286 | } |
1286 | if (urb->transfer_flags & URB_ISO_ASAP) | 1287 | if (urb->transfer_flags & URB_ISO_ASAP) { |
1287 | urb->start_frame = frame; | 1288 | /* Skip some frames if necessary to insure |
1288 | else if (urb->start_frame != frame) | 1289 | * the start frame is in the future. |
1289 | return -EINVAL; | 1290 | */ |
1291 | uhci_get_current_frame_number(uhci); | ||
1292 | if (uhci_frame_before_eq(frame, uhci->frame_number)) { | ||
1293 | frame = uhci->frame_number + 1; | ||
1294 | frame += ((qh->phase - frame) & | ||
1295 | (qh->period - 1)); | ||
1296 | } | ||
1297 | } /* Otherwise pick up where the last URB leaves off */ | ||
1298 | urb->start_frame = frame; | ||
1290 | } | 1299 | } |
1291 | 1300 | ||
1292 | /* Make sure we won't have to go too far into the future */ | 1301 | /* Make sure we won't have to go too far into the future */ |
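For URB_ISO_ASAP submissions whose computed start frame has already passed, the uhci-q change advances the frame to just beyond the current hardware frame and then rounds up to the endpoint's phase within its period. A simplified, standalone version of that arithmetic, assuming as the driver does that the period is a power of two:

    /* Return a frame number strictly in the future that lands on 'phase'
     * within the (power-of-two) 'period'. Illustrative only. */
    static unsigned int demo_next_iso_frame(unsigned int frame,
                                            unsigned int current_frame,
                                            unsigned int period,
                                            unsigned int phase)
    {
            if ((int)(frame - current_frame) <= 0) {      /* not in the future */
                    frame = current_frame + 1;
                    frame += (phase - frame) & (period - 1);
            }
            return frame;
    }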
diff --git a/drivers/usb/misc/cytherm.c b/drivers/usb/misc/cytherm.c index 2677fea147d9..1cd9e7eba93b 100644 --- a/drivers/usb/misc/cytherm.c +++ b/drivers/usb/misc/cytherm.c | |||
@@ -399,7 +399,6 @@ static void cytherm_disconnect(struct usb_interface *interface) | |||
399 | struct usb_cytherm *dev; | 399 | struct usb_cytherm *dev; |
400 | 400 | ||
401 | dev = usb_get_intfdata (interface); | 401 | dev = usb_get_intfdata (interface); |
402 | usb_set_intfdata (interface, NULL); | ||
403 | 402 | ||
404 | device_remove_file(&interface->dev, &dev_attr_brightness); | 403 | device_remove_file(&interface->dev, &dev_attr_brightness); |
405 | device_remove_file(&interface->dev, &dev_attr_temp); | 404 | device_remove_file(&interface->dev, &dev_attr_temp); |
@@ -407,6 +406,9 @@ static void cytherm_disconnect(struct usb_interface *interface) | |||
407 | device_remove_file(&interface->dev, &dev_attr_port0); | 406 | device_remove_file(&interface->dev, &dev_attr_port0); |
408 | device_remove_file(&interface->dev, &dev_attr_port1); | 407 | device_remove_file(&interface->dev, &dev_attr_port1); |
409 | 408 | ||
409 | /* first remove the files, then NULL the pointer */ | ||
410 | usb_set_intfdata (interface, NULL); | ||
411 | |||
410 | usb_put_dev(dev->udev); | 412 | usb_put_dev(dev->udev); |
411 | 413 | ||
412 | kfree(dev); | 414 | kfree(dev); |
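The cytherm fix is purely about ordering: the sysfs attribute callbacks find the device through usb_get_intfdata(), so the interface data pointer has to stay valid until device_remove_file() has torn the attributes down. A sketch of the safe disconnect order, with a hypothetical attribute:

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/usb.h>
    #include <linux/device.h>

    struct demo_dev {
            struct usb_device *udev;
    };

    static ssize_t demo_value_show(struct device *d, struct device_attribute *attr,
                                   char *buf)
    {
            return sprintf(buf, "0\n");       /* would normally use the intfdata */
    }
    static DEVICE_ATTR(demo_value, S_IRUGO, demo_value_show, NULL);

    static void demo_disconnect(struct usb_interface *interface)
    {
            struct demo_dev *dev = usb_get_intfdata(interface);

            /* 1. Remove the attribute while its ->show() can still find 'dev'. */
            device_remove_file(&interface->dev, &dev_attr_demo_value);

            /* 2. Only now drop the pointer the callback was using. */
            usb_set_intfdata(interface, NULL);

            usb_put_dev(dev->udev);
            kfree(dev);
    }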
diff --git a/drivers/usb/misc/emi26.c b/drivers/usb/misc/emi26.c index cd137577bb2d..4a09b87bdd28 100644 --- a/drivers/usb/misc/emi26.c +++ b/drivers/usb/misc/emi26.c | |||
@@ -114,6 +114,10 @@ static int emi26_load_firmware (struct usb_device *dev) | |||
114 | 114 | ||
115 | /* De-assert reset (let the CPU run) */ | 115 | /* De-assert reset (let the CPU run) */ |
116 | err = emi26_set_reset(dev,0); | 116 | err = emi26_set_reset(dev,0); |
117 | if (err < 0) { | ||
118 | err("%s - error loading firmware: error = %d", __FUNCTION__, err); | ||
119 | goto wraperr; | ||
120 | } | ||
117 | msleep(250); /* let device settle */ | 121 | msleep(250); /* let device settle */ |
118 | 122 | ||
119 | /* 2. We upload the FPGA firmware into the EMI | 123 | /* 2. We upload the FPGA firmware into the EMI |
diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c index 4758cc5ccebc..d1362415922c 100644 --- a/drivers/usb/misc/emi62.c +++ b/drivers/usb/misc/emi62.c | |||
@@ -123,6 +123,10 @@ static int emi62_load_firmware (struct usb_device *dev) | |||
123 | 123 | ||
124 | /* De-assert reset (let the CPU run) */ | 124 | /* De-assert reset (let the CPU run) */ |
125 | err = emi62_set_reset(dev,0); | 125 | err = emi62_set_reset(dev,0); |
126 | if (err < 0) { | ||
127 | err("%s - error loading firmware: error = %d", __FUNCTION__, err); | ||
128 | goto wraperr; | ||
129 | } | ||
126 | msleep(250); /* let device settle */ | 130 | msleep(250); /* let device settle */ |
127 | 131 | ||
128 | /* 2. We upload the FPGA firmware into the EMI | 132 | /* 2. We upload the FPGA firmware into the EMI |
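Both EMI firmware loaders gain the same missing check: de-asserting reset can fail like any other control transfer, and on failure the loader must bail out through its common error label instead of pressing on against an unresponsive device. The shape of that error handling, reduced to essentials; demo_set_reset() and demo_download_stage() are placeholders for the real steps:

    #include <linux/delay.h>
    #include <linux/usb.h>

    int demo_set_reset(struct usb_device *dev, int assert);     /* assumed helper */
    int demo_download_stage(struct usb_device *dev);            /* assumed helper */

    static int demo_load_firmware(struct usb_device *dev)
    {
            int err;

            err = demo_set_reset(dev, 1);            /* hold the CPU in reset */
            if (err < 0)
                    goto wraperr;

            err = demo_download_stage(dev);          /* push the firmware image */
            if (err < 0)
                    goto wraperr;

            err = demo_set_reset(dev, 0);            /* let the CPU run */
            if (err < 0)
                    goto wraperr;                    /* the check the patch adds */

            msleep(250);                             /* let the device settle */
            return 0;

    wraperr:
            dev_err(&dev->dev, "firmware load failed: %d\n", err);
            return err;
    }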
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c index d3d8cd6ff103..148b7fe639b2 100644 --- a/drivers/usb/misc/ftdi-elan.c +++ b/drivers/usb/misc/ftdi-elan.c | |||
@@ -147,7 +147,7 @@ struct u132_target { | |||
147 | /* Structure to hold all of our device specific stuff*/ | 147 | /* Structure to hold all of our device specific stuff*/ |
148 | struct usb_ftdi { | 148 | struct usb_ftdi { |
149 | struct list_head ftdi_list; | 149 | struct list_head ftdi_list; |
150 | struct semaphore u132_lock; | 150 | struct mutex u132_lock; |
151 | int command_next; | 151 | int command_next; |
152 | int command_head; | 152 | int command_head; |
153 | struct u132_command command[COMMAND_SIZE]; | 153 | struct u132_command command[COMMAND_SIZE]; |
@@ -330,39 +330,39 @@ static int ftdi_elan_hcd_init(struct usb_ftdi *ftdi) | |||
330 | 330 | ||
331 | static void ftdi_elan_abandon_completions(struct usb_ftdi *ftdi) | 331 | static void ftdi_elan_abandon_completions(struct usb_ftdi *ftdi) |
332 | { | 332 | { |
333 | down(&ftdi->u132_lock); | 333 | mutex_lock(&ftdi->u132_lock); |
334 | while (ftdi->respond_next > ftdi->respond_head) { | 334 | while (ftdi->respond_next > ftdi->respond_head) { |
335 | struct u132_respond *respond = &ftdi->respond[RESPOND_MASK & | 335 | struct u132_respond *respond = &ftdi->respond[RESPOND_MASK & |
336 | ftdi->respond_head++]; | 336 | ftdi->respond_head++]; |
337 | *respond->result = -ESHUTDOWN; | 337 | *respond->result = -ESHUTDOWN; |
338 | *respond->value = 0; | 338 | *respond->value = 0; |
339 | complete(&respond->wait_completion); | 339 | complete(&respond->wait_completion); |
340 | } up(&ftdi->u132_lock); | 340 | } mutex_unlock(&ftdi->u132_lock); |
341 | } | 341 | } |
342 | 342 | ||
343 | static void ftdi_elan_abandon_targets(struct usb_ftdi *ftdi) | 343 | static void ftdi_elan_abandon_targets(struct usb_ftdi *ftdi) |
344 | { | 344 | { |
345 | int ed_number = 4; | 345 | int ed_number = 4; |
346 | down(&ftdi->u132_lock); | 346 | mutex_lock(&ftdi->u132_lock); |
347 | while (ed_number-- > 0) { | 347 | while (ed_number-- > 0) { |
348 | struct u132_target *target = &ftdi->target[ed_number]; | 348 | struct u132_target *target = &ftdi->target[ed_number]; |
349 | if (target->active == 1) { | 349 | if (target->active == 1) { |
350 | target->condition_code = TD_DEVNOTRESP; | 350 | target->condition_code = TD_DEVNOTRESP; |
351 | up(&ftdi->u132_lock); | 351 | mutex_unlock(&ftdi->u132_lock); |
352 | ftdi_elan_do_callback(ftdi, target, NULL, 0); | 352 | ftdi_elan_do_callback(ftdi, target, NULL, 0); |
353 | down(&ftdi->u132_lock); | 353 | mutex_lock(&ftdi->u132_lock); |
354 | } | 354 | } |
355 | } | 355 | } |
356 | ftdi->recieved = 0; | 356 | ftdi->recieved = 0; |
357 | ftdi->expected = 4; | 357 | ftdi->expected = 4; |
358 | ftdi->ed_found = 0; | 358 | ftdi->ed_found = 0; |
359 | up(&ftdi->u132_lock); | 359 | mutex_unlock(&ftdi->u132_lock); |
360 | } | 360 | } |
361 | 361 | ||
362 | static void ftdi_elan_flush_targets(struct usb_ftdi *ftdi) | 362 | static void ftdi_elan_flush_targets(struct usb_ftdi *ftdi) |
363 | { | 363 | { |
364 | int ed_number = 4; | 364 | int ed_number = 4; |
365 | down(&ftdi->u132_lock); | 365 | mutex_lock(&ftdi->u132_lock); |
366 | while (ed_number-- > 0) { | 366 | while (ed_number-- > 0) { |
367 | struct u132_target *target = &ftdi->target[ed_number]; | 367 | struct u132_target *target = &ftdi->target[ed_number]; |
368 | target->abandoning = 1; | 368 | target->abandoning = 1; |
@@ -382,9 +382,9 @@ static void ftdi_elan_flush_targets(struct usb_ftdi *ftdi) | |||
382 | ftdi->command_next += 1; | 382 | ftdi->command_next += 1; |
383 | ftdi_elan_kick_command_queue(ftdi); | 383 | ftdi_elan_kick_command_queue(ftdi); |
384 | } else { | 384 | } else { |
385 | up(&ftdi->u132_lock); | 385 | mutex_unlock(&ftdi->u132_lock); |
386 | msleep(100); | 386 | msleep(100); |
387 | down(&ftdi->u132_lock); | 387 | mutex_lock(&ftdi->u132_lock); |
388 | goto wait_1; | 388 | goto wait_1; |
389 | } | 389 | } |
390 | } | 390 | } |
@@ -404,9 +404,9 @@ static void ftdi_elan_flush_targets(struct usb_ftdi *ftdi) | |||
404 | ftdi->command_next += 1; | 404 | ftdi->command_next += 1; |
405 | ftdi_elan_kick_command_queue(ftdi); | 405 | ftdi_elan_kick_command_queue(ftdi); |
406 | } else { | 406 | } else { |
407 | up(&ftdi->u132_lock); | 407 | mutex_unlock(&ftdi->u132_lock); |
408 | msleep(100); | 408 | msleep(100); |
409 | down(&ftdi->u132_lock); | 409 | mutex_lock(&ftdi->u132_lock); |
410 | goto wait_2; | 410 | goto wait_2; |
411 | } | 411 | } |
412 | } | 412 | } |
@@ -414,13 +414,13 @@ static void ftdi_elan_flush_targets(struct usb_ftdi *ftdi) | |||
414 | ftdi->recieved = 0; | 414 | ftdi->recieved = 0; |
415 | ftdi->expected = 4; | 415 | ftdi->expected = 4; |
416 | ftdi->ed_found = 0; | 416 | ftdi->ed_found = 0; |
417 | up(&ftdi->u132_lock); | 417 | mutex_unlock(&ftdi->u132_lock); |
418 | } | 418 | } |
419 | 419 | ||
420 | static void ftdi_elan_cancel_targets(struct usb_ftdi *ftdi) | 420 | static void ftdi_elan_cancel_targets(struct usb_ftdi *ftdi) |
421 | { | 421 | { |
422 | int ed_number = 4; | 422 | int ed_number = 4; |
423 | down(&ftdi->u132_lock); | 423 | mutex_lock(&ftdi->u132_lock); |
424 | while (ed_number-- > 0) { | 424 | while (ed_number-- > 0) { |
425 | struct u132_target *target = &ftdi->target[ed_number]; | 425 | struct u132_target *target = &ftdi->target[ed_number]; |
426 | target->abandoning = 1; | 426 | target->abandoning = 1; |
@@ -440,9 +440,9 @@ static void ftdi_elan_cancel_targets(struct usb_ftdi *ftdi) | |||
440 | ftdi->command_next += 1; | 440 | ftdi->command_next += 1; |
441 | ftdi_elan_kick_command_queue(ftdi); | 441 | ftdi_elan_kick_command_queue(ftdi); |
442 | } else { | 442 | } else { |
443 | up(&ftdi->u132_lock); | 443 | mutex_unlock(&ftdi->u132_lock); |
444 | msleep(100); | 444 | msleep(100); |
445 | down(&ftdi->u132_lock); | 445 | mutex_lock(&ftdi->u132_lock); |
446 | goto wait; | 446 | goto wait; |
447 | } | 447 | } |
448 | } | 448 | } |
@@ -450,7 +450,7 @@ static void ftdi_elan_cancel_targets(struct usb_ftdi *ftdi) | |||
450 | ftdi->recieved = 0; | 450 | ftdi->recieved = 0; |
451 | ftdi->expected = 4; | 451 | ftdi->expected = 4; |
452 | ftdi->ed_found = 0; | 452 | ftdi->ed_found = 0; |
453 | up(&ftdi->u132_lock); | 453 | mutex_unlock(&ftdi->u132_lock); |
454 | } | 454 | } |
455 | 455 | ||
456 | static void ftdi_elan_kick_command_queue(struct usb_ftdi *ftdi) | 456 | static void ftdi_elan_kick_command_queue(struct usb_ftdi *ftdi) |
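The ftdi-elan hunks here and below convert the driver's u132_lock from a semaphore used as a mutex into a real struct mutex: down()/up() become mutex_lock()/mutex_unlock(), which states the intent and lets lockdep validate the locking. The conversion in miniature, with a trimmed-down stand-in for the driver struct (the real probe path would also switch its initialiser to mutex_init()):

    #include <linux/mutex.h>

    struct demo_ftdi {
            struct mutex u132_lock;           /* was: struct semaphore */
            int command_next;
    };

    static void demo_queue_command(struct demo_ftdi *ftdi)
    {
            mutex_lock(&ftdi->u132_lock);     /* was: down(&ftdi->u132_lock)  */
            ftdi->command_next += 1;
            mutex_unlock(&ftdi->u132_lock);   /* was: up(&ftdi->u132_lock)    */
    }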
@@ -886,14 +886,14 @@ static char *have_ed_set_response(struct usb_ftdi *ftdi, | |||
886 | char *b) | 886 | char *b) |
887 | { | 887 | { |
888 | int payload = (ed_length >> 0) & 0x07FF; | 888 | int payload = (ed_length >> 0) & 0x07FF; |
889 | down(&ftdi->u132_lock); | 889 | mutex_lock(&ftdi->u132_lock); |
890 | target->actual = 0; | 890 | target->actual = 0; |
891 | target->non_null = (ed_length >> 15) & 0x0001; | 891 | target->non_null = (ed_length >> 15) & 0x0001; |
892 | target->repeat_number = (ed_length >> 11) & 0x000F; | 892 | target->repeat_number = (ed_length >> 11) & 0x000F; |
893 | if (ed_type == 0x02) { | 893 | if (ed_type == 0x02) { |
894 | if (payload == 0 || target->abandoning > 0) { | 894 | if (payload == 0 || target->abandoning > 0) { |
895 | target->abandoning = 0; | 895 | target->abandoning = 0; |
896 | up(&ftdi->u132_lock); | 896 | mutex_unlock(&ftdi->u132_lock); |
897 | ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response, | 897 | ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response, |
898 | payload); | 898 | payload); |
899 | ftdi->recieved = 0; | 899 | ftdi->recieved = 0; |
@@ -903,13 +903,13 @@ static char *have_ed_set_response(struct usb_ftdi *ftdi, | |||
903 | } else { | 903 | } else { |
904 | ftdi->expected = 4 + payload; | 904 | ftdi->expected = 4 + payload; |
905 | ftdi->ed_found = 1; | 905 | ftdi->ed_found = 1; |
906 | up(&ftdi->u132_lock); | 906 | mutex_unlock(&ftdi->u132_lock); |
907 | return b; | 907 | return b; |
908 | } | 908 | } |
909 | } else if (ed_type == 0x03) { | 909 | } else if (ed_type == 0x03) { |
910 | if (payload == 0 || target->abandoning > 0) { | 910 | if (payload == 0 || target->abandoning > 0) { |
911 | target->abandoning = 0; | 911 | target->abandoning = 0; |
912 | up(&ftdi->u132_lock); | 912 | mutex_unlock(&ftdi->u132_lock); |
913 | ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response, | 913 | ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response, |
914 | payload); | 914 | payload); |
915 | ftdi->recieved = 0; | 915 | ftdi->recieved = 0; |
@@ -919,12 +919,12 @@ static char *have_ed_set_response(struct usb_ftdi *ftdi, | |||
919 | } else { | 919 | } else { |
920 | ftdi->expected = 4 + payload; | 920 | ftdi->expected = 4 + payload; |
921 | ftdi->ed_found = 1; | 921 | ftdi->ed_found = 1; |
922 | up(&ftdi->u132_lock); | 922 | mutex_unlock(&ftdi->u132_lock); |
923 | return b; | 923 | return b; |
924 | } | 924 | } |
925 | } else if (ed_type == 0x01) { | 925 | } else if (ed_type == 0x01) { |
926 | target->abandoning = 0; | 926 | target->abandoning = 0; |
927 | up(&ftdi->u132_lock); | 927 | mutex_unlock(&ftdi->u132_lock); |
928 | ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response, | 928 | ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response, |
929 | payload); | 929 | payload); |
930 | ftdi->recieved = 0; | 930 | ftdi->recieved = 0; |
@@ -933,7 +933,7 @@ static char *have_ed_set_response(struct usb_ftdi *ftdi, | |||
933 | return ftdi->response; | 933 | return ftdi->response; |
934 | } else { | 934 | } else { |
935 | target->abandoning = 0; | 935 | target->abandoning = 0; |
936 | up(&ftdi->u132_lock); | 936 | mutex_unlock(&ftdi->u132_lock); |
937 | ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response, | 937 | ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response, |
938 | payload); | 938 | payload); |
939 | ftdi->recieved = 0; | 939 | ftdi->recieved = 0; |
@@ -947,12 +947,12 @@ static char *have_ed_get_response(struct usb_ftdi *ftdi, | |||
947 | struct u132_target *target, u16 ed_length, int ed_number, int ed_type, | 947 | struct u132_target *target, u16 ed_length, int ed_number, int ed_type, |
948 | char *b) | 948 | char *b) |
949 | { | 949 | { |
950 | down(&ftdi->u132_lock); | 950 | mutex_lock(&ftdi->u132_lock); |
951 | target->condition_code = TD_DEVNOTRESP; | 951 | target->condition_code = TD_DEVNOTRESP; |
952 | target->actual = (ed_length >> 0) & 0x01FF; | 952 | target->actual = (ed_length >> 0) & 0x01FF; |
953 | target->non_null = (ed_length >> 15) & 0x0001; | 953 | target->non_null = (ed_length >> 15) & 0x0001; |
954 | target->repeat_number = (ed_length >> 11) & 0x000F; | 954 | target->repeat_number = (ed_length >> 11) & 0x000F; |
955 | up(&ftdi->u132_lock); | 955 | mutex_unlock(&ftdi->u132_lock); |
956 | if (target->active) | 956 | if (target->active) |
957 | ftdi_elan_do_callback(ftdi, target, NULL, 0); | 957 | ftdi_elan_do_callback(ftdi, target, NULL, 0); |
958 | target->abandoning = 0; | 958 | target->abandoning = 0; |
@@ -1278,7 +1278,7 @@ static int ftdi_elan_write_reg(struct usb_ftdi *ftdi, u32 data) | |||
1278 | return -ENODEV; | 1278 | return -ENODEV; |
1279 | } else { | 1279 | } else { |
1280 | int command_size; | 1280 | int command_size; |
1281 | down(&ftdi->u132_lock); | 1281 | mutex_lock(&ftdi->u132_lock); |
1282 | command_size = ftdi->command_next - ftdi->command_head; | 1282 | command_size = ftdi->command_next - ftdi->command_head; |
1283 | if (command_size < COMMAND_SIZE) { | 1283 | if (command_size < COMMAND_SIZE) { |
1284 | struct u132_command *command = &ftdi->command[ | 1284 | struct u132_command *command = &ftdi->command[ |
@@ -1292,10 +1292,10 @@ static int ftdi_elan_write_reg(struct usb_ftdi *ftdi, u32 data) | |||
1292 | command->buffer = &command->value; | 1292 | command->buffer = &command->value; |
1293 | ftdi->command_next += 1; | 1293 | ftdi->command_next += 1; |
1294 | ftdi_elan_kick_command_queue(ftdi); | 1294 | ftdi_elan_kick_command_queue(ftdi); |
1295 | up(&ftdi->u132_lock); | 1295 | mutex_unlock(&ftdi->u132_lock); |
1296 | return 0; | 1296 | return 0; |
1297 | } else { | 1297 | } else { |
1298 | up(&ftdi->u132_lock); | 1298 | mutex_unlock(&ftdi->u132_lock); |
1299 | msleep(100); | 1299 | msleep(100); |
1300 | goto wait; | 1300 | goto wait; |
1301 | } | 1301 | } |
@@ -1310,7 +1310,7 @@ static int ftdi_elan_write_config(struct usb_ftdi *ftdi, int config_offset, | |||
1310 | return -ENODEV; | 1310 | return -ENODEV; |
1311 | } else { | 1311 | } else { |
1312 | int command_size; | 1312 | int command_size; |
1313 | down(&ftdi->u132_lock); | 1313 | mutex_lock(&ftdi->u132_lock); |
1314 | command_size = ftdi->command_next - ftdi->command_head; | 1314 | command_size = ftdi->command_next - ftdi->command_head; |
1315 | if (command_size < COMMAND_SIZE) { | 1315 | if (command_size < COMMAND_SIZE) { |
1316 | struct u132_command *command = &ftdi->command[ | 1316 | struct u132_command *command = &ftdi->command[ |
@@ -1324,10 +1324,10 @@ static int ftdi_elan_write_config(struct usb_ftdi *ftdi, int config_offset, | |||
1324 | command->buffer = &command->value; | 1324 | command->buffer = &command->value; |
1325 | ftdi->command_next += 1; | 1325 | ftdi->command_next += 1; |
1326 | ftdi_elan_kick_command_queue(ftdi); | 1326 | ftdi_elan_kick_command_queue(ftdi); |
1327 | up(&ftdi->u132_lock); | 1327 | mutex_unlock(&ftdi->u132_lock); |
1328 | return 0; | 1328 | return 0; |
1329 | } else { | 1329 | } else { |
1330 | up(&ftdi->u132_lock); | 1330 | mutex_unlock(&ftdi->u132_lock); |
1331 | msleep(100); | 1331 | msleep(100); |
1332 | goto wait; | 1332 | goto wait; |
1333 | } | 1333 | } |
@@ -1342,7 +1342,7 @@ static int ftdi_elan_write_pcimem(struct usb_ftdi *ftdi, int mem_offset, | |||
1342 | return -ENODEV; | 1342 | return -ENODEV; |
1343 | } else { | 1343 | } else { |
1344 | int command_size; | 1344 | int command_size; |
1345 | down(&ftdi->u132_lock); | 1345 | mutex_lock(&ftdi->u132_lock); |
1346 | command_size = ftdi->command_next - ftdi->command_head; | 1346 | command_size = ftdi->command_next - ftdi->command_head; |
1347 | if (command_size < COMMAND_SIZE) { | 1347 | if (command_size < COMMAND_SIZE) { |
1348 | struct u132_command *command = &ftdi->command[ | 1348 | struct u132_command *command = &ftdi->command[ |
@@ -1356,10 +1356,10 @@ static int ftdi_elan_write_pcimem(struct usb_ftdi *ftdi, int mem_offset, | |||
1356 | command->buffer = &command->value; | 1356 | command->buffer = &command->value; |
1357 | ftdi->command_next += 1; | 1357 | ftdi->command_next += 1; |
1358 | ftdi_elan_kick_command_queue(ftdi); | 1358 | ftdi_elan_kick_command_queue(ftdi); |
1359 | up(&ftdi->u132_lock); | 1359 | mutex_unlock(&ftdi->u132_lock); |
1360 | return 0; | 1360 | return 0; |
1361 | } else { | 1361 | } else { |
1362 | up(&ftdi->u132_lock); | 1362 | mutex_unlock(&ftdi->u132_lock); |
1363 | msleep(100); | 1363 | msleep(100); |
1364 | goto wait; | 1364 | goto wait; |
1365 | } | 1365 | } |
@@ -1382,7 +1382,7 @@ static int ftdi_elan_read_reg(struct usb_ftdi *ftdi, u32 *data) | |||
1382 | } else { | 1382 | } else { |
1383 | int command_size; | 1383 | int command_size; |
1384 | int respond_size; | 1384 | int respond_size; |
1385 | down(&ftdi->u132_lock); | 1385 | mutex_lock(&ftdi->u132_lock); |
1386 | command_size = ftdi->command_next - ftdi->command_head; | 1386 | command_size = ftdi->command_next - ftdi->command_head; |
1387 | respond_size = ftdi->respond_next - ftdi->respond_head; | 1387 | respond_size = ftdi->respond_next - ftdi->respond_head; |
1388 | if (command_size < COMMAND_SIZE && respond_size < RESPOND_SIZE) | 1388 | if (command_size < COMMAND_SIZE && respond_size < RESPOND_SIZE) |
@@ -1405,11 +1405,11 @@ static int ftdi_elan_read_reg(struct usb_ftdi *ftdi, u32 *data) | |||
1405 | ftdi->command_next += 1; | 1405 | ftdi->command_next += 1; |
1406 | ftdi->respond_next += 1; | 1406 | ftdi->respond_next += 1; |
1407 | ftdi_elan_kick_command_queue(ftdi); | 1407 | ftdi_elan_kick_command_queue(ftdi); |
1408 | up(&ftdi->u132_lock); | 1408 | mutex_unlock(&ftdi->u132_lock); |
1409 | wait_for_completion(&respond->wait_completion); | 1409 | wait_for_completion(&respond->wait_completion); |
1410 | return result; | 1410 | return result; |
1411 | } else { | 1411 | } else { |
1412 | up(&ftdi->u132_lock); | 1412 | mutex_unlock(&ftdi->u132_lock); |
1413 | msleep(100); | 1413 | msleep(100); |
1414 | goto wait; | 1414 | goto wait; |
1415 | } | 1415 | } |
@@ -1425,7 +1425,7 @@ static int ftdi_elan_read_config(struct usb_ftdi *ftdi, int config_offset, | |||
1425 | } else { | 1425 | } else { |
1426 | int command_size; | 1426 | int command_size; |
1427 | int respond_size; | 1427 | int respond_size; |
1428 | down(&ftdi->u132_lock); | 1428 | mutex_lock(&ftdi->u132_lock); |
1429 | command_size = ftdi->command_next - ftdi->command_head; | 1429 | command_size = ftdi->command_next - ftdi->command_head; |
1430 | respond_size = ftdi->respond_next - ftdi->respond_head; | 1430 | respond_size = ftdi->respond_next - ftdi->respond_head; |
1431 | if (command_size < COMMAND_SIZE && respond_size < RESPOND_SIZE) | 1431 | if (command_size < COMMAND_SIZE && respond_size < RESPOND_SIZE) |
@@ -1449,11 +1449,11 @@ static int ftdi_elan_read_config(struct usb_ftdi *ftdi, int config_offset, | |||
1449 | ftdi->command_next += 1; | 1449 | ftdi->command_next += 1; |
1450 | ftdi->respond_next += 1; | 1450 | ftdi->respond_next += 1; |
1451 | ftdi_elan_kick_command_queue(ftdi); | 1451 | ftdi_elan_kick_command_queue(ftdi); |
1452 | up(&ftdi->u132_lock); | 1452 | mutex_unlock(&ftdi->u132_lock); |
1453 | wait_for_completion(&respond->wait_completion); | 1453 | wait_for_completion(&respond->wait_completion); |
1454 | return result; | 1454 | return result; |
1455 | } else { | 1455 | } else { |
1456 | up(&ftdi->u132_lock); | 1456 | mutex_unlock(&ftdi->u132_lock); |
1457 | msleep(100); | 1457 | msleep(100); |
1458 | goto wait; | 1458 | goto wait; |
1459 | } | 1459 | } |
@@ -1469,7 +1469,7 @@ static int ftdi_elan_read_pcimem(struct usb_ftdi *ftdi, int mem_offset, | |||
1469 | } else { | 1469 | } else { |
1470 | int command_size; | 1470 | int command_size; |
1471 | int respond_size; | 1471 | int respond_size; |
1472 | down(&ftdi->u132_lock); | 1472 | mutex_lock(&ftdi->u132_lock); |
1473 | command_size = ftdi->command_next - ftdi->command_head; | 1473 | command_size = ftdi->command_next - ftdi->command_head; |
1474 | respond_size = ftdi->respond_next - ftdi->respond_head; | 1474 | respond_size = ftdi->respond_next - ftdi->respond_head; |
1475 | if (command_size < COMMAND_SIZE && respond_size < RESPOND_SIZE) | 1475 | if (command_size < COMMAND_SIZE && respond_size < RESPOND_SIZE) |
@@ -1493,11 +1493,11 @@ static int ftdi_elan_read_pcimem(struct usb_ftdi *ftdi, int mem_offset, | |||
1493 | ftdi->command_next += 1; | 1493 | ftdi->command_next += 1; |
1494 | ftdi->respond_next += 1; | 1494 | ftdi->respond_next += 1; |
1495 | ftdi_elan_kick_command_queue(ftdi); | 1495 | ftdi_elan_kick_command_queue(ftdi); |
1496 | up(&ftdi->u132_lock); | 1496 | mutex_unlock(&ftdi->u132_lock); |
1497 | wait_for_completion(&respond->wait_completion); | 1497 | wait_for_completion(&respond->wait_completion); |
1498 | return result; | 1498 | return result; |
1499 | } else { | 1499 | } else { |
1500 | up(&ftdi->u132_lock); | 1500 | mutex_unlock(&ftdi->u132_lock); |
1501 | msleep(100); | 1501 | msleep(100); |
1502 | goto wait; | 1502 | goto wait; |
1503 | } | 1503 | } |
@@ -1529,7 +1529,7 @@ static int ftdi_elan_edset_setup(struct usb_ftdi *ftdi, u8 ed_number, | |||
1529 | return -ENODEV; | 1529 | return -ENODEV; |
1530 | } else { | 1530 | } else { |
1531 | int command_size; | 1531 | int command_size; |
1532 | down(&ftdi->u132_lock); | 1532 | mutex_lock(&ftdi->u132_lock); |
1533 | command_size = ftdi->command_next - ftdi->command_head; | 1533 | command_size = ftdi->command_next - ftdi->command_head; |
1534 | if (command_size < COMMAND_SIZE) { | 1534 | if (command_size < COMMAND_SIZE) { |
1535 | struct u132_target *target = &ftdi->target[ed]; | 1535 | struct u132_target *target = &ftdi->target[ed]; |
@@ -1550,10 +1550,10 @@ static int ftdi_elan_edset_setup(struct usb_ftdi *ftdi, u8 ed_number, | |||
1550 | target->active = 1; | 1550 | target->active = 1; |
1551 | ftdi->command_next += 1; | 1551 | ftdi->command_next += 1; |
1552 | ftdi_elan_kick_command_queue(ftdi); | 1552 | ftdi_elan_kick_command_queue(ftdi); |
1553 | up(&ftdi->u132_lock); | 1553 | mutex_unlock(&ftdi->u132_lock); |
1554 | return 0; | 1554 | return 0; |
1555 | } else { | 1555 | } else { |
1556 | up(&ftdi->u132_lock); | 1556 | mutex_unlock(&ftdi->u132_lock); |
1557 | msleep(100); | 1557 | msleep(100); |
1558 | goto wait; | 1558 | goto wait; |
1559 | } | 1559 | } |
@@ -1586,7 +1586,7 @@ static int ftdi_elan_edset_input(struct usb_ftdi *ftdi, u8 ed_number, | |||
1586 | return -ENODEV; | 1586 | return -ENODEV; |
1587 | } else { | 1587 | } else { |
1588 | int command_size; | 1588 | int command_size; |
1589 | down(&ftdi->u132_lock); | 1589 | mutex_lock(&ftdi->u132_lock); |
1590 | command_size = ftdi->command_next - ftdi->command_head; | 1590 | command_size = ftdi->command_next - ftdi->command_head; |
1591 | if (command_size < COMMAND_SIZE) { | 1591 | if (command_size < COMMAND_SIZE) { |
1592 | struct u132_target *target = &ftdi->target[ed]; | 1592 | struct u132_target *target = &ftdi->target[ed]; |
@@ -1615,10 +1615,10 @@ static int ftdi_elan_edset_input(struct usb_ftdi *ftdi, u8 ed_number, | |||
1615 | target->active = 1; | 1615 | target->active = 1; |
1616 | ftdi->command_next += 1; | 1616 | ftdi->command_next += 1; |
1617 | ftdi_elan_kick_command_queue(ftdi); | 1617 | ftdi_elan_kick_command_queue(ftdi); |
1618 | up(&ftdi->u132_lock); | 1618 | mutex_unlock(&ftdi->u132_lock); |
1619 | return 0; | 1619 | return 0; |
1620 | } else { | 1620 | } else { |
1621 | up(&ftdi->u132_lock); | 1621 | mutex_unlock(&ftdi->u132_lock); |
1622 | msleep(100); | 1622 | msleep(100); |
1623 | goto wait; | 1623 | goto wait; |
1624 | } | 1624 | } |
@@ -1651,7 +1651,7 @@ static int ftdi_elan_edset_empty(struct usb_ftdi *ftdi, u8 ed_number, | |||
1651 | return -ENODEV; | 1651 | return -ENODEV; |
1652 | } else { | 1652 | } else { |
1653 | int command_size; | 1653 | int command_size; |
1654 | down(&ftdi->u132_lock); | 1654 | mutex_lock(&ftdi->u132_lock); |
1655 | command_size = ftdi->command_next - ftdi->command_head; | 1655 | command_size = ftdi->command_next - ftdi->command_head; |
1656 | if (command_size < COMMAND_SIZE) { | 1656 | if (command_size < COMMAND_SIZE) { |
1657 | struct u132_target *target = &ftdi->target[ed]; | 1657 | struct u132_target *target = &ftdi->target[ed]; |
@@ -1672,10 +1672,10 @@ static int ftdi_elan_edset_empty(struct usb_ftdi *ftdi, u8 ed_number, | |||
1672 | target->active = 1; | 1672 | target->active = 1; |
1673 | ftdi->command_next += 1; | 1673 | ftdi->command_next += 1; |
1674 | ftdi_elan_kick_command_queue(ftdi); | 1674 | ftdi_elan_kick_command_queue(ftdi); |
1675 | up(&ftdi->u132_lock); | 1675 | mutex_unlock(&ftdi->u132_lock); |
1676 | return 0; | 1676 | return 0; |
1677 | } else { | 1677 | } else { |
1678 | up(&ftdi->u132_lock); | 1678 | mutex_unlock(&ftdi->u132_lock); |
1679 | msleep(100); | 1679 | msleep(100); |
1680 | goto wait; | 1680 | goto wait; |
1681 | } | 1681 | } |
@@ -1708,7 +1708,7 @@ static int ftdi_elan_edset_output(struct usb_ftdi *ftdi, u8 ed_number, | |||
1708 | return -ENODEV; | 1708 | return -ENODEV; |
1709 | } else { | 1709 | } else { |
1710 | int command_size; | 1710 | int command_size; |
1711 | down(&ftdi->u132_lock); | 1711 | mutex_lock(&ftdi->u132_lock); |
1712 | command_size = ftdi->command_next - ftdi->command_head; | 1712 | command_size = ftdi->command_next - ftdi->command_head; |
1713 | if (command_size < COMMAND_SIZE) { | 1713 | if (command_size < COMMAND_SIZE) { |
1714 | u8 *b; | 1714 | u8 *b; |
@@ -1751,10 +1751,10 @@ static int ftdi_elan_edset_output(struct usb_ftdi *ftdi, u8 ed_number, | |||
1751 | target->active = 1; | 1751 | target->active = 1; |
1752 | ftdi->command_next += 1; | 1752 | ftdi->command_next += 1; |
1753 | ftdi_elan_kick_command_queue(ftdi); | 1753 | ftdi_elan_kick_command_queue(ftdi); |
1754 | up(&ftdi->u132_lock); | 1754 | mutex_unlock(&ftdi->u132_lock); |
1755 | return 0; | 1755 | return 0; |
1756 | } else { | 1756 | } else { |
1757 | up(&ftdi->u132_lock); | 1757 | mutex_unlock(&ftdi->u132_lock); |
1758 | msleep(100); | 1758 | msleep(100); |
1759 | goto wait; | 1759 | goto wait; |
1760 | } | 1760 | } |
@@ -1787,7 +1787,7 @@ static int ftdi_elan_edset_single(struct usb_ftdi *ftdi, u8 ed_number, | |||
1787 | return -ENODEV; | 1787 | return -ENODEV; |
1788 | } else { | 1788 | } else { |
1789 | int command_size; | 1789 | int command_size; |
1790 | down(&ftdi->u132_lock); | 1790 | mutex_lock(&ftdi->u132_lock); |
1791 | command_size = ftdi->command_next - ftdi->command_head; | 1791 | command_size = ftdi->command_next - ftdi->command_head; |
1792 | if (command_size < COMMAND_SIZE) { | 1792 | if (command_size < COMMAND_SIZE) { |
1793 | int remaining_length = urb->transfer_buffer_length - | 1793 | int remaining_length = urb->transfer_buffer_length - |
@@ -1816,10 +1816,10 @@ static int ftdi_elan_edset_single(struct usb_ftdi *ftdi, u8 ed_number, | |||
1816 | target->active = 1; | 1816 | target->active = 1; |
1817 | ftdi->command_next += 1; | 1817 | ftdi->command_next += 1; |
1818 | ftdi_elan_kick_command_queue(ftdi); | 1818 | ftdi_elan_kick_command_queue(ftdi); |
1819 | up(&ftdi->u132_lock); | 1819 | mutex_unlock(&ftdi->u132_lock); |
1820 | return 0; | 1820 | return 0; |
1821 | } else { | 1821 | } else { |
1822 | up(&ftdi->u132_lock); | 1822 | mutex_unlock(&ftdi->u132_lock); |
1823 | msleep(100); | 1823 | msleep(100); |
1824 | goto wait; | 1824 | goto wait; |
1825 | } | 1825 | } |
@@ -1849,9 +1849,9 @@ static int ftdi_elan_edset_flush(struct usb_ftdi *ftdi, u8 ed_number, | |||
1849 | return -ENODEV; | 1849 | return -ENODEV; |
1850 | } else { | 1850 | } else { |
1851 | struct u132_target *target = &ftdi->target[ed]; | 1851 | struct u132_target *target = &ftdi->target[ed]; |
1852 | down(&ftdi->u132_lock); | 1852 | mutex_lock(&ftdi->u132_lock); |
1853 | if (target->abandoning > 0) { | 1853 | if (target->abandoning > 0) { |
1854 | up(&ftdi->u132_lock); | 1854 | mutex_unlock(&ftdi->u132_lock); |
1855 | return 0; | 1855 | return 0; |
1856 | } else { | 1856 | } else { |
1857 | target->abandoning = 1; | 1857 | target->abandoning = 1; |
@@ -1873,13 +1873,13 @@ static int ftdi_elan_edset_flush(struct usb_ftdi *ftdi, u8 ed_number, | |||
1873 | ftdi->command_next += 1; | 1873 | ftdi->command_next += 1; |
1874 | ftdi_elan_kick_command_queue(ftdi); | 1874 | ftdi_elan_kick_command_queue(ftdi); |
1875 | } else { | 1875 | } else { |
1876 | up(&ftdi->u132_lock); | 1876 | mutex_unlock(&ftdi->u132_lock); |
1877 | msleep(100); | 1877 | msleep(100); |
1878 | down(&ftdi->u132_lock); | 1878 | mutex_lock(&ftdi->u132_lock); |
1879 | goto wait_1; | 1879 | goto wait_1; |
1880 | } | 1880 | } |
1881 | } | 1881 | } |
1882 | up(&ftdi->u132_lock); | 1882 | mutex_unlock(&ftdi->u132_lock); |
1883 | return 0; | 1883 | return 0; |
1884 | } | 1884 | } |
1885 | } | 1885 | } |
@@ -2793,7 +2793,7 @@ static int ftdi_elan_probe(struct usb_interface *interface, | |||
2793 | init_MUTEX(&ftdi->sw_lock); | 2793 | init_MUTEX(&ftdi->sw_lock); |
2794 | ftdi->udev = usb_get_dev(interface_to_usbdev(interface)); | 2794 | ftdi->udev = usb_get_dev(interface_to_usbdev(interface)); |
2795 | ftdi->interface = interface; | 2795 | ftdi->interface = interface; |
2796 | init_MUTEX(&ftdi->u132_lock); | 2796 | mutex_init(&ftdi->u132_lock); |
2797 | ftdi->expected = 4; | 2797 | ftdi->expected = 4; |
2798 | iface_desc = interface->cur_altsetting; | 2798 | iface_desc = interface->cur_altsetting; |
2799 | for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { | 2799 | for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { |
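
The ftdi-elan hunks above all apply one conversion: the driver's u132_lock semaphore becomes a struct mutex, so down()/up() become mutex_lock()/mutex_unlock() and init_MUTEX() becomes mutex_init(). A minimal sketch of that pattern, not part of the patch, using a hypothetical example_dev structure in place of struct usb_ftdi and an arbitrary queue limit:

    #include <linux/mutex.h>
    #include <linux/delay.h>

    /* Hypothetical device state; stands in for struct usb_ftdi. */
    struct example_dev {
            struct mutex cmd_lock;            /* was: struct semaphore */
            int command_next, command_head;
    };

    static void example_init(struct example_dev *dev)
    {
            mutex_init(&dev->cmd_lock);       /* was: init_MUTEX(...)  */
    }

    static int example_queue_command(struct example_dev *dev)
    {
    wait:
            mutex_lock(&dev->cmd_lock);       /* was: down(...)        */
            if (dev->command_next - dev->command_head < 16) {
                    dev->command_next += 1;
                    mutex_unlock(&dev->cmd_lock);     /* was: up(...)  */
                    return 0;
            }
            mutex_unlock(&dev->cmd_lock);
            msleep(100);                      /* queue full: sleep and retry,
                                                 as the driver does           */
            goto wait;
    }
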
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c index e6fd024024f5..4bcf7fb4e5da 100644 --- a/drivers/usb/misc/idmouse.c +++ b/drivers/usb/misc/idmouse.c | |||
@@ -66,6 +66,7 @@ static struct usb_device_id idmouse_table[] = { | |||
66 | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT, value, index, NULL, 0, 1000) | 66 | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT, value, index, NULL, 0, 1000) |
67 | 67 | ||
68 | MODULE_DEVICE_TABLE(usb, idmouse_table); | 68 | MODULE_DEVICE_TABLE(usb, idmouse_table); |
69 | static DEFINE_MUTEX(open_disc_mutex); | ||
69 | 70 | ||
70 | /* structure to hold all of our device specific stuff */ | 71 | /* structure to hold all of our device specific stuff */ |
71 | struct usb_idmouse { | 72 | struct usb_idmouse { |
@@ -80,7 +81,7 @@ struct usb_idmouse { | |||
80 | 81 | ||
81 | int open; /* if the port is open or not */ | 82 | int open; /* if the port is open or not */ |
82 | int present; /* if the device is not disconnected */ | 83 | int present; /* if the device is not disconnected */ |
83 | struct semaphore sem; /* locks this structure */ | 84 | struct mutex lock; /* locks this structure */ |
84 | 85 | ||
85 | }; | 86 | }; |
86 | 87 | ||
@@ -213,13 +214,17 @@ static int idmouse_open(struct inode *inode, struct file *file) | |||
213 | if (!interface) | 214 | if (!interface) |
214 | return -ENODEV; | 215 | return -ENODEV; |
215 | 216 | ||
217 | mutex_lock(&open_disc_mutex); | ||
216 | /* get the device information block from the interface */ | 218 | /* get the device information block from the interface */ |
217 | dev = usb_get_intfdata(interface); | 219 | dev = usb_get_intfdata(interface); |
218 | if (!dev) | 220 | if (!dev) { |
221 | mutex_unlock(&open_disc_mutex); | ||
219 | return -ENODEV; | 222 | return -ENODEV; |
223 | } | ||
220 | 224 | ||
221 | /* lock this device */ | 225 | /* lock this device */ |
222 | down(&dev->sem); | 226 | mutex_lock(&dev->lock); |
227 | mutex_unlock(&open_disc_mutex); | ||
223 | 228 | ||
224 | /* check if already open */ | 229 | /* check if already open */ |
225 | if (dev->open) { | 230 | if (dev->open) { |
@@ -245,7 +250,7 @@ static int idmouse_open(struct inode *inode, struct file *file) | |||
245 | error: | 250 | error: |
246 | 251 | ||
247 | /* unlock this device */ | 252 | /* unlock this device */ |
248 | up(&dev->sem); | 253 | mutex_unlock(&dev->lock); |
249 | return result; | 254 | return result; |
250 | } | 255 | } |
251 | 256 | ||
@@ -258,12 +263,14 @@ static int idmouse_release(struct inode *inode, struct file *file) | |||
258 | if (dev == NULL) | 263 | if (dev == NULL) |
259 | return -ENODEV; | 264 | return -ENODEV; |
260 | 265 | ||
266 | mutex_lock(&open_disc_mutex); | ||
261 | /* lock our device */ | 267 | /* lock our device */ |
262 | down(&dev->sem); | 268 | mutex_lock(&dev->lock); |
263 | 269 | ||
264 | /* are we really open? */ | 270 | /* are we really open? */ |
265 | if (dev->open <= 0) { | 271 | if (dev->open <= 0) { |
266 | up(&dev->sem); | 272 | mutex_unlock(&dev->lock); |
273 | mutex_unlock(&open_disc_mutex); | ||
267 | return -ENODEV; | 274 | return -ENODEV; |
268 | } | 275 | } |
269 | 276 | ||
@@ -271,10 +278,12 @@ static int idmouse_release(struct inode *inode, struct file *file) | |||
271 | 278 | ||
272 | if (!dev->present) { | 279 | if (!dev->present) { |
273 | /* the device was unplugged before the file was released */ | 280 | /* the device was unplugged before the file was released */ |
274 | up(&dev->sem); | 281 | mutex_unlock(&dev->lock); |
282 | mutex_unlock(&open_disc_mutex); | ||
275 | idmouse_delete(dev); | 283 | idmouse_delete(dev); |
276 | } else { | 284 | } else { |
277 | up(&dev->sem); | 285 | mutex_unlock(&dev->lock); |
286 | mutex_unlock(&open_disc_mutex); | ||
278 | } | 287 | } |
279 | return 0; | 288 | return 0; |
280 | } | 289 | } |
@@ -286,18 +295,18 @@ static ssize_t idmouse_read(struct file *file, char __user *buffer, size_t count | |||
286 | int result; | 295 | int result; |
287 | 296 | ||
288 | /* lock this object */ | 297 | /* lock this object */ |
289 | down(&dev->sem); | 298 | mutex_lock(&dev->lock); |
290 | 299 | ||
291 | /* verify that the device wasn't unplugged */ | 300 | /* verify that the device wasn't unplugged */ |
292 | if (!dev->present) { | 301 | if (!dev->present) { |
293 | up(&dev->sem); | 302 | mutex_unlock(&dev->lock); |
294 | return -ENODEV; | 303 | return -ENODEV; |
295 | } | 304 | } |
296 | 305 | ||
297 | result = simple_read_from_buffer(buffer, count, ppos, | 306 | result = simple_read_from_buffer(buffer, count, ppos, |
298 | dev->bulk_in_buffer, IMGSIZE); | 307 | dev->bulk_in_buffer, IMGSIZE); |
299 | /* unlock the device */ | 308 | /* unlock the device */ |
300 | up(&dev->sem); | 309 | mutex_unlock(&dev->lock); |
301 | return result; | 310 | return result; |
302 | } | 311 | } |
303 | 312 | ||
@@ -320,7 +329,7 @@ static int idmouse_probe(struct usb_interface *interface, | |||
320 | if (dev == NULL) | 329 | if (dev == NULL) |
321 | return -ENOMEM; | 330 | return -ENOMEM; |
322 | 331 | ||
323 | init_MUTEX(&dev->sem); | 332 | mutex_init(&dev->lock); |
324 | dev->udev = udev; | 333 | dev->udev = udev; |
325 | dev->interface = interface; | 334 | dev->interface = interface; |
326 | 335 | ||
@@ -372,24 +381,26 @@ static void idmouse_disconnect(struct usb_interface *interface) | |||
372 | 381 | ||
373 | /* get device structure */ | 382 | /* get device structure */ |
374 | dev = usb_get_intfdata(interface); | 383 | dev = usb_get_intfdata(interface); |
375 | usb_set_intfdata(interface, NULL); | ||
376 | 384 | ||
377 | /* give back our minor */ | 385 | /* give back our minor */ |
378 | usb_deregister_dev(interface, &idmouse_class); | 386 | usb_deregister_dev(interface, &idmouse_class); |
379 | 387 | ||
380 | /* lock it */ | 388 | mutex_lock(&open_disc_mutex); |
381 | down(&dev->sem); | 389 | usb_set_intfdata(interface, NULL); |
390 | /* lock the device */ | ||
391 | mutex_lock(&dev->lock); | ||
392 | mutex_unlock(&open_disc_mutex); | ||
382 | 393 | ||
383 | /* prevent device read, write and ioctl */ | 394 | /* prevent device read, write and ioctl */ |
384 | dev->present = 0; | 395 | dev->present = 0; |
385 | 396 | ||
386 | /* if the device is opened, idmouse_release will clean this up */ | 397 | /* if the device is opened, idmouse_release will clean this up */ |
387 | if (!dev->open) { | 398 | if (!dev->open) { |
388 | up(&dev->sem); | 399 | mutex_unlock(&dev->lock); |
389 | idmouse_delete(dev); | 400 | idmouse_delete(dev); |
390 | } else { | 401 | } else { |
391 | /* unlock */ | 402 | /* unlock */ |
392 | up(&dev->sem); | 403 | mutex_unlock(&dev->lock); |
393 | } | 404 | } |
394 | 405 | ||
395 | info("%s disconnected", DRIVER_DESC); | 406 | info("%s disconnected", DRIVER_DESC); |
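
The idmouse changes pair the per-device lock (now a mutex) with a new global open_disc_mutex so open() cannot look up the interface data while disconnect() is tearing it down; the global lock is always dropped as soon as the per-device lock is held. A reduced sketch of that ordering, with hypothetical names rather than the driver's actual code:

    #include <linux/usb.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(open_disc_mutex);

    struct example_dev {
            struct mutex lock;
            int present;
    };

    static int example_open(struct usb_interface *intf)
    {
            struct example_dev *dev;

            mutex_lock(&open_disc_mutex);
            dev = usb_get_intfdata(intf);     /* cannot race disconnect() */
            if (!dev) {
                    mutex_unlock(&open_disc_mutex);
                    return -ENODEV;
            }
            mutex_lock(&dev->lock);
            mutex_unlock(&open_disc_mutex);   /* drop the global lock early */
            /* ... per-device open work under dev->lock ... */
            mutex_unlock(&dev->lock);
            return 0;
    }

    static void example_disconnect(struct usb_interface *intf)
    {
            struct example_dev *dev;

            mutex_lock(&open_disc_mutex);
            dev = usb_get_intfdata(intf);
            usb_set_intfdata(intf, NULL);     /* open() now sees NULL */
            mutex_lock(&dev->lock);
            mutex_unlock(&open_disc_mutex);
            dev->present = 0;                 /* block further I/O */
            mutex_unlock(&dev->lock);
    }
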
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index d372fbc4effb..764696ff1e8e 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c | |||
@@ -66,6 +66,7 @@ module_param(debug, bool, 0644); | |||
66 | MODULE_PARM_DESC(debug, "debug=1 enables debugging messages"); | 66 | MODULE_PARM_DESC(debug, "debug=1 enables debugging messages"); |
67 | 67 | ||
68 | static struct usb_driver iowarrior_driver; | 68 | static struct usb_driver iowarrior_driver; |
69 | static DEFINE_MUTEX(iowarrior_open_disc_lock); | ||
69 | 70 | ||
70 | /*--------------*/ | 71 | /*--------------*/ |
71 | /* data */ | 72 | /* data */ |
@@ -351,7 +352,7 @@ static ssize_t iowarrior_write(struct file *file, | |||
351 | 352 | ||
352 | mutex_lock(&dev->mutex); | 353 | mutex_lock(&dev->mutex); |
353 | /* verify that the device wasn't unplugged */ | 354 | /* verify that the device wasn't unplugged */ |
354 | if (dev == NULL || !dev->present) { | 355 | if (!dev->present) { |
355 | retval = -ENODEV; | 356 | retval = -ENODEV; |
356 | goto exit; | 357 | goto exit; |
357 | } | 358 | } |
@@ -608,11 +609,15 @@ static int iowarrior_open(struct inode *inode, struct file *file) | |||
608 | return -ENODEV; | 609 | return -ENODEV; |
609 | } | 610 | } |
610 | 611 | ||
612 | mutex_lock(&iowarrior_open_disc_lock); | ||
611 | dev = usb_get_intfdata(interface); | 613 | dev = usb_get_intfdata(interface); |
612 | if (!dev) | 614 | if (!dev) { |
615 | mutex_unlock(&iowarrior_open_disc_lock); | ||
613 | return -ENODEV; | 616 | return -ENODEV; |
617 | } | ||
614 | 618 | ||
615 | mutex_lock(&dev->mutex); | 619 | mutex_lock(&dev->mutex); |
620 | mutex_unlock(&iowarrior_open_disc_lock); | ||
616 | 621 | ||
617 | /* Only one process can open each device, no sharing. */ | 622 | /* Only one process can open each device, no sharing. */ |
618 | if (dev->opened) { | 623 | if (dev->opened) { |
@@ -866,6 +871,7 @@ static void iowarrior_disconnect(struct usb_interface *interface) | |||
866 | int minor; | 871 | int minor; |
867 | 872 | ||
868 | dev = usb_get_intfdata(interface); | 873 | dev = usb_get_intfdata(interface); |
874 | mutex_lock(&iowarrior_open_disc_lock); | ||
869 | usb_set_intfdata(interface, NULL); | 875 | usb_set_intfdata(interface, NULL); |
870 | 876 | ||
871 | minor = dev->minor; | 877 | minor = dev->minor; |
@@ -879,6 +885,7 @@ static void iowarrior_disconnect(struct usb_interface *interface) | |||
879 | dev->present = 0; | 885 | dev->present = 0; |
880 | 886 | ||
881 | mutex_unlock(&dev->mutex); | 887 | mutex_unlock(&dev->mutex); |
888 | mutex_unlock(&iowarrior_open_disc_lock); | ||
882 | 889 | ||
883 | if (dev->opened) { | 890 | if (dev->opened) { |
884 | /* There is a process that holds a filedescriptor to the device , | 891 | /* There is a process that holds a filedescriptor to the device , |
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c index 561970b889a5..aab320085ebf 100644 --- a/drivers/usb/misc/legousbtower.c +++ b/drivers/usb/misc/legousbtower.c | |||
@@ -198,6 +198,7 @@ static struct usb_device_id tower_table [] = { | |||
198 | }; | 198 | }; |
199 | 199 | ||
200 | MODULE_DEVICE_TABLE (usb, tower_table); | 200 | MODULE_DEVICE_TABLE (usb, tower_table); |
201 | static DEFINE_MUTEX(open_disc_mutex); | ||
201 | 202 | ||
202 | #define LEGO_USB_TOWER_MINOR_BASE 160 | 203 | #define LEGO_USB_TOWER_MINOR_BASE 160 |
203 | 204 | ||
@@ -350,25 +351,31 @@ static int tower_open (struct inode *inode, struct file *file) | |||
350 | goto exit; | 351 | goto exit; |
351 | } | 352 | } |
352 | 353 | ||
354 | mutex_lock(&open_disc_mutex); | ||
353 | dev = usb_get_intfdata(interface); | 355 | dev = usb_get_intfdata(interface); |
354 | 356 | ||
355 | if (!dev) { | 357 | if (!dev) { |
358 | mutex_unlock(&open_disc_mutex); | ||
356 | retval = -ENODEV; | 359 | retval = -ENODEV; |
357 | goto exit; | 360 | goto exit; |
358 | } | 361 | } |
359 | 362 | ||
360 | /* lock this device */ | 363 | /* lock this device */ |
361 | if (down_interruptible (&dev->sem)) { | 364 | if (down_interruptible (&dev->sem)) { |
365 | mutex_unlock(&open_disc_mutex); | ||
362 | retval = -ERESTARTSYS; | 366 | retval = -ERESTARTSYS; |
363 | goto exit; | 367 | goto exit; |
364 | } | 368 | } |
365 | 369 | ||
370 | |||
366 | /* allow opening only once */ | 371 | /* allow opening only once */ |
367 | if (dev->open_count) { | 372 | if (dev->open_count) { |
373 | mutex_unlock(&open_disc_mutex); | ||
368 | retval = -EBUSY; | 374 | retval = -EBUSY; |
369 | goto unlock_exit; | 375 | goto unlock_exit; |
370 | } | 376 | } |
371 | dev->open_count = 1; | 377 | dev->open_count = 1; |
378 | mutex_unlock(&open_disc_mutex); | ||
372 | 379 | ||
373 | /* reset the tower */ | 380 | /* reset the tower */ |
374 | result = usb_control_msg (dev->udev, | 381 | result = usb_control_msg (dev->udev, |
@@ -437,9 +444,10 @@ static int tower_release (struct inode *inode, struct file *file) | |||
437 | if (dev == NULL) { | 444 | if (dev == NULL) { |
438 | dbg(1, "%s: object is NULL", __FUNCTION__); | 445 | dbg(1, "%s: object is NULL", __FUNCTION__); |
439 | retval = -ENODEV; | 446 | retval = -ENODEV; |
440 | goto exit; | 447 | goto exit_nolock; |
441 | } | 448 | } |
442 | 449 | ||
450 | mutex_lock(&open_disc_mutex); | ||
443 | if (down_interruptible (&dev->sem)) { | 451 | if (down_interruptible (&dev->sem)) { |
444 | retval = -ERESTARTSYS; | 452 | retval = -ERESTARTSYS; |
445 | goto exit; | 453 | goto exit; |
@@ -468,6 +476,8 @@ unlock_exit: | |||
468 | up (&dev->sem); | 476 | up (&dev->sem); |
469 | 477 | ||
470 | exit: | 478 | exit: |
479 | mutex_unlock(&open_disc_mutex); | ||
480 | exit_nolock: | ||
471 | dbg(2, "%s: leave, return value %d", __FUNCTION__, retval); | 481 | dbg(2, "%s: leave, return value %d", __FUNCTION__, retval); |
472 | return retval; | 482 | return retval; |
473 | } | 483 | } |
@@ -989,6 +999,7 @@ static void tower_disconnect (struct usb_interface *interface) | |||
989 | dbg(2, "%s: enter", __FUNCTION__); | 999 | dbg(2, "%s: enter", __FUNCTION__); |
990 | 1000 | ||
991 | dev = usb_get_intfdata (interface); | 1001 | dev = usb_get_intfdata (interface); |
1002 | mutex_lock(&open_disc_mutex); | ||
992 | usb_set_intfdata (interface, NULL); | 1003 | usb_set_intfdata (interface, NULL); |
993 | 1004 | ||
994 | minor = dev->minor; | 1005 | minor = dev->minor; |
@@ -997,6 +1008,7 @@ static void tower_disconnect (struct usb_interface *interface) | |||
997 | usb_deregister_dev (interface, &tower_class); | 1008 | usb_deregister_dev (interface, &tower_class); |
998 | 1009 | ||
999 | down (&dev->sem); | 1010 | down (&dev->sem); |
1011 | mutex_unlock(&open_disc_mutex); | ||
1000 | 1012 | ||
1001 | /* if the device is not opened, then we clean up right now */ | 1013 | /* if the device is not opened, then we clean up right now */ |
1002 | if (!dev->open_count) { | 1014 | if (!dev->open_count) { |
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c index 88f6abe73624..330c18e390b8 100644 --- a/drivers/usb/misc/rio500.c +++ b/drivers/usb/misc/rio500.c | |||
@@ -118,10 +118,7 @@ ioctl_rio(struct inode *inode, struct file *file, unsigned int cmd, | |||
118 | 118 | ||
119 | mutex_lock(&(rio->lock)); | 119 | mutex_lock(&(rio->lock)); |
120 | /* Sanity check to make sure rio is connected, powered, etc */ | 120 | /* Sanity check to make sure rio is connected, powered, etc */ |
121 | if ( rio == NULL || | 121 | if (rio->present == 0 || rio->rio_dev == NULL) { |
122 | rio->present == 0 || | ||
123 | rio->rio_dev == NULL ) | ||
124 | { | ||
125 | retval = -ENODEV; | 122 | retval = -ENODEV; |
126 | goto err_out; | 123 | goto err_out; |
127 | } | 124 | } |
@@ -280,10 +277,7 @@ write_rio(struct file *file, const char __user *buffer, | |||
280 | if (intr) | 277 | if (intr) |
281 | return -EINTR; | 278 | return -EINTR; |
282 | /* Sanity check to make sure rio is connected, powered, etc */ | 279 | /* Sanity check to make sure rio is connected, powered, etc */ |
283 | if ( rio == NULL || | 280 | if (rio->present == 0 || rio->rio_dev == NULL) { |
284 | rio->present == 0 || | ||
285 | rio->rio_dev == NULL ) | ||
286 | { | ||
287 | mutex_unlock(&(rio->lock)); | 281 | mutex_unlock(&(rio->lock)); |
288 | return -ENODEV; | 282 | return -ENODEV; |
289 | } | 283 | } |
@@ -369,10 +363,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos) | |||
369 | if (intr) | 363 | if (intr) |
370 | return -EINTR; | 364 | return -EINTR; |
371 | /* Sanity check to make sure rio is connected, powered, etc */ | 365 | /* Sanity check to make sure rio is connected, powered, etc */ |
372 | if ( rio == NULL || | 366 | if (rio->present == 0 || rio->rio_dev == NULL) { |
373 | rio->present == 0 || | ||
374 | rio->rio_dev == NULL ) | ||
375 | { | ||
376 | mutex_unlock(&(rio->lock)); | 367 | mutex_unlock(&(rio->lock)); |
377 | return -ENODEV; | 368 | return -ENODEV; |
378 | } | 369 | } |
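
The rio500 hunks drop the rio == NULL test from each sanity check: by the time the check runs, the pointer has already been dereferenced to take rio->lock, so the NULL comparison could never be the failing condition. Roughly, the situation the patch removes looks like this (illustrative fragment, not the driver source):

    mutex_lock(&(rio->lock));         /* rio is dereferenced here ...        */
    if (rio == NULL ||                /* ... so this test is already dead    */
        rio->present == 0 || rio->rio_dev == NULL) {
            mutex_unlock(&(rio->lock));
            return -ENODEV;
    }

Only the present/rio_dev checks carry real information, which is what the patched code keeps.
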
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c index 719842032712..20777d01db62 100644 --- a/drivers/usb/misc/usblcd.c +++ b/drivers/usb/misc/usblcd.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
19 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
20 | #include <linux/mutex.h> | ||
20 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
21 | #include <linux/usb.h> | 22 | #include <linux/usb.h> |
22 | 23 | ||
@@ -34,6 +35,8 @@ static struct usb_device_id id_table [] = { | |||
34 | }; | 35 | }; |
35 | MODULE_DEVICE_TABLE (usb, id_table); | 36 | MODULE_DEVICE_TABLE (usb, id_table); |
36 | 37 | ||
38 | static DEFINE_MUTEX(open_disc_mutex); | ||
39 | |||
37 | 40 | ||
38 | struct usb_lcd { | 41 | struct usb_lcd { |
39 | struct usb_device * udev; /* init: probe_lcd */ | 42 | struct usb_device * udev; /* init: probe_lcd */ |
@@ -79,12 +82,16 @@ static int lcd_open(struct inode *inode, struct file *file) | |||
79 | return -ENODEV; | 82 | return -ENODEV; |
80 | } | 83 | } |
81 | 84 | ||
85 | mutex_lock(&open_disc_mutex); | ||
82 | dev = usb_get_intfdata(interface); | 86 | dev = usb_get_intfdata(interface); |
83 | if (!dev) | 87 | if (!dev) { |
88 | mutex_unlock(&open_disc_mutex); | ||
84 | return -ENODEV; | 89 | return -ENODEV; |
90 | } | ||
85 | 91 | ||
86 | /* increment our usage count for the device */ | 92 | /* increment our usage count for the device */ |
87 | kref_get(&dev->kref); | 93 | kref_get(&dev->kref); |
94 | mutex_unlock(&open_disc_mutex); | ||
88 | 95 | ||
89 | /* grab a power reference */ | 96 | /* grab a power reference */ |
90 | r = usb_autopm_get_interface(interface); | 97 | r = usb_autopm_get_interface(interface); |
@@ -393,8 +400,10 @@ static void lcd_disconnect(struct usb_interface *interface) | |||
393 | struct usb_lcd *dev; | 400 | struct usb_lcd *dev; |
394 | int minor = interface->minor; | 401 | int minor = interface->minor; |
395 | 402 | ||
403 | mutex_lock(&open_disc_mutex); | ||
396 | dev = usb_get_intfdata(interface); | 404 | dev = usb_get_intfdata(interface); |
397 | usb_set_intfdata(interface, NULL); | 405 | usb_set_intfdata(interface, NULL); |
406 | mutex_unlock(&open_disc_mutex); | ||
398 | 407 | ||
399 | /* give back our minor */ | 408 | /* give back our minor */ |
400 | usb_deregister_dev(interface, &lcd_class); | 409 | usb_deregister_dev(interface, &lcd_class); |
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c index 2a8e537cb046..ddfee918000d 100644 --- a/drivers/usb/serial/ark3116.c +++ b/drivers/usb/serial/ark3116.c | |||
@@ -161,7 +161,8 @@ static void ark3116_set_termios(struct usb_serial_port *port, | |||
161 | { | 161 | { |
162 | struct usb_serial *serial = port->serial; | 162 | struct usb_serial *serial = port->serial; |
163 | struct ark3116_private *priv = usb_get_serial_port_data(port); | 163 | struct ark3116_private *priv = usb_get_serial_port_data(port); |
164 | unsigned int cflag = port->tty->termios->c_cflag; | 164 | struct ktermios *termios = port->tty->termios; |
165 | unsigned int cflag = termios->c_cflag; | ||
165 | unsigned long flags; | 166 | unsigned long flags; |
166 | int baud; | 167 | int baud; |
167 | int ark3116_baud; | 168 | int ark3116_baud; |
@@ -177,11 +178,14 @@ static void ark3116_set_termios(struct usb_serial_port *port, | |||
177 | *(port->tty->termios) = tty_std_termios; | 178 | *(port->tty->termios) = tty_std_termios; |
178 | port->tty->termios->c_cflag = B9600 | CS8 | 179 | port->tty->termios->c_cflag = B9600 | CS8 |
179 | | CREAD | HUPCL | CLOCAL; | 180 | | CREAD | HUPCL | CLOCAL; |
181 | termios->c_ispeed = 9600; | ||
182 | termios->c_ospeed = 9600; | ||
180 | priv->termios_initialized = 1; | 183 | priv->termios_initialized = 1; |
181 | } | 184 | } |
182 | spin_unlock_irqrestore(&priv->lock, flags); | 185 | spin_unlock_irqrestore(&priv->lock, flags); |
183 | 186 | ||
184 | cflag = port->tty->termios->c_cflag; | 187 | cflag = termios->c_cflag; |
188 | termios->c_cflag &= ~(CMSPAR|CRTSCTS); | ||
185 | 189 | ||
186 | buf = kmalloc(1, GFP_KERNEL); | 190 | buf = kmalloc(1, GFP_KERNEL); |
187 | if (!buf) { | 191 | if (!buf) { |
@@ -254,9 +258,13 @@ static void ark3116_set_termios(struct usb_serial_port *port, | |||
254 | case 115200: | 258 | case 115200: |
255 | case 230400: | 259 | case 230400: |
256 | case 460800: | 260 | case 460800: |
261 | /* Report the resulting rate back to the caller */ | ||
262 | tty_encode_baud_rate(port->tty, baud, baud); | ||
257 | break; | 263 | break; |
258 | /* set 9600 as default (if given baudrate is invalid for example) */ | 264 | /* set 9600 as default (if given baudrate is invalid for example) */ |
259 | default: | 265 | default: |
266 | tty_encode_baud_rate(port->tty, 9600, 9600); | ||
267 | case 0: | ||
260 | baud = 9600; | 268 | baud = 9600; |
261 | } | 269 | } |
262 | 270 | ||
@@ -302,6 +310,7 @@ static void ark3116_set_termios(struct usb_serial_port *port, | |||
302 | /* TEST ARK3116_SND(154, 0xFE, 0x40, 0xFFFF, 0x0006); */ | 310 | /* TEST ARK3116_SND(154, 0xFE, 0x40, 0xFFFF, 0x0006); */ |
303 | 311 | ||
304 | kfree(buf); | 312 | kfree(buf); |
313 | |||
305 | return; | 314 | return; |
306 | } | 315 | } |
307 | 316 | ||
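
The ark3116 change caches port->tty->termios in a local pointer, masks off modes the hardware cannot do (CMSPAR, CRTSCTS), and, once a rate has been chosen, writes the rate the hardware will actually use back into the termios with tty_encode_baud_rate(), falling back to 9600 for unsupported requests. A compact sketch of that report-back idiom (illustrative only, hypothetical example_set_termios, reduced rate table):

    #include <linux/tty.h>
    #include <linux/usb/serial.h>

    static void example_set_termios(struct usb_serial_port *port,
                                    struct ktermios *old_termios)
    {
            struct ktermios *termios = port->tty->termios;
            speed_t baud = tty_get_baud_rate(port->tty);

            termios->c_cflag &= ~(CMSPAR | CRTSCTS);  /* not supported */

            switch (baud) {
            case 9600:
            case 19200:
            case 115200:
                    break;                    /* rate usable as requested */
            default:
                    baud = 9600;              /* fall back to a safe rate */
            }
            /* tell the tty layer what the hardware will really run at */
            tty_encode_baud_rate(port->tty, baud, baud);
            /* ... program the device divisor for 'baud' here ... */
    }
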
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 6b252ceb39a8..42582d49b69c 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c | |||
@@ -272,9 +272,6 @@ static void ch341_set_termios(struct usb_serial_port *port, | |||
272 | 272 | ||
273 | dbg("ch341_set_termios()"); | 273 | dbg("ch341_set_termios()"); |
274 | 274 | ||
275 | if (!tty || !tty->termios) | ||
276 | return; | ||
277 | |||
278 | baud_rate = tty_get_baud_rate(tty); | 275 | baud_rate = tty_get_baud_rate(tty); |
279 | 276 | ||
280 | switch (baud_rate) { | 277 | switch (baud_rate) { |
@@ -299,6 +296,11 @@ static void ch341_set_termios(struct usb_serial_port *port, | |||
299 | * (cflag & PARENB) : parity {NONE, EVEN, ODD} | 296 | * (cflag & PARENB) : parity {NONE, EVEN, ODD} |
300 | * (cflag & CSTOPB) : stop bits [1, 2] | 297 | * (cflag & CSTOPB) : stop bits [1, 2] |
301 | */ | 298 | */ |
299 | |||
300 | /* Copy back the old hardware settings */ | ||
301 | tty_termios_copy_hw(tty->termios, old_termios); | ||
302 | /* And re-encode with the new baud */ | ||
303 | tty_encode_baud_rate(tty, baud_rate, baud_rate); | ||
302 | } | 304 | } |
303 | 305 | ||
304 | static struct usb_driver ch341_driver = { | 306 | static struct usb_driver ch341_driver = { |
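
In ch341_set_termios the NULL checks disappear (the caller now guarantees a termios), and since this hardware only honours the baud rate, the hardware-control fields are copied back from old_termios with tty_termios_copy_hw() before the accepted rate is re-encoded. The tail of such a handler looks roughly like this sketch (hypothetical function, not the driver itself):

    static void example_set_termios(struct usb_serial_port *port,
                                    struct ktermios *old_termios)
    {
            struct tty_struct *tty = port->tty;
            speed_t baud = tty_get_baud_rate(tty);

            /* ... program only the baud rate into the hardware ... */

            /* copy back the hw fields (cflag, speeds) the device ignored,
             * then advertise the rate that was actually accepted          */
            tty_termios_copy_hw(tty->termios, old_termios);
            tty_encode_baud_rate(tty, baud, baud);
    }
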
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c index 9386e216d681..0362654d3b52 100644 --- a/drivers/usb/serial/console.c +++ b/drivers/usb/serial/console.c | |||
@@ -164,6 +164,7 @@ static int usb_console_setup(struct console *co, char *options) | |||
164 | } | 164 | } |
165 | 165 | ||
166 | if (serial->type->set_termios) { | 166 | if (serial->type->set_termios) { |
167 | struct ktermios dummy; | ||
167 | /* build up a fake tty structure so that the open call has something | 168 | /* build up a fake tty structure so that the open call has something |
168 | * to look at to get the cflag value */ | 169 | * to look at to get the cflag value */ |
169 | tty = kzalloc(sizeof(*tty), GFP_KERNEL); | 170 | tty = kzalloc(sizeof(*tty), GFP_KERNEL); |
@@ -177,12 +178,13 @@ static int usb_console_setup(struct console *co, char *options) | |||
177 | kfree (tty); | 178 | kfree (tty); |
178 | return -ENOMEM; | 179 | return -ENOMEM; |
179 | } | 180 | } |
181 | memset(&dummy, 0, sizeof(struct ktermios)); | ||
180 | termios->c_cflag = cflag; | 182 | termios->c_cflag = cflag; |
181 | tty->termios = termios; | 183 | tty->termios = termios; |
182 | port->tty = tty; | 184 | port->tty = tty; |
183 | 185 | ||
184 | /* set up the initial termios settings */ | 186 | /* set up the initial termios settings */ |
185 | serial->type->set_termios(port, NULL); | 187 | serial->type->set_termios(port, &dummy); |
186 | port->tty = NULL; | 188 | port->tty = NULL; |
187 | kfree (termios); | 189 | kfree (termios); |
188 | kfree (tty); | 190 | kfree (tty); |
diff --git a/drivers/usb/serial/cp2101.c b/drivers/usb/serial/cp2101.c index eb7df1835c11..3a83cb4c4bc2 100644 --- a/drivers/usb/serial/cp2101.c +++ b/drivers/usb/serial/cp2101.c | |||
@@ -361,7 +361,6 @@ static void cp2101_get_termios (struct usb_serial_port *port) | |||
361 | dbg("%s - no tty structures", __FUNCTION__); | 361 | dbg("%s - no tty structures", __FUNCTION__); |
362 | return; | 362 | return; |
363 | } | 363 | } |
364 | cflag = port->tty->termios->c_cflag; | ||
365 | 364 | ||
366 | cp2101_get_config(port, CP2101_BAUDRATE, &baud, 2); | 365 | cp2101_get_config(port, CP2101_BAUDRATE, &baud, 2); |
367 | /* Convert to baudrate */ | 366 | /* Convert to baudrate */ |
@@ -369,40 +368,9 @@ static void cp2101_get_termios (struct usb_serial_port *port) | |||
369 | baud = BAUD_RATE_GEN_FREQ / baud; | 368 | baud = BAUD_RATE_GEN_FREQ / baud; |
370 | 369 | ||
371 | dbg("%s - baud rate = %d", __FUNCTION__, baud); | 370 | dbg("%s - baud rate = %d", __FUNCTION__, baud); |
372 | cflag &= ~CBAUD; | 371 | |
373 | switch (baud) { | 372 | tty_encode_baud_rate(port->tty, baud, baud); |
374 | /* | 373 | cflag = port->tty->termios->c_cflag; |
375 | * The baud rates which are commented out below | ||
376 | * appear to be supported by the device | ||
377 | * but are non-standard | ||
378 | */ | ||
379 | case 600: cflag |= B600; break; | ||
380 | case 1200: cflag |= B1200; break; | ||
381 | case 1800: cflag |= B1800; break; | ||
382 | case 2400: cflag |= B2400; break; | ||
383 | case 4800: cflag |= B4800; break; | ||
384 | /*case 7200: cflag |= B7200; break;*/ | ||
385 | case 9600: cflag |= B9600; break; | ||
386 | /*case 14400: cflag |= B14400; break;*/ | ||
387 | case 19200: cflag |= B19200; break; | ||
388 | /*case 28800: cflag |= B28800; break;*/ | ||
389 | case 38400: cflag |= B38400; break; | ||
390 | /*case 55854: cflag |= B55054; break;*/ | ||
391 | case 57600: cflag |= B57600; break; | ||
392 | case 115200: cflag |= B115200; break; | ||
393 | /*case 127117: cflag |= B127117; break;*/ | ||
394 | case 230400: cflag |= B230400; break; | ||
395 | case 460800: cflag |= B460800; break; | ||
396 | case 921600: cflag |= B921600; break; | ||
397 | /*case 3686400: cflag |= B3686400; break;*/ | ||
398 | default: | ||
399 | dbg("%s - Baud rate is not supported, " | ||
400 | "using 9600 baud", __FUNCTION__); | ||
401 | cflag |= B9600; | ||
402 | cp2101_set_config_single(port, CP2101_BAUDRATE, | ||
403 | (BAUD_RATE_GEN_FREQ/9600)); | ||
404 | break; | ||
405 | } | ||
406 | 374 | ||
407 | cp2101_get_config(port, CP2101_BITS, &bits, 2); | 375 | cp2101_get_config(port, CP2101_BITS, &bits, 2); |
408 | cflag &= ~CSIZE; | 376 | cflag &= ~CSIZE; |
@@ -516,7 +484,7 @@ static void cp2101_get_termios (struct usb_serial_port *port) | |||
516 | static void cp2101_set_termios (struct usb_serial_port *port, | 484 | static void cp2101_set_termios (struct usb_serial_port *port, |
517 | struct ktermios *old_termios) | 485 | struct ktermios *old_termios) |
518 | { | 486 | { |
519 | unsigned int cflag, old_cflag=0; | 487 | unsigned int cflag, old_cflag; |
520 | int baud=0, bits; | 488 | int baud=0, bits; |
521 | unsigned int modem_ctl[4]; | 489 | unsigned int modem_ctl[4]; |
522 | 490 | ||
@@ -526,6 +494,8 @@ static void cp2101_set_termios (struct usb_serial_port *port, | |||
526 | dbg("%s - no tty structures", __FUNCTION__); | 494 | dbg("%s - no tty structures", __FUNCTION__); |
527 | return; | 495 | return; |
528 | } | 496 | } |
497 | port->tty->termios->c_cflag &= ~CMSPAR; | ||
498 | |||
529 | cflag = port->tty->termios->c_cflag; | 499 | cflag = port->tty->termios->c_cflag; |
530 | old_cflag = old_termios->c_cflag; | 500 | old_cflag = old_termios->c_cflag; |
531 | baud = tty_get_baud_rate(port->tty); | 501 | baud = tty_get_baud_rate(port->tty); |
@@ -563,11 +533,15 @@ static void cp2101_set_termios (struct usb_serial_port *port, | |||
563 | dbg("%s - Setting baud rate to %d baud", __FUNCTION__, | 533 | dbg("%s - Setting baud rate to %d baud", __FUNCTION__, |
564 | baud); | 534 | baud); |
565 | if (cp2101_set_config_single(port, CP2101_BAUDRATE, | 535 | if (cp2101_set_config_single(port, CP2101_BAUDRATE, |
566 | (BAUD_RATE_GEN_FREQ / baud))) | 536 | (BAUD_RATE_GEN_FREQ / baud))) { |
567 | dev_err(&port->dev, "Baud rate requested not " | 537 | dev_err(&port->dev, "Baud rate requested not " |
568 | "supported by device\n"); | 538 | "supported by device\n"); |
539 | baud = tty_termios_baud_rate(old_termios); | ||
540 | } | ||
569 | } | 541 | } |
570 | } | 542 | } |
543 | /* Report back the resulting baud rate */ | ||
544 | tty_encode_baud_rate(port->tty, baud, baud); | ||
571 | 545 | ||
572 | /* If the number of data bits is to be updated */ | 546 | /* If the number of data bits is to be updated */ |
573 | if ((cflag & CSIZE) != (old_cflag & CSIZE)) { | 547 | if ((cflag & CSIZE) != (old_cflag & CSIZE)) { |
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c index dab2e66d111d..ae410c4678ea 100644 --- a/drivers/usb/serial/digi_acceleport.c +++ b/drivers/usb/serial/digi_acceleport.c | |||
@@ -973,6 +973,8 @@ static void digi_set_termios(struct usb_serial_port *port, | |||
973 | } | 973 | } |
974 | } | 974 | } |
975 | /* set parity */ | 975 | /* set parity */ |
976 | tty->termios->c_cflag &= ~CMSPAR; | ||
977 | |||
976 | if ((cflag&(PARENB|PARODD)) != (old_cflag&(PARENB|PARODD))) { | 978 | if ((cflag&(PARENB|PARODD)) != (old_cflag&(PARENB|PARODD))) { |
977 | if (cflag&PARENB) { | 979 | if (cflag&PARENB) { |
978 | if (cflag&PARODD) | 980 | if (cflag&PARODD) |
@@ -1054,15 +1056,15 @@ static void digi_set_termios(struct usb_serial_port *port, | |||
1054 | } | 1056 | } |
1055 | 1057 | ||
1056 | /* set output flow control */ | 1058 | /* set output flow control */ |
1057 | if ((iflag&IXON) != (old_iflag&IXON) | 1059 | if ((iflag & IXON) != (old_iflag & IXON) |
1058 | || (cflag&CRTSCTS) != (old_cflag&CRTSCTS)) { | 1060 | || (cflag & CRTSCTS) != (old_cflag & CRTSCTS)) { |
1059 | arg = 0; | 1061 | arg = 0; |
1060 | if (iflag&IXON) | 1062 | if (iflag & IXON) |
1061 | arg |= DIGI_OUTPUT_FLOW_CONTROL_XON_XOFF; | 1063 | arg |= DIGI_OUTPUT_FLOW_CONTROL_XON_XOFF; |
1062 | else | 1064 | else |
1063 | arg &= ~DIGI_OUTPUT_FLOW_CONTROL_XON_XOFF; | 1065 | arg &= ~DIGI_OUTPUT_FLOW_CONTROL_XON_XOFF; |
1064 | 1066 | ||
1065 | if (cflag&CRTSCTS) { | 1067 | if (cflag & CRTSCTS) { |
1066 | arg |= DIGI_OUTPUT_FLOW_CONTROL_CTS; | 1068 | arg |= DIGI_OUTPUT_FLOW_CONTROL_CTS; |
1067 | } else { | 1069 | } else { |
1068 | arg &= ~DIGI_OUTPUT_FLOW_CONTROL_CTS; | 1070 | arg &= ~DIGI_OUTPUT_FLOW_CONTROL_CTS; |
@@ -1076,8 +1078,8 @@ static void digi_set_termios(struct usb_serial_port *port, | |||
1076 | } | 1078 | } |
1077 | 1079 | ||
1078 | /* set receive enable/disable */ | 1080 | /* set receive enable/disable */ |
1079 | if ((cflag&CREAD) != (old_cflag&CREAD)) { | 1081 | if ((cflag & CREAD) != (old_cflag & CREAD)) { |
1080 | if (cflag&CREAD) | 1082 | if (cflag & CREAD) |
1081 | arg = DIGI_ENABLE; | 1083 | arg = DIGI_ENABLE; |
1082 | else | 1084 | else |
1083 | arg = DIGI_DISABLE; | 1085 | arg = DIGI_DISABLE; |
@@ -1089,7 +1091,7 @@ static void digi_set_termios(struct usb_serial_port *port, | |||
1089 | } | 1091 | } |
1090 | if ((ret = digi_write_oob_command(port, buf, i, 1)) != 0) | 1092 | if ((ret = digi_write_oob_command(port, buf, i, 1)) != 0) |
1091 | dbg("digi_set_termios: write oob failed, ret=%d", ret); | 1093 | dbg("digi_set_termios: write oob failed, ret=%d", ret); |
1092 | 1094 | tty_encode_baud_rate(tty, baud, baud); | |
1093 | } | 1095 | } |
1094 | 1096 | ||
1095 | 1097 | ||
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c index 050fcc996f56..a5c8e1e17ea5 100644 --- a/drivers/usb/serial/empeg.c +++ b/drivers/usb/serial/empeg.c | |||
@@ -449,14 +449,9 @@ static int empeg_ioctl (struct usb_serial_port *port, struct file * file, unsign | |||
449 | 449 | ||
450 | static void empeg_set_termios (struct usb_serial_port *port, struct ktermios *old_termios) | 450 | static void empeg_set_termios (struct usb_serial_port *port, struct ktermios *old_termios) |
451 | { | 451 | { |
452 | 452 | struct ktermios *termios = port->tty->termios; | |
453 | dbg("%s - port %d", __FUNCTION__, port->number); | 453 | dbg("%s - port %d", __FUNCTION__, port->number); |
454 | 454 | ||
455 | if ((!port->tty) || (!port->tty->termios)) { | ||
456 | dbg("%s - no tty structures", __FUNCTION__); | ||
457 | return; | ||
458 | } | ||
459 | |||
460 | /* | 455 | /* |
461 | * The empeg-car player wants these particular tty settings. | 456 | * The empeg-car player wants these particular tty settings. |
462 | * You could, for example, change the baud rate, however the | 457 | * You could, for example, change the baud rate, however the |
@@ -466,7 +461,7 @@ static void empeg_set_termios (struct usb_serial_port *port, struct ktermios *ol | |||
466 | * | 461 | * |
467 | * The default requirements for this device are: | 462 | * The default requirements for this device are: |
468 | */ | 463 | */ |
469 | port->tty->termios->c_iflag | 464 | termios->c_iflag |
470 | &= ~(IGNBRK /* disable ignore break */ | 465 | &= ~(IGNBRK /* disable ignore break */ |
471 | | BRKINT /* disable break causes interrupt */ | 466 | | BRKINT /* disable break causes interrupt */ |
472 | | PARMRK /* disable mark parity errors */ | 467 | | PARMRK /* disable mark parity errors */ |
@@ -476,24 +471,23 @@ static void empeg_set_termios (struct usb_serial_port *port, struct ktermios *ol | |||
476 | | ICRNL /* disable translate CR to NL */ | 471 | | ICRNL /* disable translate CR to NL */ |
477 | | IXON); /* disable enable XON/XOFF flow control */ | 472 | | IXON); /* disable enable XON/XOFF flow control */ |
478 | 473 | ||
479 | port->tty->termios->c_oflag | 474 | termios->c_oflag |
480 | &= ~OPOST; /* disable postprocess output characters */ | 475 | &= ~OPOST; /* disable postprocess output characters */ |
481 | 476 | ||
482 | port->tty->termios->c_lflag | 477 | termios->c_lflag |
483 | &= ~(ECHO /* disable echo input characters */ | 478 | &= ~(ECHO /* disable echo input characters */ |
484 | | ECHONL /* disable echo new line */ | 479 | | ECHONL /* disable echo new line */ |
485 | | ICANON /* disable erase, kill, werase, and rprnt special characters */ | 480 | | ICANON /* disable erase, kill, werase, and rprnt special characters */ |
486 | | ISIG /* disable interrupt, quit, and suspend special characters */ | 481 | | ISIG /* disable interrupt, quit, and suspend special characters */ |
487 | | IEXTEN); /* disable non-POSIX special characters */ | 482 | | IEXTEN); /* disable non-POSIX special characters */ |
488 | 483 | ||
489 | port->tty->termios->c_cflag | 484 | termios->c_cflag |
490 | &= ~(CSIZE /* no size */ | 485 | &= ~(CSIZE /* no size */ |
491 | | PARENB /* disable parity bit */ | 486 | | PARENB /* disable parity bit */ |
492 | | CBAUD); /* clear current baud rate */ | 487 | | CBAUD); /* clear current baud rate */ |
493 | 488 | ||
494 | port->tty->termios->c_cflag | 489 | termios->c_cflag |
495 | |= (CS8 /* character size 8 bits */ | 490 | |= CS8; /* character size 8 bits */ |
496 | | B115200); /* baud rate 115200 */ | ||
497 | 491 | ||
498 | /* | 492 | /* |
499 | * Force low_latency on; otherwise the pushes are scheduled; | 493 | * Force low_latency on; otherwise the pushes are scheduled; |
@@ -501,8 +495,7 @@ static void empeg_set_termios (struct usb_serial_port *port, struct ktermios *ol | |||
501 | * on the floor. We don't want to drop bytes on the floor. :) | 495 | * on the floor. We don't want to drop bytes on the floor. :) |
502 | */ | 496 | */ |
503 | port->tty->low_latency = 1; | 497 | port->tty->low_latency = 1; |
504 | 498 | tty_encode_baud_rate(port->tty, 115200, 115200); | |
505 | return; | ||
506 | } | 499 | } |
507 | 500 | ||
508 | 501 | ||
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 8a8a6b9fb05b..c40e77dccf8e 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -294,7 +294,7 @@ struct ftdi_private { | |||
294 | 294 | ||
295 | __u16 interface; /* FT2232C port interface (0 for FT232/245) */ | 295 | __u16 interface; /* FT2232C port interface (0 for FT232/245) */ |
296 | 296 | ||
297 | int force_baud; /* if non-zero, force the baud rate to this value */ | 297 | speed_t force_baud; /* if non-zero, force the baud rate to this value */ |
298 | int force_rtscts; /* if non-zero, force RTS-CTS to always be enabled */ | 298 | int force_rtscts; /* if non-zero, force RTS-CTS to always be enabled */ |
299 | 299 | ||
300 | spinlock_t tx_lock; /* spinlock for transmit state */ | 300 | spinlock_t tx_lock; /* spinlock for transmit state */ |
@@ -878,6 +878,7 @@ static __u32 get_ftdi_divisor(struct usb_serial_port * port) | |||
878 | if (div_value == 0) { | 878 | if (div_value == 0) { |
879 | dbg("%s - Baudrate (%d) requested is not supported", __FUNCTION__, baud); | 879 | dbg("%s - Baudrate (%d) requested is not supported", __FUNCTION__, baud); |
880 | div_value = ftdi_sio_b9600; | 880 | div_value = ftdi_sio_b9600; |
881 | baud = 9600; | ||
881 | div_okay = 0; | 882 | div_okay = 0; |
882 | } | 883 | } |
883 | break; | 884 | break; |
@@ -886,6 +887,7 @@ static __u32 get_ftdi_divisor(struct usb_serial_port * port) | |||
886 | div_value = ftdi_232am_baud_to_divisor(baud); | 887 | div_value = ftdi_232am_baud_to_divisor(baud); |
887 | } else { | 888 | } else { |
888 | dbg("%s - Baud rate too high!", __FUNCTION__); | 889 | dbg("%s - Baud rate too high!", __FUNCTION__); |
890 | baud = 9600; | ||
889 | div_value = ftdi_232am_baud_to_divisor(9600); | 891 | div_value = ftdi_232am_baud_to_divisor(9600); |
890 | div_okay = 0; | 892 | div_okay = 0; |
891 | } | 893 | } |
@@ -899,6 +901,7 @@ static __u32 get_ftdi_divisor(struct usb_serial_port * port) | |||
899 | dbg("%s - Baud rate too high!", __FUNCTION__); | 901 | dbg("%s - Baud rate too high!", __FUNCTION__); |
900 | div_value = ftdi_232bm_baud_to_divisor(9600); | 902 | div_value = ftdi_232bm_baud_to_divisor(9600); |
901 | div_okay = 0; | 903 | div_okay = 0; |
904 | baud = 9600; | ||
902 | } | 905 | } |
903 | break; | 906 | break; |
904 | } /* priv->chip_type */ | 907 | } /* priv->chip_type */ |
@@ -909,6 +912,7 @@ static __u32 get_ftdi_divisor(struct usb_serial_port * port) | |||
909 | ftdi_chip_name[priv->chip_type]); | 912 | ftdi_chip_name[priv->chip_type]); |
910 | } | 913 | } |
911 | 914 | ||
915 | tty_encode_baud_rate(port->tty, baud, baud); | ||
912 | return(div_value); | 916 | return(div_value); |
913 | } | 917 | } |
914 | 918 | ||
@@ -1263,7 +1267,7 @@ static void ftdi_USB_UIRT_setup (struct ftdi_private *priv) | |||
1263 | 1267 | ||
1264 | priv->flags |= ASYNC_SPD_CUST; | 1268 | priv->flags |= ASYNC_SPD_CUST; |
1265 | priv->custom_divisor = 77; | 1269 | priv->custom_divisor = 77; |
1266 | priv->force_baud = B38400; | 1270 | priv->force_baud = 38400; |
1267 | } /* ftdi_USB_UIRT_setup */ | 1271 | } /* ftdi_USB_UIRT_setup */ |
1268 | 1272 | ||
1269 | /* Setup for the HE-TIRA1 device, which requires hardwired | 1273 | /* Setup for the HE-TIRA1 device, which requires hardwired |
@@ -1274,7 +1278,7 @@ static void ftdi_HE_TIRA1_setup (struct ftdi_private *priv) | |||
1274 | 1278 | ||
1275 | priv->flags |= ASYNC_SPD_CUST; | 1279 | priv->flags |= ASYNC_SPD_CUST; |
1276 | priv->custom_divisor = 240; | 1280 | priv->custom_divisor = 240; |
1277 | priv->force_baud = B38400; | 1281 | priv->force_baud = 38400; |
1278 | priv->force_rtscts = 1; | 1282 | priv->force_rtscts = 1; |
1279 | } /* ftdi_HE_TIRA1_setup */ | 1283 | } /* ftdi_HE_TIRA1_setup */ |
1280 | 1284 | ||
@@ -1363,7 +1367,7 @@ static int ftdi_open (struct usb_serial_port *port, struct file *filp) | |||
1363 | 1367 | ||
1364 | /* ftdi_set_termios will send usb control messages */ | 1368 | /* ftdi_set_termios will send usb control messages */ |
1365 | if (port->tty) | 1369 | if (port->tty) |
1366 | ftdi_set_termios(port, NULL); | 1370 | ftdi_set_termios(port, port->tty->termios); |
1367 | 1371 | ||
1368 | /* FIXME: Flow control might be enabled, so it should be checked - | 1372 | /* FIXME: Flow control might be enabled, so it should be checked - |
1369 | we have no control of defaults! */ | 1373 | we have no control of defaults! */ |
@@ -1933,32 +1937,33 @@ static void ftdi_break_ctl( struct usb_serial_port *port, int break_state ) | |||
1933 | static void ftdi_set_termios (struct usb_serial_port *port, struct ktermios *old_termios) | 1937 | static void ftdi_set_termios (struct usb_serial_port *port, struct ktermios *old_termios) |
1934 | { /* ftdi_termios */ | 1938 | { /* ftdi_termios */ |
1935 | struct usb_device *dev = port->serial->dev; | 1939 | struct usb_device *dev = port->serial->dev; |
1936 | unsigned int cflag = port->tty->termios->c_cflag; | ||
1937 | struct ftdi_private *priv = usb_get_serial_port_data(port); | 1940 | struct ftdi_private *priv = usb_get_serial_port_data(port); |
1941 | struct ktermios *termios = port->tty->termios; | ||
1942 | unsigned int cflag = termios->c_cflag; | ||
1938 | __u16 urb_value; /* will hold the new flags */ | 1943 | __u16 urb_value; /* will hold the new flags */ |
1939 | char buf[1]; /* Perhaps I should dynamically alloc this? */ | 1944 | char buf[1]; /* Perhaps I should dynamically alloc this? */ |
1940 | 1945 | ||
1941 | // Added for xon/xoff support | 1946 | // Added for xon/xoff support |
1942 | unsigned int iflag = port->tty->termios->c_iflag; | 1947 | unsigned int iflag = termios->c_iflag; |
1943 | unsigned char vstop; | 1948 | unsigned char vstop; |
1944 | unsigned char vstart; | 1949 | unsigned char vstart; |
1945 | 1950 | ||
1946 | dbg("%s", __FUNCTION__); | 1951 | dbg("%s", __FUNCTION__); |
1947 | 1952 | ||
1948 | /* Force baud rate if this device requires it, unless it is set to B0. */ | 1953 | /* Force baud rate if this device requires it, unless it is set to B0. */ |
1949 | if (priv->force_baud && ((port->tty->termios->c_cflag & CBAUD) != B0)) { | 1954 | if (priv->force_baud && ((termios->c_cflag & CBAUD) != B0)) { |
1950 | dbg("%s: forcing baud rate for this device", __FUNCTION__); | 1955 | dbg("%s: forcing baud rate for this device", __FUNCTION__); |
1951 | port->tty->termios->c_cflag &= ~CBAUD; | 1956 | tty_encode_baud_rate(port->tty, priv->force_baud, |
1952 | port->tty->termios->c_cflag |= priv->force_baud; | 1957 | priv->force_baud); |
1953 | } | 1958 | } |
1954 | 1959 | ||
1955 | /* Force RTS-CTS if this device requires it. */ | 1960 | /* Force RTS-CTS if this device requires it. */ |
1956 | if (priv->force_rtscts) { | 1961 | if (priv->force_rtscts) { |
1957 | dbg("%s: forcing rtscts for this device", __FUNCTION__); | 1962 | dbg("%s: forcing rtscts for this device", __FUNCTION__); |
1958 | port->tty->termios->c_cflag |= CRTSCTS; | 1963 | termios->c_cflag |= CRTSCTS; |
1959 | } | 1964 | } |
1960 | 1965 | ||
1961 | cflag = port->tty->termios->c_cflag; | 1966 | cflag = termios->c_cflag; |
1962 | 1967 | ||
1963 | /* FIXME -For this cut I don't care if the line is really changing or | 1968 | /* FIXME -For this cut I don't care if the line is really changing or |
1964 | not - so just do the change regardless - should be able to | 1969 | not - so just do the change regardless - should be able to |
@@ -1969,6 +1974,8 @@ static void ftdi_set_termios (struct usb_serial_port *port, struct ktermios *old | |||
1969 | 1974 | ||
1970 | /* Set number of data bits, parity, stop bits */ | 1975 | /* Set number of data bits, parity, stop bits */ |
1971 | 1976 | ||
1977 | termios->c_cflag &= ~CMSPAR; | ||
1978 | |||
1972 | urb_value = 0; | 1979 | urb_value = 0; |
1973 | urb_value |= (cflag & CSTOPB ? FTDI_SIO_SET_DATA_STOP_BITS_2 : | 1980 | urb_value |= (cflag & CSTOPB ? FTDI_SIO_SET_DATA_STOP_BITS_2 : |
1974 | FTDI_SIO_SET_DATA_STOP_BITS_1); | 1981 | FTDI_SIO_SET_DATA_STOP_BITS_1); |
@@ -2048,8 +2055,8 @@ static void ftdi_set_termios (struct usb_serial_port *port, struct ktermios *old | |||
2048 | // Set the vstart and vstop -- could have been done up above where | 2055 | // Set the vstart and vstop -- could have been done up above where |
2049 | // a lot of other dereferencing is done but that would be very | 2056 | // a lot of other dereferencing is done but that would be very |
2050 | // inefficient as vstart and vstop are not always needed | 2057 | // inefficient as vstart and vstop are not always needed |
2051 | vstart=port->tty->termios->c_cc[VSTART]; | 2058 | vstart = termios->c_cc[VSTART]; |
2052 | vstop=port->tty->termios->c_cc[VSTOP]; | 2059 | vstop = termios->c_cc[VSTOP]; |
2053 | urb_value=(vstop << 8) | (vstart); | 2060 | urb_value=(vstop << 8) | (vstart); |
2054 | 2061 | ||
2055 | if (usb_control_msg(dev, | 2062 | if (usb_control_msg(dev, |
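The ftdi_sio hunks above stop rewriting the CBAUD bits by hand when a device needs a forced rate: the driver now calls tty_encode_baud_rate() with the numeric speed, clears the unsupported CMSPAR flag, and hands the real termios to ftdi_set_termios() at open time. A minimal sketch of the forced-baud pattern follows; the function name and the force_baud parameter are illustrative, only the tty helpers come from the patch.

#include <linux/tty.h>

/* Illustrative sketch, not the driver code: force a rate by encoding it
 * into the termios instead of poking CBAUD bits directly. */
static void example_force_baud(struct tty_struct *tty, speed_t force_baud)
{
        struct ktermios *termios = tty->termios;

        /* Leave a B0 (hang-up) request alone. */
        if ((termios->c_cflag & CBAUD) == B0)
                return;

        /* Mark/space parity is not supported by this hardware. */
        termios->c_cflag &= ~CMSPAR;

        /* Record the rate the hardware will really run at, so a later
         * tty_get_baud_rate() or userspace tcgetattr() sees the truth. */
        tty_encode_baud_rate(tty, force_baud, force_baud);
}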
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index 88a2c7dce335..9eb4a65ee4d9 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c | |||
@@ -208,14 +208,15 @@ int usb_serial_generic_write(struct usb_serial_port *port, const unsigned char * | |||
208 | 208 | ||
209 | /* only do something if we have a bulk out endpoint */ | 209 | /* only do something if we have a bulk out endpoint */ |
210 | if (serial->num_bulk_out) { | 210 | if (serial->num_bulk_out) { |
211 | spin_lock_bh(&port->lock); | 211 | unsigned long flags; |
212 | spin_lock_irqsave(&port->lock, flags); | ||
212 | if (port->write_urb_busy) { | 213 | if (port->write_urb_busy) { |
213 | spin_unlock_bh(&port->lock); | 214 | spin_unlock_irqrestore(&port->lock, flags); |
214 | dbg("%s - already writing", __FUNCTION__); | 215 | dbg("%s - already writing", __FUNCTION__); |
215 | return 0; | 216 | return 0; |
216 | } | 217 | } |
217 | port->write_urb_busy = 1; | 218 | port->write_urb_busy = 1; |
218 | spin_unlock_bh(&port->lock); | 219 | spin_unlock_irqrestore(&port->lock, flags); |
219 | 220 | ||
220 | count = (count > port->bulk_out_size) ? port->bulk_out_size : count; | 221 | count = (count > port->bulk_out_size) ? port->bulk_out_size : count; |
221 | 222 | ||
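The generic write path above trades spin_lock_bh() for spin_lock_irqsave() around the write_urb_busy flag, presumably because the same lock can be taken from hard-interrupt context. A sketch of that claim/release pattern, with a hypothetical helper name:

#include <linux/spinlock.h>

/* Hypothetical helper illustrating the locking above: returns 1 if we
 * claimed the URB, 0 if another writer already holds it. */
static int example_claim_write_urb(spinlock_t *lock, int *write_urb_busy)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        if (*write_urb_busy) {
                spin_unlock_irqrestore(lock, flags);
                return 0;
        }
        *write_urb_busy = 1;
        spin_unlock_irqrestore(lock, flags);
        return 1;
}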
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index 8dd3abc99d63..a5d2e115e167 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c | |||
@@ -1503,22 +1503,16 @@ static void edge_unthrottle (struct usb_serial_port *port) | |||
1503 | *****************************************************************************/ | 1503 | *****************************************************************************/ |
1504 | static void edge_set_termios (struct usb_serial_port *port, struct ktermios *old_termios) | 1504 | static void edge_set_termios (struct usb_serial_port *port, struct ktermios *old_termios) |
1505 | { | 1505 | { |
1506 | /* FIXME: This function appears unused ?? */ | ||
1506 | struct edgeport_port *edge_port = usb_get_serial_port_data(port); | 1507 | struct edgeport_port *edge_port = usb_get_serial_port_data(port); |
1507 | struct tty_struct *tty = port->tty; | 1508 | struct tty_struct *tty = port->tty; |
1508 | unsigned int cflag; | 1509 | unsigned int cflag; |
1509 | 1510 | ||
1510 | if (!port->tty || !port->tty->termios) { | ||
1511 | dbg ("%s - no tty or termios", __FUNCTION__); | ||
1512 | return; | ||
1513 | } | ||
1514 | |||
1515 | cflag = tty->termios->c_cflag; | 1511 | cflag = tty->termios->c_cflag; |
1516 | dbg("%s - clfag %08x iflag %08x", __FUNCTION__, | 1512 | dbg("%s - clfag %08x iflag %08x", __FUNCTION__, |
1517 | tty->termios->c_cflag, tty->termios->c_iflag); | 1513 | tty->termios->c_cflag, tty->termios->c_iflag); |
1518 | if (old_termios) { | 1514 | dbg("%s - old clfag %08x old iflag %08x", __FUNCTION__, |
1519 | dbg("%s - old clfag %08x old iflag %08x", __FUNCTION__, | 1515 | old_termios->c_cflag, old_termios->c_iflag); |
1520 | old_termios->c_cflag, old_termios->c_iflag); | ||
1521 | } | ||
1522 | 1516 | ||
1523 | dbg("%s - port %d", __FUNCTION__, port->number); | 1517 | dbg("%s - port %d", __FUNCTION__, port->number); |
1524 | 1518 | ||
@@ -2653,7 +2647,11 @@ static void change_port_settings (struct edgeport_port *edge_port, struct ktermi | |||
2653 | 2647 | ||
2654 | dbg("%s - baud rate = %d", __FUNCTION__, baud); | 2648 | dbg("%s - baud rate = %d", __FUNCTION__, baud); |
2655 | status = send_cmd_write_baud_rate (edge_port, baud); | 2649 | status = send_cmd_write_baud_rate (edge_port, baud); |
2656 | 2650 | if (status == -1) { | |
2651 | /* Speed change was not possible - put back the old speed */ | ||
2652 | baud = tty_termios_baud_rate(old_termios); | ||
2653 | tty_encode_baud_rate(tty, baud, baud); | ||
2654 | } | ||
2657 | return; | 2655 | return; |
2658 | } | 2656 | } |
2659 | 2657 | ||
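The io_edgeport change above adds a fallback when the firmware refuses a speed change: the old rate is read back out of old_termios and re-encoded, so the termios never advertises a speed the hardware is not using. A sketch of that fallback, with illustrative names:

#include <linux/tty.h>

/* Sketch only: put the previous speed back if the device rejected the
 * new one (the status == -1 convention is taken from the hunk above). */
static void example_baud_fallback(struct tty_struct *tty,
                                  struct ktermios *old_termios, int status)
{
        if (status == -1) {
                speed_t baud = tty_termios_baud_rate(old_termios);
                tty_encode_baud_rate(tty, baud, baud);
        }
}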
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c index 5ab6a0c5ac52..6b803ab98543 100644 --- a/drivers/usb/serial/ir-usb.c +++ b/drivers/usb/serial/ir-usb.c | |||
@@ -504,11 +504,6 @@ static void ir_set_termios (struct usb_serial_port *port, struct ktermios *old_t | |||
504 | 504 | ||
505 | dbg("%s - port %d", __FUNCTION__, port->number); | 505 | dbg("%s - port %d", __FUNCTION__, port->number); |
506 | 506 | ||
507 | if ((!port->tty) || (!port->tty->termios)) { | ||
508 | dbg("%s - no tty structures", __FUNCTION__); | ||
509 | return; | ||
510 | } | ||
511 | |||
512 | baud = tty_get_baud_rate(port->tty); | 507 | baud = tty_get_baud_rate(port->tty); |
513 | 508 | ||
514 | /* | 509 | /* |
@@ -531,8 +526,6 @@ static void ir_set_termios (struct usb_serial_port *port, struct ktermios *old_t | |||
531 | default: | 526 | default: |
532 | ir_baud = SPEED_9600; | 527 | ir_baud = SPEED_9600; |
533 | baud = 9600; | 528 | baud = 9600; |
534 | /* And once the new tty stuff is all done we need to | ||
535 | call back to correct the baud bits */ | ||
536 | } | 529 | } |
537 | 530 | ||
538 | if (xbof == -1) | 531 | if (xbof == -1) |
@@ -562,6 +555,10 @@ static void ir_set_termios (struct usb_serial_port *port, struct ktermios *old_t | |||
562 | result = usb_submit_urb (port->write_urb, GFP_KERNEL); | 555 | result = usb_submit_urb (port->write_urb, GFP_KERNEL); |
563 | if (result) | 556 | if (result) |
564 | dev_err(&port->dev, "%s - failed submitting write urb, error %d\n", __FUNCTION__, result); | 557 | dev_err(&port->dev, "%s - failed submitting write urb, error %d\n", __FUNCTION__, result); |
558 | |||
559 | /* Only speed changes are supported */ | ||
560 | tty_termios_copy_hw(port->tty->termios, old_termios); | ||
561 | tty_encode_baud_rate(port->tty, baud, baud); | ||
565 | } | 562 | } |
566 | 563 | ||
567 | 564 | ||
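ir-usb only honours speed changes, so the patch copies every other hardware setting back from old_termios with tty_termios_copy_hw() and then encodes the rate the dongle was actually told to use. A compressed sketch of that tail end of set_termios (names are illustrative):

#include <linux/tty.h>

/* Sketch: the end of a set_termios for hardware that only supports speed. */
static void example_speed_only_tail(struct tty_struct *tty,
                                    struct ktermios *old_termios,
                                    speed_t accepted_baud)
{
        /* Everything except the speed stays as it was ... */
        tty_termios_copy_hw(tty->termios, old_termios);
        /* ... and the termios reports the speed the device accepted. */
        tty_encode_baud_rate(tty, accepted_baud, accepted_baud);
}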
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index f2a6fce5de1e..6bfdba6a213f 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c | |||
@@ -278,29 +278,35 @@ static void keyspan_set_termios (struct usb_serial_port *port, | |||
278 | struct keyspan_port_private *p_priv; | 278 | struct keyspan_port_private *p_priv; |
279 | const struct keyspan_device_details *d_details; | 279 | const struct keyspan_device_details *d_details; |
280 | unsigned int cflag; | 280 | unsigned int cflag; |
281 | struct tty_struct *tty = port->tty; | ||
281 | 282 | ||
282 | dbg("%s", __FUNCTION__); | 283 | dbg("%s", __FUNCTION__); |
283 | 284 | ||
284 | p_priv = usb_get_serial_port_data(port); | 285 | p_priv = usb_get_serial_port_data(port); |
285 | d_details = p_priv->device_details; | 286 | d_details = p_priv->device_details; |
286 | cflag = port->tty->termios->c_cflag; | 287 | cflag = tty->termios->c_cflag; |
287 | device_port = port->number - port->serial->minor; | 288 | device_port = port->number - port->serial->minor; |
288 | 289 | ||
289 | /* Baud rate calculation takes baud rate as an integer | 290 | /* Baud rate calculation takes baud rate as an integer |
290 | so other rates can be generated if desired. */ | 291 | so other rates can be generated if desired. */ |
291 | baud_rate = tty_get_baud_rate(port->tty); | 292 | baud_rate = tty_get_baud_rate(tty); |
292 | /* If no match or invalid, don't change */ | 293 | /* If no match or invalid, don't change */ |
293 | if (baud_rate >= 0 | 294 | if (d_details->calculate_baud_rate(baud_rate, d_details->baudclk, |
294 | && d_details->calculate_baud_rate(baud_rate, d_details->baudclk, | ||
295 | NULL, NULL, NULL, device_port) == KEYSPAN_BAUD_RATE_OK) { | 295 | NULL, NULL, NULL, device_port) == KEYSPAN_BAUD_RATE_OK) { |
296 | /* FIXME - more to do here to ensure rate changes cleanly */ | 296 | /* FIXME - more to do here to ensure rate changes cleanly */ |
297 | /* FIXME - calcuate exact rate from divisor ? */ | ||
297 | p_priv->baud = baud_rate; | 298 | p_priv->baud = baud_rate; |
298 | } | 299 | } else |
300 | baud_rate = tty_termios_baud_rate(old_termios); | ||
299 | 301 | ||
302 | tty_encode_baud_rate(tty, baud_rate, baud_rate); | ||
300 | /* set CTS/RTS handshake etc. */ | 303 | /* set CTS/RTS handshake etc. */ |
301 | p_priv->cflag = cflag; | 304 | p_priv->cflag = cflag; |
302 | p_priv->flow_control = (cflag & CRTSCTS)? flow_cts: flow_none; | 305 | p_priv->flow_control = (cflag & CRTSCTS)? flow_cts: flow_none; |
303 | 306 | ||
307 | /* Mark/Space not supported */ | ||
308 | tty->termios->c_cflag &= ~CMSPAR; | ||
309 | |||
304 | keyspan_send_setup(port, 0); | 310 | keyspan_send_setup(port, 0); |
305 | } | 311 | } |
306 | 312 | ||
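keyspan_set_termios() now validates the requested rate against the device's own baud calculator before committing it, falls back to the rate stored in old_termios otherwise, and writes the final choice back with tty_encode_baud_rate(); CMSPAR is also cleared since mark/space parity is unsupported. A sketch of the decision, where the can_do_rate() predicate stands in for d_details->calculate_baud_rate():

#include <linux/tty.h>

/* Sketch of the validate-or-fall-back step; can_do_rate() is a stand-in
 * for the device-specific calculate_baud_rate() check. */
static speed_t example_pick_rate(struct tty_struct *tty,
                                 struct ktermios *old_termios,
                                 int (*can_do_rate)(speed_t))
{
        speed_t baud = tty_get_baud_rate(tty);

        if (!can_do_rate(baud))
                baud = tty_termios_baud_rate(old_termios);
        tty_encode_baud_rate(tty, baud, baud);
        return baud;
}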
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c index 6f224195bd25..aee450246bfd 100644 --- a/drivers/usb/serial/kobil_sct.c +++ b/drivers/usb/serial/kobil_sct.c | |||
@@ -616,8 +616,9 @@ static void kobil_set_termios(struct usb_serial_port *port, struct ktermios *old | |||
616 | case 1200: | 616 | case 1200: |
617 | urb_val = SUSBCR_SBR_1200; | 617 | urb_val = SUSBCR_SBR_1200; |
618 | break; | 618 | break; |
619 | case 9600: | ||
620 | default: | 619 | default: |
620 | speed = 9600; | ||
621 | case 9600: | ||
621 | urb_val = SUSBCR_SBR_9600; | 622 | urb_val = SUSBCR_SBR_9600; |
622 | break; | 623 | break; |
623 | } | 624 | } |
@@ -641,6 +642,8 @@ static void kobil_set_termios(struct usb_serial_port *port, struct ktermios *old | |||
641 | urb_val |= SUSBCR_SPASB_NoParity; | 642 | urb_val |= SUSBCR_SPASB_NoParity; |
642 | strcat(settings, "No Parity"); | 643 | strcat(settings, "No Parity"); |
643 | } | 644 | } |
645 | port->tty->termios->c_cflag &= ~CMSPAR; | ||
646 | tty_encode_baud_rate(port->tty, speed, speed); | ||
644 | 647 | ||
645 | result = usb_control_msg( port->serial->dev, | 648 | result = usb_control_msg( port->serial->dev, |
646 | usb_rcvctrlpipe(port->serial->dev, 0 ), | 649 | usb_rcvctrlpipe(port->serial->dev, 0 ), |
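The kobil_sct reordering makes the default case fall through into the 9600 case, so an unrecognised request is coerced to 9600 before the corrected speed is encoded back into the termios. Read in sequence after the patch, the relevant lines look roughly like this sketch:

        switch (speed) {
        case 1200:
                urb_val = SUSBCR_SBR_1200;
                break;
        default:
                speed = 9600;
                /* fall through */
        case 9600:
                urb_val = SUSBCR_SBR_9600;
                break;
        }
        port->tty->termios->c_cflag &= ~CMSPAR;
        tty_encode_baud_rate(port->tty, speed, speed);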
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index f76480f1455d..a5ced7e08cbf 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
@@ -1977,11 +1977,6 @@ static void mos7840_change_port_settings(struct moschip_port *mos7840_port, | |||
1977 | 1977 | ||
1978 | tty = mos7840_port->port->tty; | 1978 | tty = mos7840_port->port->tty; |
1979 | 1979 | ||
1980 | if ((!tty) || (!tty->termios)) { | ||
1981 | dbg("%s - no tty structures", __FUNCTION__); | ||
1982 | return; | ||
1983 | } | ||
1984 | |||
1985 | dbg("%s", "Entering .......... \n"); | 1980 | dbg("%s", "Entering .......... \n"); |
1986 | 1981 | ||
1987 | lData = LCR_BITS_8; | 1982 | lData = LCR_BITS_8; |
@@ -2151,11 +2146,6 @@ static void mos7840_set_termios(struct usb_serial_port *port, | |||
2151 | 2146 | ||
2152 | tty = port->tty; | 2147 | tty = port->tty; |
2153 | 2148 | ||
2154 | if (!port->tty || !port->tty->termios) { | ||
2155 | dbg("%s - no tty or termios", __FUNCTION__); | ||
2156 | return; | ||
2157 | } | ||
2158 | |||
2159 | if (!mos7840_port->open) { | 2149 | if (!mos7840_port->open) { |
2160 | dbg("%s - port not opened", __FUNCTION__); | 2150 | dbg("%s - port not opened", __FUNCTION__); |
2161 | return; | 2151 | return; |
@@ -2165,19 +2155,10 @@ static void mos7840_set_termios(struct usb_serial_port *port, | |||
2165 | 2155 | ||
2166 | cflag = tty->termios->c_cflag; | 2156 | cflag = tty->termios->c_cflag; |
2167 | 2157 | ||
2168 | if (!cflag) { | ||
2169 | dbg("%s %s\n", __FUNCTION__, "cflag is NULL"); | ||
2170 | return; | ||
2171 | } | ||
2172 | |||
2173 | dbg("%s - clfag %08x iflag %08x", __FUNCTION__, | 2158 | dbg("%s - clfag %08x iflag %08x", __FUNCTION__, |
2174 | tty->termios->c_cflag, RELEVANT_IFLAG(tty->termios->c_iflag)); | 2159 | tty->termios->c_cflag, RELEVANT_IFLAG(tty->termios->c_iflag)); |
2175 | 2160 | dbg("%s - old clfag %08x old iflag %08x", __FUNCTION__, | |
2176 | if (old_termios) { | 2161 | old_termios->c_cflag, RELEVANT_IFLAG(old_termios->c_iflag)); |
2177 | dbg("%s - old clfag %08x old iflag %08x", __FUNCTION__, | ||
2178 | old_termios->c_cflag, RELEVANT_IFLAG(old_termios->c_iflag)); | ||
2179 | } | ||
2180 | |||
2181 | dbg("%s - port %d", __FUNCTION__, port->number); | 2162 | dbg("%s - port %d", __FUNCTION__, port->number); |
2182 | 2163 | ||
2183 | /* change the port settings to the new ones specified */ | 2164 | /* change the port settings to the new ones specified */ |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index a18659e0700c..4590124cf888 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -172,6 +172,8 @@ static struct usb_device_id option_ids[] = { | |||
172 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2110) }, /* Novatel Merlin ES620 / Merlin ES720 / Ovation U720 */ | 172 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2110) }, /* Novatel Merlin ES620 / Merlin ES720 / Ovation U720 */ |
173 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2130) }, /* Novatel Merlin ES620 SM Bus */ | 173 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2130) }, /* Novatel Merlin ES620 SM Bus */ |
174 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2410) }, /* Novatel EU740 */ | 174 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2410) }, /* Novatel EU740 */ |
175 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x4100) }, /* Novatel U727 */ | ||
176 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x4400) }, /* Novatel MC950 */ | ||
175 | { USB_DEVICE(DELL_VENDOR_ID, 0x8114) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */ | 177 | { USB_DEVICE(DELL_VENDOR_ID, 0x8114) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */ |
176 | { USB_DEVICE(DELL_VENDOR_ID, 0x8115) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ | 178 | { USB_DEVICE(DELL_VENDOR_ID, 0x8115) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ |
177 | { USB_DEVICE(DELL_VENDOR_ID, 0x8116) }, /* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ | 179 | { USB_DEVICE(DELL_VENDOR_ID, 0x8116) }, /* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ |
@@ -311,7 +313,8 @@ static void option_set_termios(struct usb_serial_port *port, | |||
311 | struct ktermios *old_termios) | 313 | struct ktermios *old_termios) |
312 | { | 314 | { |
313 | dbg("%s", __FUNCTION__); | 315 | dbg("%s", __FUNCTION__); |
314 | 316 | /* Doesn't support option setting */ | |
317 | tty_termios_copy_hw(port->tty->termios, old_termios); | ||
315 | option_send_setup(port); | 318 | option_send_setup(port); |
316 | } | 319 | } |
317 | 320 | ||
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 1da57fd9ea23..2cd3f1d4b687 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c | |||
@@ -56,6 +56,7 @@ static struct usb_device_id id_table [] = { | |||
56 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) }, | 56 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) }, |
57 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_PHAROS) }, | 57 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_PHAROS) }, |
58 | { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, | 58 | { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, |
59 | { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, | ||
59 | { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) }, | 60 | { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) }, |
60 | { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) }, | 61 | { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) }, |
61 | { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) }, | 62 | { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) }, |
@@ -470,16 +471,13 @@ static void pl2303_set_termios(struct usb_serial_port *port, | |||
470 | 471 | ||
471 | dbg("%s - port %d", __FUNCTION__, port->number); | 472 | dbg("%s - port %d", __FUNCTION__, port->number); |
472 | 473 | ||
473 | if ((!port->tty) || (!port->tty->termios)) { | ||
474 | dbg("%s - no tty structures", __FUNCTION__); | ||
475 | return; | ||
476 | } | ||
477 | |||
478 | spin_lock_irqsave(&priv->lock, flags); | 474 | spin_lock_irqsave(&priv->lock, flags); |
479 | if (!priv->termios_initialized) { | 475 | if (!priv->termios_initialized) { |
480 | *(port->tty->termios) = tty_std_termios; | 476 | *(port->tty->termios) = tty_std_termios; |
481 | port->tty->termios->c_cflag = B9600 | CS8 | CREAD | | 477 | port->tty->termios->c_cflag = B9600 | CS8 | CREAD | |
482 | HUPCL | CLOCAL; | 478 | HUPCL | CLOCAL; |
479 | port->tty->termios->c_ispeed = 9600; | ||
480 | port->tty->termios->c_ospeed = 9600; | ||
483 | priv->termios_initialized = 1; | 481 | priv->termios_initialized = 1; |
484 | } | 482 | } |
485 | spin_unlock_irqrestore(&priv->lock, flags); | 483 | spin_unlock_irqrestore(&priv->lock, flags); |
@@ -596,6 +594,10 @@ static void pl2303_set_termios(struct usb_serial_port *port, | |||
596 | dbg ("0x40:0x1:0x0:0x0 %d", i); | 594 | dbg ("0x40:0x1:0x0:0x0 %d", i); |
597 | } | 595 | } |
598 | 596 | ||
597 | /* FIXME: Need to read back resulting baud rate */ | ||
598 | if (baud) | ||
599 | tty_encode_baud_rate(port->tty, baud, baud); | ||
600 | |||
599 | kfree(buf); | 601 | kfree(buf); |
600 | } | 602 | } |
601 | 603 | ||
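Two pl2303 details above are worth calling out: the default termios installed on first use now fills in c_ispeed/c_ospeed alongside the CBAUD bits, and the programmed rate is encoded back at the end of set_termios. A sketch of the default-termios part (the function name is illustrative):

#include <linux/tty.h>

/* Sketch: installing a 9600 8N1 default now means setting the numeric
 * speed fields too, not just the c_cflag speed bits. */
static void example_default_termios(struct tty_struct *tty)
{
        *tty->termios = tty_std_termios;
        tty->termios->c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
        tty->termios->c_ispeed = 9600;
        tty->termios->c_ospeed = 9600;
}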
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index c39bace5cbcc..ed603e3decd6 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h | |||
@@ -20,6 +20,7 @@ | |||
20 | 20 | ||
21 | #define IODATA_VENDOR_ID 0x04bb | 21 | #define IODATA_VENDOR_ID 0x04bb |
22 | #define IODATA_PRODUCT_ID 0x0a03 | 22 | #define IODATA_PRODUCT_ID 0x0a03 |
23 | #define IODATA_PRODUCT_ID_RSAQ5 0x0a0e | ||
23 | 24 | ||
24 | #define ELCOM_VENDOR_ID 0x056e | 25 | #define ELCOM_VENDOR_ID 0x056e |
25 | #define ELCOM_PRODUCT_ID 0x5003 | 26 | #define ELCOM_PRODUCT_ID 0x5003 |
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 959b3e4e9077..833f6e1e3721 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c | |||
@@ -224,7 +224,7 @@ static void sierra_set_termios(struct usb_serial_port *port, | |||
224 | struct ktermios *old_termios) | 224 | struct ktermios *old_termios) |
225 | { | 225 | { |
226 | dbg("%s", __FUNCTION__); | 226 | dbg("%s", __FUNCTION__); |
227 | 227 | tty_termios_copy_hw(port->tty->termios, old_termios); | |
228 | sierra_send_setup(port); | 228 | sierra_send_setup(port); |
229 | } | 229 | } |
230 | 230 | ||
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 4b1bd7def4a5..497e29a700ca 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
@@ -429,6 +429,8 @@ static void serial_set_termios (struct tty_struct *tty, struct ktermios * old) | |||
429 | /* pass on to the driver specific version of this function if it is available */ | 429 | /* pass on to the driver specific version of this function if it is available */ |
430 | if (port->serial->type->set_termios) | 430 | if (port->serial->type->set_termios) |
431 | port->serial->type->set_termios(port, old); | 431 | port->serial->type->set_termios(port, old); |
432 | else | ||
433 | tty_termios_copy_hw(tty->termios, old); | ||
432 | } | 434 | } |
433 | 435 | ||
434 | static void serial_break (struct tty_struct *tty, int break_state) | 436 | static void serial_break (struct tty_struct *tty, int break_state) |
@@ -1121,7 +1123,9 @@ int usb_serial_resume(struct usb_interface *intf) | |||
1121 | { | 1123 | { |
1122 | struct usb_serial *serial = usb_get_intfdata(intf); | 1124 | struct usb_serial *serial = usb_get_intfdata(intf); |
1123 | 1125 | ||
1124 | return serial->type->resume(serial); | 1126 | if (serial->type->resume) |
1127 | return serial->type->resume(serial); | ||
1128 | return 0; | ||
1125 | } | 1129 | } |
1126 | EXPORT_SYMBOL(usb_serial_resume); | 1130 | EXPORT_SYMBOL(usb_serial_resume); |
1127 | 1131 | ||
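In the usb-serial core, two fallbacks are added: when a sub-driver has no set_termios the old hardware settings are copied back with tty_termios_copy_hw(), and a missing resume hook now simply returns 0 instead of dereferencing a NULL pointer. A sketch of the dispatch shape, with a hypothetical ops structure standing in for struct usb_serial_driver:

#include <linux/tty.h>

/* Hypothetical ops struct; only the fallback logic mirrors the patch. */
struct example_ops {
        void (*set_termios)(struct tty_struct *, struct ktermios *);
        int (*resume)(void *dev);
};

static void example_set_termios(struct example_ops *ops,
                                struct tty_struct *tty, struct ktermios *old)
{
        if (ops->set_termios)
                ops->set_termios(tty, old);
        else
                tty_termios_copy_hw(tty->termios, old);
}

static int example_resume(struct example_ops *ops, void *dev)
{
        if (ops->resume)
                return ops->resume(dev);
        return 0;
}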
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index cc8b44c08712..ee5dd8b5a713 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c | |||
@@ -885,16 +885,7 @@ static int whiteheat_ioctl (struct usb_serial_port *port, struct file * file, un | |||
885 | static void whiteheat_set_termios(struct usb_serial_port *port, struct ktermios *old_termios) | 885 | static void whiteheat_set_termios(struct usb_serial_port *port, struct ktermios *old_termios) |
886 | { | 886 | { |
887 | dbg("%s -port %d", __FUNCTION__, port->number); | 887 | dbg("%s -port %d", __FUNCTION__, port->number); |
888 | |||
889 | if ((!port->tty) || (!port->tty->termios)) { | ||
890 | dbg("%s - no tty structures", __FUNCTION__); | ||
891 | goto exit; | ||
892 | } | ||
893 | |||
894 | firm_setup_port(port); | 888 | firm_setup_port(port); |
895 | |||
896 | exit: | ||
897 | return; | ||
898 | } | 889 | } |
899 | 890 | ||
900 | 891 | ||
@@ -1244,6 +1235,8 @@ static int firm_setup_port(struct usb_serial_port *port) { | |||
1244 | port_settings.baud = tty_get_baud_rate(port->tty); | 1235 | port_settings.baud = tty_get_baud_rate(port->tty); |
1245 | dbg("%s - baud rate = %d", __FUNCTION__, port_settings.baud); | 1236 | dbg("%s - baud rate = %d", __FUNCTION__, port_settings.baud); |
1246 | 1237 | ||
1238 | /* fixme: should set validated settings */ | ||
1239 | tty_encode_baud_rate(port->tty, port_settings.baud, port_settings.baud); | ||
1247 | /* handle any settings that aren't specified in the tty structure */ | 1240 | /* handle any settings that aren't specified in the tty structure */ |
1248 | port_settings.lloop = 0; | 1241 | port_settings.lloop = 0; |
1249 | 1242 | ||
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig index fe2c4cd53f5a..7e53333be013 100644 --- a/drivers/usb/storage/Kconfig +++ b/drivers/usb/storage/Kconfig | |||
@@ -48,7 +48,6 @@ config USB_STORAGE_FREECOM | |||
48 | config USB_STORAGE_ISD200 | 48 | config USB_STORAGE_ISD200 |
49 | bool "ISD-200 USB/ATA Bridge support" | 49 | bool "ISD-200 USB/ATA Bridge support" |
50 | depends on USB_STORAGE | 50 | depends on USB_STORAGE |
51 | depends on BLK_DEV_IDE=y || BLK_DEV_IDE=USB_STORAGE | ||
52 | ---help--- | 51 | ---help--- |
53 | Say Y here if you want to use USB Mass Store devices based | 52 | Say Y here if you want to use USB Mass Store devices based |
54 | on the In-Systems Design ISD-200 USB/ATA bridge. | 53 | on the In-Systems Design ISD-200 USB/ATA bridge. |
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c index 93a7724e167a..49ba6c0ff1e8 100644 --- a/drivers/usb/storage/isd200.c +++ b/drivers/usb/storage/isd200.c | |||
@@ -977,6 +977,109 @@ static int isd200_manual_enum(struct us_data *us) | |||
977 | return(retStatus); | 977 | return(retStatus); |
978 | } | 978 | } |
979 | 979 | ||
980 | /* | ||
981 | * We are the last non IDE user of the legacy IDE ident structures | ||
982 | * and we thus want to keep a private copy of this function so the | ||
983 | * driver can be used without the obsolete drivers/ide layer | ||
984 | */ | ||
985 | |||
986 | static void isd200_fix_driveid (struct hd_driveid *id) | ||
987 | { | ||
988 | #ifndef __LITTLE_ENDIAN | ||
989 | # ifdef __BIG_ENDIAN | ||
990 | int i; | ||
991 | u16 *stringcast; | ||
992 | |||
993 | id->config = __le16_to_cpu(id->config); | ||
994 | id->cyls = __le16_to_cpu(id->cyls); | ||
995 | id->reserved2 = __le16_to_cpu(id->reserved2); | ||
996 | id->heads = __le16_to_cpu(id->heads); | ||
997 | id->track_bytes = __le16_to_cpu(id->track_bytes); | ||
998 | id->sector_bytes = __le16_to_cpu(id->sector_bytes); | ||
999 | id->sectors = __le16_to_cpu(id->sectors); | ||
1000 | id->vendor0 = __le16_to_cpu(id->vendor0); | ||
1001 | id->vendor1 = __le16_to_cpu(id->vendor1); | ||
1002 | id->vendor2 = __le16_to_cpu(id->vendor2); | ||
1003 | stringcast = (u16 *)&id->serial_no[0]; | ||
1004 | for (i = 0; i < (20/2); i++) | ||
1005 | stringcast[i] = __le16_to_cpu(stringcast[i]); | ||
1006 | id->buf_type = __le16_to_cpu(id->buf_type); | ||
1007 | id->buf_size = __le16_to_cpu(id->buf_size); | ||
1008 | id->ecc_bytes = __le16_to_cpu(id->ecc_bytes); | ||
1009 | stringcast = (u16 *)&id->fw_rev[0]; | ||
1010 | for (i = 0; i < (8/2); i++) | ||
1011 | stringcast[i] = __le16_to_cpu(stringcast[i]); | ||
1012 | stringcast = (u16 *)&id->model[0]; | ||
1013 | for (i = 0; i < (40/2); i++) | ||
1014 | stringcast[i] = __le16_to_cpu(stringcast[i]); | ||
1015 | id->dword_io = __le16_to_cpu(id->dword_io); | ||
1016 | id->reserved50 = __le16_to_cpu(id->reserved50); | ||
1017 | id->field_valid = __le16_to_cpu(id->field_valid); | ||
1018 | id->cur_cyls = __le16_to_cpu(id->cur_cyls); | ||
1019 | id->cur_heads = __le16_to_cpu(id->cur_heads); | ||
1020 | id->cur_sectors = __le16_to_cpu(id->cur_sectors); | ||
1021 | id->cur_capacity0 = __le16_to_cpu(id->cur_capacity0); | ||
1022 | id->cur_capacity1 = __le16_to_cpu(id->cur_capacity1); | ||
1023 | id->lba_capacity = __le32_to_cpu(id->lba_capacity); | ||
1024 | id->dma_1word = __le16_to_cpu(id->dma_1word); | ||
1025 | id->dma_mword = __le16_to_cpu(id->dma_mword); | ||
1026 | id->eide_pio_modes = __le16_to_cpu(id->eide_pio_modes); | ||
1027 | id->eide_dma_min = __le16_to_cpu(id->eide_dma_min); | ||
1028 | id->eide_dma_time = __le16_to_cpu(id->eide_dma_time); | ||
1029 | id->eide_pio = __le16_to_cpu(id->eide_pio); | ||
1030 | id->eide_pio_iordy = __le16_to_cpu(id->eide_pio_iordy); | ||
1031 | for (i = 0; i < 2; ++i) | ||
1032 | id->words69_70[i] = __le16_to_cpu(id->words69_70[i]); | ||
1033 | for (i = 0; i < 4; ++i) | ||
1034 | id->words71_74[i] = __le16_to_cpu(id->words71_74[i]); | ||
1035 | id->queue_depth = __le16_to_cpu(id->queue_depth); | ||
1036 | for (i = 0; i < 4; ++i) | ||
1037 | id->words76_79[i] = __le16_to_cpu(id->words76_79[i]); | ||
1038 | id->major_rev_num = __le16_to_cpu(id->major_rev_num); | ||
1039 | id->minor_rev_num = __le16_to_cpu(id->minor_rev_num); | ||
1040 | id->command_set_1 = __le16_to_cpu(id->command_set_1); | ||
1041 | id->command_set_2 = __le16_to_cpu(id->command_set_2); | ||
1042 | id->cfsse = __le16_to_cpu(id->cfsse); | ||
1043 | id->cfs_enable_1 = __le16_to_cpu(id->cfs_enable_1); | ||
1044 | id->cfs_enable_2 = __le16_to_cpu(id->cfs_enable_2); | ||
1045 | id->csf_default = __le16_to_cpu(id->csf_default); | ||
1046 | id->dma_ultra = __le16_to_cpu(id->dma_ultra); | ||
1047 | id->trseuc = __le16_to_cpu(id->trseuc); | ||
1048 | id->trsEuc = __le16_to_cpu(id->trsEuc); | ||
1049 | id->CurAPMvalues = __le16_to_cpu(id->CurAPMvalues); | ||
1050 | id->mprc = __le16_to_cpu(id->mprc); | ||
1051 | id->hw_config = __le16_to_cpu(id->hw_config); | ||
1052 | id->acoustic = __le16_to_cpu(id->acoustic); | ||
1053 | id->msrqs = __le16_to_cpu(id->msrqs); | ||
1054 | id->sxfert = __le16_to_cpu(id->sxfert); | ||
1055 | id->sal = __le16_to_cpu(id->sal); | ||
1056 | id->spg = __le32_to_cpu(id->spg); | ||
1057 | id->lba_capacity_2 = __le64_to_cpu(id->lba_capacity_2); | ||
1058 | for (i = 0; i < 22; i++) | ||
1059 | id->words104_125[i] = __le16_to_cpu(id->words104_125[i]); | ||
1060 | id->last_lun = __le16_to_cpu(id->last_lun); | ||
1061 | id->word127 = __le16_to_cpu(id->word127); | ||
1062 | id->dlf = __le16_to_cpu(id->dlf); | ||
1063 | id->csfo = __le16_to_cpu(id->csfo); | ||
1064 | for (i = 0; i < 26; i++) | ||
1065 | id->words130_155[i] = __le16_to_cpu(id->words130_155[i]); | ||
1066 | id->word156 = __le16_to_cpu(id->word156); | ||
1067 | for (i = 0; i < 3; i++) | ||
1068 | id->words157_159[i] = __le16_to_cpu(id->words157_159[i]); | ||
1069 | id->cfa_power = __le16_to_cpu(id->cfa_power); | ||
1070 | for (i = 0; i < 14; i++) | ||
1071 | id->words161_175[i] = __le16_to_cpu(id->words161_175[i]); | ||
1072 | for (i = 0; i < 31; i++) | ||
1073 | id->words176_205[i] = __le16_to_cpu(id->words176_205[i]); | ||
1074 | for (i = 0; i < 48; i++) | ||
1075 | id->words206_254[i] = __le16_to_cpu(id->words206_254[i]); | ||
1076 | id->integrity_word = __le16_to_cpu(id->integrity_word); | ||
1077 | # else | ||
1078 | # error "Please fix <asm/byteorder.h>" | ||
1079 | # endif | ||
1080 | #endif | ||
1081 | } | ||
1082 | |||
980 | 1083 | ||
981 | /************************************************************************** | 1084 | /************************************************************************** |
982 | * isd200_get_inquiry_data | 1085 | * isd200_get_inquiry_data |
@@ -1018,7 +1121,7 @@ static int isd200_get_inquiry_data( struct us_data *us ) | |||
1018 | int i; | 1121 | int i; |
1019 | __be16 *src; | 1122 | __be16 *src; |
1020 | __u16 *dest; | 1123 | __u16 *dest; |
1021 | ide_fix_driveid(id); | 1124 | isd200_fix_driveid(id); |
1022 | 1125 | ||
1023 | US_DEBUGP(" Identify Data Structure:\n"); | 1126 | US_DEBUGP(" Identify Data Structure:\n"); |
1024 | US_DEBUGP(" config = 0x%x\n", id->config); | 1127 | US_DEBUGP(" config = 0x%x\n", id->config); |
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c index f99cb77e7b42..f7e2d5add831 100644 --- a/drivers/video/cirrusfb.c +++ b/drivers/video/cirrusfb.c | |||
@@ -2509,8 +2509,7 @@ static int cirrusfb_zorro_register(struct zorro_dev *z, | |||
2509 | cinfo = info->par; | 2509 | cinfo = info->par; |
2510 | cinfo->btype = btype; | 2510 | cinfo->btype = btype; |
2511 | 2511 | ||
2512 | assert(z > 0); | 2512 | assert(z); |
2513 | assert(z2 >= 0); | ||
2514 | assert(btype != BT_NONE); | 2513 | assert(btype != BT_NONE); |
2515 | 2514 | ||
2516 | cinfo->zdev = z; | 2515 | cinfo->zdev = z; |
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index 0a9882edf562..9d70289f7df3 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c | |||
@@ -279,13 +279,13 @@ int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg, | |||
279 | int offset; | 279 | int offset; |
280 | int remainder_of_page; | 280 | int remainder_of_page; |
281 | 281 | ||
282 | sg_init_table(sg, sg_size); | ||
283 | |||
282 | while (size > 0 && i < sg_size) { | 284 | while (size > 0 && i < sg_size) { |
283 | pg = virt_to_page(addr); | 285 | pg = virt_to_page(addr); |
284 | offset = offset_in_page(addr); | 286 | offset = offset_in_page(addr); |
285 | if (sg) { | 287 | if (sg) |
286 | sg_set_page(&sg[i], pg); | 288 | sg_set_page(&sg[i], pg, 0, offset); |
287 | sg[i].offset = offset; | ||
288 | } | ||
289 | remainder_of_page = PAGE_CACHE_SIZE - offset; | 289 | remainder_of_page = PAGE_CACHE_SIZE - offset; |
290 | if (size >= remainder_of_page) { | 290 | if (size >= remainder_of_page) { |
291 | if (sg) | 291 | if (sg) |
@@ -716,12 +716,8 @@ ecryptfs_encrypt_page_offset(struct ecryptfs_crypt_stat *crypt_stat, | |||
716 | sg_init_table(&src_sg, 1); | 716 | sg_init_table(&src_sg, 1); |
717 | sg_init_table(&dst_sg, 1); | 717 | sg_init_table(&dst_sg, 1); |
718 | 718 | ||
719 | sg_set_page(&src_sg, src_page); | 719 | sg_set_page(&src_sg, src_page, size, src_offset); |
720 | src_sg.offset = src_offset; | 720 | sg_set_page(&dst_sg, dst_page, size, dst_offset); |
721 | src_sg.length = size; | ||
722 | sg_set_page(&dst_sg, dst_page); | ||
723 | dst_sg.offset = dst_offset; | ||
724 | dst_sg.length = size; | ||
725 | return encrypt_scatterlist(crypt_stat, &dst_sg, &src_sg, size, iv); | 721 | return encrypt_scatterlist(crypt_stat, &dst_sg, &src_sg, size, iv); |
726 | } | 722 | } |
727 | 723 | ||
@@ -746,14 +742,11 @@ ecryptfs_decrypt_page_offset(struct ecryptfs_crypt_stat *crypt_stat, | |||
746 | struct scatterlist src_sg, dst_sg; | 742 | struct scatterlist src_sg, dst_sg; |
747 | 743 | ||
748 | sg_init_table(&src_sg, 1); | 744 | sg_init_table(&src_sg, 1); |
745 | sg_set_page(&src_sg, src_page, size, src_offset); | ||
746 | |||
749 | sg_init_table(&dst_sg, 1); | 747 | sg_init_table(&dst_sg, 1); |
748 | sg_set_page(&dst_sg, dst_page, size, dst_offset); | ||
750 | 749 | ||
751 | sg_set_page(&src_sg, src_page); | ||
752 | src_sg.offset = src_offset; | ||
753 | src_sg.length = size; | ||
754 | sg_set_page(&dst_sg, dst_page); | ||
755 | dst_sg.offset = dst_offset; | ||
756 | dst_sg.length = size; | ||
757 | return decrypt_scatterlist(crypt_stat, &dst_sg, &src_sg, size, iv); | 750 | return decrypt_scatterlist(crypt_stat, &dst_sg, &src_sg, size, iv); |
758 | } | 751 | } |
759 | 752 | ||
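The eCryptfs conversion tracks the new scatterlist API: tables are initialised with sg_init_table() before use, and sg_set_page() now takes the length and offset in one call instead of the caller poking sg->offset and sg->length afterwards. A minimal sketch of filling a single-entry scatterlist from a kernel virtual address:

#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Sketch: describe one buffer with the 4-argument sg_set_page(). */
static void example_fill_sg(struct scatterlist *sg, void *addr,
                            unsigned int len)
{
        sg_init_table(sg, 1);
        sg_set_page(sg, virt_to_page(addr), len, offset_in_page(addr));
}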
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c index 9728614b8958..77fc5838609c 100644 --- a/fs/jffs2/acl.c +++ b/fs/jffs2/acl.c | |||
@@ -372,7 +372,7 @@ int jffs2_init_acl_post(struct inode *inode) | |||
372 | return rc; | 372 | return rc; |
373 | } | 373 | } |
374 | 374 | ||
375 | return rc; | 375 | return 0; |
376 | } | 376 | } |
377 | 377 | ||
378 | void jffs2_clear_acl(struct jffs2_inode_info *f) | 378 | void jffs2_clear_acl(struct jffs2_inode_info *f) |
diff --git a/fs/mbcache.c b/fs/mbcache.c index 1046cbefbfbf..eb31b73e7d69 100644 --- a/fs/mbcache.c +++ b/fs/mbcache.c | |||
@@ -403,9 +403,9 @@ mb_cache_entry_alloc(struct mb_cache *cache) | |||
403 | { | 403 | { |
404 | struct mb_cache_entry *ce; | 404 | struct mb_cache_entry *ce; |
405 | 405 | ||
406 | atomic_inc(&cache->c_entry_count); | ||
407 | ce = kmem_cache_alloc(cache->c_entry_cache, GFP_KERNEL); | 406 | ce = kmem_cache_alloc(cache->c_entry_cache, GFP_KERNEL); |
408 | if (ce) { | 407 | if (ce) { |
408 | atomic_inc(&cache->c_entry_count); | ||
409 | INIT_LIST_HEAD(&ce->e_lru_list); | 409 | INIT_LIST_HEAD(&ce->e_lru_list); |
410 | INIT_LIST_HEAD(&ce->e_block_list); | 410 | INIT_LIST_HEAD(&ce->e_block_list); |
411 | ce->e_cache = cache; | 411 | ce->e_cache = cache; |
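The mb_cache fix is a small ordering change: the entry counter is bumped only after kmem_cache_alloc() succeeds, so a failed allocation no longer leaves c_entry_count inflated. As a general shape (a sketch, not the mbcache code itself):

#include <linux/slab.h>
#include <asm/atomic.h>

/* Sketch: count an object only after the allocation has succeeded, so the
 * failure path never needs to decrement. */
static void *example_alloc_counted(struct kmem_cache *cachep, atomic_t *count)
{
        void *obj = kmem_cache_alloc(cachep, GFP_KERNEL);

        if (obj)
                atomic_inc(count);
        return obj;
}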
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c index 2e91fb756e9a..749def054a34 100644 --- a/fs/proc/proc_net.c +++ b/fs/proc/proc_net.c | |||
@@ -185,7 +185,7 @@ static __net_exit void proc_net_ns_exit(struct net *net) | |||
185 | kfree(net->proc_net_root); | 185 | kfree(net->proc_net_root); |
186 | } | 186 | } |
187 | 187 | ||
188 | struct pernet_operations __net_initdata proc_net_ns_ops = { | 188 | static struct pernet_operations proc_net_ns_ops = { |
189 | .init = proc_net_ns_init, | 189 | .init = proc_net_ns_init, |
190 | .exit = proc_net_ns_exit, | 190 | .exit = proc_net_ns_exit, |
191 | }; | 191 | }; |
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 680c429bfa22..4e57fcf85982 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c | |||
@@ -171,7 +171,8 @@ static ssize_t proc_sys_read(struct file *filp, char __user *buf, | |||
171 | struct dentry *dentry = filp->f_dentry; | 171 | struct dentry *dentry = filp->f_dentry; |
172 | struct ctl_table_header *head; | 172 | struct ctl_table_header *head; |
173 | struct ctl_table *table; | 173 | struct ctl_table *table; |
174 | ssize_t error, res; | 174 | ssize_t error; |
175 | size_t res; | ||
175 | 176 | ||
176 | table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head); | 177 | table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head); |
177 | /* Has the sysctl entry disappeared on us? */ | 178 | /* Has the sysctl entry disappeared on us? */ |
@@ -209,7 +210,8 @@ static ssize_t proc_sys_write(struct file *filp, const char __user *buf, | |||
209 | struct dentry *dentry = filp->f_dentry; | 210 | struct dentry *dentry = filp->f_dentry; |
210 | struct ctl_table_header *head; | 211 | struct ctl_table_header *head; |
211 | struct ctl_table *table; | 212 | struct ctl_table *table; |
212 | ssize_t error, res; | 213 | ssize_t error; |
214 | size_t res; | ||
213 | 215 | ||
214 | table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head); | 216 | table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head); |
215 | /* Has the sysctl entry disappeared on us? */ | 217 | /* Has the sysctl entry disappeared on us? */ |
diff --git a/include/asm-avr32/dma-mapping.h b/include/asm-avr32/dma-mapping.h index a7131630c057..57dc672bab8e 100644 --- a/include/asm-avr32/dma-mapping.h +++ b/include/asm-avr32/dma-mapping.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | #include <linux/mm.h> | 4 | #include <linux/mm.h> |
5 | #include <linux/device.h> | 5 | #include <linux/device.h> |
6 | #include <asm/scatterlist.h> | 6 | #include <linux/scatterlist.h> |
7 | #include <asm/processor.h> | 7 | #include <asm/processor.h> |
8 | #include <asm/cacheflush.h> | 8 | #include <asm/cacheflush.h> |
9 | #include <asm/io.h> | 9 | #include <asm/io.h> |
diff --git a/include/asm-frv/scatterlist.h b/include/asm-frv/scatterlist.h index 99ba76edc42a..2e7143b5a7ad 100644 --- a/include/asm-frv/scatterlist.h +++ b/include/asm-frv/scatterlist.h | |||
@@ -16,8 +16,7 @@ | |||
16 | * | 16 | * |
17 | * can be rewritten as | 17 | * can be rewritten as |
18 | * | 18 | * |
19 | * sg_set_page(virt_to_page(some_ptr)); | 19 | * sg_set_buf(sg, some_ptr, length); |
20 | * sg->offset = (unsigned long) some_ptr & ~PAGE_MASK; | ||
21 | * | 20 | * |
22 | * and that's it. There's no excuse for not highmem enabling YOUR driver. /jens | 21 | * and that's it. There's no excuse for not highmem enabling YOUR driver. /jens |
23 | */ | 22 | */ |
diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h index bdca5416d8b0..3328950dbfe6 100644 --- a/include/asm-sparc/atomic.h +++ b/include/asm-sparc/atomic.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* atomic.h: These still suck, but the I-cache hit rate is higher. | 1 | /* atomic.h: These still suck, but the I-cache hit rate is higher. |
2 | * | 2 | * |
3 | * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) | 3 | * Copyright (C) 1996 David S. Miller (davem@davemloft.net) |
4 | * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au) | 4 | * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au) |
5 | * Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org) | 5 | * Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org) |
6 | * | 6 | * |
@@ -33,7 +33,7 @@ extern void __cmpxchg_called_with_bad_pointer(void); | |||
33 | extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_); | 33 | extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_); |
34 | 34 | ||
35 | /* don't worry...optimizer will get rid of most of this */ | 35 | /* don't worry...optimizer will get rid of most of this */ |
36 | static __inline__ unsigned long | 36 | static inline unsigned long |
37 | __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) | 37 | __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) |
38 | { | 38 | { |
39 | switch(size) { | 39 | switch(size) { |
diff --git a/include/asm-sparc/dma.h b/include/asm-sparc/dma.h index 407b3614468a..959d6c8a71ae 100644 --- a/include/asm-sparc/dma.h +++ b/include/asm-sparc/dma.h | |||
@@ -1,7 +1,6 @@ | |||
1 | /* $Id: dma.h,v 1.35 1999/12/27 06:37:09 anton Exp $ | 1 | /* include/asm-sparc/dma.h |
2 | * include/asm-sparc/dma.h | ||
3 | * | 2 | * |
4 | * Copyright 1995 (C) David S. Miller (davem@caip.rutgers.edu) | 3 | * Copyright 1995 (C) David S. Miller (davem@davemloft.net) |
5 | */ | 4 | */ |
6 | 5 | ||
7 | #ifndef _ASM_SPARC_DMA_H | 6 | #ifndef _ASM_SPARC_DMA_H |
@@ -21,14 +20,14 @@ | |||
21 | struct page; | 20 | struct page; |
22 | extern spinlock_t dma_spin_lock; | 21 | extern spinlock_t dma_spin_lock; |
23 | 22 | ||
24 | static __inline__ unsigned long claim_dma_lock(void) | 23 | static inline unsigned long claim_dma_lock(void) |
25 | { | 24 | { |
26 | unsigned long flags; | 25 | unsigned long flags; |
27 | spin_lock_irqsave(&dma_spin_lock, flags); | 26 | spin_lock_irqsave(&dma_spin_lock, flags); |
28 | return flags; | 27 | return flags; |
29 | } | 28 | } |
30 | 29 | ||
31 | static __inline__ void release_dma_lock(unsigned long flags) | 30 | static inline void release_dma_lock(unsigned long flags) |
32 | { | 31 | { |
33 | spin_unlock_irqrestore(&dma_spin_lock, flags); | 32 | spin_unlock_irqrestore(&dma_spin_lock, flags); |
34 | } | 33 | } |
diff --git a/include/asm-sparc/floppy.h b/include/asm-sparc/floppy.h index 5da1eef0f706..dbe7a586be5b 100644 --- a/include/asm-sparc/floppy.h +++ b/include/asm-sparc/floppy.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* asm-sparc/floppy.h: Sparc specific parts of the Floppy driver. | 1 | /* asm-sparc/floppy.h: Sparc specific parts of the Floppy driver. |
2 | * | 2 | * |
3 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | 3 | * Copyright (C) 1995 David S. Miller (davem@davemloft.net) |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #ifndef __ASM_SPARC_FLOPPY_H | 6 | #ifndef __ASM_SPARC_FLOPPY_H |
@@ -232,12 +232,12 @@ extern char *pdma_base; | |||
232 | extern unsigned long pdma_areasize; | 232 | extern unsigned long pdma_areasize; |
233 | 233 | ||
234 | /* Common routines to all controller types on the Sparc. */ | 234 | /* Common routines to all controller types on the Sparc. */ |
235 | static __inline__ void virtual_dma_init(void) | 235 | static inline void virtual_dma_init(void) |
236 | { | 236 | { |
237 | /* nothing... */ | 237 | /* nothing... */ |
238 | } | 238 | } |
239 | 239 | ||
240 | static __inline__ void sun_fd_disable_dma(void) | 240 | static inline void sun_fd_disable_dma(void) |
241 | { | 241 | { |
242 | doing_pdma = 0; | 242 | doing_pdma = 0; |
243 | if (pdma_base) { | 243 | if (pdma_base) { |
@@ -246,7 +246,7 @@ static __inline__ void sun_fd_disable_dma(void) | |||
246 | } | 246 | } |
247 | } | 247 | } |
248 | 248 | ||
249 | static __inline__ void sun_fd_set_dma_mode(int mode) | 249 | static inline void sun_fd_set_dma_mode(int mode) |
250 | { | 250 | { |
251 | switch(mode) { | 251 | switch(mode) { |
252 | case DMA_MODE_READ: | 252 | case DMA_MODE_READ: |
@@ -261,17 +261,17 @@ static __inline__ void sun_fd_set_dma_mode(int mode) | |||
261 | } | 261 | } |
262 | } | 262 | } |
263 | 263 | ||
264 | static __inline__ void sun_fd_set_dma_addr(char *buffer) | 264 | static inline void sun_fd_set_dma_addr(char *buffer) |
265 | { | 265 | { |
266 | pdma_vaddr = buffer; | 266 | pdma_vaddr = buffer; |
267 | } | 267 | } |
268 | 268 | ||
269 | static __inline__ void sun_fd_set_dma_count(int length) | 269 | static inline void sun_fd_set_dma_count(int length) |
270 | { | 270 | { |
271 | pdma_size = length; | 271 | pdma_size = length; |
272 | } | 272 | } |
273 | 273 | ||
274 | static __inline__ void sun_fd_enable_dma(void) | 274 | static inline void sun_fd_enable_dma(void) |
275 | { | 275 | { |
276 | pdma_vaddr = mmu_lockarea(pdma_vaddr, pdma_size); | 276 | pdma_vaddr = mmu_lockarea(pdma_vaddr, pdma_size); |
277 | pdma_base = pdma_vaddr; | 277 | pdma_base = pdma_vaddr; |
diff --git a/include/asm-sparc/ide.h b/include/asm-sparc/ide.h index 404022765fc7..4076cb5d1581 100644 --- a/include/asm-sparc/ide.h +++ b/include/asm-sparc/ide.h | |||
@@ -1,7 +1,6 @@ | |||
1 | /* $Id: ide.h,v 1.7 2002/01/16 20:58:40 davem Exp $ | 1 | /* ide.h: SPARC PCI specific IDE glue. |
2 | * ide.h: SPARC PCI specific IDE glue. | ||
3 | * | 2 | * |
4 | * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) | 3 | * Copyright (C) 1997 David S. Miller (davem@davemloft.net) |
5 | * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be) | 4 | * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be) |
6 | * Adaptation from sparc64 version to sparc by Pete Zaitcev. | 5 | * Adaptation from sparc64 version to sparc by Pete Zaitcev. |
7 | */ | 6 | */ |
@@ -31,7 +30,7 @@ | |||
31 | #define __ide_mm_outsw __ide_outsw | 30 | #define __ide_mm_outsw __ide_outsw |
32 | #define __ide_mm_outsl __ide_outsl | 31 | #define __ide_mm_outsl __ide_outsl |
33 | 32 | ||
34 | static __inline__ void __ide_insw(unsigned long port, | 33 | static inline void __ide_insw(unsigned long port, |
35 | void *dst, | 34 | void *dst, |
36 | unsigned long count) | 35 | unsigned long count) |
37 | { | 36 | { |
@@ -62,7 +61,7 @@ static __inline__ void __ide_insw(unsigned long port, | |||
62 | /* __flush_dcache_range((unsigned long)dst, end); */ /* P3 see hme */ | 61 | /* __flush_dcache_range((unsigned long)dst, end); */ /* P3 see hme */ |
63 | } | 62 | } |
64 | 63 | ||
65 | static __inline__ void __ide_outsw(unsigned long port, | 64 | static inline void __ide_outsw(unsigned long port, |
66 | const void *src, | 65 | const void *src, |
67 | unsigned long count) | 66 | unsigned long count) |
68 | { | 67 | { |
diff --git a/include/asm-sparc/posix_types.h b/include/asm-sparc/posix_types.h index 9ef1b3db4cbf..62c8fa7b36d4 100644 --- a/include/asm-sparc/posix_types.h +++ b/include/asm-sparc/posix_types.h | |||
@@ -49,7 +49,7 @@ typedef struct { | |||
49 | #if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) | 49 | #if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) |
50 | 50 | ||
51 | #undef __FD_SET | 51 | #undef __FD_SET |
52 | static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp) | 52 | static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp) |
53 | { | 53 | { |
54 | unsigned long _tmp = fd / __NFDBITS; | 54 | unsigned long _tmp = fd / __NFDBITS; |
55 | unsigned long _rem = fd % __NFDBITS; | 55 | unsigned long _rem = fd % __NFDBITS; |
@@ -57,7 +57,7 @@ static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp) | |||
57 | } | 57 | } |
58 | 58 | ||
59 | #undef __FD_CLR | 59 | #undef __FD_CLR |
60 | static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp) | 60 | static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp) |
61 | { | 61 | { |
62 | unsigned long _tmp = fd / __NFDBITS; | 62 | unsigned long _tmp = fd / __NFDBITS; |
63 | unsigned long _rem = fd % __NFDBITS; | 63 | unsigned long _rem = fd % __NFDBITS; |
@@ -65,7 +65,7 @@ static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp) | |||
65 | } | 65 | } |
66 | 66 | ||
67 | #undef __FD_ISSET | 67 | #undef __FD_ISSET |
68 | static __inline__ int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p) | 68 | static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p) |
69 | { | 69 | { |
70 | unsigned long _tmp = fd / __NFDBITS; | 70 | unsigned long _tmp = fd / __NFDBITS; |
71 | unsigned long _rem = fd % __NFDBITS; | 71 | unsigned long _rem = fd % __NFDBITS; |
@@ -77,7 +77,7 @@ static __inline__ int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p) | |||
77 | * for 256 and 1024-bit fd_sets respectively) | 77 | * for 256 and 1024-bit fd_sets respectively) |
78 | */ | 78 | */ |
79 | #undef __FD_ZERO | 79 | #undef __FD_ZERO |
80 | static __inline__ void __FD_ZERO(__kernel_fd_set *p) | 80 | static inline void __FD_ZERO(__kernel_fd_set *p) |
81 | { | 81 | { |
82 | unsigned long *tmp = p->fds_bits; | 82 | unsigned long *tmp = p->fds_bits; |
83 | int i; | 83 | int i; |
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h index 8c259de02614..2655d142b22d 100644 --- a/include/asm-sparc/system.h +++ b/include/asm-sparc/system.h | |||
@@ -1,5 +1,3 @@ | |||
1 | /* $Id: system.h,v 1.86 2001/10/30 04:57:10 davem Exp $ */ | ||
2 | |||
3 | #ifndef __SPARC_SYSTEM_H | 1 | #ifndef __SPARC_SYSTEM_H |
4 | #define __SPARC_SYSTEM_H | 2 | #define __SPARC_SYSTEM_H |
5 | 3 | ||
@@ -56,7 +54,7 @@ extern void sun_do_break(void); | |||
56 | extern int serial_console; | 54 | extern int serial_console; |
57 | extern int stop_a_enabled; | 55 | extern int stop_a_enabled; |
58 | 56 | ||
59 | static __inline__ int con_is_present(void) | 57 | static inline int con_is_present(void) |
60 | { | 58 | { |
61 | return serial_console ? 0 : 1; | 59 | return serial_console ? 0 : 1; |
62 | } | 60 | } |
@@ -217,7 +215,7 @@ static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned lon | |||
217 | 215 | ||
218 | extern void __xchg_called_with_bad_pointer(void); | 216 | extern void __xchg_called_with_bad_pointer(void); |
219 | 217 | ||
220 | static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size) | 218 | static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size) |
221 | { | 219 | { |
222 | switch (size) { | 220 | switch (size) { |
223 | case 4: | 221 | case 4: |
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h index 3fb4e1f7f186..2c71ec4a3b18 100644 --- a/include/asm-sparc64/atomic.h +++ b/include/asm-sparc64/atomic.h | |||
@@ -1,5 +1,4 @@ | |||
1 | /* $Id: atomic.h,v 1.22 2001/07/11 23:56:07 davem Exp $ | 1 | /* atomic.h: Thankfully the V9 is at least reasonable for this |
2 | * atomic.h: Thankfully the V9 is at least reasonable for this | ||
3 | * stuff. | 2 | * stuff. |
4 | * | 3 | * |
5 | * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com) | 4 | * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com) |
@@ -74,7 +73,7 @@ extern int atomic64_sub_ret(int, atomic64_t *); | |||
74 | #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) | 73 | #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) |
75 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | 74 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) |
76 | 75 | ||
77 | static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | 76 | static inline int atomic_add_unless(atomic_t *v, int a, int u) |
78 | { | 77 | { |
79 | int c, old; | 78 | int c, old; |
80 | c = atomic_read(v); | 79 | c = atomic_read(v); |
@@ -95,7 +94,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | |||
95 | ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) | 94 | ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) |
96 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) | 95 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) |
97 | 96 | ||
98 | static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) | 97 | static inline int atomic64_add_unless(atomic64_t *v, long a, long u) |
99 | { | 98 | { |
100 | long c, old; | 99 | long c, old; |
101 | c = atomic64_read(v); | 100 | c = atomic64_read(v); |
diff --git a/include/asm-sparc64/backoff.h b/include/asm-sparc64/backoff.h index 0e32f8b62fd2..dadd6c385c6c 100644 --- a/include/asm-sparc64/backoff.h +++ b/include/asm-sparc64/backoff.h | |||
@@ -21,7 +21,9 @@ | |||
21 | #else | 21 | #else |
22 | 22 | ||
23 | #define BACKOFF_SETUP(reg) | 23 | #define BACKOFF_SETUP(reg) |
24 | #define BACKOFF_SPIN(reg, tmp, label) | 24 | #define BACKOFF_SPIN(reg, tmp, label) \ |
25 | ba,pt %xcc, label; \ | ||
26 | nop; | ||
25 | 27 | ||
26 | #endif | 28 | #endif |
27 | 29 | ||
diff --git a/include/asm-sparc64/byteorder.h b/include/asm-sparc64/byteorder.h index c69b08af5fe0..3943022906fd 100644 --- a/include/asm-sparc64/byteorder.h +++ b/include/asm-sparc64/byteorder.h | |||
@@ -1,4 +1,3 @@ | |||
1 | /* $Id: byteorder.h,v 1.8 1997/12/18 02:44:14 ecd Exp $ */ | ||
2 | #ifndef _SPARC64_BYTEORDER_H | 1 | #ifndef _SPARC64_BYTEORDER_H |
3 | #define _SPARC64_BYTEORDER_H | 2 | #define _SPARC64_BYTEORDER_H |
4 | 3 | ||
@@ -7,7 +6,7 @@ | |||
7 | 6 | ||
8 | #ifdef __GNUC__ | 7 | #ifdef __GNUC__ |
9 | 8 | ||
10 | static __inline__ __u16 ___arch__swab16p(const __u16 *addr) | 9 | static inline __u16 ___arch__swab16p(const __u16 *addr) |
11 | { | 10 | { |
12 | __u16 ret; | 11 | __u16 ret; |
13 | 12 | ||
@@ -17,7 +16,7 @@ static __inline__ __u16 ___arch__swab16p(const __u16 *addr) | |||
17 | return ret; | 16 | return ret; |
18 | } | 17 | } |
19 | 18 | ||
20 | static __inline__ __u32 ___arch__swab32p(const __u32 *addr) | 19 | static inline __u32 ___arch__swab32p(const __u32 *addr) |
21 | { | 20 | { |
22 | __u32 ret; | 21 | __u32 ret; |
23 | 22 | ||
@@ -27,7 +26,7 @@ static __inline__ __u32 ___arch__swab32p(const __u32 *addr) | |||
27 | return ret; | 26 | return ret; |
28 | } | 27 | } |
29 | 28 | ||
30 | static __inline__ __u64 ___arch__swab64p(const __u64 *addr) | 29 | static inline __u64 ___arch__swab64p(const __u64 *addr) |
31 | { | 30 | { |
32 | __u64 ret; | 31 | __u64 ret; |
33 | 32 | ||
diff --git a/include/asm-sparc64/fpumacro.h b/include/asm-sparc64/fpumacro.h index d583e5efd75d..cc463fec806f 100644 --- a/include/asm-sparc64/fpumacro.h +++ b/include/asm-sparc64/fpumacro.h | |||
@@ -16,7 +16,7 @@ struct fpustate { | |||
16 | 16 | ||
17 | #define FPUSTATE (struct fpustate *)(current_thread_info()->fpregs) | 17 | #define FPUSTATE (struct fpustate *)(current_thread_info()->fpregs) |
18 | 18 | ||
19 | static __inline__ unsigned long fprs_read(void) | 19 | static inline unsigned long fprs_read(void) |
20 | { | 20 | { |
21 | unsigned long retval; | 21 | unsigned long retval; |
22 | 22 | ||
@@ -25,7 +25,7 @@ static __inline__ unsigned long fprs_read(void) | |||
25 | return retval; | 25 | return retval; |
26 | } | 26 | } |
27 | 27 | ||
28 | static __inline__ void fprs_write(unsigned long val) | 28 | static inline void fprs_write(unsigned long val) |
29 | { | 29 | { |
30 | __asm__ __volatile__("wr %0, 0x0, %%fprs" : : "r" (val)); | 30 | __asm__ __volatile__("wr %0, 0x0, %%fprs" : : "r" (val)); |
31 | } | 31 | } |
diff --git a/include/asm-sparc64/io.h b/include/asm-sparc64/io.h index cd7ef3097ac2..c299b853b5ba 100644 --- a/include/asm-sparc64/io.h +++ b/include/asm-sparc64/io.h | |||
@@ -1,4 +1,3 @@ | |||
1 | /* $Id: io.h,v 1.47 2001/12/13 10:36:02 davem Exp $ */ | ||
2 | #ifndef __SPARC64_IO_H | 1 | #ifndef __SPARC64_IO_H |
3 | #define __SPARC64_IO_H | 2 | #define __SPARC64_IO_H |
4 | 3 | ||
@@ -19,7 +18,7 @@ extern unsigned long kern_base, kern_size; | |||
19 | #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) | 18 | #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) |
20 | #define BIO_VMERGE_BOUNDARY 8192 | 19 | #define BIO_VMERGE_BOUNDARY 8192 |
21 | 20 | ||
22 | static __inline__ u8 _inb(unsigned long addr) | 21 | static inline u8 _inb(unsigned long addr) |
23 | { | 22 | { |
24 | u8 ret; | 23 | u8 ret; |
25 | 24 | ||
@@ -30,7 +29,7 @@ static __inline__ u8 _inb(unsigned long addr) | |||
30 | return ret; | 29 | return ret; |
31 | } | 30 | } |
32 | 31 | ||
33 | static __inline__ u16 _inw(unsigned long addr) | 32 | static inline u16 _inw(unsigned long addr) |
34 | { | 33 | { |
35 | u16 ret; | 34 | u16 ret; |
36 | 35 | ||
@@ -41,7 +40,7 @@ static __inline__ u16 _inw(unsigned long addr) | |||
41 | return ret; | 40 | return ret; |
42 | } | 41 | } |
43 | 42 | ||
44 | static __inline__ u32 _inl(unsigned long addr) | 43 | static inline u32 _inl(unsigned long addr) |
45 | { | 44 | { |
46 | u32 ret; | 45 | u32 ret; |
47 | 46 | ||
@@ -52,21 +51,21 @@ static __inline__ u32 _inl(unsigned long addr) | |||
52 | return ret; | 51 | return ret; |
53 | } | 52 | } |
54 | 53 | ||
55 | static __inline__ void _outb(u8 b, unsigned long addr) | 54 | static inline void _outb(u8 b, unsigned long addr) |
56 | { | 55 | { |
57 | __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_outb */" | 56 | __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_outb */" |
58 | : /* no outputs */ | 57 | : /* no outputs */ |
59 | : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)); | 58 | : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)); |
60 | } | 59 | } |
61 | 60 | ||
62 | static __inline__ void _outw(u16 w, unsigned long addr) | 61 | static inline void _outw(u16 w, unsigned long addr) |
63 | { | 62 | { |
64 | __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_outw */" | 63 | __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_outw */" |
65 | : /* no outputs */ | 64 | : /* no outputs */ |
66 | : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)); | 65 | : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)); |
67 | } | 66 | } |
68 | 67 | ||
69 | static __inline__ void _outl(u32 l, unsigned long addr) | 68 | static inline void _outl(u32 l, unsigned long addr) |
70 | { | 69 | { |
71 | __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_outl */" | 70 | __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_outl */" |
72 | : /* no outputs */ | 71 | : /* no outputs */ |
@@ -205,7 +204,7 @@ static inline void _writeq(u64 q, volatile void __iomem *addr) | |||
205 | #define writeq(__q, __addr) _writeq(__q, __addr) | 204 | #define writeq(__q, __addr) _writeq(__q, __addr) |
206 | 205 | ||
207 | /* Now versions without byte-swapping. */ | 206 | /* Now versions without byte-swapping. */ |
208 | static __inline__ u8 _raw_readb(unsigned long addr) | 207 | static inline u8 _raw_readb(unsigned long addr) |
209 | { | 208 | { |
210 | u8 ret; | 209 | u8 ret; |
211 | 210 | ||
@@ -216,7 +215,7 @@ static __inline__ u8 _raw_readb(unsigned long addr) | |||
216 | return ret; | 215 | return ret; |
217 | } | 216 | } |
218 | 217 | ||
219 | static __inline__ u16 _raw_readw(unsigned long addr) | 218 | static inline u16 _raw_readw(unsigned long addr) |
220 | { | 219 | { |
221 | u16 ret; | 220 | u16 ret; |
222 | 221 | ||
@@ -227,7 +226,7 @@ static __inline__ u16 _raw_readw(unsigned long addr) | |||
227 | return ret; | 226 | return ret; |
228 | } | 227 | } |
229 | 228 | ||
230 | static __inline__ u32 _raw_readl(unsigned long addr) | 229 | static inline u32 _raw_readl(unsigned long addr) |
231 | { | 230 | { |
232 | u32 ret; | 231 | u32 ret; |
233 | 232 | ||
@@ -238,7 +237,7 @@ static __inline__ u32 _raw_readl(unsigned long addr) | |||
238 | return ret; | 237 | return ret; |
239 | } | 238 | } |
240 | 239 | ||
241 | static __inline__ u64 _raw_readq(unsigned long addr) | 240 | static inline u64 _raw_readq(unsigned long addr) |
242 | { | 241 | { |
243 | u64 ret; | 242 | u64 ret; |
244 | 243 | ||
@@ -249,28 +248,28 @@ static __inline__ u64 _raw_readq(unsigned long addr) | |||
249 | return ret; | 248 | return ret; |
250 | } | 249 | } |
251 | 250 | ||
252 | static __inline__ void _raw_writeb(u8 b, unsigned long addr) | 251 | static inline void _raw_writeb(u8 b, unsigned long addr) |
253 | { | 252 | { |
254 | __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_raw_writeb */" | 253 | __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_raw_writeb */" |
255 | : /* no outputs */ | 254 | : /* no outputs */ |
256 | : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); | 255 | : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); |
257 | } | 256 | } |
258 | 257 | ||
259 | static __inline__ void _raw_writew(u16 w, unsigned long addr) | 258 | static inline void _raw_writew(u16 w, unsigned long addr) |
260 | { | 259 | { |
261 | __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_raw_writew */" | 260 | __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_raw_writew */" |
262 | : /* no outputs */ | 261 | : /* no outputs */ |
263 | : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); | 262 | : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); |
264 | } | 263 | } |
265 | 264 | ||
266 | static __inline__ void _raw_writel(u32 l, unsigned long addr) | 265 | static inline void _raw_writel(u32 l, unsigned long addr) |
267 | { | 266 | { |
268 | __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_raw_writel */" | 267 | __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_raw_writel */" |
269 | : /* no outputs */ | 268 | : /* no outputs */ |
270 | : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); | 269 | : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); |
271 | } | 270 | } |
272 | 271 | ||
273 | static __inline__ void _raw_writeq(u64 q, unsigned long addr) | 272 | static inline void _raw_writeq(u64 q, unsigned long addr) |
274 | { | 273 | { |
275 | __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_raw_writeq */" | 274 | __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_raw_writeq */" |
276 | : /* no outputs */ | 275 | : /* no outputs */ |
diff --git a/include/asm-sparc64/irq.h b/include/asm-sparc64/irq.h index 182dba05c702..30cb76b47be1 100644 --- a/include/asm-sparc64/irq.h +++ b/include/asm-sparc64/irq.h | |||
@@ -1,7 +1,6 @@ | |||
1 | /* $Id: irq.h,v 1.21 2002/01/23 11:27:36 davem Exp $ | 1 | /* irq.h: IRQ registers on the 64-bit Sparc. |
2 | * irq.h: IRQ registers on the 64-bit Sparc. | ||
3 | * | 2 | * |
4 | * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) | 3 | * Copyright (C) 1996 David S. Miller (davem@davemloft.net) |
5 | * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz) | 4 | * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz) |
6 | */ | 5 | */ |
7 | 6 | ||
@@ -67,21 +66,21 @@ extern void virt_irq_free(unsigned int virt_irq); | |||
67 | 66 | ||
68 | extern void fixup_irqs(void); | 67 | extern void fixup_irqs(void); |
69 | 68 | ||
70 | static __inline__ void set_softint(unsigned long bits) | 69 | static inline void set_softint(unsigned long bits) |
71 | { | 70 | { |
72 | __asm__ __volatile__("wr %0, 0x0, %%set_softint" | 71 | __asm__ __volatile__("wr %0, 0x0, %%set_softint" |
73 | : /* No outputs */ | 72 | : /* No outputs */ |
74 | : "r" (bits)); | 73 | : "r" (bits)); |
75 | } | 74 | } |
76 | 75 | ||
77 | static __inline__ void clear_softint(unsigned long bits) | 76 | static inline void clear_softint(unsigned long bits) |
78 | { | 77 | { |
79 | __asm__ __volatile__("wr %0, 0x0, %%clear_softint" | 78 | __asm__ __volatile__("wr %0, 0x0, %%clear_softint" |
80 | : /* No outputs */ | 79 | : /* No outputs */ |
81 | : "r" (bits)); | 80 | : "r" (bits)); |
82 | } | 81 | } |
83 | 82 | ||
84 | static __inline__ unsigned long get_softint(void) | 83 | static inline unsigned long get_softint(void) |
85 | { | 84 | { |
86 | unsigned long retval; | 85 | unsigned long retval; |
87 | 86 | ||
diff --git a/include/asm-sparc64/mostek.h b/include/asm-sparc64/mostek.h index d14dd8988161..c5652de2ace2 100644 --- a/include/asm-sparc64/mostek.h +++ b/include/asm-sparc64/mostek.h | |||
@@ -1,7 +1,6 @@ | |||
1 | /* $Id: mostek.h,v 1.4 2001/01/11 15:07:09 davem Exp $ | 1 | /* mostek.h: Describes the various Mostek time of day clock registers. |
2 | * mostek.h: Describes the various Mostek time of day clock registers. | ||
3 | * | 2 | * |
4 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | 3 | * Copyright (C) 1995 David S. Miller (davem@davemloft.net) |
5 | * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu) | 4 | * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu) |
6 | */ | 5 | */ |
7 | 6 | ||
@@ -38,7 +37,7 @@ | |||
38 | * | 37 | * |
39 | * We now deal with physical addresses for I/O to the chip. -DaveM | 38 | * We now deal with physical addresses for I/O to the chip. -DaveM |
40 | */ | 39 | */ |
41 | static __inline__ u8 mostek_read(void __iomem *addr) | 40 | static inline u8 mostek_read(void __iomem *addr) |
42 | { | 41 | { |
43 | u8 ret; | 42 | u8 ret; |
44 | 43 | ||
@@ -48,7 +47,7 @@ static __inline__ u8 mostek_read(void __iomem *addr) | |||
48 | return ret; | 47 | return ret; |
49 | } | 48 | } |
50 | 49 | ||
51 | static __inline__ void mostek_write(void __iomem *addr, u8 val) | 50 | static inline void mostek_write(void __iomem *addr, u8 val) |
52 | { | 51 | { |
53 | __asm__ __volatile__("stba %0, [%1] %2" | 52 | __asm__ __volatile__("stba %0, [%1] %2" |
54 | : /* no outputs */ | 53 | : /* no outputs */ |
diff --git a/include/asm-sparc64/ns87303.h b/include/asm-sparc64/ns87303.h index 6d58fdf349b5..686defe6aaa0 100644 --- a/include/asm-sparc64/ns87303.h +++ b/include/asm-sparc64/ns87303.h | |||
@@ -1,5 +1,4 @@ | |||
1 | /* $Id: ns87303.h,v 1.3 2000/01/09 15:16:34 ecd Exp $ | 1 | /* ns87303.h: Configuration Register Description for the |
2 | * ns87303.h: Configuration Register Description for the | ||
3 | * National Semiconductor PC87303 (SuperIO). | 2 | * National Semiconductor PC87303 (SuperIO). |
4 | * | 3 | * |
5 | * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) | 4 | * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) |
@@ -85,7 +84,7 @@ | |||
85 | 84 | ||
86 | extern spinlock_t ns87303_lock; | 85 | extern spinlock_t ns87303_lock; |
87 | 86 | ||
88 | static __inline__ int ns87303_modify(unsigned long port, unsigned int index, | 87 | static inline int ns87303_modify(unsigned long port, unsigned int index, |
89 | unsigned char clr, unsigned char set) | 88 | unsigned char clr, unsigned char set) |
90 | { | 89 | { |
91 | static unsigned char reserved[] = { | 90 | static unsigned char reserved[] = { |
diff --git a/include/asm-sparc64/parport.h b/include/asm-sparc64/parport.h index 8116e8f6062c..e9555b246c8d 100644 --- a/include/asm-sparc64/parport.h +++ b/include/asm-sparc64/parport.h | |||
@@ -1,5 +1,4 @@ | |||
1 | /* $Id: parport.h,v 1.11 2001/05/11 07:54:24 davem Exp $ | 1 | /* parport.h: sparc64 specific parport initialization and dma. |
2 | * parport.h: sparc64 specific parport initialization and dma. | ||
3 | * | 2 | * |
4 | * Copyright (C) 1999 Eddie C. Dost (ecd@skynet.be) | 3 | * Copyright (C) 1999 Eddie C. Dost (ecd@skynet.be) |
5 | */ | 4 | */ |
@@ -42,7 +41,7 @@ static struct sparc_ebus_info { | |||
42 | 41 | ||
43 | static DECLARE_BITMAP(dma_slot_map, PARPORT_PC_MAX_PORTS); | 42 | static DECLARE_BITMAP(dma_slot_map, PARPORT_PC_MAX_PORTS); |
44 | 43 | ||
45 | static __inline__ int request_dma(unsigned int dmanr, const char *device_id) | 44 | static inline int request_dma(unsigned int dmanr, const char *device_id) |
46 | { | 45 | { |
47 | if (dmanr >= PARPORT_PC_MAX_PORTS) | 46 | if (dmanr >= PARPORT_PC_MAX_PORTS) |
48 | return -EINVAL; | 47 | return -EINVAL; |
@@ -51,7 +50,7 @@ static __inline__ int request_dma(unsigned int dmanr, const char *device_id) | |||
51 | return 0; | 50 | return 0; |
52 | } | 51 | } |
53 | 52 | ||
54 | static __inline__ void free_dma(unsigned int dmanr) | 53 | static inline void free_dma(unsigned int dmanr) |
55 | { | 54 | { |
56 | if (dmanr >= PARPORT_PC_MAX_PORTS) { | 55 | if (dmanr >= PARPORT_PC_MAX_PORTS) { |
57 | printk(KERN_WARNING "Trying to free DMA%d\n", dmanr); | 56 | printk(KERN_WARNING "Trying to free DMA%d\n", dmanr); |
@@ -63,7 +62,7 @@ static __inline__ void free_dma(unsigned int dmanr) | |||
63 | } | 62 | } |
64 | } | 63 | } |
65 | 64 | ||
66 | static __inline__ void enable_dma(unsigned int dmanr) | 65 | static inline void enable_dma(unsigned int dmanr) |
67 | { | 66 | { |
68 | ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 1); | 67 | ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 1); |
69 | 68 | ||
@@ -73,32 +72,32 @@ static __inline__ void enable_dma(unsigned int dmanr) | |||
73 | BUG(); | 72 | BUG(); |
74 | } | 73 | } |
75 | 74 | ||
76 | static __inline__ void disable_dma(unsigned int dmanr) | 75 | static inline void disable_dma(unsigned int dmanr) |
77 | { | 76 | { |
78 | ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 0); | 77 | ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 0); |
79 | } | 78 | } |
80 | 79 | ||
81 | static __inline__ void clear_dma_ff(unsigned int dmanr) | 80 | static inline void clear_dma_ff(unsigned int dmanr) |
82 | { | 81 | { |
83 | /* nothing */ | 82 | /* nothing */ |
84 | } | 83 | } |
85 | 84 | ||
86 | static __inline__ void set_dma_mode(unsigned int dmanr, char mode) | 85 | static inline void set_dma_mode(unsigned int dmanr, char mode) |
87 | { | 86 | { |
88 | ebus_dma_prepare(&sparc_ebus_dmas[dmanr].info, (mode != DMA_MODE_WRITE)); | 87 | ebus_dma_prepare(&sparc_ebus_dmas[dmanr].info, (mode != DMA_MODE_WRITE)); |
89 | } | 88 | } |
90 | 89 | ||
91 | static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int addr) | 90 | static inline void set_dma_addr(unsigned int dmanr, unsigned int addr) |
92 | { | 91 | { |
93 | sparc_ebus_dmas[dmanr].addr = addr; | 92 | sparc_ebus_dmas[dmanr].addr = addr; |
94 | } | 93 | } |
95 | 94 | ||
96 | static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) | 95 | static inline void set_dma_count(unsigned int dmanr, unsigned int count) |
97 | { | 96 | { |
98 | sparc_ebus_dmas[dmanr].count = count; | 97 | sparc_ebus_dmas[dmanr].count = count; |
99 | } | 98 | } |
100 | 99 | ||
101 | static __inline__ unsigned int get_dma_residue(unsigned int dmanr) | 100 | static inline unsigned int get_dma_residue(unsigned int dmanr) |
102 | { | 101 | { |
103 | return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info); | 102 | return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info); |
104 | } | 103 | } |
diff --git a/include/asm-sparc64/posix_types.h b/include/asm-sparc64/posix_types.h index c86b9452c683..3426a65ecd35 100644 --- a/include/asm-sparc64/posix_types.h +++ b/include/asm-sparc64/posix_types.h | |||
@@ -53,7 +53,7 @@ typedef struct { | |||
53 | #if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) | 53 | #if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) |
54 | 54 | ||
55 | #undef __FD_SET | 55 | #undef __FD_SET |
56 | static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp) | 56 | static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp) |
57 | { | 57 | { |
58 | unsigned long _tmp = fd / __NFDBITS; | 58 | unsigned long _tmp = fd / __NFDBITS; |
59 | unsigned long _rem = fd % __NFDBITS; | 59 | unsigned long _rem = fd % __NFDBITS; |
@@ -61,7 +61,7 @@ static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp) | |||
61 | } | 61 | } |
62 | 62 | ||
63 | #undef __FD_CLR | 63 | #undef __FD_CLR |
64 | static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp) | 64 | static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp) |
65 | { | 65 | { |
66 | unsigned long _tmp = fd / __NFDBITS; | 66 | unsigned long _tmp = fd / __NFDBITS; |
67 | unsigned long _rem = fd % __NFDBITS; | 67 | unsigned long _rem = fd % __NFDBITS; |
@@ -69,7 +69,7 @@ static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp) | |||
69 | } | 69 | } |
70 | 70 | ||
71 | #undef __FD_ISSET | 71 | #undef __FD_ISSET |
72 | static __inline__ int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p) | 72 | static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p) |
73 | { | 73 | { |
74 | unsigned long _tmp = fd / __NFDBITS; | 74 | unsigned long _tmp = fd / __NFDBITS; |
75 | unsigned long _rem = fd % __NFDBITS; | 75 | unsigned long _rem = fd % __NFDBITS; |
@@ -81,7 +81,7 @@ static __inline__ int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p) | |||
81 | * for 256 and 1024-bit fd_sets respectively) | 81 | * for 256 and 1024-bit fd_sets respectively) |
82 | */ | 82 | */ |
83 | #undef __FD_ZERO | 83 | #undef __FD_ZERO |
84 | static __inline__ void __FD_ZERO(__kernel_fd_set *p) | 84 | static inline void __FD_ZERO(__kernel_fd_set *p) |
85 | { | 85 | { |
86 | unsigned long *tmp = p->fds_bits; | 86 | unsigned long *tmp = p->fds_bits; |
87 | int i; | 87 | int i; |
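A worked example of the index arithmetic visible in these __FD_* helpers may help; it assumes __NFDBITS is 64 on this 64-bit architecture, which the hunk itself does not state, and the function name is invented for the sketch.

/* Hypothetical illustration only: how an fd number maps onto fds_bits[]. */
static unsigned long example_fd_bit_position(unsigned long fd)
{
	unsigned long word = fd / 64;	/* fd 130 -> word index 2 */
	unsigned long bit  = fd % 64;	/* fd 130 -> bit 2 within that word */

	return word * 64 + bit;		/* trivially reconstructs fd */
}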
diff --git a/include/asm-sparc64/sbus.h b/include/asm-sparc64/sbus.h index 0151cad486f3..24a04a55cf85 100644 --- a/include/asm-sparc64/sbus.h +++ b/include/asm-sparc64/sbus.h | |||
@@ -29,12 +29,12 @@ | |||
29 | * numbers + offsets, and vice versa. | 29 | * numbers + offsets, and vice versa. |
30 | */ | 30 | */ |
31 | 31 | ||
32 | static __inline__ unsigned long sbus_devaddr(int slotnum, unsigned long offset) | 32 | static inline unsigned long sbus_devaddr(int slotnum, unsigned long offset) |
33 | { | 33 | { |
34 | return (unsigned long) (SUN_SBUS_BVADDR+((slotnum)<<28)+(offset)); | 34 | return (unsigned long) (SUN_SBUS_BVADDR+((slotnum)<<28)+(offset)); |
35 | } | 35 | } |
36 | 36 | ||
37 | static __inline__ int sbus_dev_slot(unsigned long dev_addr) | 37 | static inline int sbus_dev_slot(unsigned long dev_addr) |
38 | { | 38 | { |
39 | return (int) (((dev_addr)-SUN_SBUS_BVADDR)>>28); | 39 | return (int) (((dev_addr)-SUN_SBUS_BVADDR)>>28); |
40 | } | 40 | } |
diff --git a/include/asm-sparc64/spitfire.h b/include/asm-sparc64/spitfire.h index cf7807813e85..63b7040e8134 100644 --- a/include/asm-sparc64/spitfire.h +++ b/include/asm-sparc64/spitfire.h | |||
@@ -1,7 +1,6 @@ | |||
1 | /* $Id: spitfire.h,v 1.18 2001/11/29 16:42:10 kanoj Exp $ | 1 | /* spitfire.h: SpitFire/BlackBird/Cheetah inline MMU operations. |
2 | * spitfire.h: SpitFire/BlackBird/Cheetah inline MMU operations. | ||
3 | * | 2 | * |
4 | * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) | 3 | * Copyright (C) 1996 David S. Miller (davem@davemloft.net) |
5 | */ | 4 | */ |
6 | 5 | ||
7 | #ifndef _SPARC64_SPITFIRE_H | 6 | #ifndef _SPARC64_SPITFIRE_H |
@@ -67,7 +66,7 @@ extern void cheetah_enable_pcache(void); | |||
67 | /* The data cache is write through, so this just invalidates the | 66 | /* The data cache is write through, so this just invalidates the |
68 | * specified line. | 67 | * specified line. |
69 | */ | 68 | */ |
70 | static __inline__ void spitfire_put_dcache_tag(unsigned long addr, unsigned long tag) | 69 | static inline void spitfire_put_dcache_tag(unsigned long addr, unsigned long tag) |
71 | { | 70 | { |
72 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | 71 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" |
73 | "membar #Sync" | 72 | "membar #Sync" |
@@ -81,7 +80,7 @@ static __inline__ void spitfire_put_dcache_tag(unsigned long addr, unsigned long | |||
81 | * a flush instruction (to any address) is sufficient to handle | 80 | * a flush instruction (to any address) is sufficient to handle |
82 | * this issue after the line is invalidated. | 81 | * this issue after the line is invalidated. |
83 | */ | 82 | */ |
84 | static __inline__ void spitfire_put_icache_tag(unsigned long addr, unsigned long tag) | 83 | static inline void spitfire_put_icache_tag(unsigned long addr, unsigned long tag) |
85 | { | 84 | { |
86 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | 85 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" |
87 | "membar #Sync" | 86 | "membar #Sync" |
@@ -89,7 +88,7 @@ static __inline__ void spitfire_put_icache_tag(unsigned long addr, unsigned long | |||
89 | : "r" (tag), "r" (addr), "i" (ASI_IC_TAG)); | 88 | : "r" (tag), "r" (addr), "i" (ASI_IC_TAG)); |
90 | } | 89 | } |
91 | 90 | ||
92 | static __inline__ unsigned long spitfire_get_dtlb_data(int entry) | 91 | static inline unsigned long spitfire_get_dtlb_data(int entry) |
93 | { | 92 | { |
94 | unsigned long data; | 93 | unsigned long data; |
95 | 94 | ||
@@ -103,7 +102,7 @@ static __inline__ unsigned long spitfire_get_dtlb_data(int entry) | |||
103 | return data; | 102 | return data; |
104 | } | 103 | } |
105 | 104 | ||
106 | static __inline__ unsigned long spitfire_get_dtlb_tag(int entry) | 105 | static inline unsigned long spitfire_get_dtlb_tag(int entry) |
107 | { | 106 | { |
108 | unsigned long tag; | 107 | unsigned long tag; |
109 | 108 | ||
@@ -113,7 +112,7 @@ static __inline__ unsigned long spitfire_get_dtlb_tag(int entry) | |||
113 | return tag; | 112 | return tag; |
114 | } | 113 | } |
115 | 114 | ||
116 | static __inline__ void spitfire_put_dtlb_data(int entry, unsigned long data) | 115 | static inline void spitfire_put_dtlb_data(int entry, unsigned long data) |
117 | { | 116 | { |
118 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | 117 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" |
119 | "membar #Sync" | 118 | "membar #Sync" |
@@ -122,7 +121,7 @@ static __inline__ void spitfire_put_dtlb_data(int entry, unsigned long data) | |||
122 | "i" (ASI_DTLB_DATA_ACCESS)); | 121 | "i" (ASI_DTLB_DATA_ACCESS)); |
123 | } | 122 | } |
124 | 123 | ||
125 | static __inline__ unsigned long spitfire_get_itlb_data(int entry) | 124 | static inline unsigned long spitfire_get_itlb_data(int entry) |
126 | { | 125 | { |
127 | unsigned long data; | 126 | unsigned long data; |
128 | 127 | ||
@@ -136,7 +135,7 @@ static __inline__ unsigned long spitfire_get_itlb_data(int entry) | |||
136 | return data; | 135 | return data; |
137 | } | 136 | } |
138 | 137 | ||
139 | static __inline__ unsigned long spitfire_get_itlb_tag(int entry) | 138 | static inline unsigned long spitfire_get_itlb_tag(int entry) |
140 | { | 139 | { |
141 | unsigned long tag; | 140 | unsigned long tag; |
142 | 141 | ||
@@ -146,7 +145,7 @@ static __inline__ unsigned long spitfire_get_itlb_tag(int entry) | |||
146 | return tag; | 145 | return tag; |
147 | } | 146 | } |
148 | 147 | ||
149 | static __inline__ void spitfire_put_itlb_data(int entry, unsigned long data) | 148 | static inline void spitfire_put_itlb_data(int entry, unsigned long data) |
150 | { | 149 | { |
151 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | 150 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" |
152 | "membar #Sync" | 151 | "membar #Sync" |
@@ -155,7 +154,7 @@ static __inline__ void spitfire_put_itlb_data(int entry, unsigned long data) | |||
155 | "i" (ASI_ITLB_DATA_ACCESS)); | 154 | "i" (ASI_ITLB_DATA_ACCESS)); |
156 | } | 155 | } |
157 | 156 | ||
158 | static __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page) | 157 | static inline void spitfire_flush_dtlb_nucleus_page(unsigned long page) |
159 | { | 158 | { |
160 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | 159 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" |
161 | "membar #Sync" | 160 | "membar #Sync" |
@@ -163,7 +162,7 @@ static __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page) | |||
163 | : "r" (page | 0x20), "i" (ASI_DMMU_DEMAP)); | 162 | : "r" (page | 0x20), "i" (ASI_DMMU_DEMAP)); |
164 | } | 163 | } |
165 | 164 | ||
166 | static __inline__ void spitfire_flush_itlb_nucleus_page(unsigned long page) | 165 | static inline void spitfire_flush_itlb_nucleus_page(unsigned long page) |
167 | { | 166 | { |
168 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | 167 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" |
169 | "membar #Sync" | 168 | "membar #Sync" |
@@ -172,7 +171,7 @@ static __inline__ void spitfire_flush_itlb_nucleus_page(unsigned long page) | |||
172 | } | 171 | } |
173 | 172 | ||
174 | /* Cheetah has "all non-locked" tlb flushes. */ | 173 | /* Cheetah has "all non-locked" tlb flushes. */ |
175 | static __inline__ void cheetah_flush_dtlb_all(void) | 174 | static inline void cheetah_flush_dtlb_all(void) |
176 | { | 175 | { |
177 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | 176 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" |
178 | "membar #Sync" | 177 | "membar #Sync" |
@@ -180,7 +179,7 @@ static __inline__ void cheetah_flush_dtlb_all(void) | |||
180 | : "r" (0x80), "i" (ASI_DMMU_DEMAP)); | 179 | : "r" (0x80), "i" (ASI_DMMU_DEMAP)); |
181 | } | 180 | } |
182 | 181 | ||
183 | static __inline__ void cheetah_flush_itlb_all(void) | 182 | static inline void cheetah_flush_itlb_all(void) |
184 | { | 183 | { |
185 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | 184 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" |
186 | "membar #Sync" | 185 | "membar #Sync" |
@@ -202,7 +201,7 @@ static __inline__ void cheetah_flush_itlb_all(void) | |||
202 | * ASI_{D,I}TLB_DATA_ACCESS loads, doing the load twice fixes | 201 | * ASI_{D,I}TLB_DATA_ACCESS loads, doing the load twice fixes |
203 | * the problem for me. -DaveM | 202 | * the problem for me. -DaveM |
204 | */ | 203 | */ |
205 | static __inline__ unsigned long cheetah_get_ldtlb_data(int entry) | 204 | static inline unsigned long cheetah_get_ldtlb_data(int entry) |
206 | { | 205 | { |
207 | unsigned long data; | 206 | unsigned long data; |
208 | 207 | ||
@@ -215,7 +214,7 @@ static __inline__ unsigned long cheetah_get_ldtlb_data(int entry) | |||
215 | return data; | 214 | return data; |
216 | } | 215 | } |
217 | 216 | ||
218 | static __inline__ unsigned long cheetah_get_litlb_data(int entry) | 217 | static inline unsigned long cheetah_get_litlb_data(int entry) |
219 | { | 218 | { |
220 | unsigned long data; | 219 | unsigned long data; |
221 | 220 | ||
@@ -228,7 +227,7 @@ static __inline__ unsigned long cheetah_get_litlb_data(int entry) | |||
228 | return data; | 227 | return data; |
229 | } | 228 | } |
230 | 229 | ||
231 | static __inline__ unsigned long cheetah_get_ldtlb_tag(int entry) | 230 | static inline unsigned long cheetah_get_ldtlb_tag(int entry) |
232 | { | 231 | { |
233 | unsigned long tag; | 232 | unsigned long tag; |
234 | 233 | ||
@@ -240,7 +239,7 @@ static __inline__ unsigned long cheetah_get_ldtlb_tag(int entry) | |||
240 | return tag; | 239 | return tag; |
241 | } | 240 | } |
242 | 241 | ||
243 | static __inline__ unsigned long cheetah_get_litlb_tag(int entry) | 242 | static inline unsigned long cheetah_get_litlb_tag(int entry) |
244 | { | 243 | { |
245 | unsigned long tag; | 244 | unsigned long tag; |
246 | 245 | ||
@@ -252,7 +251,7 @@ static __inline__ unsigned long cheetah_get_litlb_tag(int entry) | |||
252 | return tag; | 251 | return tag; |
253 | } | 252 | } |
254 | 253 | ||
255 | static __inline__ void cheetah_put_ldtlb_data(int entry, unsigned long data) | 254 | static inline void cheetah_put_ldtlb_data(int entry, unsigned long data) |
256 | { | 255 | { |
257 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | 256 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" |
258 | "membar #Sync" | 257 | "membar #Sync" |
@@ -262,7 +261,7 @@ static __inline__ void cheetah_put_ldtlb_data(int entry, unsigned long data) | |||
262 | "i" (ASI_DTLB_DATA_ACCESS)); | 261 | "i" (ASI_DTLB_DATA_ACCESS)); |
263 | } | 262 | } |
264 | 263 | ||
265 | static __inline__ void cheetah_put_litlb_data(int entry, unsigned long data) | 264 | static inline void cheetah_put_litlb_data(int entry, unsigned long data) |
266 | { | 265 | { |
267 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | 266 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" |
268 | "membar #Sync" | 267 | "membar #Sync" |
@@ -272,7 +271,7 @@ static __inline__ void cheetah_put_litlb_data(int entry, unsigned long data) | |||
272 | "i" (ASI_ITLB_DATA_ACCESS)); | 271 | "i" (ASI_ITLB_DATA_ACCESS)); |
273 | } | 272 | } |
274 | 273 | ||
275 | static __inline__ unsigned long cheetah_get_dtlb_data(int entry, int tlb) | 274 | static inline unsigned long cheetah_get_dtlb_data(int entry, int tlb) |
276 | { | 275 | { |
277 | unsigned long data; | 276 | unsigned long data; |
278 | 277 | ||
@@ -284,7 +283,7 @@ static __inline__ unsigned long cheetah_get_dtlb_data(int entry, int tlb) | |||
284 | return data; | 283 | return data; |
285 | } | 284 | } |
286 | 285 | ||
287 | static __inline__ unsigned long cheetah_get_dtlb_tag(int entry, int tlb) | 286 | static inline unsigned long cheetah_get_dtlb_tag(int entry, int tlb) |
288 | { | 287 | { |
289 | unsigned long tag; | 288 | unsigned long tag; |
290 | 289 | ||
@@ -294,7 +293,7 @@ static __inline__ unsigned long cheetah_get_dtlb_tag(int entry, int tlb) | |||
294 | return tag; | 293 | return tag; |
295 | } | 294 | } |
296 | 295 | ||
297 | static __inline__ void cheetah_put_dtlb_data(int entry, unsigned long data, int tlb) | 296 | static inline void cheetah_put_dtlb_data(int entry, unsigned long data, int tlb) |
298 | { | 297 | { |
299 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | 298 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" |
300 | "membar #Sync" | 299 | "membar #Sync" |
@@ -304,7 +303,7 @@ static __inline__ void cheetah_put_dtlb_data(int entry, unsigned long data, int | |||
304 | "i" (ASI_DTLB_DATA_ACCESS)); | 303 | "i" (ASI_DTLB_DATA_ACCESS)); |
305 | } | 304 | } |
306 | 305 | ||
307 | static __inline__ unsigned long cheetah_get_itlb_data(int entry) | 306 | static inline unsigned long cheetah_get_itlb_data(int entry) |
308 | { | 307 | { |
309 | unsigned long data; | 308 | unsigned long data; |
310 | 309 | ||
@@ -317,7 +316,7 @@ static __inline__ unsigned long cheetah_get_itlb_data(int entry) | |||
317 | return data; | 316 | return data; |
318 | } | 317 | } |
319 | 318 | ||
320 | static __inline__ unsigned long cheetah_get_itlb_tag(int entry) | 319 | static inline unsigned long cheetah_get_itlb_tag(int entry) |
321 | { | 320 | { |
322 | unsigned long tag; | 321 | unsigned long tag; |
323 | 322 | ||
@@ -327,7 +326,7 @@ static __inline__ unsigned long cheetah_get_itlb_tag(int entry) | |||
327 | return tag; | 326 | return tag; |
328 | } | 327 | } |
329 | 328 | ||
330 | static __inline__ void cheetah_put_itlb_data(int entry, unsigned long data) | 329 | static inline void cheetah_put_itlb_data(int entry, unsigned long data) |
331 | { | 330 | { |
332 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | 331 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" |
333 | "membar #Sync" | 332 | "membar #Sync" |
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h index 3f175fa7e6d2..159e62b51d70 100644 --- a/include/asm-sparc64/system.h +++ b/include/asm-sparc64/system.h | |||
@@ -1,4 +1,3 @@ | |||
1 | /* $Id: system.h,v 1.69 2002/02/09 19:49:31 davem Exp $ */ | ||
2 | #ifndef __SPARC64_SYSTEM_H | 1 | #ifndef __SPARC64_SYSTEM_H |
3 | #define __SPARC64_SYSTEM_H | 2 | #define __SPARC64_SYSTEM_H |
4 | 3 | ||
@@ -240,7 +239,7 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long | |||
240 | 239 | ||
241 | extern void __xchg_called_with_bad_pointer(void); | 240 | extern void __xchg_called_with_bad_pointer(void); |
242 | 241 | ||
243 | static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr, | 242 | static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, |
244 | int size) | 243 | int size) |
245 | { | 244 | { |
246 | switch (size) { | 245 | switch (size) { |
@@ -263,7 +262,7 @@ extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noret | |||
263 | 262 | ||
264 | #define __HAVE_ARCH_CMPXCHG 1 | 263 | #define __HAVE_ARCH_CMPXCHG 1 |
265 | 264 | ||
266 | static __inline__ unsigned long | 265 | static inline unsigned long |
267 | __cmpxchg_u32(volatile int *m, int old, int new) | 266 | __cmpxchg_u32(volatile int *m, int old, int new) |
268 | { | 267 | { |
269 | __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n" | 268 | __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n" |
@@ -276,7 +275,7 @@ __cmpxchg_u32(volatile int *m, int old, int new) | |||
276 | return new; | 275 | return new; |
277 | } | 276 | } |
278 | 277 | ||
279 | static __inline__ unsigned long | 278 | static inline unsigned long |
280 | __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new) | 279 | __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new) |
281 | { | 280 | { |
282 | __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n" | 281 | __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n" |
@@ -293,7 +292,7 @@ __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new) | |||
293 | if something tries to do an invalid cmpxchg(). */ | 292 | if something tries to do an invalid cmpxchg(). */ |
294 | extern void __cmpxchg_called_with_bad_pointer(void); | 293 | extern void __cmpxchg_called_with_bad_pointer(void); |
295 | 294 | ||
296 | static __inline__ unsigned long | 295 | static inline unsigned long |
297 | __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) | 296 | __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) |
298 | { | 297 | { |
299 | switch (size) { | 298 | switch (size) { |
diff --git a/include/asm-sparc64/upa.h b/include/asm-sparc64/upa.h index 7ae09a22e408..5b1633223f92 100644 --- a/include/asm-sparc64/upa.h +++ b/include/asm-sparc64/upa.h | |||
@@ -1,4 +1,3 @@ | |||
1 | /* $Id: upa.h,v 1.3 1999/09/21 14:39:47 davem Exp $ */ | ||
2 | #ifndef _SPARC64_UPA_H | 1 | #ifndef _SPARC64_UPA_H |
3 | #define _SPARC64_UPA_H | 2 | #define _SPARC64_UPA_H |
4 | 3 | ||
@@ -25,7 +24,7 @@ | |||
25 | 24 | ||
26 | /* UPA I/O space accessors */ | 25 | /* UPA I/O space accessors */ |
27 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) | 26 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) |
28 | static __inline__ unsigned char _upa_readb(unsigned long addr) | 27 | static inline unsigned char _upa_readb(unsigned long addr) |
29 | { | 28 | { |
30 | unsigned char ret; | 29 | unsigned char ret; |
31 | 30 | ||
@@ -36,7 +35,7 @@ static __inline__ unsigned char _upa_readb(unsigned long addr) | |||
36 | return ret; | 35 | return ret; |
37 | } | 36 | } |
38 | 37 | ||
39 | static __inline__ unsigned short _upa_readw(unsigned long addr) | 38 | static inline unsigned short _upa_readw(unsigned long addr) |
40 | { | 39 | { |
41 | unsigned short ret; | 40 | unsigned short ret; |
42 | 41 | ||
@@ -47,7 +46,7 @@ static __inline__ unsigned short _upa_readw(unsigned long addr) | |||
47 | return ret; | 46 | return ret; |
48 | } | 47 | } |
49 | 48 | ||
50 | static __inline__ unsigned int _upa_readl(unsigned long addr) | 49 | static inline unsigned int _upa_readl(unsigned long addr) |
51 | { | 50 | { |
52 | unsigned int ret; | 51 | unsigned int ret; |
53 | 52 | ||
@@ -58,7 +57,7 @@ static __inline__ unsigned int _upa_readl(unsigned long addr) | |||
58 | return ret; | 57 | return ret; |
59 | } | 58 | } |
60 | 59 | ||
61 | static __inline__ unsigned long _upa_readq(unsigned long addr) | 60 | static inline unsigned long _upa_readq(unsigned long addr) |
62 | { | 61 | { |
63 | unsigned long ret; | 62 | unsigned long ret; |
64 | 63 | ||
@@ -69,28 +68,28 @@ static __inline__ unsigned long _upa_readq(unsigned long addr) | |||
69 | return ret; | 68 | return ret; |
70 | } | 69 | } |
71 | 70 | ||
72 | static __inline__ void _upa_writeb(unsigned char b, unsigned long addr) | 71 | static inline void _upa_writeb(unsigned char b, unsigned long addr) |
73 | { | 72 | { |
74 | __asm__ __volatile__("stba\t%0, [%1] %2\t/* upa_writeb */" | 73 | __asm__ __volatile__("stba\t%0, [%1] %2\t/* upa_writeb */" |
75 | : /* no outputs */ | 74 | : /* no outputs */ |
76 | : "r" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); | 75 | : "r" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); |
77 | } | 76 | } |
78 | 77 | ||
79 | static __inline__ void _upa_writew(unsigned short w, unsigned long addr) | 78 | static inline void _upa_writew(unsigned short w, unsigned long addr) |
80 | { | 79 | { |
81 | __asm__ __volatile__("stha\t%0, [%1] %2\t/* upa_writew */" | 80 | __asm__ __volatile__("stha\t%0, [%1] %2\t/* upa_writew */" |
82 | : /* no outputs */ | 81 | : /* no outputs */ |
83 | : "r" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); | 82 | : "r" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); |
84 | } | 83 | } |
85 | 84 | ||
86 | static __inline__ void _upa_writel(unsigned int l, unsigned long addr) | 85 | static inline void _upa_writel(unsigned int l, unsigned long addr) |
87 | { | 86 | { |
88 | __asm__ __volatile__("stwa\t%0, [%1] %2\t/* upa_writel */" | 87 | __asm__ __volatile__("stwa\t%0, [%1] %2\t/* upa_writel */" |
89 | : /* no outputs */ | 88 | : /* no outputs */ |
90 | : "r" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); | 89 | : "r" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); |
91 | } | 90 | } |
92 | 91 | ||
93 | static __inline__ void _upa_writeq(unsigned long q, unsigned long addr) | 92 | static inline void _upa_writeq(unsigned long q, unsigned long addr) |
94 | { | 93 | { |
95 | __asm__ __volatile__("stxa\t%0, [%1] %2\t/* upa_writeq */" | 94 | __asm__ __volatile__("stxa\t%0, [%1] %2\t/* upa_writeq */" |
96 | : /* no outputs */ | 95 | : /* no outputs */ |
diff --git a/include/asm-sparc64/visasm.h b/include/asm-sparc64/visasm.h index a74078551e0f..34f2ec64933b 100644 --- a/include/asm-sparc64/visasm.h +++ b/include/asm-sparc64/visasm.h | |||
@@ -1,4 +1,3 @@ | |||
1 | /* $Id: visasm.h,v 1.5 2001/04/24 01:09:12 davem Exp $ */ | ||
2 | #ifndef _SPARC64_VISASM_H | 1 | #ifndef _SPARC64_VISASM_H |
3 | #define _SPARC64_VISASM_H | 2 | #define _SPARC64_VISASM_H |
4 | 3 | ||
@@ -44,7 +43,7 @@ | |||
44 | wr %o5, 0, %fprs; | 43 | wr %o5, 0, %fprs; |
45 | 44 | ||
46 | #ifndef __ASSEMBLY__ | 45 | #ifndef __ASSEMBLY__ |
47 | static __inline__ void save_and_clear_fpu(void) { | 46 | static inline void save_and_clear_fpu(void) { |
48 | __asm__ __volatile__ ( | 47 | __asm__ __volatile__ ( |
49 | " rd %%fprs, %%o5\n" | 48 | " rd %%fprs, %%o5\n" |
50 | " andcc %%o5, %0, %%g0\n" | 49 | " andcc %%o5, %0, %%g0\n" |
diff --git a/include/asm-um/unistd.h b/include/asm-um/unistd.h index 732c83f04c3d..38bd9d94ee46 100644 --- a/include/asm-um/unistd.h +++ b/include/asm-um/unistd.h | |||
@@ -14,7 +14,6 @@ extern int um_execve(const char *file, char *const argv[], char *const env[]); | |||
14 | 14 | ||
15 | #ifdef __KERNEL__ | 15 | #ifdef __KERNEL__ |
16 | /* We get __ARCH_WANT_OLD_STAT and __ARCH_WANT_STAT64 from the base arch */ | 16 | /* We get __ARCH_WANT_OLD_STAT and __ARCH_WANT_STAT64 from the base arch */ |
17 | #define __ARCH_WANT_IPC_PARSE_VERSION | ||
18 | #define __ARCH_WANT_OLD_READDIR | 17 | #define __ARCH_WANT_OLD_READDIR |
19 | #define __ARCH_WANT_SYS_ALARM | 18 | #define __ARCH_WANT_SYS_ALARM |
20 | #define __ARCH_WANT_SYS_GETHOSTNAME | 19 | #define __ARCH_WANT_SYS_GETHOSTNAME |
diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h index 36ebb5b02b4f..0b40f6d20bea 100644 --- a/include/asm-x86/bitops_32.h +++ b/include/asm-x86/bitops_32.h | |||
@@ -183,9 +183,12 @@ static inline int test_and_set_bit(int nr, volatile unsigned long * addr) | |||
183 | * @nr: Bit to set | 183 | * @nr: Bit to set |
184 | * @addr: Address to count from | 184 | * @addr: Address to count from |
185 | * | 185 | * |
186 | * This is the same as test_and_set_bit on x86 | 186 | * This is the same as test_and_set_bit on x86. |
187 | */ | 187 | */ |
188 | #define test_and_set_bit_lock test_and_set_bit | 188 | static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr) |
189 | { | ||
190 | return test_and_set_bit(nr, addr); | ||
191 | } | ||
189 | 192 | ||
190 | /** | 193 | /** |
191 | * __test_and_set_bit - Set a bit and return its old value | 194 | * __test_and_set_bit - Set a bit and return its old value |
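To show what the macro-to-inline conversion above changes for callers, here is a minimal, hypothetical user of the new test_and_set_bit_lock(); the variable and function names are invented and are not part of the patch.

/* Illustrative caller: the inline version type-checks its arguments. */
static unsigned long example_lock_word;

static int example_try_lock(void)
{
	/* Non-zero means bit 0 was already set (the lock was held);
	 * zero means this call just acquired it. */
	return test_and_set_bit_lock(0, &example_lock_word);
}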
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h index b4d47940b959..766bcc0470a6 100644 --- a/include/asm-x86/bitops_64.h +++ b/include/asm-x86/bitops_64.h | |||
@@ -29,7 +29,7 @@ | |||
29 | * Note that @nr may be almost arbitrarily large; this function is not | 29 | * Note that @nr may be almost arbitrarily large; this function is not |
30 | * restricted to acting on a single-word quantity. | 30 | * restricted to acting on a single-word quantity. |
31 | */ | 31 | */ |
32 | static __inline__ void set_bit(int nr, volatile void * addr) | 32 | static inline void set_bit(int nr, volatile void *addr) |
33 | { | 33 | { |
34 | __asm__ __volatile__( LOCK_PREFIX | 34 | __asm__ __volatile__( LOCK_PREFIX |
35 | "btsl %1,%0" | 35 | "btsl %1,%0" |
@@ -46,7 +46,7 @@ static __inline__ void set_bit(int nr, volatile void * addr) | |||
46 | * If it's called on the same region of memory simultaneously, the effect | 46 | * If it's called on the same region of memory simultaneously, the effect |
47 | * may be that only one operation succeeds. | 47 | * may be that only one operation succeeds. |
48 | */ | 48 | */ |
49 | static __inline__ void __set_bit(int nr, volatile void * addr) | 49 | static inline void __set_bit(int nr, volatile void *addr) |
50 | { | 50 | { |
51 | __asm__ volatile( | 51 | __asm__ volatile( |
52 | "btsl %1,%0" | 52 | "btsl %1,%0" |
@@ -64,7 +64,7 @@ static __inline__ void __set_bit(int nr, volatile void * addr) | |||
64 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | 64 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() |
65 | * in order to ensure changes are visible on other processors. | 65 | * in order to ensure changes are visible on other processors. |
66 | */ | 66 | */ |
67 | static __inline__ void clear_bit(int nr, volatile void * addr) | 67 | static inline void clear_bit(int nr, volatile void *addr) |
68 | { | 68 | { |
69 | __asm__ __volatile__( LOCK_PREFIX | 69 | __asm__ __volatile__( LOCK_PREFIX |
70 | "btrl %1,%0" | 70 | "btrl %1,%0" |
@@ -86,7 +86,7 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *ad | |||
86 | clear_bit(nr, addr); | 86 | clear_bit(nr, addr); |
87 | } | 87 | } |
88 | 88 | ||
89 | static __inline__ void __clear_bit(int nr, volatile void * addr) | 89 | static inline void __clear_bit(int nr, volatile void *addr) |
90 | { | 90 | { |
91 | __asm__ __volatile__( | 91 | __asm__ __volatile__( |
92 | "btrl %1,%0" | 92 | "btrl %1,%0" |
@@ -124,7 +124,7 @@ static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long * | |||
124 | * If it's called on the same region of memory simultaneously, the effect | 124 | * If it's called on the same region of memory simultaneously, the effect |
125 | * may be that only one operation succeeds. | 125 | * may be that only one operation succeeds. |
126 | */ | 126 | */ |
127 | static __inline__ void __change_bit(int nr, volatile void * addr) | 127 | static inline void __change_bit(int nr, volatile void *addr) |
128 | { | 128 | { |
129 | __asm__ __volatile__( | 129 | __asm__ __volatile__( |
130 | "btcl %1,%0" | 130 | "btcl %1,%0" |
@@ -141,7 +141,7 @@ static __inline__ void __change_bit(int nr, volatile void * addr) | |||
141 | * Note that @nr may be almost arbitrarily large; this function is not | 141 | * Note that @nr may be almost arbitrarily large; this function is not |
142 | * restricted to acting on a single-word quantity. | 142 | * restricted to acting on a single-word quantity. |
143 | */ | 143 | */ |
144 | static __inline__ void change_bit(int nr, volatile void * addr) | 144 | static inline void change_bit(int nr, volatile void *addr) |
145 | { | 145 | { |
146 | __asm__ __volatile__( LOCK_PREFIX | 146 | __asm__ __volatile__( LOCK_PREFIX |
147 | "btcl %1,%0" | 147 | "btcl %1,%0" |
@@ -157,7 +157,7 @@ static __inline__ void change_bit(int nr, volatile void * addr) | |||
157 | * This operation is atomic and cannot be reordered. | 157 | * This operation is atomic and cannot be reordered. |
158 | * It also implies a memory barrier. | 158 | * It also implies a memory barrier. |
159 | */ | 159 | */ |
160 | static __inline__ int test_and_set_bit(int nr, volatile void * addr) | 160 | static inline int test_and_set_bit(int nr, volatile void *addr) |
161 | { | 161 | { |
162 | int oldbit; | 162 | int oldbit; |
163 | 163 | ||
@@ -173,9 +173,12 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr) | |||
173 | * @nr: Bit to set | 173 | * @nr: Bit to set |
174 | * @addr: Address to count from | 174 | * @addr: Address to count from |
175 | * | 175 | * |
176 | * This is the same as test_and_set_bit on x86 | 176 | * This is the same as test_and_set_bit on x86. |
177 | */ | 177 | */ |
178 | #define test_and_set_bit_lock test_and_set_bit | 178 | static inline int test_and_set_bit_lock(int nr, volatile void *addr) |
179 | { | ||
180 | return test_and_set_bit(nr, addr); | ||
181 | } | ||
179 | 182 | ||
180 | /** | 183 | /** |
181 | * __test_and_set_bit - Set a bit and return its old value | 184 | * __test_and_set_bit - Set a bit and return its old value |
@@ -186,7 +189,7 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr) | |||
186 | * If two examples of this operation race, one can appear to succeed | 189 | * If two examples of this operation race, one can appear to succeed |
187 | * but actually fail. You must protect multiple accesses with a lock. | 190 | * but actually fail. You must protect multiple accesses with a lock. |
188 | */ | 191 | */ |
189 | static __inline__ int __test_and_set_bit(int nr, volatile void * addr) | 192 | static inline int __test_and_set_bit(int nr, volatile void *addr) |
190 | { | 193 | { |
191 | int oldbit; | 194 | int oldbit; |
192 | 195 | ||
@@ -205,7 +208,7 @@ static __inline__ int __test_and_set_bit(int nr, volatile void * addr) | |||
205 | * This operation is atomic and cannot be reordered. | 208 | * This operation is atomic and cannot be reordered. |
206 | * It also implies a memory barrier. | 209 | * It also implies a memory barrier. |
207 | */ | 210 | */ |
208 | static __inline__ int test_and_clear_bit(int nr, volatile void * addr) | 211 | static inline int test_and_clear_bit(int nr, volatile void *addr) |
209 | { | 212 | { |
210 | int oldbit; | 213 | int oldbit; |
211 | 214 | ||
@@ -225,7 +228,7 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr) | |||
225 | * If two examples of this operation race, one can appear to succeed | 228 | * If two examples of this operation race, one can appear to succeed |
226 | * but actually fail. You must protect multiple accesses with a lock. | 229 | * but actually fail. You must protect multiple accesses with a lock. |
227 | */ | 230 | */ |
228 | static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) | 231 | static inline int __test_and_clear_bit(int nr, volatile void *addr) |
229 | { | 232 | { |
230 | int oldbit; | 233 | int oldbit; |
231 | 234 | ||
@@ -237,7 +240,7 @@ static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) | |||
237 | } | 240 | } |
238 | 241 | ||
239 | /* WARNING: non atomic and it can be reordered! */ | 242 | /* WARNING: non atomic and it can be reordered! */ |
240 | static __inline__ int __test_and_change_bit(int nr, volatile void * addr) | 243 | static inline int __test_and_change_bit(int nr, volatile void *addr) |
241 | { | 244 | { |
242 | int oldbit; | 245 | int oldbit; |
243 | 246 | ||
@@ -256,7 +259,7 @@ static __inline__ int __test_and_change_bit(int nr, volatile void * addr) | |||
256 | * This operation is atomic and cannot be reordered. | 259 | * This operation is atomic and cannot be reordered. |
257 | * It also implies a memory barrier. | 260 | * It also implies a memory barrier. |
258 | */ | 261 | */ |
259 | static __inline__ int test_and_change_bit(int nr, volatile void * addr) | 262 | static inline int test_and_change_bit(int nr, volatile void *addr) |
260 | { | 263 | { |
261 | int oldbit; | 264 | int oldbit; |
262 | 265 | ||
@@ -273,15 +276,15 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr) | |||
273 | * @nr: bit number to test | 276 | * @nr: bit number to test |
274 | * @addr: Address to start counting from | 277 | * @addr: Address to start counting from |
275 | */ | 278 | */ |
276 | static int test_bit(int nr, const volatile void * addr); | 279 | static int test_bit(int nr, const volatile void *addr); |
277 | #endif | 280 | #endif |
278 | 281 | ||
279 | static __inline__ int constant_test_bit(int nr, const volatile void * addr) | 282 | static inline int constant_test_bit(int nr, const volatile void *addr) |
280 | { | 283 | { |
281 | return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; | 284 | return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; |
282 | } | 285 | } |
283 | 286 | ||
284 | static __inline__ int variable_test_bit(int nr, volatile const void * addr) | 287 | static inline int variable_test_bit(int nr, volatile const void *addr) |
285 | { | 288 | { |
286 | int oldbit; | 289 | int oldbit; |
287 | 290 | ||
@@ -299,10 +302,10 @@ static __inline__ int variable_test_bit(int nr, volatile const void * addr) | |||
299 | 302 | ||
300 | #undef ADDR | 303 | #undef ADDR |
301 | 304 | ||
302 | extern long find_first_zero_bit(const unsigned long * addr, unsigned long size); | 305 | extern long find_first_zero_bit(const unsigned long *addr, unsigned long size); |
303 | extern long find_next_zero_bit (const unsigned long * addr, long size, long offset); | 306 | extern long find_next_zero_bit(const unsigned long *addr, long size, long offset); |
304 | extern long find_first_bit(const unsigned long * addr, unsigned long size); | 307 | extern long find_first_bit(const unsigned long *addr, unsigned long size); |
305 | extern long find_next_bit(const unsigned long * addr, long size, long offset); | 308 | extern long find_next_bit(const unsigned long *addr, long size, long offset); |
306 | 309 | ||
307 | /* return index of first bit set in val or max when no bit is set */ | 310 | /* return index of first bit set in val or max when no bit is set */ |
308 | static inline long __scanbit(unsigned long val, unsigned long max) | 311 | static inline long __scanbit(unsigned long val, unsigned long max) |
@@ -363,7 +366,7 @@ static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i, | |||
363 | * | 366 | * |
364 | * Undefined if no zero exists, so code should check against ~0UL first. | 367 | * Undefined if no zero exists, so code should check against ~0UL first. |
365 | */ | 368 | */ |
366 | static __inline__ unsigned long ffz(unsigned long word) | 369 | static inline unsigned long ffz(unsigned long word) |
367 | { | 370 | { |
368 | __asm__("bsfq %1,%0" | 371 | __asm__("bsfq %1,%0" |
369 | :"=r" (word) | 372 | :"=r" (word) |
@@ -377,7 +380,7 @@ static __inline__ unsigned long ffz(unsigned long word) | |||
377 | * | 380 | * |
378 | * Undefined if no bit exists, so code should check against 0 first. | 381 | * Undefined if no bit exists, so code should check against 0 first. |
379 | */ | 382 | */ |
380 | static __inline__ unsigned long __ffs(unsigned long word) | 383 | static inline unsigned long __ffs(unsigned long word) |
381 | { | 384 | { |
382 | __asm__("bsfq %1,%0" | 385 | __asm__("bsfq %1,%0" |
383 | :"=r" (word) | 386 | :"=r" (word) |
@@ -391,7 +394,7 @@ static __inline__ unsigned long __ffs(unsigned long word) | |||
391 | * | 394 | * |
392 | * Undefined if no set bit exists, so code should check against 0 first. | 395 | * Undefined if no set bit exists, so code should check against 0 first. |
393 | */ | 396 | */ |
394 | static __inline__ unsigned long __fls(unsigned long word) | 397 | static inline unsigned long __fls(unsigned long word) |
395 | { | 398 | { |
396 | __asm__("bsrq %1,%0" | 399 | __asm__("bsrq %1,%0" |
397 | :"=r" (word) | 400 | :"=r" (word) |
@@ -411,7 +414,7 @@ static __inline__ unsigned long __fls(unsigned long word) | |||
411 | * the libc and compiler builtin ffs routines, therefore | 414 | * the libc and compiler builtin ffs routines, therefore |
412 | * differs in spirit from the above ffz (man ffs). | 415 | * differs in spirit from the above ffz (man ffs). |
413 | */ | 416 | */ |
414 | static __inline__ int ffs(int x) | 417 | static inline int ffs(int x) |
415 | { | 418 | { |
416 | int r; | 419 | int r; |
417 | 420 | ||
@@ -427,7 +430,7 @@ static __inline__ int ffs(int x) | |||
427 | * | 430 | * |
428 | * This is defined the same way as fls. | 431 | * This is defined the same way as fls. |
429 | */ | 432 | */ |
430 | static __inline__ int fls64(__u64 x) | 433 | static inline int fls64(__u64 x) |
431 | { | 434 | { |
432 | if (x == 0) | 435 | if (x == 0) |
433 | return 0; | 436 | return 0; |
@@ -440,7 +443,7 @@ static __inline__ int fls64(__u64 x) | |||
440 | * | 443 | * |
441 | * This is defined the same way as ffs. | 444 | * This is defined the same way as ffs. |
442 | */ | 445 | */ |
443 | static __inline__ int fls(int x) | 446 | static inline int fls(int x) |
444 | { | 447 | { |
445 | int r; | 448 | int r; |
446 | 449 | ||
diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h index f948491eb56a..9c5092b6aa9f 100644 --- a/include/asm-x86/lguest_hcall.h +++ b/include/asm-x86/lguest_hcall.h | |||
@@ -18,12 +18,17 @@ | |||
18 | #define LHCALL_LOAD_TLS 16 | 18 | #define LHCALL_LOAD_TLS 16 |
19 | #define LHCALL_NOTIFY 17 | 19 | #define LHCALL_NOTIFY 17 |
20 | 20 | ||
21 | #define LGUEST_TRAP_ENTRY 0x1F | ||
22 | |||
23 | #ifndef __ASSEMBLY__ | ||
24 | #include <asm/hw_irq.h> | ||
25 | |||
21 | /*G:031 First, how does our Guest contact the Host to ask for privileged | 26 | /*G:031 First, how does our Guest contact the Host to ask for privileged |
22 | * operations? There are two ways: the direct way is to make a "hypercall", | 27 | * operations? There are two ways: the direct way is to make a "hypercall", |
23 | * to make requests of the Host Itself. | 28 | * to make requests of the Host Itself. |
24 | * | 29 | * |
25 | * Our hypercall mechanism uses the highest unused trap code (traps 32 and | 30 | * Our hypercall mechanism uses the highest unused trap code (traps 32 and |
26 | * above are used by real hardware interrupts). Seventeen hypercalls are | 31 | * above are used by real hardware interrupts). Fifteen hypercalls are |
27 | * available: the hypercall number is put in the %eax register, and the | 32 | * available: the hypercall number is put in the %eax register, and the |
28 | * arguments (when required) are placed in %edx, %ebx and %ecx. If a return | 33 | * arguments (when required) are placed in %edx, %ebx and %ecx. If a return |
29 | * value makes sense, it's returned in %eax. | 34 | * value makes sense, it's returned in %eax. |
@@ -31,20 +36,15 @@ | |||
31 | * Grossly invalid calls result in Sudden Death at the hands of the vengeful | 36 | * Grossly invalid calls result in Sudden Death at the hands of the vengeful |
32 | * Host, rather than returning failure. This reflects Winston Churchill's | 37 | * Host, rather than returning failure. This reflects Winston Churchill's |
33 | * definition of a gentleman: "someone who is only rude intentionally". */ | 38 | * definition of a gentleman: "someone who is only rude intentionally". */ |
34 | #define LGUEST_TRAP_ENTRY 0x1F | ||
35 | |||
36 | #ifndef __ASSEMBLY__ | ||
37 | #include <asm/hw_irq.h> | ||
38 | |||
39 | static inline unsigned long | 39 | static inline unsigned long |
40 | hcall(unsigned long call, | 40 | hcall(unsigned long call, |
41 | unsigned long arg1, unsigned long arg2, unsigned long arg3) | 41 | unsigned long arg1, unsigned long arg2, unsigned long arg3) |
42 | { | 42 | { |
43 | /* "int" is the Intel instruction to trigger a trap. */ | 43 | /* "int" is the Intel instruction to trigger a trap. */ |
44 | asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY) | 44 | asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY) |
45 | /* The call is in %eax (aka "a"), and can be replaced */ | 45 | /* The call in %eax (aka "a") might be overwritten */ |
46 | : "=a"(call) | 46 | : "=a"(call) |
47 | /* The other arguments are in %eax, %edx, %ebx & %ecx */ | 47 | /* The arguments are in %eax, %edx, %ebx & %ecx */ |
48 | : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3) | 48 | : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3) |
49 | /* "memory" means this might write somewhere in memory. | 49 | /* "memory" means this might write somewhere in memory. |
50 | * This isn't true for all calls, but it's safe to tell | 50 | * This isn't true for all calls, but it's safe to tell |
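As a hedged illustration of the calling convention described in the comment above (hypercall number in %eax, arguments in %edx, %ebx and %ecx), a guest-side caller of the hcall() wrapper might look like this; the function name and the meaning given to the argument are assumptions, not part of the patch.

/* Illustrative only: ask the Host to look at the address in "addr",
 * using the LHCALL_NOTIFY number and hcall() defined in this header. */
static void example_notify_host(unsigned long addr)
{
	hcall(LHCALL_NOTIFY, addr, 0, 0);
}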
diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h index 7056d8684522..e10b7affdfe5 100644 --- a/include/asm-x86/smp_32.h +++ b/include/asm-x86/smp_32.h | |||
@@ -94,9 +94,12 @@ static inline void smp_send_reschedule(int cpu) | |||
94 | { | 94 | { |
95 | smp_ops.smp_send_reschedule(cpu); | 95 | smp_ops.smp_send_reschedule(cpu); |
96 | } | 96 | } |
97 | extern int smp_call_function_mask(cpumask_t mask, | 97 | static inline int smp_call_function_mask(cpumask_t mask, |
98 | void (*func) (void *info), void *info, | 98 | void (*func) (void *info), void *info, |
99 | int wait); | 99 | int wait) |
100 | { | ||
101 | return smp_ops.smp_call_function_mask(mask, func, info, wait); | ||
102 | } | ||
100 | 103 | ||
101 | void native_smp_prepare_boot_cpu(void); | 104 | void native_smp_prepare_boot_cpu(void); |
102 | void native_smp_prepare_cpus(unsigned int max_cpus); | 105 | void native_smp_prepare_cpus(unsigned int max_cpus); |
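A minimal, hypothetical caller of the new inline wrapper, only to show the signature it forwards to smp_ops.smp_call_function_mask(); all names below are invented for the sketch.

static void example_poke_cpu(void *info)
{
	/* Runs on every CPU in the mask; "info" is the caller's cookie. */
}

static int example_call_others(cpumask_t mask)
{
	/* wait == 1: block until all targeted CPUs have run the function. */
	return smp_call_function_mask(mask, example_poke_cpu, NULL, 1);
}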
diff --git a/include/asm-xtensa/dma-mapping.h b/include/asm-xtensa/dma-mapping.h index 8bd9d2c02a24..3c7d537dd15d 100644 --- a/include/asm-xtensa/dma-mapping.h +++ b/include/asm-xtensa/dma-mapping.h | |||
@@ -11,10 +11,10 @@ | |||
11 | #ifndef _XTENSA_DMA_MAPPING_H | 11 | #ifndef _XTENSA_DMA_MAPPING_H |
12 | #define _XTENSA_DMA_MAPPING_H | 12 | #define _XTENSA_DMA_MAPPING_H |
13 | 13 | ||
14 | #include <asm/scatterlist.h> | ||
15 | #include <asm/cache.h> | 14 | #include <asm/cache.h> |
16 | #include <asm/io.h> | 15 | #include <asm/io.h> |
17 | #include <linux/mm.h> | 16 | #include <linux/mm.h> |
17 | #include <linux/scatterlist.h> | ||
18 | 18 | ||
19 | /* | 19 | /* |
20 | * DMA-consistent mapping functions. | 20 | * DMA-consistent mapping functions. |
diff --git a/include/linux/ata.h b/include/linux/ata.h index 8263a7b74d34..128dc7ad4901 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h | |||
@@ -180,6 +180,7 @@ enum { | |||
180 | ATA_CMD_VERIFY_EXT = 0x42, | 180 | ATA_CMD_VERIFY_EXT = 0x42, |
181 | ATA_CMD_STANDBYNOW1 = 0xE0, | 181 | ATA_CMD_STANDBYNOW1 = 0xE0, |
182 | ATA_CMD_IDLEIMMEDIATE = 0xE1, | 182 | ATA_CMD_IDLEIMMEDIATE = 0xE1, |
183 | ATA_CMD_SLEEP = 0xE6, | ||
183 | ATA_CMD_INIT_DEV_PARAMS = 0x91, | 184 | ATA_CMD_INIT_DEV_PARAMS = 0x91, |
184 | ATA_CMD_READ_NATIVE_MAX = 0xF8, | 185 | ATA_CMD_READ_NATIVE_MAX = 0xF8, |
185 | ATA_CMD_READ_NATIVE_MAX_EXT = 0x27, | 186 | ATA_CMD_READ_NATIVE_MAX_EXT = 0x27, |
@@ -235,6 +236,7 @@ enum { | |||
235 | 236 | ||
236 | /* SETFEATURE Sector counts for SATA features */ | 237 | /* SETFEATURE Sector counts for SATA features */ |
237 | SATA_AN = 0x05, /* Asynchronous Notification */ | 238 | SATA_AN = 0x05, /* Asynchronous Notification */ |
239 | SATA_DIPM = 0x03, /* Device Initiated Power Management */ | ||
238 | 240 | ||
239 | /* ATAPI stuff */ | 241 | /* ATAPI stuff */ |
240 | ATAPI_PKT_DMA = (1 << 0), | 242 | ATAPI_PKT_DMA = (1 << 0), |
@@ -377,6 +379,26 @@ struct ata_taskfile { | |||
377 | 379 | ||
378 | #define ata_id_cdb_intr(id) (((id)[0] & 0x60) == 0x20) | 380 | #define ata_id_cdb_intr(id) (((id)[0] & 0x60) == 0x20) |
379 | 381 | ||
382 | static inline bool ata_id_has_hipm(const u16 *id) | ||
383 | { | ||
384 | u16 val = id[76]; | ||
385 | |||
386 | if (val == 0 || val == 0xffff) | ||
387 | return false; | ||
388 | |||
389 | return val & (1 << 9); | ||
390 | } | ||
391 | |||
392 | static inline bool ata_id_has_dipm(const u16 *id) | ||
393 | { | ||
394 | u16 val = id[78]; | ||
395 | |||
396 | if (val == 0 || val == 0xffff) | ||
397 | return false; | ||
398 | |||
399 | return val & (1 << 3); | ||
400 | } | ||
401 | |||
380 | static inline int ata_id_has_fua(const u16 *id) | 402 | static inline int ata_id_has_fua(const u16 *id) |
381 | { | 403 | { |
382 | if ((id[84] & 0xC000) != 0x4000) | 404 | if ((id[84] & 0xC000) != 0x4000) |
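A sketch of how the two new IDENTIFY-word helpers could be consumed during device configuration; the surrounding function is hypothetical, but dev->id and the ATA_DFLAG_* bits come from this patch set:

static void example_dev_config_lpm(struct ata_device *dev)
{
	/* words 76/78 advertise host- and device-initiated link PM */
	if (ata_id_has_hipm(dev->id))
		dev->flags |= ATA_DFLAG_HIPM;
	if (ata_id_has_dipm(dev->id))
		dev->flags |= ATA_DFLAG_DIPM;
}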
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index bbf906a0b419..8396db24d019 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -341,7 +341,6 @@ enum blk_queue_state { | |||
341 | struct blk_queue_tag { | 341 | struct blk_queue_tag { |
342 | struct request **tag_index; /* map of busy tags */ | 342 | struct request **tag_index; /* map of busy tags */ |
343 | unsigned long *tag_map; /* bit map of free/busy tags */ | 343 | unsigned long *tag_map; /* bit map of free/busy tags */ |
344 | struct list_head busy_list; /* fifo list of busy tags */ | ||
345 | int busy; /* current depth */ | 344 | int busy; /* current depth */ |
346 | int max_depth; /* what we will send to device */ | 345 | int max_depth; /* what we will send to device */ |
347 | int real_max_depth; /* what the array can hold */ | 346 | int real_max_depth; /* what the array can hold */ |
@@ -435,6 +434,7 @@ struct request_queue | |||
435 | unsigned int dma_alignment; | 434 | unsigned int dma_alignment; |
436 | 435 | ||
437 | struct blk_queue_tag *queue_tags; | 436 | struct blk_queue_tag *queue_tags; |
437 | struct list_head tag_busy_list; | ||
438 | 438 | ||
439 | unsigned int nr_sorted; | 439 | unsigned int nr_sorted; |
440 | unsigned int in_flight; | 440 | unsigned int in_flight; |
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index c83534ee1e79..0365ec9fc0c9 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h | |||
@@ -59,7 +59,6 @@ extern void *__alloc_bootmem_core(struct bootmem_data *bdata, | |||
59 | unsigned long align, | 59 | unsigned long align, |
60 | unsigned long goal, | 60 | unsigned long goal, |
61 | unsigned long limit); | 61 | unsigned long limit); |
62 | extern void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size); | ||
63 | 62 | ||
64 | #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE | 63 | #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE |
65 | extern void reserve_bootmem(unsigned long addr, unsigned long size); | 64 | extern void reserve_bootmem(unsigned long addr, unsigned long size); |
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index c811c8b979ac..c68b67b86ef1 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
@@ -101,6 +101,12 @@ extern void __chk_io_ptr(const volatile void __iomem *); | |||
101 | #undef __must_check | 101 | #undef __must_check |
102 | #define __must_check | 102 | #define __must_check |
103 | #endif | 103 | #endif |
104 | #ifndef CONFIG_ENABLE_WARN_DEPRECATED | ||
105 | #undef __deprecated | ||
106 | #undef __deprecated_for_modules | ||
107 | #define __deprecated | ||
108 | #define __deprecated_for_modules | ||
109 | #endif | ||
104 | 110 | ||
105 | /* | 111 | /* |
106 | * Allow us to avoid 'defined but not used' warnings on functions and data, | 112 | * Allow us to avoid 'defined but not used' warnings on functions and data, |
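With the new guard, __deprecated only expands to the GCC attribute when CONFIG_ENABLE_WARN_DEPRECATED is set. A minimal illustration (old_example_api() is an invented name):

/* Callers see "deprecated" warnings only with CONFIG_ENABLE_WARN_DEPRECATED=y */
extern int old_example_api(int arg) __deprecated;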
diff --git a/include/linux/completion.h b/include/linux/completion.h index 268c5a4a2bd4..33d6aaf94447 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h | |||
@@ -42,15 +42,15 @@ static inline void init_completion(struct completion *x) | |||
42 | init_waitqueue_head(&x->wait); | 42 | init_waitqueue_head(&x->wait); |
43 | } | 43 | } |
44 | 44 | ||
45 | extern void FASTCALL(wait_for_completion(struct completion *)); | 45 | extern void wait_for_completion(struct completion *); |
46 | extern int FASTCALL(wait_for_completion_interruptible(struct completion *x)); | 46 | extern int wait_for_completion_interruptible(struct completion *x); |
47 | extern unsigned long FASTCALL(wait_for_completion_timeout(struct completion *x, | 47 | extern unsigned long wait_for_completion_timeout(struct completion *x, |
48 | unsigned long timeout)); | 48 | unsigned long timeout); |
49 | extern unsigned long FASTCALL(wait_for_completion_interruptible_timeout( | 49 | extern unsigned long wait_for_completion_interruptible_timeout( |
50 | struct completion *x, unsigned long timeout)); | 50 | struct completion *x, unsigned long timeout); |
51 | 51 | ||
52 | extern void FASTCALL(complete(struct completion *)); | 52 | extern void complete(struct completion *); |
53 | extern void FASTCALL(complete_all(struct completion *)); | 53 | extern void complete_all(struct completion *); |
54 | 54 | ||
55 | #define INIT_COMPLETION(x) ((x).done = 0) | 55 | #define INIT_COMPLETION(x) ((x).done = 0) |
56 | 56 | ||
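Dropping FASTCALL changes no call sites; the usual completion pattern still applies. A small sketch (the completion, waiter and producer are invented for illustration):

static DECLARE_COMPLETION(example_done);

static int example_waiter(void *unused)
{
	wait_for_completion(&example_done);	/* sleep until complete() */
	return 0;
}

static void example_producer(void)
{
	complete(&example_done);		/* wake one waiter */
}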
diff --git a/include/linux/dccp.h b/include/linux/dccp.h index f3fc4392e93d..333c3ea82a5d 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h | |||
@@ -144,6 +144,8 @@ enum dccp_reset_codes { | |||
144 | DCCP_RESET_CODE_TOO_BUSY, | 144 | DCCP_RESET_CODE_TOO_BUSY, |
145 | DCCP_RESET_CODE_BAD_INIT_COOKIE, | 145 | DCCP_RESET_CODE_BAD_INIT_COOKIE, |
146 | DCCP_RESET_CODE_AGGRESSION_PENALTY, | 146 | DCCP_RESET_CODE_AGGRESSION_PENALTY, |
147 | |||
148 | DCCP_MAX_RESET_CODES /* Leave at the end! */ | ||
147 | }; | 149 | }; |
148 | 150 | ||
149 | /* DCCP options */ | 151 | /* DCCP options */ |
@@ -270,10 +272,9 @@ static inline struct dccp_hdr *dccp_zeroed_hdr(struct sk_buff *skb, int headlen) | |||
270 | return memset(skb_transport_header(skb), 0, headlen); | 272 | return memset(skb_transport_header(skb), 0, headlen); |
271 | } | 273 | } |
272 | 274 | ||
273 | static inline struct dccp_hdr_ext *dccp_hdrx(const struct sk_buff *skb) | 275 | static inline struct dccp_hdr_ext *dccp_hdrx(const struct dccp_hdr *dh) |
274 | { | 276 | { |
275 | return (struct dccp_hdr_ext *)(skb_transport_header(skb) + | 277 | return (struct dccp_hdr_ext *)((unsigned char *)dh + sizeof(*dh)); |
276 | sizeof(struct dccp_hdr)); | ||
277 | } | 278 | } |
278 | 279 | ||
279 | static inline unsigned int __dccp_basic_hdr_len(const struct dccp_hdr *dh) | 280 | static inline unsigned int __dccp_basic_hdr_len(const struct dccp_hdr *dh) |
@@ -287,13 +288,12 @@ static inline unsigned int dccp_basic_hdr_len(const struct sk_buff *skb) | |||
287 | return __dccp_basic_hdr_len(dh); | 288 | return __dccp_basic_hdr_len(dh); |
288 | } | 289 | } |
289 | 290 | ||
290 | static inline __u64 dccp_hdr_seq(const struct sk_buff *skb) | 291 | static inline __u64 dccp_hdr_seq(const struct dccp_hdr *dh) |
291 | { | 292 | { |
292 | const struct dccp_hdr *dh = dccp_hdr(skb); | ||
293 | __u64 seq_nr = ntohs(dh->dccph_seq); | 293 | __u64 seq_nr = ntohs(dh->dccph_seq); |
294 | 294 | ||
295 | if (dh->dccph_x != 0) | 295 | if (dh->dccph_x != 0) |
296 | seq_nr = (seq_nr << 32) + ntohl(dccp_hdrx(skb)->dccph_seq_low); | 296 | seq_nr = (seq_nr << 32) + ntohl(dccp_hdrx(dh)->dccph_seq_low); |
297 | else | 297 | else |
298 | seq_nr += (u32)dh->dccph_seq2 << 16; | 298 | seq_nr += (u32)dh->dccph_seq2 << 16; |
299 | 299 | ||
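With dccp_hdrx() and dccp_hdr_seq() now taking the header rather than the skb, callers look the header up once and pass it along. A sketch of the adjusted call pattern (the wrapper is invented):

static __u64 example_seq_from_skb(const struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);

	return dccp_hdr_seq(dh);	/* header passed explicitly */
}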
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h index d2a96cbf4f0e..cf79853967ff 100644 --- a/include/linux/eventpoll.h +++ b/include/linux/eventpoll.h | |||
@@ -32,18 +32,13 @@ | |||
32 | * On x86-64 make the 64bit structure have the same alignment as the | 32 | * On x86-64 make the 64bit structure have the same alignment as the |
33 | * 32bit structure. This makes 32bit emulation easier. | 33 | * 32bit structure. This makes 32bit emulation easier. |
34 | * | 34 | * |
35 | * UML/x86_64 needs the same packing as x86_64 - UML + UML_X86 + | 35 | * UML/x86_64 needs the same packing as x86_64 |
36 | * 64_BIT adds up to UML/x86_64. | ||
37 | */ | 36 | */ |
38 | #ifdef __x86_64__ | 37 | #ifdef __x86_64__ |
39 | #define EPOLL_PACKED __attribute__((packed)) | 38 | #define EPOLL_PACKED __attribute__((packed)) |
40 | #else | 39 | #else |
41 | #if defined(CONFIG_UML) && defined(CONFIG_UML_X86) && defined(CONFIG_64BIT) | ||
42 | #define EPOLL_PACKED __attribute__((packed)) | ||
43 | #else | ||
44 | #define EPOLL_PACKED | 40 | #define EPOLL_PACKED |
45 | #endif | 41 | #endif |
46 | #endif | ||
47 | 42 | ||
48 | struct epoll_event { | 43 | struct epoll_event { |
49 | __u32 events; | 44 | __u32 events; |
diff --git a/include/linux/lguest.h b/include/linux/lguest.h index 8beb29134626..175e63f4a8c0 100644 --- a/include/linux/lguest.h +++ b/include/linux/lguest.h | |||
@@ -12,8 +12,8 @@ | |||
12 | #define LG_CLOCK_MAX_DELTA ULONG_MAX | 12 | #define LG_CLOCK_MAX_DELTA ULONG_MAX |
13 | 13 | ||
14 | /*G:032 The second method of communicating with the Host is via "struct | 14 | /*G:032 The second method of communicating with the Host is via "struct |
15 | * lguest_data". The Guest's very first hypercall is to tell the Host where | 15 | * lguest_data". Once the Guest's initialization hypercall tells the Host where |
16 | * this is, and then the Guest and Host both publish information in it. :*/ | 16 | * this is, the Guest and Host both publish information in it. :*/ |
17 | struct lguest_data | 17 | struct lguest_data |
18 | { | 18 | { |
19 | /* 512 == enabled (same as eflags in normal hardware). The Guest | 19 | /* 512 == enabled (same as eflags in normal hardware). The Guest |
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h index 61e1e3e6b1cc..697104da91f1 100644 --- a/include/linux/lguest_launcher.h +++ b/include/linux/lguest_launcher.h | |||
@@ -1,17 +1,7 @@ | |||
1 | #ifndef _ASM_LGUEST_USER | 1 | #ifndef _LINUX_LGUEST_LAUNCHER |
2 | #define _ASM_LGUEST_USER | 2 | #define _LINUX_LGUEST_LAUNCHER |
3 | /* Everything the "lguest" userspace program needs to know. */ | 3 | /* Everything the "lguest" userspace program needs to know. */ |
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | /* They can register up to 32 arrays of lguest_dma. */ | ||
6 | #define LGUEST_MAX_DMA 32 | ||
7 | /* At most we can dma 16 lguest_dma in one op. */ | ||
8 | #define LGUEST_MAX_DMA_SECTIONS 16 | ||
9 | |||
10 | /* How many devices? Assume each one wants up to two dma arrays per device. */ | ||
11 | #define LGUEST_MAX_DEVICES (LGUEST_MAX_DMA/2) | ||
12 | |||
13 | /* Where the Host expects the Guest to SEND_DMA console output to. */ | ||
14 | #define LGUEST_CONSOLE_DMA_KEY 0 | ||
15 | 5 | ||
16 | /*D:010 | 6 | /*D:010 |
17 | * Drivers | 7 | * Drivers |
@@ -20,7 +10,11 @@ | |||
20 | * real devices (think of the damage it could do!) we provide virtual devices. | 10 | * real devices (think of the damage it could do!) we provide virtual devices. |
21 | * We could emulate a PCI bus with various devices on it, but that is a fairly | 11 | * We could emulate a PCI bus with various devices on it, but that is a fairly |
22 | * complex burden for the Host and suboptimal for the Guest, so we have our own | 12 | * complex burden for the Host and suboptimal for the Guest, so we have our own |
23 | * "lguest" bus and simple drivers. | 13 | * simple lguest bus and we use "virtio" drivers. These drivers need a set of |
14 | * routines from us which will actually do the virtual I/O, but they handle all | ||
15 | * the net/block/console stuff themselves. This means that if we want to add | ||
16 | * a new device, we simply need to write a new virtio driver and create support | ||
17 | * for it in the Launcher: this code won't need to change. | ||
24 | * | 18 | * |
25 | * Devices are described by a simplified ID, a status byte, and some "config" | 19 | * Devices are described by a simplified ID, a status byte, and some "config" |
26 | * bytes which describe this device's configuration. This is placed by the | 20 | * bytes which describe this device's configuration. This is placed by the |
@@ -51,9 +45,9 @@ struct lguest_vqconfig { | |||
51 | /* Write command first word is a request. */ | 45 | /* Write command first word is a request. */ |
52 | enum lguest_req | 46 | enum lguest_req |
53 | { | 47 | { |
54 | LHREQ_INITIALIZE, /* + pfnlimit, pgdir, start, pageoffset */ | 48 | LHREQ_INITIALIZE, /* + base, pfnlimit, pgdir, start */ |
55 | LHREQ_GETDMA, /* No longer used */ | 49 | LHREQ_GETDMA, /* No longer used */ |
56 | LHREQ_IRQ, /* + irq */ | 50 | LHREQ_IRQ, /* + irq */ |
57 | LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */ | 51 | LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */ |
58 | }; | 52 | }; |
59 | #endif /* _ASM_LGUEST_USER */ | 53 | #endif /* _LINUX_LGUEST_LAUNCHER */ |
diff --git a/include/linux/libata.h b/include/linux/libata.h index 6fd24e03622e..147ccc40c8af 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -133,11 +133,14 @@ enum { | |||
133 | ATA_DFLAG_ACPI_PENDING = (1 << 5), /* ACPI resume action pending */ | 133 | ATA_DFLAG_ACPI_PENDING = (1 << 5), /* ACPI resume action pending */ |
134 | ATA_DFLAG_ACPI_FAILED = (1 << 6), /* ACPI on devcfg has failed */ | 134 | ATA_DFLAG_ACPI_FAILED = (1 << 6), /* ACPI on devcfg has failed */ |
135 | ATA_DFLAG_AN = (1 << 7), /* AN configured */ | 135 | ATA_DFLAG_AN = (1 << 7), /* AN configured */ |
136 | ATA_DFLAG_HIPM = (1 << 8), /* device supports HIPM */ | ||
137 | ATA_DFLAG_DIPM = (1 << 9), /* device supports DIPM */ | ||
136 | ATA_DFLAG_CFG_MASK = (1 << 12) - 1, | 138 | ATA_DFLAG_CFG_MASK = (1 << 12) - 1, |
137 | 139 | ||
138 | ATA_DFLAG_PIO = (1 << 12), /* device limited to PIO mode */ | 140 | ATA_DFLAG_PIO = (1 << 12), /* device limited to PIO mode */ |
139 | ATA_DFLAG_NCQ_OFF = (1 << 13), /* device limited to non-NCQ mode */ | 141 | ATA_DFLAG_NCQ_OFF = (1 << 13), /* device limited to non-NCQ mode */ |
140 | ATA_DFLAG_SPUNDOWN = (1 << 14), /* XXX: for spindown_compat */ | 142 | ATA_DFLAG_SPUNDOWN = (1 << 14), /* XXX: for spindown_compat */ |
143 | ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */ | ||
141 | ATA_DFLAG_INIT_MASK = (1 << 16) - 1, | 144 | ATA_DFLAG_INIT_MASK = (1 << 16) - 1, |
142 | 145 | ||
143 | ATA_DFLAG_DETACH = (1 << 16), | 146 | ATA_DFLAG_DETACH = (1 << 16), |
@@ -185,6 +188,7 @@ enum { | |||
185 | ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */ | 188 | ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */ |
186 | ATA_FLAG_AN = (1 << 18), /* controller supports AN */ | 189 | ATA_FLAG_AN = (1 << 18), /* controller supports AN */ |
187 | ATA_FLAG_PMP = (1 << 19), /* controller supports PMP */ | 190 | ATA_FLAG_PMP = (1 << 19), /* controller supports PMP */ |
191 | ATA_FLAG_IPM = (1 << 20), /* driver can handle IPM */ | ||
188 | 192 | ||
189 | /* The following flag belongs to ap->pflags but is kept in | 193 | /* The following flag belongs to ap->pflags but is kept in |
190 | * ap->flags because it's referenced in many LLDs and will be | 194 | * ap->flags because it's referenced in many LLDs and will be |
@@ -234,6 +238,13 @@ enum { | |||
234 | ATA_TMOUT_INTERNAL = 30 * HZ, | 238 | ATA_TMOUT_INTERNAL = 30 * HZ, |
235 | ATA_TMOUT_INTERNAL_QUICK = 5 * HZ, | 239 | ATA_TMOUT_INTERNAL_QUICK = 5 * HZ, |
236 | 240 | ||
241 | /* FIXME: GoVault needs 2s but we can't afford that without | ||
242 | * parallel probing. 800ms is enough for iVDR disk | ||
243 | * HHD424020F7SV00. Increase to 2secs when parallel probing | ||
244 | * is in place. | ||
245 | */ | ||
246 | ATA_TMOUT_FF_WAIT = 4 * HZ / 5, | ||
247 | |||
237 | /* ATA bus states */ | 248 | /* ATA bus states */ |
238 | BUS_UNKNOWN = 0, | 249 | BUS_UNKNOWN = 0, |
239 | BUS_DMA = 1, | 250 | BUS_DMA = 1, |
@@ -294,6 +305,7 @@ enum { | |||
294 | ATA_EHI_RESUME_LINK = (1 << 1), /* resume link (reset modifier) */ | 305 | ATA_EHI_RESUME_LINK = (1 << 1), /* resume link (reset modifier) */ |
295 | ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ | 306 | ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ |
296 | ATA_EHI_QUIET = (1 << 3), /* be quiet */ | 307 | ATA_EHI_QUIET = (1 << 3), /* be quiet */ |
308 | ATA_EHI_LPM = (1 << 4), /* link power management action */ | ||
297 | 309 | ||
298 | ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */ | 310 | ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */ |
299 | ATA_EHI_DID_HARDRESET = (1 << 17), /* already soft-reset this port */ | 311 | ATA_EHI_DID_HARDRESET = (1 << 17), /* already soft-reset this port */ |
@@ -325,6 +337,7 @@ enum { | |||
325 | ATA_HORKAGE_BROKEN_HPA = (1 << 4), /* Broken HPA */ | 337 | ATA_HORKAGE_BROKEN_HPA = (1 << 4), /* Broken HPA */ |
326 | ATA_HORKAGE_SKIP_PM = (1 << 5), /* Skip PM operations */ | 338 | ATA_HORKAGE_SKIP_PM = (1 << 5), /* Skip PM operations */ |
327 | ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */ | 339 | ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */ |
340 | ATA_HORKAGE_IPM = (1 << 7), /* Link PM problems */ | ||
328 | 341 | ||
329 | /* DMA mask for user DMA control: User visible values; DO NOT | 342 | /* DMA mask for user DMA control: User visible values; DO NOT |
330 | renumber */ | 343 | renumber */ |
@@ -370,6 +383,18 @@ typedef int (*ata_reset_fn_t)(struct ata_link *link, unsigned int *classes, | |||
370 | unsigned long deadline); | 383 | unsigned long deadline); |
371 | typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes); | 384 | typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes); |
372 | 385 | ||
386 | /* | ||
387 | * host pm policy: If you alter this, you also need to alter libata-scsi.c | ||
388 | * (for the ascii descriptions) | ||
389 | */ | ||
390 | enum link_pm { | ||
391 | NOT_AVAILABLE, | ||
392 | MIN_POWER, | ||
393 | MAX_PERFORMANCE, | ||
394 | MEDIUM_POWER, | ||
395 | }; | ||
396 | extern struct class_device_attribute class_device_attr_link_power_management_policy; | ||
397 | |||
373 | struct ata_ioports { | 398 | struct ata_ioports { |
374 | void __iomem *cmd_addr; | 399 | void __iomem *cmd_addr; |
375 | void __iomem *data_addr; | 400 | void __iomem *data_addr; |
@@ -616,6 +641,7 @@ struct ata_port { | |||
616 | 641 | ||
617 | pm_message_t pm_mesg; | 642 | pm_message_t pm_mesg; |
618 | int *pm_result; | 643 | int *pm_result; |
644 | enum link_pm pm_policy; | ||
619 | 645 | ||
620 | struct timer_list fastdrain_timer; | 646 | struct timer_list fastdrain_timer; |
621 | unsigned long fastdrain_cnt; | 647 | unsigned long fastdrain_cnt; |
@@ -683,7 +709,8 @@ struct ata_port_operations { | |||
683 | 709 | ||
684 | int (*port_suspend) (struct ata_port *ap, pm_message_t mesg); | 710 | int (*port_suspend) (struct ata_port *ap, pm_message_t mesg); |
685 | int (*port_resume) (struct ata_port *ap); | 711 | int (*port_resume) (struct ata_port *ap); |
686 | 712 | int (*enable_pm) (struct ata_port *ap, enum link_pm policy); | |
713 | void (*disable_pm) (struct ata_port *ap); | ||
687 | int (*port_start) (struct ata_port *ap); | 714 | int (*port_start) (struct ata_port *ap); |
688 | void (*port_stop) (struct ata_port *ap); | 715 | void (*port_stop) (struct ata_port *ap); |
689 | 716 | ||
@@ -799,6 +826,7 @@ extern void ata_host_resume(struct ata_host *host); | |||
799 | extern int ata_ratelimit(void); | 826 | extern int ata_ratelimit(void); |
800 | extern int ata_busy_sleep(struct ata_port *ap, | 827 | extern int ata_busy_sleep(struct ata_port *ap, |
801 | unsigned long timeout_pat, unsigned long timeout); | 828 | unsigned long timeout_pat, unsigned long timeout); |
829 | extern void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline); | ||
802 | extern int ata_wait_ready(struct ata_port *ap, unsigned long deadline); | 830 | extern int ata_wait_ready(struct ata_port *ap, unsigned long deadline); |
803 | extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn, | 831 | extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn, |
804 | void *data, unsigned long delay); | 832 | void *data, unsigned long delay); |
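A hedged sketch of how a low-level driver might wire up the two new port operations; everything named example_* is invented, only the callback signatures come from the patch:

static int example_enable_pm(struct ata_port *ap, enum link_pm policy)
{
	/* program the controller for the requested link PM policy */
	return 0;
}

static void example_disable_pm(struct ata_port *ap)
{
	/* put the link back to full power */
}

static struct ata_port_operations example_port_ops = {
	.enable_pm	= example_enable_pm,
	.disable_pm	= example_disable_pm,
	/* ... remaining mandatory callbacks omitted in this sketch ... */
};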
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h index 3f272396642b..8df230a279a0 100644 --- a/include/linux/mv643xx_eth.h +++ b/include/linux/mv643xx_eth.h | |||
@@ -8,6 +8,9 @@ | |||
8 | #define MV643XX_ETH_NAME "mv643xx_eth" | 8 | #define MV643XX_ETH_NAME "mv643xx_eth" |
9 | #define MV643XX_ETH_SHARED_REGS 0x2000 | 9 | #define MV643XX_ETH_SHARED_REGS 0x2000 |
10 | #define MV643XX_ETH_SHARED_REGS_SIZE 0x2000 | 10 | #define MV643XX_ETH_SHARED_REGS_SIZE 0x2000 |
11 | #define MV643XX_ETH_BAR_4 0x220 | ||
12 | #define MV643XX_ETH_SIZE_REG_4 0x224 | ||
13 | #define MV643XX_ETH_BASE_ADDR_ENABLE_REG 0x0290 | ||
11 | 14 | ||
12 | struct mv643xx_eth_platform_data { | 15 | struct mv643xx_eth_platform_data { |
13 | int port_number; | 16 | int port_number; |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c4de536cefa3..9b0c8f12373e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -390,7 +390,7 @@ static inline void napi_complete(struct napi_struct *n) | |||
390 | static inline void napi_disable(struct napi_struct *n) | 390 | static inline void napi_disable(struct napi_struct *n) |
391 | { | 391 | { |
392 | while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) | 392 | while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) |
393 | msleep_interruptible(1); | 393 | msleep(1); |
394 | } | 394 | } |
395 | 395 | ||
396 | /** | 396 | /** |
@@ -669,6 +669,8 @@ struct net_device | |||
669 | #define HAVE_SET_MAC_ADDR | 669 | #define HAVE_SET_MAC_ADDR |
670 | int (*set_mac_address)(struct net_device *dev, | 670 | int (*set_mac_address)(struct net_device *dev, |
671 | void *addr); | 671 | void *addr); |
672 | #define HAVE_VALIDATE_ADDR | ||
673 | int (*validate_addr)(struct net_device *dev); | ||
672 | #define HAVE_PRIVATE_IOCTL | 674 | #define HAVE_PRIVATE_IOCTL |
673 | int (*do_ioctl)(struct net_device *dev, | 675 | int (*do_ioctl)(struct net_device *dev, |
674 | struct ifreq *ifr, int cmd); | 676 | struct ifreq *ifr, int cmd); |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 4e10a074ca56..e44aac8cf5ff 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -1236,6 +1236,10 @@ | |||
1236 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE 0x0560 | 1236 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE 0x0560 |
1237 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE 0x056C | 1237 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE 0x056C |
1238 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759 | 1238 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759 |
1239 | #define PCI_DEVICE_ID_NVIDIA_NVENET_32 0x0760 | ||
1240 | #define PCI_DEVICE_ID_NVIDIA_NVENET_33 0x0761 | ||
1241 | #define PCI_DEVICE_ID_NVIDIA_NVENET_34 0x0762 | ||
1242 | #define PCI_DEVICE_ID_NVIDIA_NVENET_35 0x0763 | ||
1239 | 1243 | ||
1240 | #define PCI_VENDOR_ID_IMS 0x10e0 | 1244 | #define PCI_VENDOR_ID_IMS 0x10e0 |
1241 | #define PCI_DEVICE_ID_IMS_TT128 0x9128 | 1245 | #define PCI_DEVICE_ID_IMS_TT128 0x9128 |
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index df7ddcee7c4b..32326c293d7b 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef _LINUX_SCATTERLIST_H | 1 | #ifndef _LINUX_SCATTERLIST_H |
2 | #define _LINUX_SCATTERLIST_H | 2 | #define _LINUX_SCATTERLIST_H |
3 | 3 | ||
4 | #include <asm/types.h> | ||
4 | #include <asm/scatterlist.h> | 5 | #include <asm/scatterlist.h> |
5 | #include <linux/mm.h> | 6 | #include <linux/mm.h> |
6 | #include <linux/string.h> | 7 | #include <linux/string.h> |
@@ -26,18 +27,16 @@ | |||
26 | #define SG_MAGIC 0x87654321 | 27 | #define SG_MAGIC 0x87654321 |
27 | 28 | ||
28 | /** | 29 | /** |
29 | * sg_set_page - Set sg entry to point at given page | 30 | * sg_assign_page - Assign a given page to an SG entry |
30 | * @sg: SG entry | 31 | * @sg: SG entry |
31 | * @page: The page | 32 | * @page: The page |
32 | * | 33 | * |
33 | * Description: | 34 | * Description: |
34 | * Use this function to set an sg entry pointing at a page, never assign | 35 | * Assign page to sg entry. Also see sg_set_page(), the most commonly used |
35 | * the page directly. We encode sg table information in the lower bits | 36 | * variant. |
36 | * of the page pointer. See sg_page() for looking up the page belonging | ||
37 | * to an sg entry. | ||
38 | * | 37 | * |
39 | **/ | 38 | **/ |
40 | static inline void sg_set_page(struct scatterlist *sg, struct page *page) | 39 | static inline void sg_assign_page(struct scatterlist *sg, struct page *page) |
41 | { | 40 | { |
42 | unsigned long page_link = sg->page_link & 0x3; | 41 | unsigned long page_link = sg->page_link & 0x3; |
43 | 42 | ||
@@ -52,6 +51,28 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page) | |||
52 | sg->page_link = page_link | (unsigned long) page; | 51 | sg->page_link = page_link | (unsigned long) page; |
53 | } | 52 | } |
54 | 53 | ||
54 | /** | ||
55 | * sg_set_page - Set sg entry to point at given page | ||
56 | * @sg: SG entry | ||
57 | * @page: The page | ||
58 | * @len: Length of data | ||
59 | * @offset: Offset into page | ||
60 | * | ||
61 | * Description: | ||
62 | * Use this function to set an sg entry pointing at a page, never assign | ||
63 | * the page directly. We encode sg table information in the lower bits | ||
64 | * of the page pointer. See sg_page() for looking up the page belonging | ||
65 | * to an sg entry. | ||
66 | * | ||
67 | **/ | ||
68 | static inline void sg_set_page(struct scatterlist *sg, struct page *page, | ||
69 | unsigned int len, unsigned int offset) | ||
70 | { | ||
71 | sg_assign_page(sg, page); | ||
72 | sg->offset = offset; | ||
73 | sg->length = len; | ||
74 | } | ||
75 | |||
55 | #define sg_page(sg) ((struct page *) ((sg)->page_link & ~0x3)) | 76 | #define sg_page(sg) ((struct page *) ((sg)->page_link & ~0x3)) |
56 | 77 | ||
57 | /** | 78 | /** |
@@ -64,9 +85,7 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page) | |||
64 | static inline void sg_set_buf(struct scatterlist *sg, const void *buf, | 85 | static inline void sg_set_buf(struct scatterlist *sg, const void *buf, |
65 | unsigned int buflen) | 86 | unsigned int buflen) |
66 | { | 87 | { |
67 | sg_set_page(sg, virt_to_page(buf)); | 88 | sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); |
68 | sg->offset = offset_in_page(buf); | ||
69 | sg->length = buflen; | ||
70 | } | 89 | } |
71 | 90 | ||
72 | /* | 91 | /* |
@@ -131,7 +150,7 @@ static inline struct scatterlist *sg_last(struct scatterlist *sgl, | |||
131 | struct scatterlist *ret = &sgl[nents - 1]; | 150 | struct scatterlist *ret = &sgl[nents - 1]; |
132 | #else | 151 | #else |
133 | struct scatterlist *sg, *ret = NULL; | 152 | struct scatterlist *sg, *ret = NULL; |
134 | int i; | 153 | unsigned int i; |
135 | 154 | ||
136 | for_each_sg(sgl, sg, nents, i) | 155 | for_each_sg(sgl, sg, nents, i) |
137 | ret = sg; | 156 | ret = sg; |
@@ -160,7 +179,11 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, | |||
160 | #ifndef ARCH_HAS_SG_CHAIN | 179 | #ifndef ARCH_HAS_SG_CHAIN |
161 | BUG(); | 180 | BUG(); |
162 | #endif | 181 | #endif |
163 | prv[prv_nents - 1].page_link = (unsigned long) sgl | 0x01; | 182 | /* |
183 | * Set lowest bit to indicate a link pointer, and make sure to clear | ||
184 | * the termination bit if it happens to be set. | ||
185 | */ | ||
186 | prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02; | ||
164 | } | 187 | } |
165 | 188 | ||
166 | /** | 189 | /** |
@@ -220,7 +243,7 @@ static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents) | |||
220 | sg_mark_end(sgl, nents); | 243 | sg_mark_end(sgl, nents); |
221 | #ifdef CONFIG_DEBUG_SG | 244 | #ifdef CONFIG_DEBUG_SG |
222 | { | 245 | { |
223 | int i; | 246 | unsigned int i; |
224 | for (i = 0; i < nents; i++) | 247 | for (i = 0; i < nents; i++) |
225 | sgl[i].sg_magic = SG_MAGIC; | 248 | sgl[i].sg_magic = SG_MAGIC; |
226 | } | 249 | } |
@@ -237,7 +260,7 @@ static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents) | |||
237 | * on the sg page. | 260 | * on the sg page. |
238 | * | 261 | * |
239 | **/ | 262 | **/ |
240 | static inline unsigned long sg_phys(struct scatterlist *sg) | 263 | static inline dma_addr_t sg_phys(struct scatterlist *sg) |
241 | { | 264 | { |
242 | return page_to_phys(sg_page(sg)) + sg->offset; | 265 | return page_to_phys(sg_page(sg)) + sg->offset; |
243 | } | 266 | } |
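The practical effect of the sg_assign_page()/sg_set_page() split is that callers now hand length and offset to sg_set_page() directly instead of poking sg->offset and sg->length afterwards. A sketch (the wrapper and its arguments are illustrative):

static void example_map_one(struct scatterlist *sg, struct page *page,
			    unsigned int len, unsigned int offset)
{
	/* page, length and offset set in one call; no direct sg->offset writes */
	sg_set_page(sg, page, len, offset);
}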
diff --git a/include/linux/sched.h b/include/linux/sched.h index 13df99fb2769..3c07d595979f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -828,12 +828,17 @@ struct sched_class { | |||
828 | struct task_struct * (*pick_next_task) (struct rq *rq); | 828 | struct task_struct * (*pick_next_task) (struct rq *rq); |
829 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); | 829 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); |
830 | 830 | ||
831 | #ifdef CONFIG_SMP | ||
831 | unsigned long (*load_balance) (struct rq *this_rq, int this_cpu, | 832 | unsigned long (*load_balance) (struct rq *this_rq, int this_cpu, |
832 | struct rq *busiest, | 833 | struct rq *busiest, unsigned long max_load_move, |
833 | unsigned long max_nr_move, unsigned long max_load_move, | ||
834 | struct sched_domain *sd, enum cpu_idle_type idle, | 834 | struct sched_domain *sd, enum cpu_idle_type idle, |
835 | int *all_pinned, int *this_best_prio); | 835 | int *all_pinned, int *this_best_prio); |
836 | 836 | ||
837 | int (*move_one_task) (struct rq *this_rq, int this_cpu, | ||
838 | struct rq *busiest, struct sched_domain *sd, | ||
839 | enum cpu_idle_type idle); | ||
840 | #endif | ||
841 | |||
837 | void (*set_curr_task) (struct rq *rq); | 842 | void (*set_curr_task) (struct rq *rq); |
838 | void (*task_tick) (struct rq *rq, struct task_struct *p); | 843 | void (*task_tick) (struct rq *rq, struct task_struct *p); |
839 | void (*task_new) (struct rq *rq, struct task_struct *p); | 844 | void (*task_new) (struct rq *rq, struct task_struct *p); |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index fd4e12f24270..94e49915a8c0 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -994,7 +994,7 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len) | |||
994 | * | 994 | * |
995 | * Return the number of bytes of free space at the head of an &sk_buff. | 995 | * Return the number of bytes of free space at the head of an &sk_buff. |
996 | */ | 996 | */ |
997 | static inline int skb_headroom(const struct sk_buff *skb) | 997 | static inline unsigned int skb_headroom(const struct sk_buff *skb) |
998 | { | 998 | { |
999 | return skb->data - skb->head; | 999 | return skb->data - skb->head; |
1000 | } | 1000 | } |
@@ -1347,7 +1347,7 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev, | |||
1347 | * Returns true if modifying the header part of the cloned buffer | 1347 | * Returns true if modifying the header part of the cloned buffer |
1348 | * does not require the data to be copied. | 1348 | * does not require the data to be copied. |
1349 | */ | 1349 | */ |
1350 | static inline int skb_clone_writable(struct sk_buff *skb, int len) | 1350 | static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len) |
1351 | { | 1351 | { |
1352 | return !skb_header_cloned(skb) && | 1352 | return !skb_header_cloned(skb) && |
1353 | skb_headroom(skb) + len <= skb->hdr_len; | 1353 | skb_headroom(skb) + len <= skb->hdr_len; |
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h index 0013a0d8dc6b..87b895d5c786 100644 --- a/include/linux/sunrpc/rpc_rdma.h +++ b/include/linux/sunrpc/rpc_rdma.h | |||
@@ -41,17 +41,17 @@ | |||
41 | #define _LINUX_SUNRPC_RPC_RDMA_H | 41 | #define _LINUX_SUNRPC_RPC_RDMA_H |
42 | 42 | ||
43 | struct rpcrdma_segment { | 43 | struct rpcrdma_segment { |
44 | uint32_t rs_handle; /* Registered memory handle */ | 44 | __be32 rs_handle; /* Registered memory handle */ |
45 | uint32_t rs_length; /* Length of the chunk in bytes */ | 45 | __be32 rs_length; /* Length of the chunk in bytes */ |
46 | uint64_t rs_offset; /* Chunk virtual address or offset */ | 46 | __be64 rs_offset; /* Chunk virtual address or offset */ |
47 | }; | 47 | }; |
48 | 48 | ||
49 | /* | 49 | /* |
50 | * read chunk(s), encoded as a linked list. | 50 | * read chunk(s), encoded as a linked list. |
51 | */ | 51 | */ |
52 | struct rpcrdma_read_chunk { | 52 | struct rpcrdma_read_chunk { |
53 | uint32_t rc_discrim; /* 1 indicates presence */ | 53 | __be32 rc_discrim; /* 1 indicates presence */ |
54 | uint32_t rc_position; /* Position in XDR stream */ | 54 | __be32 rc_position; /* Position in XDR stream */ |
55 | struct rpcrdma_segment rc_target; | 55 | struct rpcrdma_segment rc_target; |
56 | }; | 56 | }; |
57 | 57 | ||
@@ -66,29 +66,29 @@ struct rpcrdma_write_chunk { | |||
66 | * write chunk(s), encoded as a counted array. | 66 | * write chunk(s), encoded as a counted array. |
67 | */ | 67 | */ |
68 | struct rpcrdma_write_array { | 68 | struct rpcrdma_write_array { |
69 | uint32_t wc_discrim; /* 1 indicates presence */ | 69 | __be32 wc_discrim; /* 1 indicates presence */ |
70 | uint32_t wc_nchunks; /* Array count */ | 70 | __be32 wc_nchunks; /* Array count */ |
71 | struct rpcrdma_write_chunk wc_array[0]; | 71 | struct rpcrdma_write_chunk wc_array[0]; |
72 | }; | 72 | }; |
73 | 73 | ||
74 | struct rpcrdma_msg { | 74 | struct rpcrdma_msg { |
75 | uint32_t rm_xid; /* Mirrors the RPC header xid */ | 75 | __be32 rm_xid; /* Mirrors the RPC header xid */ |
76 | uint32_t rm_vers; /* Version of this protocol */ | 76 | __be32 rm_vers; /* Version of this protocol */ |
77 | uint32_t rm_credit; /* Buffers requested/granted */ | 77 | __be32 rm_credit; /* Buffers requested/granted */ |
78 | uint32_t rm_type; /* Type of message (enum rpcrdma_proc) */ | 78 | __be32 rm_type; /* Type of message (enum rpcrdma_proc) */ |
79 | union { | 79 | union { |
80 | 80 | ||
81 | struct { /* no chunks */ | 81 | struct { /* no chunks */ |
82 | uint32_t rm_empty[3]; /* 3 empty chunk lists */ | 82 | __be32 rm_empty[3]; /* 3 empty chunk lists */ |
83 | } rm_nochunks; | 83 | } rm_nochunks; |
84 | 84 | ||
85 | struct { /* no chunks and padded */ | 85 | struct { /* no chunks and padded */ |
86 | uint32_t rm_align; /* Padding alignment */ | 86 | __be32 rm_align; /* Padding alignment */ |
87 | uint32_t rm_thresh; /* Padding threshold */ | 87 | __be32 rm_thresh; /* Padding threshold */ |
88 | uint32_t rm_pempty[3]; /* 3 empty chunk lists */ | 88 | __be32 rm_pempty[3]; /* 3 empty chunk lists */ |
89 | } rm_padded; | 89 | } rm_padded; |
90 | 90 | ||
91 | uint32_t rm_chunks[0]; /* read, write and reply chunks */ | 91 | __be32 rm_chunks[0]; /* read, write and reply chunks */ |
92 | 92 | ||
93 | } rm_body; | 93 | } rm_body; |
94 | }; | 94 | }; |
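Typing the on-the-wire fields as __be32/__be64 lets sparse catch missing byte swaps. A hedged sketch of the producer side (the helper name and values are invented):

static void example_fill_rdma_header(struct rpcrdma_msg *msg, u32 xid)
{
	msg->rm_xid = cpu_to_be32(xid);		/* explicit endian conversion */
	msg->rm_vers = cpu_to_be32(1);
	msg->rm_credit = cpu_to_be32(32);
}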
diff --git a/include/linux/types.h b/include/linux/types.h index 4f0dad21c917..f4f8d19158e4 100644 --- a/include/linux/types.h +++ b/include/linux/types.h | |||
@@ -37,6 +37,8 @@ typedef __kernel_gid32_t gid_t; | |||
37 | typedef __kernel_uid16_t uid16_t; | 37 | typedef __kernel_uid16_t uid16_t; |
38 | typedef __kernel_gid16_t gid16_t; | 38 | typedef __kernel_gid16_t gid16_t; |
39 | 39 | ||
40 | typedef unsigned long uintptr_t; | ||
41 | |||
40 | #ifdef CONFIG_UID16 | 42 | #ifdef CONFIG_UID16 |
41 | /* This is defined by include/asm-{arch}/posix_types.h */ | 43 | /* This is defined by include/asm-{arch}/posix_types.h */ |
42 | typedef __kernel_old_uid_t old_uid_t; | 44 | typedef __kernel_old_uid_t old_uid_t; |
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 8228b57eb18f..4427dcd1e53a 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h | |||
@@ -26,7 +26,6 @@ | |||
26 | 26 | ||
27 | #include <net/inet_connection_sock.h> | 27 | #include <net/inet_connection_sock.h> |
28 | #include <net/inet_sock.h> | 28 | #include <net/inet_sock.h> |
29 | #include <net/route.h> | ||
30 | #include <net/sock.h> | 29 | #include <net/sock.h> |
31 | #include <net/tcp_states.h> | 30 | #include <net/tcp_states.h> |
32 | 31 | ||
@@ -266,11 +265,6 @@ out: | |||
266 | wake_up(&hashinfo->lhash_wait); | 265 | wake_up(&hashinfo->lhash_wait); |
267 | } | 266 | } |
268 | 267 | ||
269 | static inline int inet_iif(const struct sk_buff *skb) | ||
270 | { | ||
271 | return ((struct rtable *)skb->dst)->rt_iif; | ||
272 | } | ||
273 | |||
274 | extern struct sock *__inet_lookup_listener(struct inet_hashinfo *hashinfo, | 268 | extern struct sock *__inet_lookup_listener(struct inet_hashinfo *hashinfo, |
275 | const __be32 daddr, | 269 | const __be32 daddr, |
276 | const unsigned short hnum, | 270 | const unsigned short hnum, |
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 62daf214931f..70013c5f4e59 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <net/flow.h> | 24 | #include <net/flow.h> |
25 | #include <net/sock.h> | 25 | #include <net/sock.h> |
26 | #include <net/request_sock.h> | 26 | #include <net/request_sock.h> |
27 | #include <net/route.h> | ||
27 | 28 | ||
28 | /** struct ip_options - IP Options | 29 | /** struct ip_options - IP Options |
29 | * | 30 | * |
@@ -190,4 +191,10 @@ static inline int inet_sk_ehashfn(const struct sock *sk) | |||
190 | return inet_ehashfn(laddr, lport, faddr, fport); | 191 | return inet_ehashfn(laddr, lport, faddr, fport); |
191 | } | 192 | } |
192 | 193 | ||
194 | |||
195 | static inline int inet_iif(const struct sk_buff *skb) | ||
196 | { | ||
197 | return ((struct rtable *)skb->dst)->rt_iif; | ||
198 | } | ||
199 | |||
193 | #endif /* _INET_SOCK_H */ | 200 | #endif /* _INET_SOCK_H */ |
diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h index 8dabdd603fe1..eea2e6152389 100644 --- a/include/net/irda/ircomm_tty.h +++ b/include/net/irda/ircomm_tty.h | |||
@@ -127,7 +127,6 @@ extern int ircomm_tty_ioctl(struct tty_struct *tty, struct file *file, | |||
127 | unsigned int cmd, unsigned long arg); | 127 | unsigned int cmd, unsigned long arg); |
128 | extern void ircomm_tty_set_termios(struct tty_struct *tty, | 128 | extern void ircomm_tty_set_termios(struct tty_struct *tty, |
129 | struct ktermios *old_termios); | 129 | struct ktermios *old_termios); |
130 | extern hashbin_t *ircomm_tty; | ||
131 | 130 | ||
132 | #endif | 131 | #endif |
133 | 132 | ||
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 93aa87d32804..5279466606d2 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h | |||
@@ -102,11 +102,9 @@ static inline void release_net(struct net *net) | |||
102 | #ifdef CONFIG_NET_NS | 102 | #ifdef CONFIG_NET_NS |
103 | #define __net_init | 103 | #define __net_init |
104 | #define __net_exit | 104 | #define __net_exit |
105 | #define __net_initdata | ||
106 | #else | 105 | #else |
107 | #define __net_init __init | 106 | #define __net_init __init |
108 | #define __net_exit __exit_refok | 107 | #define __net_exit __exit_refok |
109 | #define __net_initdata __initdata | ||
110 | #endif | 108 | #endif |
111 | 109 | ||
112 | struct pernet_operations { | 110 | struct pernet_operations { |
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index a02ec9e5fea5..c9265518a378 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
@@ -316,4 +316,19 @@ static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen) | |||
316 | return rtab->data[slot]; | 316 | return rtab->data[slot]; |
317 | } | 317 | } |
318 | 318 | ||
319 | #ifdef CONFIG_NET_CLS_ACT | ||
320 | static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask) | ||
321 | { | ||
322 | struct sk_buff *n = skb_clone(skb, gfp_mask); | ||
323 | |||
324 | if (n) { | ||
325 | n->tc_verd = SET_TC_VERD(n->tc_verd, 0); | ||
326 | n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd); | ||
327 | n->tc_verd = CLR_TC_MUNGED(n->tc_verd); | ||
328 | n->iif = skb->iif; | ||
329 | } | ||
330 | return n; | ||
331 | } | ||
332 | #endif | ||
333 | |||
319 | #endif | 334 | #endif |
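A sketch of a caller for the new helper: an action that needs a private copy of the skb would use it so the tc verdict bits are reset and skb->iif is carried over (the wrapper is invented):

static struct sk_buff *example_act_copy(struct sk_buff *skb)
{
	return skb_act_clone(skb, GFP_ATOMIC);	/* NULL if cloning fails */
}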
diff --git a/include/net/sctp/auth.h b/include/net/sctp/auth.h index 4945954a16af..5db261a1e85e 100644 --- a/include/net/sctp/auth.h +++ b/include/net/sctp/auth.h | |||
@@ -88,7 +88,6 @@ static inline void sctp_auth_key_hold(struct sctp_auth_bytes *key) | |||
88 | 88 | ||
89 | void sctp_auth_key_put(struct sctp_auth_bytes *key); | 89 | void sctp_auth_key_put(struct sctp_auth_bytes *key); |
90 | struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp); | 90 | struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp); |
91 | void sctp_auth_shkey_free(struct sctp_shared_key *sh_key); | ||
92 | void sctp_auth_destroy_keys(struct list_head *keys); | 91 | void sctp_auth_destroy_keys(struct list_head *keys); |
93 | int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp); | 92 | int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp); |
94 | struct sctp_shared_key *sctp_auth_get_shkey( | 93 | struct sctp_shared_key *sctp_auth_get_shkey( |
@@ -104,7 +103,7 @@ struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc); | |||
104 | void sctp_auth_asoc_set_default_hmac(struct sctp_association *asoc, | 103 | void sctp_auth_asoc_set_default_hmac(struct sctp_association *asoc, |
105 | struct sctp_hmac_algo_param *hmacs); | 104 | struct sctp_hmac_algo_param *hmacs); |
106 | int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc, | 105 | int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc, |
107 | __u16 hmac_id); | 106 | __be16 hmac_id); |
108 | int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc); | 107 | int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc); |
109 | int sctp_auth_recv_cid(sctp_cid_t chunk, const struct sctp_association *asoc); | 108 | int sctp_auth_recv_cid(sctp_cid_t chunk, const struct sctp_association *asoc); |
110 | void sctp_auth_calculate_hmac(const struct sctp_association *asoc, | 109 | void sctp_auth_calculate_hmac(const struct sctp_association *asoc, |
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 119f5a1ed499..93eb708609e7 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h | |||
@@ -156,7 +156,6 @@ int sctp_primitive_ASCONF(struct sctp_association *, void *arg); | |||
156 | __u32 sctp_start_cksum(__u8 *ptr, __u16 count); | 156 | __u32 sctp_start_cksum(__u8 *ptr, __u16 count); |
157 | __u32 sctp_update_cksum(__u8 *ptr, __u16 count, __u32 cksum); | 157 | __u32 sctp_update_cksum(__u8 *ptr, __u16 count, __u32 cksum); |
158 | __u32 sctp_end_cksum(__u32 cksum); | 158 | __u32 sctp_end_cksum(__u32 cksum); |
159 | __u32 sctp_update_copy_cksum(__u8 *, __u8 *, __u16 count, __u32 cksum); | ||
160 | 159 | ||
161 | /* | 160 | /* |
162 | * sctp/input.c | 161 | * sctp/input.c |
diff --git a/include/net/tcp.h b/include/net/tcp.h index 92049e681258..d695cea7730d 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -803,7 +803,7 @@ static inline int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight) | |||
803 | return left <= tcp_max_burst(tp); | 803 | return left <= tcp_max_burst(tp); |
804 | } | 804 | } |
805 | 805 | ||
806 | static inline void tcp_minshall_update(struct tcp_sock *tp, int mss, | 806 | static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss, |
807 | const struct sk_buff *skb) | 807 | const struct sk_buff *skb) |
808 | { | 808 | { |
809 | if (skb->len < mss) | 809 | if (skb->len < mss) |
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 688f6f5d3285..58dfa82889aa 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
@@ -37,6 +37,8 @@ | |||
37 | extern struct sock *xfrm_nl; | 37 | extern struct sock *xfrm_nl; |
38 | extern u32 sysctl_xfrm_aevent_etime; | 38 | extern u32 sysctl_xfrm_aevent_etime; |
39 | extern u32 sysctl_xfrm_aevent_rseqth; | 39 | extern u32 sysctl_xfrm_aevent_rseqth; |
40 | extern int sysctl_xfrm_larval_drop; | ||
41 | extern u32 sysctl_xfrm_acq_expires; | ||
40 | 42 | ||
41 | extern struct mutex xfrm_cfg_mutex; | 43 | extern struct mutex xfrm_cfg_mutex; |
42 | 44 | ||
diff --git a/init/Kconfig b/init/Kconfig index b7dffa837926..8b88d0bedcbd 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -322,7 +322,6 @@ config CPUSETS | |||
322 | config FAIR_GROUP_SCHED | 322 | config FAIR_GROUP_SCHED |
323 | bool "Fair group CPU scheduler" | 323 | bool "Fair group CPU scheduler" |
324 | default y | 324 | default y |
325 | depends on EXPERIMENTAL | ||
326 | help | 325 | help |
327 | This feature lets the CPU scheduler recognize task groups and control CPU | 326 | This feature lets the CPU scheduler recognize task groups and control CPU |
328 | bandwidth allocation to such task groups. | 327 | bandwidth allocation to such task groups. |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index b6d2ff7e37ee..22a25142e4cf 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -602,7 +602,7 @@ static int hrtimer_switch_to_hres(void) | |||
602 | /* "Retrigger" the interrupt to get things going */ | 602 | /* "Retrigger" the interrupt to get things going */ |
603 | retrigger_next_event(NULL); | 603 | retrigger_next_event(NULL); |
604 | local_irq_restore(flags); | 604 | local_irq_restore(flags); |
605 | printk(KERN_INFO "Switched to high resolution mode on CPU %d\n", | 605 | printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n", |
606 | smp_processor_id()); | 606 | smp_processor_id()); |
607 | return 1; | 607 | return 1; |
608 | } | 608 | } |
diff --git a/kernel/profile.c b/kernel/profile.c index 631b75c25d7e..5e95330e5120 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
@@ -60,6 +60,7 @@ static int __init profile_setup(char * str) | |||
60 | int par; | 60 | int par; |
61 | 61 | ||
62 | if (!strncmp(str, sleepstr, strlen(sleepstr))) { | 62 | if (!strncmp(str, sleepstr, strlen(sleepstr))) { |
63 | #ifdef CONFIG_SCHEDSTATS | ||
63 | prof_on = SLEEP_PROFILING; | 64 | prof_on = SLEEP_PROFILING; |
64 | if (str[strlen(sleepstr)] == ',') | 65 | if (str[strlen(sleepstr)] == ',') |
65 | str += strlen(sleepstr) + 1; | 66 | str += strlen(sleepstr) + 1; |
@@ -68,6 +69,10 @@ static int __init profile_setup(char * str) | |||
68 | printk(KERN_INFO | 69 | printk(KERN_INFO |
69 | "kernel sleep profiling enabled (shift: %ld)\n", | 70 | "kernel sleep profiling enabled (shift: %ld)\n", |
70 | prof_shift); | 71 | prof_shift); |
72 | #else | ||
73 | printk(KERN_WARNING | ||
74 | "kernel sleep profiling requires CONFIG_SCHEDSTATS\n"); | ||
75 | #endif /* CONFIG_SCHEDSTATS */ | ||
71 | } else if (!strncmp(str, schedstr, strlen(schedstr))) { | 76 | } else if (!strncmp(str, schedstr, strlen(schedstr))) { |
72 | prof_on = SCHED_PROFILING; | 77 | prof_on = SCHED_PROFILING; |
73 | if (str[strlen(schedstr)] == ',') | 78 | if (str[strlen(schedstr)] == ',') |
diff --git a/kernel/sched.c b/kernel/sched.c index 2810e562a991..b4fbbc440453 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -66,6 +66,7 @@ | |||
66 | #include <linux/pagemap.h> | 66 | #include <linux/pagemap.h> |
67 | 67 | ||
68 | #include <asm/tlb.h> | 68 | #include <asm/tlb.h> |
69 | #include <asm/irq_regs.h> | ||
69 | 70 | ||
70 | /* | 71 | /* |
71 | * Scheduler clock - returns current time in nanosec units. | 72 | * Scheduler clock - returns current time in nanosec units. |
@@ -837,11 +838,18 @@ struct rq_iterator { | |||
837 | struct task_struct *(*next)(void *); | 838 | struct task_struct *(*next)(void *); |
838 | }; | 839 | }; |
839 | 840 | ||
840 | static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | 841 | #ifdef CONFIG_SMP |
841 | unsigned long max_nr_move, unsigned long max_load_move, | 842 | static unsigned long |
842 | struct sched_domain *sd, enum cpu_idle_type idle, | 843 | balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, |
843 | int *all_pinned, unsigned long *load_moved, | 844 | unsigned long max_load_move, struct sched_domain *sd, |
844 | int *this_best_prio, struct rq_iterator *iterator); | 845 | enum cpu_idle_type idle, int *all_pinned, |
846 | int *this_best_prio, struct rq_iterator *iterator); | ||
847 | |||
848 | static int | ||
849 | iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
850 | struct sched_domain *sd, enum cpu_idle_type idle, | ||
851 | struct rq_iterator *iterator); | ||
852 | #endif | ||
845 | 853 | ||
846 | #include "sched_stats.h" | 854 | #include "sched_stats.h" |
847 | #include "sched_idletask.c" | 855 | #include "sched_idletask.c" |
@@ -2223,17 +2231,17 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, | |||
2223 | return 1; | 2231 | return 1; |
2224 | } | 2232 | } |
2225 | 2233 | ||
2226 | static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | 2234 | static unsigned long |
2227 | unsigned long max_nr_move, unsigned long max_load_move, | 2235 | balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, |
2228 | struct sched_domain *sd, enum cpu_idle_type idle, | 2236 | unsigned long max_load_move, struct sched_domain *sd, |
2229 | int *all_pinned, unsigned long *load_moved, | 2237 | enum cpu_idle_type idle, int *all_pinned, |
2230 | int *this_best_prio, struct rq_iterator *iterator) | 2238 | int *this_best_prio, struct rq_iterator *iterator) |
2231 | { | 2239 | { |
2232 | int pulled = 0, pinned = 0, skip_for_load; | 2240 | int pulled = 0, pinned = 0, skip_for_load; |
2233 | struct task_struct *p; | 2241 | struct task_struct *p; |
2234 | long rem_load_move = max_load_move; | 2242 | long rem_load_move = max_load_move; |
2235 | 2243 | ||
2236 | if (max_nr_move == 0 || max_load_move == 0) | 2244 | if (max_load_move == 0) |
2237 | goto out; | 2245 | goto out; |
2238 | 2246 | ||
2239 | pinned = 1; | 2247 | pinned = 1; |
@@ -2266,7 +2274,7 @@ next: | |||
2266 | * We only want to steal up to the prescribed number of tasks | 2274 | * We only want to steal up to the prescribed number of tasks |
2267 | * and the prescribed amount of weighted load. | 2275 | * and the prescribed amount of weighted load. |
2268 | */ | 2276 | */ |
2269 | if (pulled < max_nr_move && rem_load_move > 0) { | 2277 | if (rem_load_move > 0) { |
2270 | if (p->prio < *this_best_prio) | 2278 | if (p->prio < *this_best_prio) |
2271 | *this_best_prio = p->prio; | 2279 | *this_best_prio = p->prio; |
2272 | p = iterator->next(iterator->arg); | 2280 | p = iterator->next(iterator->arg); |
@@ -2274,7 +2282,7 @@ next: | |||
2274 | } | 2282 | } |
2275 | out: | 2283 | out: |
2276 | /* | 2284 | /* |
2277 | * Right now, this is the only place pull_task() is called, | 2285 | * Right now, this is one of only two places pull_task() is called, |
2278 | * so we can safely collect pull_task() stats here rather than | 2286 | * so we can safely collect pull_task() stats here rather than |
2279 | * inside pull_task(). | 2287 | * inside pull_task(). |
2280 | */ | 2288 | */ |
@@ -2282,8 +2290,8 @@ out: | |||
2282 | 2290 | ||
2283 | if (all_pinned) | 2291 | if (all_pinned) |
2284 | *all_pinned = pinned; | 2292 | *all_pinned = pinned; |
2285 | *load_moved = max_load_move - rem_load_move; | 2293 | |
2286 | return pulled; | 2294 | return max_load_move - rem_load_move; |
2287 | } | 2295 | } |
2288 | 2296 | ||
2289 | /* | 2297 | /* |
@@ -2305,7 +2313,7 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
2305 | do { | 2313 | do { |
2306 | total_load_moved += | 2314 | total_load_moved += |
2307 | class->load_balance(this_rq, this_cpu, busiest, | 2315 | class->load_balance(this_rq, this_cpu, busiest, |
2308 | ULONG_MAX, max_load_move - total_load_moved, | 2316 | max_load_move - total_load_moved, |
2309 | sd, idle, all_pinned, &this_best_prio); | 2317 | sd, idle, all_pinned, &this_best_prio); |
2310 | class = class->next; | 2318 | class = class->next; |
2311 | } while (class && max_load_move > total_load_moved); | 2319 | } while (class && max_load_move > total_load_moved); |
@@ -2313,6 +2321,32 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
2313 | return total_load_moved > 0; | 2321 | return total_load_moved > 0; |
2314 | } | 2322 | } |
2315 | 2323 | ||
2324 | static int | ||
2325 | iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
2326 | struct sched_domain *sd, enum cpu_idle_type idle, | ||
2327 | struct rq_iterator *iterator) | ||
2328 | { | ||
2329 | struct task_struct *p = iterator->start(iterator->arg); | ||
2330 | int pinned = 0; | ||
2331 | |||
2332 | while (p) { | ||
2333 | if (can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) { | ||
2334 | pull_task(busiest, p, this_rq, this_cpu); | ||
2335 | /* | ||
2336 | * Right now, this is only the second place pull_task() | ||
2337 | * is called, so we can safely collect pull_task() | ||
2338 | * stats here rather than inside pull_task(). | ||
2339 | */ | ||
2340 | schedstat_inc(sd, lb_gained[idle]); | ||
2341 | |||
2342 | return 1; | ||
2343 | } | ||
2344 | p = iterator->next(iterator->arg); | ||
2345 | } | ||
2346 | |||
2347 | return 0; | ||
2348 | } | ||
2349 | |||
2316 | /* | 2350 | /* |
2317 | * move_one_task tries to move exactly one task from busiest to this_rq, as | 2351 | * move_one_task tries to move exactly one task from busiest to this_rq, as |
2318 | * part of active balancing operations within "domain". | 2352 | * part of active balancing operations within "domain". |
@@ -2324,12 +2358,9 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
2324 | struct sched_domain *sd, enum cpu_idle_type idle) | 2358 | struct sched_domain *sd, enum cpu_idle_type idle) |
2325 | { | 2359 | { |
2326 | const struct sched_class *class; | 2360 | const struct sched_class *class; |
2327 | int this_best_prio = MAX_PRIO; | ||
2328 | 2361 | ||
2329 | for (class = sched_class_highest; class; class = class->next) | 2362 | for (class = sched_class_highest; class; class = class->next) |
2330 | if (class->load_balance(this_rq, this_cpu, busiest, | 2363 | if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle)) |
2331 | 1, ULONG_MAX, sd, idle, NULL, | ||
2332 | &this_best_prio)) | ||
2333 | return 1; | 2364 | return 1; |
2334 | 2365 | ||
2335 | return 0; | 2366 | return 0; |
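move_one_task() now dispatches to a per-class ->move_one_task() instead of abusing load_balance() with a count of one. A hedged sketch of what a class implementation built on iter_move_one_task() might look like (the names and the trivial iterator stubs are illustrative, not the actual sched_fair/sched_rt code):

static struct task_struct *example_iter_start(void *arg)
{
	/* the real classes return the first candidate task on the busiest rq */
	return NULL;
}

static struct task_struct *example_iter_next(void *arg)
{
	return NULL;
}

static int example_move_one_task(struct rq *this_rq, int this_cpu,
				 struct rq *busiest, struct sched_domain *sd,
				 enum cpu_idle_type idle)
{
	struct rq_iterator it = {
		.arg	= busiest,
		.start	= example_iter_start,
		.next	= example_iter_next,
	};

	return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle, &it);
}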
@@ -3266,18 +3297,6 @@ static inline void idle_balance(int cpu, struct rq *rq) | |||
3266 | { | 3297 | { |
3267 | } | 3298 | } |
3268 | 3299 | ||
3269 | /* Avoid "used but not defined" warning on UP */ | ||
3270 | static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
3271 | unsigned long max_nr_move, unsigned long max_load_move, | ||
3272 | struct sched_domain *sd, enum cpu_idle_type idle, | ||
3273 | int *all_pinned, unsigned long *load_moved, | ||
3274 | int *this_best_prio, struct rq_iterator *iterator) | ||
3275 | { | ||
3276 | *load_moved = 0; | ||
3277 | |||
3278 | return 0; | ||
3279 | } | ||
3280 | |||
3281 | #endif | 3300 | #endif |
3282 | 3301 | ||
3283 | DEFINE_PER_CPU(struct kernel_stat, kstat); | 3302 | DEFINE_PER_CPU(struct kernel_stat, kstat); |
@@ -3507,12 +3526,19 @@ EXPORT_SYMBOL(sub_preempt_count); | |||
3507 | */ | 3526 | */ |
3508 | static noinline void __schedule_bug(struct task_struct *prev) | 3527 | static noinline void __schedule_bug(struct task_struct *prev) |
3509 | { | 3528 | { |
3510 | printk(KERN_ERR "BUG: scheduling while atomic: %s/0x%08x/%d\n", | 3529 | struct pt_regs *regs = get_irq_regs(); |
3511 | prev->comm, preempt_count(), task_pid_nr(prev)); | 3530 | |
3531 | printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", | ||
3532 | prev->comm, prev->pid, preempt_count()); | ||
3533 | |||
3512 | debug_show_held_locks(prev); | 3534 | debug_show_held_locks(prev); |
3513 | if (irqs_disabled()) | 3535 | if (irqs_disabled()) |
3514 | print_irqtrace_events(prev); | 3536 | print_irqtrace_events(prev); |
3515 | dump_stack(); | 3537 | |
3538 | if (regs) | ||
3539 | show_regs(regs); | ||
3540 | else | ||
3541 | dump_stack(); | ||
3516 | } | 3542 | } |
3517 | 3543 | ||
3518 | /* | 3544 | /* |
@@ -3820,7 +3846,7 @@ __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) | |||
3820 | } | 3846 | } |
3821 | EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ | 3847 | EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ |
3822 | 3848 | ||
3823 | void fastcall complete(struct completion *x) | 3849 | void complete(struct completion *x) |
3824 | { | 3850 | { |
3825 | unsigned long flags; | 3851 | unsigned long flags; |
3826 | 3852 | ||
@@ -3832,7 +3858,7 @@ void fastcall complete(struct completion *x) | |||
3832 | } | 3858 | } |
3833 | EXPORT_SYMBOL(complete); | 3859 | EXPORT_SYMBOL(complete); |
3834 | 3860 | ||
3835 | void fastcall complete_all(struct completion *x) | 3861 | void complete_all(struct completion *x) |
3836 | { | 3862 | { |
3837 | unsigned long flags; | 3863 | unsigned long flags; |
3838 | 3864 | ||
@@ -3884,13 +3910,13 @@ wait_for_common(struct completion *x, long timeout, int state) | |||
3884 | return timeout; | 3910 | return timeout; |
3885 | } | 3911 | } |
3886 | 3912 | ||
3887 | void fastcall __sched wait_for_completion(struct completion *x) | 3913 | void __sched wait_for_completion(struct completion *x) |
3888 | { | 3914 | { |
3889 | wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); | 3915 | wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); |
3890 | } | 3916 | } |
3891 | EXPORT_SYMBOL(wait_for_completion); | 3917 | EXPORT_SYMBOL(wait_for_completion); |
3892 | 3918 | ||
3893 | unsigned long fastcall __sched | 3919 | unsigned long __sched |
3894 | wait_for_completion_timeout(struct completion *x, unsigned long timeout) | 3920 | wait_for_completion_timeout(struct completion *x, unsigned long timeout) |
3895 | { | 3921 | { |
3896 | return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE); | 3922 | return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE); |
@@ -3906,7 +3932,7 @@ int __sched wait_for_completion_interruptible(struct completion *x) | |||
3906 | } | 3932 | } |
3907 | EXPORT_SYMBOL(wait_for_completion_interruptible); | 3933 | EXPORT_SYMBOL(wait_for_completion_interruptible); |
3908 | 3934 | ||
3909 | unsigned long fastcall __sched | 3935 | unsigned long __sched |
3910 | wait_for_completion_interruptible_timeout(struct completion *x, | 3936 | wait_for_completion_interruptible_timeout(struct completion *x, |
3911 | unsigned long timeout) | 3937 | unsigned long timeout) |
3912 | { | 3938 | { |
@@ -5461,11 +5487,12 @@ static void register_sched_domain_sysctl(void) | |||
5461 | struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); | 5487 | struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); |
5462 | char buf[32]; | 5488 | char buf[32]; |
5463 | 5489 | ||
5490 | WARN_ON(sd_ctl_dir[0].child); | ||
5491 | sd_ctl_dir[0].child = entry; | ||
5492 | |||
5464 | if (entry == NULL) | 5493 | if (entry == NULL) |
5465 | return; | 5494 | return; |
5466 | 5495 | ||
5467 | sd_ctl_dir[0].child = entry; | ||
5468 | |||
5469 | for_each_online_cpu(i) { | 5496 | for_each_online_cpu(i) { |
5470 | snprintf(buf, 32, "cpu%d", i); | 5497 | snprintf(buf, 32, "cpu%d", i); |
5471 | entry->procname = kstrdup(buf, GFP_KERNEL); | 5498 | entry->procname = kstrdup(buf, GFP_KERNEL); |
@@ -5473,14 +5500,19 @@ static void register_sched_domain_sysctl(void) | |||
5473 | entry->child = sd_alloc_ctl_cpu_table(i); | 5500 | entry->child = sd_alloc_ctl_cpu_table(i); |
5474 | entry++; | 5501 | entry++; |
5475 | } | 5502 | } |
5503 | |||
5504 | WARN_ON(sd_sysctl_header); | ||
5476 | sd_sysctl_header = register_sysctl_table(sd_ctl_root); | 5505 | sd_sysctl_header = register_sysctl_table(sd_ctl_root); |
5477 | } | 5506 | } |
5478 | 5507 | ||
5508 | /* may be called multiple times per register */ | ||
5479 | static void unregister_sched_domain_sysctl(void) | 5509 | static void unregister_sched_domain_sysctl(void) |
5480 | { | 5510 | { |
5481 | unregister_sysctl_table(sd_sysctl_header); | 5511 | if (sd_sysctl_header) |
5512 | unregister_sysctl_table(sd_sysctl_header); | ||
5482 | sd_sysctl_header = NULL; | 5513 | sd_sysctl_header = NULL; |
5483 | sd_free_ctl_entry(&sd_ctl_dir[0].child); | 5514 | if (sd_ctl_dir[0].child) |
5515 | sd_free_ctl_entry(&sd_ctl_dir[0].child); | ||
5484 | } | 5516 | } |
5485 | #else | 5517 | #else |
5486 | static void register_sched_domain_sysctl(void) | 5518 | static void register_sched_domain_sysctl(void) |
@@ -5611,101 +5643,101 @@ int nr_cpu_ids __read_mostly = NR_CPUS; | |||
5611 | EXPORT_SYMBOL(nr_cpu_ids); | 5643 | EXPORT_SYMBOL(nr_cpu_ids); |
5612 | 5644 | ||
5613 | #ifdef CONFIG_SCHED_DEBUG | 5645 | #ifdef CONFIG_SCHED_DEBUG |
5614 | static void sched_domain_debug(struct sched_domain *sd, int cpu) | 5646 | |
5647 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level) | ||
5615 | { | 5648 | { |
5616 | int level = 0; | 5649 | struct sched_group *group = sd->groups; |
5650 | cpumask_t groupmask; | ||
5651 | char str[NR_CPUS]; | ||
5617 | 5652 | ||
5618 | if (!sd) { | 5653 | cpumask_scnprintf(str, NR_CPUS, sd->span); |
5619 | printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); | 5654 | cpus_clear(groupmask); |
5620 | return; | 5655 | |
5656 | printk(KERN_DEBUG "%*s domain %d: ", level, "", level); | ||
5657 | |||
5658 | if (!(sd->flags & SD_LOAD_BALANCE)) { | ||
5659 | printk("does not load-balance\n"); | ||
5660 | if (sd->parent) | ||
5661 | printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" | ||
5662 | " has parent"); | ||
5663 | return -1; | ||
5621 | } | 5664 | } |
5622 | 5665 | ||
5623 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); | 5666 | printk(KERN_CONT "span %s\n", str); |
5667 | |||
5668 | if (!cpu_isset(cpu, sd->span)) { | ||
5669 | printk(KERN_ERR "ERROR: domain->span does not contain " | ||
5670 | "CPU%d\n", cpu); | ||
5671 | } | ||
5672 | if (!cpu_isset(cpu, group->cpumask)) { | ||
5673 | printk(KERN_ERR "ERROR: domain->groups does not contain" | ||
5674 | " CPU%d\n", cpu); | ||
5675 | } | ||
5624 | 5676 | ||
5677 | printk(KERN_DEBUG "%*s groups:", level + 1, ""); | ||
5625 | do { | 5678 | do { |
5626 | int i; | 5679 | if (!group) { |
5627 | char str[NR_CPUS]; | 5680 | printk("\n"); |
5628 | struct sched_group *group = sd->groups; | 5681 | printk(KERN_ERR "ERROR: group is NULL\n"); |
5629 | cpumask_t groupmask; | ||
5630 | |||
5631 | cpumask_scnprintf(str, NR_CPUS, sd->span); | ||
5632 | cpus_clear(groupmask); | ||
5633 | |||
5634 | printk(KERN_DEBUG); | ||
5635 | for (i = 0; i < level + 1; i++) | ||
5636 | printk(" "); | ||
5637 | printk("domain %d: ", level); | ||
5638 | |||
5639 | if (!(sd->flags & SD_LOAD_BALANCE)) { | ||
5640 | printk("does not load-balance\n"); | ||
5641 | if (sd->parent) | ||
5642 | printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" | ||
5643 | " has parent"); | ||
5644 | break; | 5682 | break; |
5645 | } | 5683 | } |
5646 | 5684 | ||
5647 | printk("span %s\n", str); | 5685 | if (!group->__cpu_power) { |
5686 | printk(KERN_CONT "\n"); | ||
5687 | printk(KERN_ERR "ERROR: domain->cpu_power not " | ||
5688 | "set\n"); | ||
5689 | break; | ||
5690 | } | ||
5648 | 5691 | ||
5649 | if (!cpu_isset(cpu, sd->span)) | 5692 | if (!cpus_weight(group->cpumask)) { |
5650 | printk(KERN_ERR "ERROR: domain->span does not contain " | 5693 | printk(KERN_CONT "\n"); |
5651 | "CPU%d\n", cpu); | 5694 | printk(KERN_ERR "ERROR: empty group\n"); |
5652 | if (!cpu_isset(cpu, group->cpumask)) | 5695 | break; |
5653 | printk(KERN_ERR "ERROR: domain->groups does not contain" | 5696 | } |
5654 | " CPU%d\n", cpu); | ||
5655 | 5697 | ||
5656 | printk(KERN_DEBUG); | 5698 | if (cpus_intersects(groupmask, group->cpumask)) { |
5657 | for (i = 0; i < level + 2; i++) | 5699 | printk(KERN_CONT "\n"); |
5658 | printk(" "); | 5700 | printk(KERN_ERR "ERROR: repeated CPUs\n"); |
5659 | printk("groups:"); | 5701 | break; |
5660 | do { | 5702 | } |
5661 | if (!group) { | ||
5662 | printk("\n"); | ||
5663 | printk(KERN_ERR "ERROR: group is NULL\n"); | ||
5664 | break; | ||
5665 | } | ||
5666 | 5703 | ||
5667 | if (!group->__cpu_power) { | 5704 | cpus_or(groupmask, groupmask, group->cpumask); |
5668 | printk(KERN_CONT "\n"); | ||
5669 | printk(KERN_ERR "ERROR: domain->cpu_power not " | ||
5670 | "set\n"); | ||
5671 | break; | ||
5672 | } | ||
5673 | 5705 | ||
5674 | if (!cpus_weight(group->cpumask)) { | 5706 | cpumask_scnprintf(str, NR_CPUS, group->cpumask); |
5675 | printk(KERN_CONT "\n"); | 5707 | printk(KERN_CONT " %s", str); |
5676 | printk(KERN_ERR "ERROR: empty group\n"); | ||
5677 | break; | ||
5678 | } | ||
5679 | 5708 | ||
5680 | if (cpus_intersects(groupmask, group->cpumask)) { | 5709 | group = group->next; |
5681 | printk(KERN_CONT "\n"); | 5710 | } while (group != sd->groups); |
5682 | printk(KERN_ERR "ERROR: repeated CPUs\n"); | 5711 | printk(KERN_CONT "\n"); |
5683 | break; | ||
5684 | } | ||
5685 | 5712 | ||
5686 | cpus_or(groupmask, groupmask, group->cpumask); | 5713 | if (!cpus_equal(sd->span, groupmask)) |
5714 | printk(KERN_ERR "ERROR: groups don't span domain->span\n"); | ||
5687 | 5715 | ||
5688 | cpumask_scnprintf(str, NR_CPUS, group->cpumask); | 5716 | if (sd->parent && !cpus_subset(groupmask, sd->parent->span)) |
5689 | printk(KERN_CONT " %s", str); | 5717 | printk(KERN_ERR "ERROR: parent span is not a superset " |
5718 | "of domain->span\n"); | ||
5719 | return 0; | ||
5720 | } | ||
5690 | 5721 | ||
5691 | group = group->next; | 5722 | static void sched_domain_debug(struct sched_domain *sd, int cpu) |
5692 | } while (group != sd->groups); | 5723 | { |
5693 | printk(KERN_CONT "\n"); | 5724 | int level = 0; |
5694 | 5725 | ||
5695 | if (!cpus_equal(sd->span, groupmask)) | 5726 | if (!sd) { |
5696 | printk(KERN_ERR "ERROR: groups don't span " | 5727 | printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); |
5697 | "domain->span\n"); | 5728 | return; |
5729 | } | ||
5730 | |||
5731 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); | ||
5698 | 5732 | ||
5733 | for (;;) { | ||
5734 | if (sched_domain_debug_one(sd, cpu, level)) | ||
5735 | break; | ||
5699 | level++; | 5736 | level++; |
5700 | sd = sd->parent; | 5737 | sd = sd->parent; |
5701 | if (!sd) | 5738 | if (!sd) |
5702 | continue; | 5739 | break; |
5703 | 5740 | } | |
5704 | if (!cpus_subset(groupmask, sd->span)) | ||
5705 | printk(KERN_ERR "ERROR: parent span is not a superset " | ||
5706 | "of domain->span\n"); | ||
5707 | |||
5708 | } while (sd); | ||
5709 | } | 5741 | } |
5710 | #else | 5742 | #else |
5711 | # define sched_domain_debug(sd, cpu) do { } while (0) | 5743 | # define sched_domain_debug(sd, cpu) do { } while (0) |
@@ -6424,13 +6456,17 @@ static cpumask_t fallback_doms; | |||
6424 | */ | 6456 | */ |
6425 | static int arch_init_sched_domains(const cpumask_t *cpu_map) | 6457 | static int arch_init_sched_domains(const cpumask_t *cpu_map) |
6426 | { | 6458 | { |
6459 | int err; | ||
6460 | |||
6427 | ndoms_cur = 1; | 6461 | ndoms_cur = 1; |
6428 | doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL); | 6462 | doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL); |
6429 | if (!doms_cur) | 6463 | if (!doms_cur) |
6430 | doms_cur = &fallback_doms; | 6464 | doms_cur = &fallback_doms; |
6431 | cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map); | 6465 | cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map); |
6466 | err = build_sched_domains(doms_cur); | ||
6432 | register_sched_domain_sysctl(); | 6467 | register_sched_domain_sysctl(); |
6433 | return build_sched_domains(doms_cur); | 6468 | |
6469 | return err; | ||
6434 | } | 6470 | } |
6435 | 6471 | ||
6436 | static void arch_destroy_sched_domains(const cpumask_t *cpu_map) | 6472 | static void arch_destroy_sched_domains(const cpumask_t *cpu_map) |
@@ -6479,6 +6515,9 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new) | |||
6479 | { | 6515 | { |
6480 | int i, j; | 6516 | int i, j; |
6481 | 6517 | ||
6518 | /* always unregister in case we don't destroy any domains */ | ||
6519 | unregister_sched_domain_sysctl(); | ||
6520 | |||
6482 | if (doms_new == NULL) { | 6521 | if (doms_new == NULL) { |
6483 | ndoms_new = 1; | 6522 | ndoms_new = 1; |
6484 | doms_new = &fallback_doms; | 6523 | doms_new = &fallback_doms; |
@@ -6514,6 +6553,8 @@ match2: | |||
6514 | kfree(doms_cur); | 6553 | kfree(doms_cur); |
6515 | doms_cur = doms_new; | 6554 | doms_cur = doms_new; |
6516 | ndoms_cur = ndoms_new; | 6555 | ndoms_cur = ndoms_new; |
6556 | |||
6557 | register_sched_domain_sysctl(); | ||
6517 | } | 6558 | } |
6518 | 6559 | ||
6519 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | 6560 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) |
@@ -7101,25 +7142,25 @@ unsigned long sched_group_shares(struct task_group *tg) | |||
7101 | #ifdef CONFIG_FAIR_CGROUP_SCHED | 7142 | #ifdef CONFIG_FAIR_CGROUP_SCHED |
7102 | 7143 | ||
7103 | /* return corresponding task_group object of a cgroup */ | 7144 | /* return corresponding task_group object of a cgroup */ |
7104 | static inline struct task_group *cgroup_tg(struct cgroup *cont) | 7145 | static inline struct task_group *cgroup_tg(struct cgroup *cgrp) |
7105 | { | 7146 | { |
7106 | return container_of(cgroup_subsys_state(cont, cpu_cgroup_subsys_id), | 7147 | return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id), |
7107 | struct task_group, css); | 7148 | struct task_group, css); |
7108 | } | 7149 | } |
7109 | 7150 | ||
7110 | static struct cgroup_subsys_state * | 7151 | static struct cgroup_subsys_state * |
7111 | cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) | 7152 | cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp) |
7112 | { | 7153 | { |
7113 | struct task_group *tg; | 7154 | struct task_group *tg; |
7114 | 7155 | ||
7115 | if (!cont->parent) { | 7156 | if (!cgrp->parent) { |
7116 | /* This is early initialization for the top cgroup */ | 7157 | /* This is early initialization for the top cgroup */ |
7117 | init_task_group.css.cgroup = cont; | 7158 | init_task_group.css.cgroup = cgrp; |
7118 | return &init_task_group.css; | 7159 | return &init_task_group.css; |
7119 | } | 7160 | } |
7120 | 7161 | ||
7121 | /* we support only 1-level deep hierarchical scheduler atm */ | 7162 | /* we support only 1-level deep hierarchical scheduler atm */ |
7122 | if (cont->parent->parent) | 7163 | if (cgrp->parent->parent) |
7123 | return ERR_PTR(-EINVAL); | 7164 | return ERR_PTR(-EINVAL); |
7124 | 7165 | ||
7125 | tg = sched_create_group(); | 7166 | tg = sched_create_group(); |
@@ -7127,21 +7168,21 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) | |||
7127 | return ERR_PTR(-ENOMEM); | 7168 | return ERR_PTR(-ENOMEM); |
7128 | 7169 | ||
7129 | /* Bind the cgroup to task_group object we just created */ | 7170 | /* Bind the cgroup to task_group object we just created */ |
7130 | tg->css.cgroup = cont; | 7171 | tg->css.cgroup = cgrp; |
7131 | 7172 | ||
7132 | return &tg->css; | 7173 | return &tg->css; |
7133 | } | 7174 | } |
7134 | 7175 | ||
7135 | static void cpu_cgroup_destroy(struct cgroup_subsys *ss, | 7176 | static void cpu_cgroup_destroy(struct cgroup_subsys *ss, |
7136 | struct cgroup *cont) | 7177 | struct cgroup *cgrp) |
7137 | { | 7178 | { |
7138 | struct task_group *tg = cgroup_tg(cont); | 7179 | struct task_group *tg = cgroup_tg(cgrp); |
7139 | 7180 | ||
7140 | sched_destroy_group(tg); | 7181 | sched_destroy_group(tg); |
7141 | } | 7182 | } |
7142 | 7183 | ||
7143 | static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, | 7184 | static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, |
7144 | struct cgroup *cont, struct task_struct *tsk) | 7185 | struct cgroup *cgrp, struct task_struct *tsk) |
7145 | { | 7186 | { |
7146 | /* We don't support RT-tasks being in separate groups */ | 7187 | /* We don't support RT-tasks being in separate groups */ |
7147 | if (tsk->sched_class != &fair_sched_class) | 7188 | if (tsk->sched_class != &fair_sched_class) |
@@ -7151,38 +7192,21 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, | |||
7151 | } | 7192 | } |
7152 | 7193 | ||
7153 | static void | 7194 | static void |
7154 | cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cont, | 7195 | cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, |
7155 | struct cgroup *old_cont, struct task_struct *tsk) | 7196 | struct cgroup *old_cont, struct task_struct *tsk) |
7156 | { | 7197 | { |
7157 | sched_move_task(tsk); | 7198 | sched_move_task(tsk); |
7158 | } | 7199 | } |
7159 | 7200 | ||
7160 | static ssize_t cpu_shares_write(struct cgroup *cont, struct cftype *cftype, | 7201 | static int cpu_shares_write_uint(struct cgroup *cgrp, struct cftype *cftype, |
7161 | struct file *file, const char __user *userbuf, | 7202 | u64 shareval) |
7162 | size_t nbytes, loff_t *ppos) | ||
7163 | { | 7203 | { |
7164 | unsigned long shareval; | 7204 | return sched_group_set_shares(cgroup_tg(cgrp), shareval); |
7165 | struct task_group *tg = cgroup_tg(cont); | ||
7166 | char buffer[2*sizeof(unsigned long) + 1]; | ||
7167 | int rc; | ||
7168 | |||
7169 | if (nbytes > 2*sizeof(unsigned long)) /* safety check */ | ||
7170 | return -E2BIG; | ||
7171 | |||
7172 | if (copy_from_user(buffer, userbuf, nbytes)) | ||
7173 | return -EFAULT; | ||
7174 | |||
7175 | buffer[nbytes] = 0; /* nul-terminate */ | ||
7176 | shareval = simple_strtoul(buffer, NULL, 10); | ||
7177 | |||
7178 | rc = sched_group_set_shares(tg, shareval); | ||
7179 | |||
7180 | return (rc < 0 ? rc : nbytes); | ||
7181 | } | 7205 | } |
7182 | 7206 | ||
7183 | static u64 cpu_shares_read_uint(struct cgroup *cont, struct cftype *cft) | 7207 | static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft) |
7184 | { | 7208 | { |
7185 | struct task_group *tg = cgroup_tg(cont); | 7209 | struct task_group *tg = cgroup_tg(cgrp); |
7186 | 7210 | ||
7187 | return (u64) tg->shares; | 7211 | return (u64) tg->shares; |
7188 | } | 7212 | } |
@@ -7190,7 +7214,7 @@ static u64 cpu_shares_read_uint(struct cgroup *cont, struct cftype *cft) | |||
7190 | static struct cftype cpu_shares = { | 7214 | static struct cftype cpu_shares = { |
7191 | .name = "shares", | 7215 | .name = "shares", |
7192 | .read_uint = cpu_shares_read_uint, | 7216 | .read_uint = cpu_shares_read_uint, |
7193 | .write = cpu_shares_write, | 7217 | .write_uint = cpu_shares_write_uint, |
7194 | }; | 7218 | }; |
7195 | 7219 | ||
7196 | static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont) | 7220 | static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont) |
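[Editor's note] The last sched.c hunks replace a hand-rolled write() handler (copy_from_user plus simple_strtoul) with the typed .write_uint cgroup callback, so the cgroup core now does the string parsing. A minimal sketch of such a cftype pair, assuming the cftype interface of this tree; my_group_from_cgrp() and my_group_set_weight() are hypothetical helpers.

    /* Hypothetical "weight" control file: .write_uint only ever sees an
     * already-parsed u64, .read_uint just reports the stored value. */
    static int my_weight_write_uint(struct cgroup *cgrp, struct cftype *cft,
                                    u64 val)
    {
            return my_group_set_weight(my_group_from_cgrp(cgrp), val);
    }

    static u64 my_weight_read_uint(struct cgroup *cgrp, struct cftype *cft)
    {
            return my_group_from_cgrp(cgrp)->weight;
    }

    static struct cftype my_weight_file = {
            .name       = "weight",
            .read_uint  = my_weight_read_uint,
            .write_uint = my_weight_write_uint,
    };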
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 166ed6db600b..9971831b560e 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -876,6 +876,7 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) | |||
876 | } | 876 | } |
877 | } | 877 | } |
878 | 878 | ||
879 | #ifdef CONFIG_SMP | ||
879 | /************************************************** | 880 | /************************************************** |
880 | * Fair scheduling class load-balancing methods: | 881 | * Fair scheduling class load-balancing methods: |
881 | */ | 882 | */ |
@@ -936,12 +937,11 @@ static int cfs_rq_best_prio(struct cfs_rq *cfs_rq) | |||
936 | 937 | ||
937 | static unsigned long | 938 | static unsigned long |
938 | load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | 939 | load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, |
939 | unsigned long max_nr_move, unsigned long max_load_move, | 940 | unsigned long max_load_move, |
940 | struct sched_domain *sd, enum cpu_idle_type idle, | 941 | struct sched_domain *sd, enum cpu_idle_type idle, |
941 | int *all_pinned, int *this_best_prio) | 942 | int *all_pinned, int *this_best_prio) |
942 | { | 943 | { |
943 | struct cfs_rq *busy_cfs_rq; | 944 | struct cfs_rq *busy_cfs_rq; |
944 | unsigned long load_moved, total_nr_moved = 0, nr_moved; | ||
945 | long rem_load_move = max_load_move; | 945 | long rem_load_move = max_load_move; |
946 | struct rq_iterator cfs_rq_iterator; | 946 | struct rq_iterator cfs_rq_iterator; |
947 | 947 | ||
@@ -969,25 +969,48 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
969 | #else | 969 | #else |
970 | # define maxload rem_load_move | 970 | # define maxload rem_load_move |
971 | #endif | 971 | #endif |
972 | /* pass busy_cfs_rq argument into | 972 | /* |
973 | * pass busy_cfs_rq argument into | ||
973 | * load_balance_[start|next]_fair iterators | 974 | * load_balance_[start|next]_fair iterators |
974 | */ | 975 | */ |
975 | cfs_rq_iterator.arg = busy_cfs_rq; | 976 | cfs_rq_iterator.arg = busy_cfs_rq; |
976 | nr_moved = balance_tasks(this_rq, this_cpu, busiest, | 977 | rem_load_move -= balance_tasks(this_rq, this_cpu, busiest, |
977 | max_nr_move, maxload, sd, idle, all_pinned, | 978 | maxload, sd, idle, all_pinned, |
978 | &load_moved, this_best_prio, &cfs_rq_iterator); | 979 | this_best_prio, |
979 | 980 | &cfs_rq_iterator); | |
980 | total_nr_moved += nr_moved; | ||
981 | max_nr_move -= nr_moved; | ||
982 | rem_load_move -= load_moved; | ||
983 | 981 | ||
984 | if (max_nr_move <= 0 || rem_load_move <= 0) | 982 | if (rem_load_move <= 0) |
985 | break; | 983 | break; |
986 | } | 984 | } |
987 | 985 | ||
988 | return max_load_move - rem_load_move; | 986 | return max_load_move - rem_load_move; |
989 | } | 987 | } |
990 | 988 | ||
989 | static int | ||
990 | move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
991 | struct sched_domain *sd, enum cpu_idle_type idle) | ||
992 | { | ||
993 | struct cfs_rq *busy_cfs_rq; | ||
994 | struct rq_iterator cfs_rq_iterator; | ||
995 | |||
996 | cfs_rq_iterator.start = load_balance_start_fair; | ||
997 | cfs_rq_iterator.next = load_balance_next_fair; | ||
998 | |||
999 | for_each_leaf_cfs_rq(busiest, busy_cfs_rq) { | ||
1000 | /* | ||
1001 | * pass busy_cfs_rq argument into | ||
1002 | * load_balance_[start|next]_fair iterators | ||
1003 | */ | ||
1004 | cfs_rq_iterator.arg = busy_cfs_rq; | ||
1005 | if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle, | ||
1006 | &cfs_rq_iterator)) | ||
1007 | return 1; | ||
1008 | } | ||
1009 | |||
1010 | return 0; | ||
1011 | } | ||
1012 | #endif | ||
1013 | |||
991 | /* | 1014 | /* |
992 | * scheduler tick hitting a task of our scheduling class: | 1015 | * scheduler tick hitting a task of our scheduling class: |
993 | */ | 1016 | */ |
@@ -1063,7 +1086,10 @@ static const struct sched_class fair_sched_class = { | |||
1063 | .pick_next_task = pick_next_task_fair, | 1086 | .pick_next_task = pick_next_task_fair, |
1064 | .put_prev_task = put_prev_task_fair, | 1087 | .put_prev_task = put_prev_task_fair, |
1065 | 1088 | ||
1089 | #ifdef CONFIG_SMP | ||
1066 | .load_balance = load_balance_fair, | 1090 | .load_balance = load_balance_fair, |
1091 | .move_one_task = move_one_task_fair, | ||
1092 | #endif | ||
1067 | 1093 | ||
1068 | .set_curr_task = set_curr_task_fair, | 1094 | .set_curr_task = set_curr_task_fair, |
1069 | .task_tick = task_tick_fair, | 1095 | .task_tick = task_tick_fair, |
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c index 6e2ead41516e..bf9c25c15b8b 100644 --- a/kernel/sched_idletask.c +++ b/kernel/sched_idletask.c | |||
@@ -37,15 +37,24 @@ static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) | |||
37 | { | 37 | { |
38 | } | 38 | } |
39 | 39 | ||
40 | #ifdef CONFIG_SMP | ||
40 | static unsigned long | 41 | static unsigned long |
41 | load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest, | 42 | load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest, |
42 | unsigned long max_nr_move, unsigned long max_load_move, | 43 | unsigned long max_load_move, |
43 | struct sched_domain *sd, enum cpu_idle_type idle, | 44 | struct sched_domain *sd, enum cpu_idle_type idle, |
44 | int *all_pinned, int *this_best_prio) | 45 | int *all_pinned, int *this_best_prio) |
45 | { | 46 | { |
46 | return 0; | 47 | return 0; |
47 | } | 48 | } |
48 | 49 | ||
50 | static int | ||
51 | move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
52 | struct sched_domain *sd, enum cpu_idle_type idle) | ||
53 | { | ||
54 | return 0; | ||
55 | } | ||
56 | #endif | ||
57 | |||
49 | static void task_tick_idle(struct rq *rq, struct task_struct *curr) | 58 | static void task_tick_idle(struct rq *rq, struct task_struct *curr) |
50 | { | 59 | { |
51 | } | 60 | } |
@@ -69,7 +78,10 @@ const struct sched_class idle_sched_class = { | |||
69 | .pick_next_task = pick_next_task_idle, | 78 | .pick_next_task = pick_next_task_idle, |
70 | .put_prev_task = put_prev_task_idle, | 79 | .put_prev_task = put_prev_task_idle, |
71 | 80 | ||
81 | #ifdef CONFIG_SMP | ||
72 | .load_balance = load_balance_idle, | 82 | .load_balance = load_balance_idle, |
83 | .move_one_task = move_one_task_idle, | ||
84 | #endif | ||
73 | 85 | ||
74 | .set_curr_task = set_curr_task_idle, | 86 | .set_curr_task = set_curr_task_idle, |
75 | .task_tick = task_tick_idle, | 87 | .task_tick = task_tick_idle, |
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index d0097a0634e5..8abd752a0ebd 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -98,6 +98,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) | |||
98 | p->se.exec_start = 0; | 98 | p->se.exec_start = 0; |
99 | } | 99 | } |
100 | 100 | ||
101 | #ifdef CONFIG_SMP | ||
101 | /* | 102 | /* |
102 | * Load-balancing iterator. Note: while the runqueue stays locked | 103 | * Load-balancing iterator. Note: while the runqueue stays locked |
103 | * during the whole iteration, the current task might be | 104 | * during the whole iteration, the current task might be |
@@ -172,13 +173,11 @@ static struct task_struct *load_balance_next_rt(void *arg) | |||
172 | 173 | ||
173 | static unsigned long | 174 | static unsigned long |
174 | load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest, | 175 | load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest, |
175 | unsigned long max_nr_move, unsigned long max_load_move, | 176 | unsigned long max_load_move, |
176 | struct sched_domain *sd, enum cpu_idle_type idle, | 177 | struct sched_domain *sd, enum cpu_idle_type idle, |
177 | int *all_pinned, int *this_best_prio) | 178 | int *all_pinned, int *this_best_prio) |
178 | { | 179 | { |
179 | int nr_moved; | ||
180 | struct rq_iterator rt_rq_iterator; | 180 | struct rq_iterator rt_rq_iterator; |
181 | unsigned long load_moved; | ||
182 | 181 | ||
183 | rt_rq_iterator.start = load_balance_start_rt; | 182 | rt_rq_iterator.start = load_balance_start_rt; |
184 | rt_rq_iterator.next = load_balance_next_rt; | 183 | rt_rq_iterator.next = load_balance_next_rt; |
@@ -187,12 +186,24 @@ load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
187 | */ | 186 | */ |
188 | rt_rq_iterator.arg = busiest; | 187 | rt_rq_iterator.arg = busiest; |
189 | 188 | ||
190 | nr_moved = balance_tasks(this_rq, this_cpu, busiest, max_nr_move, | 189 | return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd, |
191 | max_load_move, sd, idle, all_pinned, &load_moved, | 190 | idle, all_pinned, this_best_prio, &rt_rq_iterator); |
192 | this_best_prio, &rt_rq_iterator); | 191 | } |
192 | |||
193 | static int | ||
194 | move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
195 | struct sched_domain *sd, enum cpu_idle_type idle) | ||
196 | { | ||
197 | struct rq_iterator rt_rq_iterator; | ||
198 | |||
199 | rt_rq_iterator.start = load_balance_start_rt; | ||
200 | rt_rq_iterator.next = load_balance_next_rt; | ||
201 | rt_rq_iterator.arg = busiest; | ||
193 | 202 | ||
194 | return load_moved; | 203 | return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle, |
204 | &rt_rq_iterator); | ||
195 | } | 205 | } |
206 | #endif | ||
196 | 207 | ||
197 | static void task_tick_rt(struct rq *rq, struct task_struct *p) | 208 | static void task_tick_rt(struct rq *rq, struct task_struct *p) |
198 | { | 209 | { |
@@ -236,7 +247,10 @@ const struct sched_class rt_sched_class = { | |||
236 | .pick_next_task = pick_next_task_rt, | 247 | .pick_next_task = pick_next_task_rt, |
237 | .put_prev_task = put_prev_task_rt, | 248 | .put_prev_task = put_prev_task_rt, |
238 | 249 | ||
250 | #ifdef CONFIG_SMP | ||
239 | .load_balance = load_balance_rt, | 251 | .load_balance = load_balance_rt, |
252 | .move_one_task = move_one_task_rt, | ||
253 | #endif | ||
240 | 254 | ||
241 | .set_curr_task = set_curr_task_rt, | 255 | .set_curr_task = set_curr_task_rt, |
242 | .task_tick = task_tick_rt, | 256 | .task_tick = task_tick_rt, |
diff --git a/kernel/signal.c b/kernel/signal.c index 12006308c7eb..4537bdda1ebf 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -732,7 +732,7 @@ static void print_fatal_signal(struct pt_regs *regs, int signr) | |||
732 | printk("%s/%d: potentially unexpected fatal signal %d.\n", | 732 | printk("%s/%d: potentially unexpected fatal signal %d.\n", |
733 | current->comm, task_pid_nr(current), signr); | 733 | current->comm, task_pid_nr(current), signr); |
734 | 734 | ||
735 | #ifdef __i386__ | 735 | #if defined(__i386__) && !defined(__arch_um__) |
736 | printk("code at %08lx: ", regs->eip); | 736 | printk("code at %08lx: ", regs->eip); |
737 | { | 737 | { |
738 | int i; | 738 | int i; |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 10a1347597fd..5997456ebbc9 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -320,8 +320,6 @@ ktime_t tick_nohz_get_sleep_length(void) | |||
320 | return ts->sleep_length; | 320 | return ts->sleep_length; |
321 | } | 321 | } |
322 | 322 | ||
323 | EXPORT_SYMBOL_GPL(tick_nohz_get_sleep_length); | ||
324 | |||
325 | /** | 323 | /** |
326 | * nohz_restart_sched_tick - restart the idle tick from the idle task | 324 | * nohz_restart_sched_tick - restart the idle tick from the idle task |
327 | * | 325 | * |
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index fdb2e03d4fe0..12c5f4cb6b8c 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c | |||
@@ -129,7 +129,8 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now) | |||
129 | struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); | 129 | struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); |
130 | int i; | 130 | int i; |
131 | 131 | ||
132 | SEQ_printf(m, "\ncpu: %d\n", cpu); | 132 | SEQ_printf(m, "\n"); |
133 | SEQ_printf(m, "cpu: %d\n", cpu); | ||
133 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { | 134 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { |
134 | SEQ_printf(m, " clock %d:\n", i); | 135 | SEQ_printf(m, " clock %d:\n", i); |
135 | print_base(m, cpu_base->clock_base + i, now); | 136 | print_base(m, cpu_base->clock_base + i, now); |
@@ -184,7 +185,8 @@ print_tickdevice(struct seq_file *m, struct tick_device *td) | |||
184 | { | 185 | { |
185 | struct clock_event_device *dev = td->evtdev; | 186 | struct clock_event_device *dev = td->evtdev; |
186 | 187 | ||
187 | SEQ_printf(m, "\nTick Device: mode: %d\n", td->mode); | 188 | SEQ_printf(m, "\n"); |
189 | SEQ_printf(m, "Tick Device: mode: %d\n", td->mode); | ||
188 | 190 | ||
189 | SEQ_printf(m, "Clock Event Device: "); | 191 | SEQ_printf(m, "Clock Event Device: "); |
190 | if (!dev) { | 192 | if (!dev) { |
diff --git a/kernel/user.c b/kernel/user.c index e91331c457e2..0f3aa0234107 100644 --- a/kernel/user.c +++ b/kernel/user.c | |||
@@ -129,7 +129,7 @@ static inline void uids_mutex_unlock(void) | |||
129 | } | 129 | } |
130 | 130 | ||
131 | /* return cpu shares held by the user */ | 131 | /* return cpu shares held by the user */ |
132 | ssize_t cpu_shares_show(struct kset *kset, char *buffer) | 132 | static ssize_t cpu_shares_show(struct kset *kset, char *buffer) |
133 | { | 133 | { |
134 | struct user_struct *up = container_of(kset, struct user_struct, kset); | 134 | struct user_struct *up = container_of(kset, struct user_struct, kset); |
135 | 135 | ||
@@ -137,7 +137,8 @@ ssize_t cpu_shares_show(struct kset *kset, char *buffer) | |||
137 | } | 137 | } |
138 | 138 | ||
139 | /* modify cpu shares held by the user */ | 139 | /* modify cpu shares held by the user */ |
140 | ssize_t cpu_shares_store(struct kset *kset, const char *buffer, size_t size) | 140 | static ssize_t cpu_shares_store(struct kset *kset, const char *buffer, |
141 | size_t size) | ||
141 | { | 142 | { |
142 | struct user_struct *up = container_of(kset, struct user_struct, kset); | 143 | struct user_struct *up = container_of(kset, struct user_struct, kset); |
143 | unsigned long shares; | 144 | unsigned long shares; |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1faa5087dc86..1e5f207b9074 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -9,6 +9,14 @@ config PRINTK_TIME | |||
9 | operations. This is useful for identifying long delays | 9 | operations. This is useful for identifying long delays |
10 | in kernel startup. | 10 | in kernel startup. |
11 | 11 | ||
12 | config ENABLE_WARN_DEPRECATED | ||
13 | bool "Enable __deprecated logic" | ||
14 | default y | ||
15 | help | ||
16 | Enable the __deprecated logic in the kernel build. | ||
17 | Disable this to suppress the "warning: 'foo' is deprecated | ||
18 | (declared at kernel/power/somefile.c:1234)" messages. | ||
19 | |||
12 | config ENABLE_MUST_CHECK | 20 | config ENABLE_MUST_CHECK |
13 | bool "Enable __must_check logic" | 21 | bool "Enable __must_check logic" |
14 | default y | 22 | default y |
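[Editor's note] The new ENABLE_WARN_DEPRECATED switch gates the __deprecated marker the same way ENABLE_MUST_CHECK gates __must_check: with the option set, __deprecated expands to the compiler's deprecated attribute, otherwise to nothing (this is the usual compiler.h arrangement, stated here as an assumption rather than shown in the hunk). A one-line illustration with a made-up function:

    /* With CONFIG_ENABLE_WARN_DEPRECATED=y every caller of old_setup()
     * gets a "'old_setup' is deprecated" warning at compile time;
     * with the option off, the attribute and the warning disappear. */
    extern int old_setup(int arg) __deprecated;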
diff --git a/mm/filemap.c b/mm/filemap.c index 5209e47b7fe3..7c8643630023 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/backing-dev.h> | 28 | #include <linux/backing-dev.h> |
29 | #include <linux/pagevec.h> | 29 | #include <linux/pagevec.h> |
30 | #include <linux/blkdev.h> | 30 | #include <linux/blkdev.h> |
31 | #include <linux/backing-dev.h> | ||
31 | #include <linux/security.h> | 32 | #include <linux/security.h> |
32 | #include <linux/syscalls.h> | 33 | #include <linux/syscalls.h> |
33 | #include <linux/cpuset.h> | 34 | #include <linux/cpuset.h> |
diff --git a/mm/nommu.c b/mm/nommu.c index 8f09333f78e1..35622c590925 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -12,6 +12,7 @@ | |||
12 | * Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com> | 12 | * Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com> |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/module.h> | ||
15 | #include <linux/mm.h> | 16 | #include <linux/mm.h> |
16 | #include <linux/mman.h> | 17 | #include <linux/mman.h> |
17 | #include <linux/swap.h> | 18 | #include <linux/swap.h> |
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
@@ -2734,7 +2734,7 @@ static void slab_mem_offline_callback(void *arg) | |||
2734 | * and offline_pages() function shoudn't call this | 2734 | * and offline_pages() function shoudn't call this |
2735 | * callback. So, we must fail. | 2735 | * callback. So, we must fail. |
2736 | */ | 2736 | */ |
2737 | BUG_ON(atomic_read(&n->nr_slabs)); | 2737 | BUG_ON(atomic_long_read(&n->nr_slabs)); |
2738 | 2738 | ||
2739 | s->node[offline_node] = NULL; | 2739 | s->node[offline_node] = NULL; |
2740 | kmem_cache_free(kmalloc_caches, n); | 2740 | kmem_cache_free(kmalloc_caches, n); |
diff --git a/mm/sparse.c b/mm/sparse.c index 08fb14f5eea3..e06f514fe04f 100644 --- a/mm/sparse.c +++ b/mm/sparse.c | |||
@@ -220,12 +220,6 @@ static int __meminit sparse_init_one_section(struct mem_section *ms, | |||
220 | return 1; | 220 | return 1; |
221 | } | 221 | } |
222 | 222 | ||
223 | __attribute__((weak)) __init | ||
224 | void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size) | ||
225 | { | ||
226 | return NULL; | ||
227 | } | ||
228 | |||
229 | static unsigned long usemap_size(void) | 223 | static unsigned long usemap_size(void) |
230 | { | 224 | { |
231 | unsigned long size_bytes; | 225 | unsigned long size_bytes; |
@@ -267,11 +261,6 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid) | |||
267 | if (map) | 261 | if (map) |
268 | return map; | 262 | return map; |
269 | 263 | ||
270 | map = alloc_bootmem_high_node(NODE_DATA(nid), | ||
271 | sizeof(struct page) * PAGES_PER_SECTION); | ||
272 | if (map) | ||
273 | return map; | ||
274 | |||
275 | map = alloc_bootmem_node(NODE_DATA(nid), | 264 | map = alloc_bootmem_node(NODE_DATA(nid), |
276 | sizeof(struct page) * PAGES_PER_SECTION); | 265 | sizeof(struct page) * PAGES_PER_SECTION); |
277 | return map; | 266 | return map; |
diff --git a/net/9p/mux.c b/net/9p/mux.c index f14014793bed..c9f0805048e4 100644 --- a/net/9p/mux.c +++ b/net/9p/mux.c | |||
@@ -222,8 +222,10 @@ static int p9_mux_poll_start(struct p9_conn *m) | |||
222 | } | 222 | } |
223 | 223 | ||
224 | if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) { | 224 | if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) { |
225 | if (vptlast == NULL) | 225 | if (vptlast == NULL) { |
226 | mutex_unlock(&p9_mux_task_lock); | ||
226 | return -ENOMEM; | 227 | return -ENOMEM; |
228 | } | ||
227 | 229 | ||
228 | P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i); | 230 | P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i); |
229 | list_add(&m->mux_list, &vptlast->mux_list); | 231 | list_add(&m->mux_list, &vptlast->mux_list); |
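[Editor's note] The net/9p hunk is the classic unlock-on-error-path fix: the function holds p9_mux_task_lock at that point, so the -ENOMEM return has to drop it before bailing out, otherwise the next caller blocks forever. A stripped-down sketch of the rule, with placeholder names:

    /* Sketch: once the lock is held, every return path must release it. */
    static int claim_slot(struct thing *t)
    {
            int err = 0;

            mutex_lock(&slot_lock);
            if (!slot_available(t))         /* stand-in predicate */
                    err = -ENOMEM;
            else
                    attach_slot(t);         /* stand-in helper */
            mutex_unlock(&slot_lock);

            return err;
    }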
diff --git a/net/core/dev.c b/net/core/dev.c index 872658927e47..853c8b575f1d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -120,6 +120,8 @@ | |||
120 | #include <linux/ctype.h> | 120 | #include <linux/ctype.h> |
121 | #include <linux/if_arp.h> | 121 | #include <linux/if_arp.h> |
122 | 122 | ||
123 | #include "net-sysfs.h" | ||
124 | |||
123 | /* | 125 | /* |
124 | * The list of packet types we will receive (as opposed to discard) | 126 | * The list of packet types we will receive (as opposed to discard) |
125 | * and the routines to invoke. | 127 | * and the routines to invoke. |
@@ -249,10 +251,6 @@ static RAW_NOTIFIER_HEAD(netdev_chain); | |||
249 | 251 | ||
250 | DEFINE_PER_CPU(struct softnet_data, softnet_data); | 252 | DEFINE_PER_CPU(struct softnet_data, softnet_data); |
251 | 253 | ||
252 | extern int netdev_kobject_init(void); | ||
253 | extern int netdev_register_kobject(struct net_device *); | ||
254 | extern void netdev_unregister_kobject(struct net_device *); | ||
255 | |||
256 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 254 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
257 | /* | 255 | /* |
258 | * register_netdevice() inits dev->_xmit_lock and sets lockdep class | 256 | * register_netdevice() inits dev->_xmit_lock and sets lockdep class |
@@ -885,6 +883,9 @@ int dev_change_name(struct net_device *dev, char *newname) | |||
885 | if (!dev_valid_name(newname)) | 883 | if (!dev_valid_name(newname)) |
886 | return -EINVAL; | 884 | return -EINVAL; |
887 | 885 | ||
886 | if (strncmp(newname, dev->name, IFNAMSIZ) == 0) | ||
887 | return 0; | ||
888 | |||
888 | memcpy(oldname, dev->name, IFNAMSIZ); | 889 | memcpy(oldname, dev->name, IFNAMSIZ); |
889 | 890 | ||
890 | if (strchr(newname, '%')) { | 891 | if (strchr(newname, '%')) { |
@@ -1007,17 +1008,20 @@ int dev_open(struct net_device *dev) | |||
1007 | * Call device private open method | 1008 | * Call device private open method |
1008 | */ | 1009 | */ |
1009 | set_bit(__LINK_STATE_START, &dev->state); | 1010 | set_bit(__LINK_STATE_START, &dev->state); |
1010 | if (dev->open) { | 1011 | |
1012 | if (dev->validate_addr) | ||
1013 | ret = dev->validate_addr(dev); | ||
1014 | |||
1015 | if (!ret && dev->open) | ||
1011 | ret = dev->open(dev); | 1016 | ret = dev->open(dev); |
1012 | if (ret) | ||
1013 | clear_bit(__LINK_STATE_START, &dev->state); | ||
1014 | } | ||
1015 | 1017 | ||
1016 | /* | 1018 | /* |
1017 | * If it went open OK then: | 1019 | * If it went open OK then: |
1018 | */ | 1020 | */ |
1019 | 1021 | ||
1020 | if (!ret) { | 1022 | if (ret) |
1023 | clear_bit(__LINK_STATE_START, &dev->state); | ||
1024 | else { | ||
1021 | /* | 1025 | /* |
1022 | * Set the flags. | 1026 | * Set the flags. |
1023 | */ | 1027 | */ |
@@ -1038,6 +1042,7 @@ int dev_open(struct net_device *dev) | |||
1038 | */ | 1042 | */ |
1039 | call_netdevice_notifiers(NETDEV_UP, dev); | 1043 | call_netdevice_notifiers(NETDEV_UP, dev); |
1040 | } | 1044 | } |
1045 | |||
1041 | return ret; | 1046 | return ret; |
1042 | } | 1047 | } |
1043 | 1048 | ||
@@ -2663,7 +2668,7 @@ static void __net_exit dev_proc_net_exit(struct net *net) | |||
2663 | proc_net_remove(net, "dev"); | 2668 | proc_net_remove(net, "dev"); |
2664 | } | 2669 | } |
2665 | 2670 | ||
2666 | static struct pernet_operations __net_initdata dev_proc_ops = { | 2671 | static struct pernet_operations dev_proc_ops = { |
2667 | .init = dev_proc_net_init, | 2672 | .init = dev_proc_net_init, |
2668 | .exit = dev_proc_net_exit, | 2673 | .exit = dev_proc_net_exit, |
2669 | }; | 2674 | }; |
@@ -4323,7 +4328,7 @@ static void __net_exit netdev_exit(struct net *net) | |||
4323 | kfree(net->dev_index_head); | 4328 | kfree(net->dev_index_head); |
4324 | } | 4329 | } |
4325 | 4330 | ||
4326 | static struct pernet_operations __net_initdata netdev_net_ops = { | 4331 | static struct pernet_operations netdev_net_ops = { |
4327 | .init = netdev_init, | 4332 | .init = netdev_init, |
4328 | .exit = netdev_exit, | 4333 | .exit = netdev_exit, |
4329 | }; | 4334 | }; |
@@ -4354,7 +4359,7 @@ static void __net_exit default_device_exit(struct net *net) | |||
4354 | rtnl_unlock(); | 4359 | rtnl_unlock(); |
4355 | } | 4360 | } |
4356 | 4361 | ||
4357 | static struct pernet_operations __net_initdata default_device_ops = { | 4362 | static struct pernet_operations default_device_ops = { |
4358 | .exit = default_device_exit, | 4363 | .exit = default_device_exit, |
4359 | }; | 4364 | }; |
4360 | 4365 | ||
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c index 15241cf48af8..ae354057d84c 100644 --- a/net/core/dev_mcast.c +++ b/net/core/dev_mcast.c | |||
@@ -285,7 +285,7 @@ static void __net_exit dev_mc_net_exit(struct net *net) | |||
285 | proc_net_remove(net, "dev_mcast"); | 285 | proc_net_remove(net, "dev_mcast"); |
286 | } | 286 | } |
287 | 287 | ||
288 | static struct pernet_operations __net_initdata dev_mc_net_ops = { | 288 | static struct pernet_operations dev_mc_net_ops = { |
289 | .init = dev_mc_net_init, | 289 | .init = dev_mc_net_init, |
290 | .exit = dev_mc_net_exit, | 290 | .exit = dev_mc_net_exit, |
291 | }; | 291 | }; |
diff --git a/net/core/flow.c b/net/core/flow.c index 0ab5234b17d8..3ed2b4b1d6d4 100644 --- a/net/core/flow.c +++ b/net/core/flow.c | |||
@@ -142,8 +142,6 @@ typedef u64 flow_compare_t; | |||
142 | typedef u32 flow_compare_t; | 142 | typedef u32 flow_compare_t; |
143 | #endif | 143 | #endif |
144 | 144 | ||
145 | extern void flowi_is_missized(void); | ||
146 | |||
147 | /* I hear what you're saying, use memcmp. But memcmp cannot make | 145 | /* I hear what you're saying, use memcmp. But memcmp cannot make |
148 | * important assumptions that we can here, such as alignment and | 146 | * important assumptions that we can here, such as alignment and |
149 | * constant size. | 147 | * constant size. |
@@ -153,8 +151,7 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2) | |||
153 | flow_compare_t *k1, *k1_lim, *k2; | 151 | flow_compare_t *k1, *k1_lim, *k2; |
154 | const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t); | 152 | const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t); |
155 | 153 | ||
156 | if (sizeof(struct flowi) % sizeof(flow_compare_t)) | 154 | BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t)); |
157 | flowi_is_missized(); | ||
158 | 155 | ||
159 | k1 = (flow_compare_t *) key1; | 156 | k1 = (flow_compare_t *) key1; |
160 | k1_lim = k1 + n_elem; | 157 | k1_lim = k1 + n_elem; |
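[Editor's note] The flow.c hunk swaps a link-time trick (calling the deliberately undefined flowi_is_missized() so the final link breaks if struct flowi is not a whole number of flow_compare_t words) for BUILD_BUG_ON(), which fails at compile time and points at the offending line. The idiom in isolation, with an illustrative struct:

    /* BUILD_BUG_ON(cond) refuses to compile when cond is non-zero, so the
     * size assumption is checked right where it is made.  A 12-byte
     * struct here would break the build; 16 bytes passes. */
    struct cmp_key {
            u32 a, b, c, d;
    };

    static inline void cmp_key_size_check(void)
    {
            BUILD_BUG_ON(sizeof(struct cmp_key) % sizeof(u64));
    }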
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 6628e457ddc0..61ead1d11132 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -18,6 +18,8 @@ | |||
18 | #include <linux/wireless.h> | 18 | #include <linux/wireless.h> |
19 | #include <net/iw_handler.h> | 19 | #include <net/iw_handler.h> |
20 | 20 | ||
21 | #include "net-sysfs.h" | ||
22 | |||
21 | #ifdef CONFIG_SYSFS | 23 | #ifdef CONFIG_SYSFS |
22 | static const char fmt_hex[] = "%#x\n"; | 24 | static const char fmt_hex[] = "%#x\n"; |
23 | static const char fmt_long_hex[] = "%#lx\n"; | 25 | static const char fmt_long_hex[] = "%#lx\n"; |
diff --git a/net/core/net-sysfs.h b/net/core/net-sysfs.h new file mode 100644 index 000000000000..f5f108db3924 --- /dev/null +++ b/net/core/net-sysfs.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef __NET_SYSFS_H__ | ||
2 | #define __NET_SYSFS_H__ | ||
3 | |||
4 | int netdev_kobject_init(void); | ||
5 | int netdev_register_kobject(struct net_device *); | ||
6 | void netdev_unregister_kobject(struct net_device *); | ||
7 | |||
8 | #endif | ||
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 4a2640d38261..e1ba26fb4bf2 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -742,7 +742,7 @@ static struct net *get_net_ns_by_pid(pid_t pid) | |||
742 | /* Lookup the network namespace */ | 742 | /* Lookup the network namespace */ |
743 | net = ERR_PTR(-ESRCH); | 743 | net = ERR_PTR(-ESRCH); |
744 | rcu_read_lock(); | 744 | rcu_read_lock(); |
745 | tsk = find_task_by_pid(pid); | 745 | tsk = find_task_by_vpid(pid); |
746 | if (tsk) { | 746 | if (tsk) { |
747 | struct nsproxy *nsproxy; | 747 | struct nsproxy *nsproxy; |
748 | nsproxy = task_nsproxy(tsk); | 748 | nsproxy = task_nsproxy(tsk); |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 4e2c84fcf276..573e17240197 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -415,13 +415,6 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) | |||
415 | n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; | 415 | n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; |
416 | n->nohdr = 0; | 416 | n->nohdr = 0; |
417 | n->destructor = NULL; | 417 | n->destructor = NULL; |
418 | #ifdef CONFIG_NET_CLS_ACT | ||
419 | /* FIXME What is this and why don't we do it in copy_skb_header? */ | ||
420 | n->tc_verd = SET_TC_VERD(n->tc_verd,0); | ||
421 | n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd); | ||
422 | n->tc_verd = CLR_TC_MUNGED(n->tc_verd); | ||
423 | C(iif); | ||
424 | #endif | ||
425 | C(truesize); | 418 | C(truesize); |
426 | atomic_set(&n->users, 1); | 419 | atomic_set(&n->users, 1); |
427 | C(head); | 420 | C(head); |
@@ -2045,9 +2038,7 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) | |||
2045 | if (copy > 0) { | 2038 | if (copy > 0) { |
2046 | if (copy > len) | 2039 | if (copy > len) |
2047 | copy = len; | 2040 | copy = len; |
2048 | sg_set_page(&sg[elt], virt_to_page(skb->data + offset)); | 2041 | sg_set_buf(sg, skb->data + offset, copy); |
2049 | sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE; | ||
2050 | sg[elt].length = copy; | ||
2051 | elt++; | 2042 | elt++; |
2052 | if ((len -= copy) == 0) | 2043 | if ((len -= copy) == 0) |
2053 | return elt; | 2044 | return elt; |
@@ -2065,9 +2056,8 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) | |||
2065 | 2056 | ||
2066 | if (copy > len) | 2057 | if (copy > len) |
2067 | copy = len; | 2058 | copy = len; |
2068 | sg_set_page(&sg[elt], frag->page); | 2059 | sg_set_page(&sg[elt], frag->page, copy, |
2069 | sg[elt].offset = frag->page_offset+offset-start; | 2060 | frag->page_offset+offset-start); |
2070 | sg[elt].length = copy; | ||
2071 | elt++; | 2061 | elt++; |
2072 | if (!(len -= copy)) | 2062 | if (!(len -= copy)) |
2073 | return elt; | 2063 | return elt; |
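[Editor's note] skb_to_sgvec() now goes through the scatterlist accessors that came in with sg chaining rather than writing page/offset/length fields by hand, which would also miss the new termination markers. A minimal sketch of the two helpers, assuming the scatterlist API of this series:

    /* Fill one entry from a linear kernel buffer and one from a page
     * fragment; sg_init_table() zeroes the array and marks the last entry. */
    static void fill_sg_pair(struct scatterlist sg[2],
                             void *buf, unsigned int buflen,
                             struct page *page, unsigned int len,
                             unsigned int offset)
    {
            sg_init_table(sg, 2);
            sg_set_buf(&sg[0], buf, buflen);        /* page/offset derived from buf */
            sg_set_page(&sg[1], page, len, offset); /* explicit page fragment */
    }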
diff --git a/net/core/sock.c b/net/core/sock.c index febbcbcf8022..bba9949681ff 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -1649,7 +1649,6 @@ void sock_enable_timestamp(struct sock *sk) | |||
1649 | net_enable_timestamp(); | 1649 | net_enable_timestamp(); |
1650 | } | 1650 | } |
1651 | } | 1651 | } |
1652 | EXPORT_SYMBOL(sock_enable_timestamp); | ||
1653 | 1652 | ||
1654 | /* | 1653 | /* |
1655 | * Get a socket option on an socket. | 1654 | * Get a socket option on an socket. |
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 6d5ea9762040..113cc728dc31 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c | |||
@@ -9,25 +9,12 @@ | |||
9 | #include <linux/sysctl.h> | 9 | #include <linux/sysctl.h> |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/socket.h> | 11 | #include <linux/socket.h> |
12 | #include <linux/netdevice.h> | ||
12 | #include <net/sock.h> | 13 | #include <net/sock.h> |
14 | #include <net/xfrm.h> | ||
13 | 15 | ||
14 | #ifdef CONFIG_SYSCTL | 16 | #ifdef CONFIG_SYSCTL |
15 | 17 | ||
16 | extern int netdev_max_backlog; | ||
17 | extern int weight_p; | ||
18 | |||
19 | extern __u32 sysctl_wmem_max; | ||
20 | extern __u32 sysctl_rmem_max; | ||
21 | |||
22 | extern int sysctl_core_destroy_delay; | ||
23 | |||
24 | #ifdef CONFIG_XFRM | ||
25 | extern u32 sysctl_xfrm_aevent_etime; | ||
26 | extern u32 sysctl_xfrm_aevent_rseqth; | ||
27 | extern int sysctl_xfrm_larval_drop; | ||
28 | extern u32 sysctl_xfrm_acq_expires; | ||
29 | #endif | ||
30 | |||
31 | ctl_table core_table[] = { | 18 | ctl_table core_table[] = { |
32 | #ifdef CONFIG_NET | 19 | #ifdef CONFIG_NET |
33 | { | 20 | { |
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c index 426008e3b7e3..d694656b8800 100644 --- a/net/dccp/ccids/ccid2.c +++ b/net/dccp/ccids/ccid2.c | |||
@@ -750,20 +750,16 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) | |||
750 | */ | 750 | */ |
751 | hctx->ccid2hctx_ssthresh = ~0; | 751 | hctx->ccid2hctx_ssthresh = ~0; |
752 | hctx->ccid2hctx_numdupack = 3; | 752 | hctx->ccid2hctx_numdupack = 3; |
753 | hctx->ccid2hctx_seqbufc = 0; | ||
754 | 753 | ||
755 | /* XXX init ~ to window size... */ | 754 | /* XXX init ~ to window size... */ |
756 | if (ccid2_hc_tx_alloc_seq(hctx)) | 755 | if (ccid2_hc_tx_alloc_seq(hctx)) |
757 | return -ENOMEM; | 756 | return -ENOMEM; |
758 | 757 | ||
759 | hctx->ccid2hctx_sent = 0; | ||
760 | hctx->ccid2hctx_rto = 3 * HZ; | 758 | hctx->ccid2hctx_rto = 3 * HZ; |
761 | ccid2_change_srtt(hctx, -1); | 759 | ccid2_change_srtt(hctx, -1); |
762 | hctx->ccid2hctx_rttvar = -1; | 760 | hctx->ccid2hctx_rttvar = -1; |
763 | hctx->ccid2hctx_lastrtt = 0; | ||
764 | hctx->ccid2hctx_rpdupack = -1; | 761 | hctx->ccid2hctx_rpdupack = -1; |
765 | hctx->ccid2hctx_last_cong = jiffies; | 762 | hctx->ccid2hctx_last_cong = jiffies; |
766 | hctx->ccid2hctx_high_ack = 0; | ||
767 | 763 | ||
768 | hctx->ccid2hctx_rtotimer.function = &ccid2_hc_tx_rto_expire; | 764 | hctx->ccid2hctx_rtotimer.function = &ccid2_hc_tx_rto_expire; |
769 | hctx->ccid2hctx_rtotimer.data = (unsigned long)sk; | 765 | hctx->ccid2hctx_rtotimer.data = (unsigned long)sk; |
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index 25772c326172..19b33586333d 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c | |||
@@ -40,6 +40,8 @@ | |||
40 | #include "lib/tfrc.h" | 40 | #include "lib/tfrc.h" |
41 | #include "ccid3.h" | 41 | #include "ccid3.h" |
42 | 42 | ||
43 | #include <asm/unaligned.h> | ||
44 | |||
43 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG | 45 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG |
44 | static int ccid3_debug; | 46 | static int ccid3_debug; |
45 | #define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a) | 47 | #define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a) |
@@ -544,6 +546,7 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option, | |||
544 | const struct dccp_sock *dp = dccp_sk(sk); | 546 | const struct dccp_sock *dp = dccp_sk(sk); |
545 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 547 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); |
546 | struct ccid3_options_received *opt_recv; | 548 | struct ccid3_options_received *opt_recv; |
549 | __be32 opt_val; | ||
547 | 550 | ||
548 | opt_recv = &hctx->ccid3hctx_options_received; | 551 | opt_recv = &hctx->ccid3hctx_options_received; |
549 | 552 | ||
@@ -563,8 +566,8 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option, | |||
563 | dccp_role(sk), sk, len); | 566 | dccp_role(sk), sk, len); |
564 | rc = -EINVAL; | 567 | rc = -EINVAL; |
565 | } else { | 568 | } else { |
566 | opt_recv->ccid3or_loss_event_rate = | 569 | opt_val = get_unaligned((__be32 *)value); |
567 | ntohl(*(__be32 *)value); | 570 | opt_recv->ccid3or_loss_event_rate = ntohl(opt_val); |
568 | ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n", | 571 | ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n", |
569 | dccp_role(sk), sk, | 572 | dccp_role(sk), sk, |
570 | opt_recv->ccid3or_loss_event_rate); | 573 | opt_recv->ccid3or_loss_event_rate); |
@@ -585,8 +588,8 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option, | |||
585 | dccp_role(sk), sk, len); | 588 | dccp_role(sk), sk, len); |
586 | rc = -EINVAL; | 589 | rc = -EINVAL; |
587 | } else { | 590 | } else { |
588 | opt_recv->ccid3or_receive_rate = | 591 | opt_val = get_unaligned((__be32 *)value); |
589 | ntohl(*(__be32 *)value); | 592 | opt_recv->ccid3or_receive_rate = ntohl(opt_val); |
590 | ccid3_pr_debug("%s(%p), RECEIVE_RATE=%u\n", | 593 | ccid3_pr_debug("%s(%p), RECEIVE_RATE=%u\n", |
591 | dccp_role(sk), sk, | 594 | dccp_role(sk), sk, |
592 | opt_recv->ccid3or_receive_rate); | 595 | opt_recv->ccid3or_receive_rate); |
@@ -601,8 +604,6 @@ static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk) | |||
601 | { | 604 | { |
602 | struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid); | 605 | struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid); |
603 | 606 | ||
604 | hctx->ccid3hctx_s = 0; | ||
605 | hctx->ccid3hctx_rtt = 0; | ||
606 | hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT; | 607 | hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT; |
607 | INIT_LIST_HEAD(&hctx->ccid3hctx_hist); | 608 | INIT_LIST_HEAD(&hctx->ccid3hctx_hist); |
608 | 609 | ||
@@ -963,8 +964,6 @@ static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk) | |||
963 | INIT_LIST_HEAD(&hcrx->ccid3hcrx_li_hist); | 964 | INIT_LIST_HEAD(&hcrx->ccid3hcrx_li_hist); |
964 | hcrx->ccid3hcrx_tstamp_last_feedback = | 965 | hcrx->ccid3hcrx_tstamp_last_feedback = |
965 | hcrx->ccid3hcrx_tstamp_last_ack = ktime_get_real(); | 966 | hcrx->ccid3hcrx_tstamp_last_ack = ktime_get_real(); |
966 | hcrx->ccid3hcrx_s = 0; | ||
967 | hcrx->ccid3hcrx_rtt = 0; | ||
968 | return 0; | 967 | return 0; |
969 | } | 968 | } |
970 | 969 | ||
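[Editor's note] DCCP option values sit at arbitrary offsets inside the packet, so the old *(__be32 *)value dereference was an unaligned load; the ccid3 hunks route it through get_unaligned() instead. Roughly, with a hypothetical helper name:

    /* value points into the option area of a received packet and has no
     * alignment guarantee; get_unaligned() does a byte-safe load even on
     * architectures that fault or silently mis-load unaligned accesses. */
    static u32 ccid_option_be32(const unsigned char *value)
    {
            __be32 raw = get_unaligned((const __be32 *)value);

            return ntohl(raw);
    }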
diff --git a/net/dccp/input.c b/net/dccp/input.c index 3560a2a875a0..1ce101062824 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c | |||
@@ -58,6 +58,42 @@ static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb) | |||
58 | dccp_send_close(sk, 0); | 58 | dccp_send_close(sk, 0); |
59 | } | 59 | } |
60 | 60 | ||
61 | static u8 dccp_reset_code_convert(const u8 code) | ||
62 | { | ||
63 | const u8 error_code[] = { | ||
64 | [DCCP_RESET_CODE_CLOSED] = 0, /* normal termination */ | ||
65 | [DCCP_RESET_CODE_UNSPECIFIED] = 0, /* nothing known */ | ||
66 | [DCCP_RESET_CODE_ABORTED] = ECONNRESET, | ||
67 | |||
68 | [DCCP_RESET_CODE_NO_CONNECTION] = ECONNREFUSED, | ||
69 | [DCCP_RESET_CODE_CONNECTION_REFUSED] = ECONNREFUSED, | ||
70 | [DCCP_RESET_CODE_TOO_BUSY] = EUSERS, | ||
71 | [DCCP_RESET_CODE_AGGRESSION_PENALTY] = EDQUOT, | ||
72 | |||
73 | [DCCP_RESET_CODE_PACKET_ERROR] = ENOMSG, | ||
74 | [DCCP_RESET_CODE_BAD_INIT_COOKIE] = EBADR, | ||
75 | [DCCP_RESET_CODE_BAD_SERVICE_CODE] = EBADRQC, | ||
76 | [DCCP_RESET_CODE_OPTION_ERROR] = EILSEQ, | ||
77 | [DCCP_RESET_CODE_MANDATORY_ERROR] = EOPNOTSUPP, | ||
78 | }; | ||
79 | |||
80 | return code >= DCCP_MAX_RESET_CODES ? 0 : error_code[code]; | ||
81 | } | ||
82 | |||
83 | static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb) | ||
84 | { | ||
85 | u8 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code); | ||
86 | |||
87 | sk->sk_err = err; | ||
88 | |||
89 | /* Queue the equivalent of TCP fin so that dccp_recvmsg exits the loop */ | ||
90 | dccp_fin(sk, skb); | ||
91 | |||
92 | if (err && !sock_flag(sk, SOCK_DEAD)) | ||
93 | sk_wake_async(sk, 0, POLL_ERR); | ||
94 | dccp_time_wait(sk, DCCP_TIME_WAIT, 0); | ||
95 | } | ||
96 | |||
61 | static void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb) | 97 | static void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb) |
62 | { | 98 | { |
63 | struct dccp_sock *dp = dccp_sk(sk); | 99 | struct dccp_sock *dp = dccp_sk(sk); |
@@ -191,9 +227,8 @@ static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
191 | * S.state := TIMEWAIT | 227 | * S.state := TIMEWAIT |
192 | * Set TIMEWAIT timer | 228 | * Set TIMEWAIT timer |
193 | * Drop packet and return | 229 | * Drop packet and return |
194 | */ | 230 | */ |
195 | dccp_fin(sk, skb); | 231 | dccp_rcv_reset(sk, skb); |
196 | dccp_time_wait(sk, DCCP_TIME_WAIT, 0); | ||
197 | return 0; | 232 | return 0; |
198 | case DCCP_PKT_CLOSEREQ: | 233 | case DCCP_PKT_CLOSEREQ: |
199 | dccp_rcv_closereq(sk, skb); | 234 | dccp_rcv_closereq(sk, skb); |
@@ -521,12 +556,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
521 | * Drop packet and return | 556 | * Drop packet and return |
522 | */ | 557 | */ |
523 | if (dh->dccph_type == DCCP_PKT_RESET) { | 558 | if (dh->dccph_type == DCCP_PKT_RESET) { |
524 | /* | 559 | dccp_rcv_reset(sk, skb); |
525 | * Queue the equivalent of TCP fin so that dccp_recvmsg | ||
526 | * exits the loop | ||
527 | */ | ||
528 | dccp_fin(sk, skb); | ||
529 | dccp_time_wait(sk, DCCP_TIME_WAIT, 0); | ||
530 | return 0; | 560 | return 0; |
531 | /* | 561 | /* |
532 | * Step 7: Check for unexpected packet types | 562 | * Step 7: Check for unexpected packet types |
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 222549ab274a..01a6a808bdb7 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c | |||
@@ -241,8 +241,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) | |||
241 | goto out; | 241 | goto out; |
242 | 242 | ||
243 | dp = dccp_sk(sk); | 243 | dp = dccp_sk(sk); |
244 | seq = dccp_hdr_seq(skb); | 244 | seq = dccp_hdr_seq(dh); |
245 | if (sk->sk_state != DCCP_LISTEN && | 245 | if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) && |
246 | !between48(seq, dp->dccps_swl, dp->dccps_swh)) { | 246 | !between48(seq, dp->dccps_swl, dp->dccps_swh)) { |
247 | NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); | 247 | NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); |
248 | goto out; | 248 | goto out; |
@@ -795,7 +795,7 @@ static int dccp_v4_rcv(struct sk_buff *skb) | |||
795 | 795 | ||
796 | dh = dccp_hdr(skb); | 796 | dh = dccp_hdr(skb); |
797 | 797 | ||
798 | DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb); | 798 | DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh); |
799 | DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type; | 799 | DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type; |
800 | 800 | ||
801 | dccp_pr_debug("%8.8s " | 801 | dccp_pr_debug("%8.8s " |
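The dccp_v4_err() hunk above also switches the sequence-window check from a single-state comparison to the (1 << sk->sk_state) & ~(...) idiom, which tests membership in a set of states with one bitwise operation. A standalone sketch of that idiom (the state names are made up for illustration):

    #include <stdbool.h>

    enum state { S_CLOSED, S_LISTEN, S_REQUESTING, S_OPEN };    /* illustrative */

    #define SF(s)   (1u << (s))

    /* True for every state except LISTEN and REQUESTING. */
    static bool needs_window_check(enum state st)
    {
            return (1u << st) & ~(SF(S_LISTEN) | SF(S_REQUESTING));
    }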
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index bbadd6681b83..62428ff137dd 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -173,7 +173,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
173 | 173 | ||
174 | icmpv6_err_convert(type, code, &err); | 174 | icmpv6_err_convert(type, code, &err); |
175 | 175 | ||
176 | seq = DCCP_SKB_CB(skb)->dccpd_seq; | 176 | seq = dccp_hdr_seq(dh); |
177 | /* Might be for an request_sock */ | 177 | /* Might be for an request_sock */ |
178 | switch (sk->sk_state) { | 178 | switch (sk->sk_state) { |
179 | struct request_sock *req, **prev; | 179 | struct request_sock *req, **prev; |
@@ -787,7 +787,7 @@ static int dccp_v6_rcv(struct sk_buff *skb) | |||
787 | 787 | ||
788 | dh = dccp_hdr(skb); | 788 | dh = dccp_hdr(skb); |
789 | 789 | ||
790 | DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb); | 790 | DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh); |
791 | DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type; | 791 | DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type; |
792 | 792 | ||
793 | if (dccp_packet_without_ack(skb)) | 793 | if (dccp_packet_without_ack(skb)) |
diff --git a/net/dccp/options.c b/net/dccp/options.c index d361b5533309..d286cffe2c49 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/dccp.h> | 14 | #include <linux/dccp.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | #include <asm/unaligned.h> | ||
17 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
18 | #include <linux/skbuff.h> | 19 | #include <linux/skbuff.h> |
19 | 20 | ||
@@ -59,6 +60,7 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
59 | unsigned char opt, len; | 60 | unsigned char opt, len; |
60 | unsigned char *value; | 61 | unsigned char *value; |
61 | u32 elapsed_time; | 62 | u32 elapsed_time; |
63 | __be32 opt_val; | ||
62 | int rc; | 64 | int rc; |
63 | int mandatory = 0; | 65 | int mandatory = 0; |
64 | 66 | ||
@@ -145,7 +147,8 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
145 | if (len != 4) | 147 | if (len != 4) |
146 | goto out_invalid_option; | 148 | goto out_invalid_option; |
147 | 149 | ||
148 | opt_recv->dccpor_timestamp = ntohl(*(__be32 *)value); | 150 | opt_val = get_unaligned((__be32 *)value); |
151 | opt_recv->dccpor_timestamp = ntohl(opt_val); | ||
149 | 152 | ||
150 | dp->dccps_timestamp_echo = opt_recv->dccpor_timestamp; | 153 | dp->dccps_timestamp_echo = opt_recv->dccpor_timestamp; |
151 | dp->dccps_timestamp_time = ktime_get_real(); | 154 | dp->dccps_timestamp_time = ktime_get_real(); |
@@ -159,7 +162,8 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
159 | if (len != 4 && len != 6 && len != 8) | 162 | if (len != 4 && len != 6 && len != 8) |
160 | goto out_invalid_option; | 163 | goto out_invalid_option; |
161 | 164 | ||
162 | opt_recv->dccpor_timestamp_echo = ntohl(*(__be32 *)value); | 165 | opt_val = get_unaligned((__be32 *)value); |
166 | opt_recv->dccpor_timestamp_echo = ntohl(opt_val); | ||
163 | 167 | ||
164 | dccp_pr_debug("%s rx opt: TIMESTAMP_ECHO=%u, len=%d, " | 168 | dccp_pr_debug("%s rx opt: TIMESTAMP_ECHO=%u, len=%d, " |
165 | "ackno=%llu", dccp_role(sk), | 169 | "ackno=%llu", dccp_role(sk), |
@@ -168,16 +172,20 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
168 | (unsigned long long) | 172 | (unsigned long long) |
169 | DCCP_SKB_CB(skb)->dccpd_ack_seq); | 173 | DCCP_SKB_CB(skb)->dccpd_ack_seq); |
170 | 174 | ||
175 | value += 4; | ||
171 | 176 | ||
172 | if (len == 4) { | 177 | if (len == 4) { /* no elapsed time included */ |
173 | dccp_pr_debug_cat("\n"); | 178 | dccp_pr_debug_cat("\n"); |
174 | break; | 179 | break; |
175 | } | 180 | } |
176 | 181 | ||
177 | if (len == 6) | 182 | if (len == 6) { /* 2-byte elapsed time */ |
178 | elapsed_time = ntohs(*(__be16 *)(value + 4)); | 183 | __be16 opt_val2 = get_unaligned((__be16 *)value); |
179 | else | 184 | elapsed_time = ntohs(opt_val2); |
180 | elapsed_time = ntohl(*(__be32 *)(value + 4)); | 185 | } else { /* 4-byte elapsed time */ |
186 | opt_val = get_unaligned((__be32 *)value); | ||
187 | elapsed_time = ntohl(opt_val); | ||
188 | } | ||
181 | 189 | ||
182 | dccp_pr_debug_cat(", ELAPSED_TIME=%u\n", elapsed_time); | 190 | dccp_pr_debug_cat(", ELAPSED_TIME=%u\n", elapsed_time); |
183 | 191 | ||
@@ -192,10 +200,13 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
192 | if (pkt_type == DCCP_PKT_DATA) | 200 | if (pkt_type == DCCP_PKT_DATA) |
193 | continue; | 201 | continue; |
194 | 202 | ||
195 | if (len == 2) | 203 | if (len == 2) { |
196 | elapsed_time = ntohs(*(__be16 *)value); | 204 | __be16 opt_val2 = get_unaligned((__be16 *)value); |
197 | else | 205 | elapsed_time = ntohs(opt_val2); |
198 | elapsed_time = ntohl(*(__be32 *)value); | 206 | } else { |
207 | opt_val = get_unaligned((__be32 *)value); | ||
208 | elapsed_time = ntohl(opt_val); | ||
209 | } | ||
199 | 210 | ||
200 | if (elapsed_time > opt_recv->dccpor_elapsed_time) | 211 | if (elapsed_time > opt_recv->dccpor_elapsed_time) |
201 | opt_recv->dccpor_elapsed_time = elapsed_time; | 212 | opt_recv->dccpor_elapsed_time = elapsed_time; |
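The options.c changes above replace direct casts such as ntohl(*(__be32 *)value) with get_unaligned(), since option bytes inside a received header carry no alignment guarantee. Outside the kernel, the portable way to express the same load is memcpy into a local; a small hedged sketch:

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    /* Read a 32-bit big-endian field from a possibly unaligned option pointer. */
    static uint32_t read_be32_unaligned(const unsigned char *p)
    {
            uint32_t v;

            memcpy(&v, p, sizeof(v));   /* no unaligned dereference */
            return ntohl(v);
    }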
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index cc9bf1cb2646..d84973928033 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <net/sock.h> | 26 | #include <net/sock.h> |
27 | #include <net/xfrm.h> | 27 | #include <net/xfrm.h> |
28 | 28 | ||
29 | #include <asm/ioctls.h> | ||
29 | #include <asm/semaphore.h> | 30 | #include <asm/semaphore.h> |
30 | #include <linux/spinlock.h> | 31 | #include <linux/spinlock.h> |
31 | #include <linux/timer.h> | 32 | #include <linux/timer.h> |
@@ -378,8 +379,36 @@ EXPORT_SYMBOL_GPL(dccp_poll); | |||
378 | 379 | ||
379 | int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg) | 380 | int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg) |
380 | { | 381 | { |
381 | dccp_pr_debug("entry\n"); | 382 | int rc = -ENOTCONN; |
382 | return -ENOIOCTLCMD; | 383 | |
384 | lock_sock(sk); | ||
385 | |||
386 | if (sk->sk_state == DCCP_LISTEN) | ||
387 | goto out; | ||
388 | |||
389 | switch (cmd) { | ||
390 | case SIOCINQ: { | ||
391 | struct sk_buff *skb; | ||
392 | unsigned long amount = 0; | ||
393 | |||
394 | skb = skb_peek(&sk->sk_receive_queue); | ||
395 | if (skb != NULL) { | ||
396 | /* | ||
397 | * We will only return the amount of this packet since | ||
398 | * that is all that will be read. | ||
399 | */ | ||
400 | amount = skb->len; | ||
401 | } | ||
402 | rc = put_user(amount, (int __user *)arg); | ||
403 | } | ||
404 | break; | ||
405 | default: | ||
406 | rc = -ENOIOCTLCMD; | ||
407 | break; | ||
408 | } | ||
409 | out: | ||
410 | release_sock(sk); | ||
411 | return rc; | ||
383 | } | 412 | } |
384 | 413 | ||
385 | EXPORT_SYMBOL_GPL(dccp_ioctl); | 414 | EXPORT_SYMBOL_GPL(dccp_ioctl); |
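With the dccp_ioctl() change above, SIOCINQ on a DCCP socket reports the length of the packet at the head of the receive queue (one packet, since that is all a single read will return). A userspace usage sketch, assuming fd is a connected DCCP socket; on Linux, SIOCINQ is an alias for FIONREAD:

    #include <sys/ioctl.h>
    #include <linux/sockios.h>      /* SIOCINQ */

    /* How many bytes would the next read on fd return? Returns -1 on error. */
    static int pending_bytes(int fd)
    {
            int n = 0;

            if (ioctl(fd, SIOCINQ, &n) < 0)
                    return -1;
            return n;
    }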
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index ed8a3d49487d..6b2e454ae313 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c | |||
@@ -298,6 +298,14 @@ static int eth_change_mtu(struct net_device *dev, int new_mtu) | |||
298 | return 0; | 298 | return 0; |
299 | } | 299 | } |
300 | 300 | ||
301 | static int eth_validate_addr(struct net_device *dev) | ||
302 | { | ||
303 | if (!is_valid_ether_addr(dev->dev_addr)) | ||
304 | return -EINVAL; | ||
305 | |||
306 | return 0; | ||
307 | } | ||
308 | |||
301 | const struct header_ops eth_header_ops ____cacheline_aligned = { | 309 | const struct header_ops eth_header_ops ____cacheline_aligned = { |
302 | .create = eth_header, | 310 | .create = eth_header, |
303 | .parse = eth_header_parse, | 311 | .parse = eth_header_parse, |
@@ -317,6 +325,7 @@ void ether_setup(struct net_device *dev) | |||
317 | 325 | ||
318 | dev->change_mtu = eth_change_mtu; | 326 | dev->change_mtu = eth_change_mtu; |
319 | dev->set_mac_address = eth_mac_addr; | 327 | dev->set_mac_address = eth_mac_addr; |
328 | dev->validate_addr = eth_validate_addr; | ||
320 | 329 | ||
321 | dev->type = ARPHRD_ETHER; | 330 | dev->type = ARPHRD_ETHER; |
322 | dev->hard_header_len = ETH_HLEN; | 331 | dev->hard_header_len = ETH_HLEN; |
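eth_validate_addr() above gives Ethernet devices a validate_addr hook so the core can refuse to bring up an interface whose MAC address is unusable. The kernel's is_valid_ether_addr() boils down to two tests; a standalone sketch of the same check:

    #include <stdbool.h>
    #include <stdint.h>

    /* Usable as a device address: neither all-zero nor a multicast address. */
    static bool ether_addr_valid(const uint8_t a[6])
    {
            bool all_zero  = !(a[0] | a[1] | a[2] | a[3] | a[4] | a[5]);
            bool multicast = a[0] & 0x01;       /* I/G bit set = group address */

            return !all_zero && !multicast;
    }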
diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c index 811777682e2b..4cce3534e408 100644 --- a/net/ieee80211/ieee80211_crypt_tkip.c +++ b/net/ieee80211/ieee80211_crypt_tkip.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <net/ieee80211.h> | 25 | #include <net/ieee80211.h> |
26 | 26 | ||
27 | #include <linux/crypto.h> | 27 | #include <linux/crypto.h> |
28 | #include <asm/scatterlist.h> | 28 | #include <linux/scatterlist.h> |
29 | #include <linux/crc32.h> | 29 | #include <linux/crc32.h> |
30 | 30 | ||
31 | MODULE_AUTHOR("Jouni Malinen"); | 31 | MODULE_AUTHOR("Jouni Malinen"); |
@@ -537,13 +537,8 @@ static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr, | |||
537 | return -1; | 537 | return -1; |
538 | } | 538 | } |
539 | sg_init_table(sg, 2); | 539 | sg_init_table(sg, 2); |
540 | sg_set_page(&sg[0], virt_to_page(hdr)); | 540 | sg_set_buf(&sg[0], hdr, 16); |
541 | sg[0].offset = offset_in_page(hdr); | 541 | sg_set_buf(&sg[1], data, data_len); |
542 | sg[0].length = 16; | ||
543 | |||
544 | sg_set_page(&sg[1], virt_to_page(data)); | ||
545 | sg[1].offset = offset_in_page(data); | ||
546 | sg[1].length = data_len; | ||
547 | 542 | ||
548 | if (crypto_hash_setkey(tfm_michael, key, 8)) | 543 | if (crypto_hash_setkey(tfm_michael, key, 8)) |
549 | return -1; | 544 | return -1; |
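The michael_mic() hunk replaces the open-coded virt_to_page()/offset_in_page()/length triplet with sg_set_buf(), which does that bookkeeping for a linear kernel buffer. A hedged sketch of the resulting pattern under the 2.6.24-era scatterlist API (kernel-only, not runnable on its own):

    struct scatterlist sg[2];

    sg_init_table(sg, 2);                   /* zero the table and mark its end */
    sg_set_buf(&sg[0], hdr, 16);            /* page/offset/len derived internally */
    sg_set_buf(&sg[1], data, data_len);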
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c index 9693429489ed..866fc04c44f9 100644 --- a/net/ieee80211/ieee80211_crypt_wep.c +++ b/net/ieee80211/ieee80211_crypt_wep.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <net/ieee80211.h> | 22 | #include <net/ieee80211.h> |
23 | 23 | ||
24 | #include <linux/crypto.h> | 24 | #include <linux/crypto.h> |
25 | #include <asm/scatterlist.h> | 25 | #include <linux/scatterlist.h> |
26 | #include <linux/crc32.h> | 26 | #include <linux/crc32.h> |
27 | 27 | ||
28 | MODULE_AUTHOR("Jouni Malinen"); | 28 | MODULE_AUTHOR("Jouni Malinen"); |
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 805a78e6ed55..f18e88bc86ec 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c | |||
@@ -504,22 +504,16 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def) | |||
504 | INIT_RCU_HEAD(&doi_def->rcu); | 504 | INIT_RCU_HEAD(&doi_def->rcu); |
505 | INIT_LIST_HEAD(&doi_def->dom_list); | 505 | INIT_LIST_HEAD(&doi_def->dom_list); |
506 | 506 | ||
507 | rcu_read_lock(); | ||
508 | if (cipso_v4_doi_search(doi_def->doi) != NULL) | ||
509 | goto doi_add_failure_rlock; | ||
510 | spin_lock(&cipso_v4_doi_list_lock); | 507 | spin_lock(&cipso_v4_doi_list_lock); |
511 | if (cipso_v4_doi_search(doi_def->doi) != NULL) | 508 | if (cipso_v4_doi_search(doi_def->doi) != NULL) |
512 | goto doi_add_failure_slock; | 509 | goto doi_add_failure; |
513 | list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); | 510 | list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); |
514 | spin_unlock(&cipso_v4_doi_list_lock); | 511 | spin_unlock(&cipso_v4_doi_list_lock); |
515 | rcu_read_unlock(); | ||
516 | 512 | ||
517 | return 0; | 513 | return 0; |
518 | 514 | ||
519 | doi_add_failure_slock: | 515 | doi_add_failure: |
520 | spin_unlock(&cipso_v4_doi_list_lock); | 516 | spin_unlock(&cipso_v4_doi_list_lock); |
521 | doi_add_failure_rlock: | ||
522 | rcu_read_unlock(); | ||
523 | return -EEXIST; | 517 | return -EEXIST; |
524 | } | 518 | } |
525 | 519 | ||
@@ -543,29 +537,23 @@ int cipso_v4_doi_remove(u32 doi, | |||
543 | struct cipso_v4_doi *doi_def; | 537 | struct cipso_v4_doi *doi_def; |
544 | struct cipso_v4_domhsh_entry *dom_iter; | 538 | struct cipso_v4_domhsh_entry *dom_iter; |
545 | 539 | ||
546 | rcu_read_lock(); | 540 | spin_lock(&cipso_v4_doi_list_lock); |
547 | if (cipso_v4_doi_search(doi) != NULL) { | 541 | doi_def = cipso_v4_doi_search(doi); |
548 | spin_lock(&cipso_v4_doi_list_lock); | 542 | if (doi_def != NULL) { |
549 | doi_def = cipso_v4_doi_search(doi); | ||
550 | if (doi_def == NULL) { | ||
551 | spin_unlock(&cipso_v4_doi_list_lock); | ||
552 | rcu_read_unlock(); | ||
553 | return -ENOENT; | ||
554 | } | ||
555 | doi_def->valid = 0; | 543 | doi_def->valid = 0; |
556 | list_del_rcu(&doi_def->list); | 544 | list_del_rcu(&doi_def->list); |
557 | spin_unlock(&cipso_v4_doi_list_lock); | 545 | spin_unlock(&cipso_v4_doi_list_lock); |
546 | rcu_read_lock(); | ||
558 | list_for_each_entry_rcu(dom_iter, &doi_def->dom_list, list) | 547 | list_for_each_entry_rcu(dom_iter, &doi_def->dom_list, list) |
559 | if (dom_iter->valid) | 548 | if (dom_iter->valid) |
560 | netlbl_domhsh_remove(dom_iter->domain, | 549 | netlbl_domhsh_remove(dom_iter->domain, |
561 | audit_info); | 550 | audit_info); |
562 | cipso_v4_cache_invalidate(); | ||
563 | rcu_read_unlock(); | 551 | rcu_read_unlock(); |
564 | 552 | cipso_v4_cache_invalidate(); | |
565 | call_rcu(&doi_def->rcu, callback); | 553 | call_rcu(&doi_def->rcu, callback); |
566 | return 0; | 554 | return 0; |
567 | } | 555 | } |
568 | rcu_read_unlock(); | 556 | spin_unlock(&cipso_v4_doi_list_lock); |
569 | 557 | ||
570 | return -ENOENT; | 558 | return -ENOENT; |
571 | } | 559 | } |
@@ -653,22 +641,19 @@ int cipso_v4_doi_domhsh_add(struct cipso_v4_doi *doi_def, const char *domain) | |||
653 | new_dom->valid = 1; | 641 | new_dom->valid = 1; |
654 | INIT_RCU_HEAD(&new_dom->rcu); | 642 | INIT_RCU_HEAD(&new_dom->rcu); |
655 | 643 | ||
656 | rcu_read_lock(); | ||
657 | spin_lock(&cipso_v4_doi_list_lock); | 644 | spin_lock(&cipso_v4_doi_list_lock); |
658 | list_for_each_entry_rcu(iter, &doi_def->dom_list, list) | 645 | list_for_each_entry(iter, &doi_def->dom_list, list) |
659 | if (iter->valid && | 646 | if (iter->valid && |
660 | ((domain != NULL && iter->domain != NULL && | 647 | ((domain != NULL && iter->domain != NULL && |
661 | strcmp(iter->domain, domain) == 0) || | 648 | strcmp(iter->domain, domain) == 0) || |
662 | (domain == NULL && iter->domain == NULL))) { | 649 | (domain == NULL && iter->domain == NULL))) { |
663 | spin_unlock(&cipso_v4_doi_list_lock); | 650 | spin_unlock(&cipso_v4_doi_list_lock); |
664 | rcu_read_unlock(); | ||
665 | kfree(new_dom->domain); | 651 | kfree(new_dom->domain); |
666 | kfree(new_dom); | 652 | kfree(new_dom); |
667 | return -EEXIST; | 653 | return -EEXIST; |
668 | } | 654 | } |
669 | list_add_tail_rcu(&new_dom->list, &doi_def->dom_list); | 655 | list_add_tail_rcu(&new_dom->list, &doi_def->dom_list); |
670 | spin_unlock(&cipso_v4_doi_list_lock); | 656 | spin_unlock(&cipso_v4_doi_list_lock); |
671 | rcu_read_unlock(); | ||
672 | 657 | ||
673 | return 0; | 658 | return 0; |
674 | } | 659 | } |
@@ -689,9 +674,8 @@ int cipso_v4_doi_domhsh_remove(struct cipso_v4_doi *doi_def, | |||
689 | { | 674 | { |
690 | struct cipso_v4_domhsh_entry *iter; | 675 | struct cipso_v4_domhsh_entry *iter; |
691 | 676 | ||
692 | rcu_read_lock(); | ||
693 | spin_lock(&cipso_v4_doi_list_lock); | 677 | spin_lock(&cipso_v4_doi_list_lock); |
694 | list_for_each_entry_rcu(iter, &doi_def->dom_list, list) | 678 | list_for_each_entry(iter, &doi_def->dom_list, list) |
695 | if (iter->valid && | 679 | if (iter->valid && |
696 | ((domain != NULL && iter->domain != NULL && | 680 | ((domain != NULL && iter->domain != NULL && |
697 | strcmp(iter->domain, domain) == 0) || | 681 | strcmp(iter->domain, domain) == 0) || |
@@ -699,13 +683,10 @@ int cipso_v4_doi_domhsh_remove(struct cipso_v4_doi *doi_def, | |||
699 | iter->valid = 0; | 683 | iter->valid = 0; |
700 | list_del_rcu(&iter->list); | 684 | list_del_rcu(&iter->list); |
701 | spin_unlock(&cipso_v4_doi_list_lock); | 685 | spin_unlock(&cipso_v4_doi_list_lock); |
702 | rcu_read_unlock(); | ||
703 | call_rcu(&iter->rcu, cipso_v4_doi_domhsh_free); | 686 | call_rcu(&iter->rcu, cipso_v4_doi_domhsh_free); |
704 | |||
705 | return 0; | 687 | return 0; |
706 | } | 688 | } |
707 | spin_unlock(&cipso_v4_doi_list_lock); | 689 | spin_unlock(&cipso_v4_doi_list_lock); |
708 | rcu_read_unlock(); | ||
709 | 690 | ||
710 | return -ENOENT; | 691 | return -ENOENT; |
711 | } | 692 | } |
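All of the cipso_ipv4.c hunks above drop rcu_read_lock()/rcu_read_unlock() pairs from update paths: for an RCU-protected list, writers are serialized by the spinlock alone, and the RCU read lock is only needed where the list is traversed without that lock held. A hedged kernel-style sketch of the writer pattern the code converges on (doi_list_lock, doi_search_locked and doi_list are hypothetical names standing in for the real ones):

    /* Writer side: the spinlock, not rcu_read_lock(), protects the update. */
    spin_lock(&doi_list_lock);
    if (doi_search_locked(doi) != NULL) {
            spin_unlock(&doi_list_lock);
            return -EEXIST;
    }
    list_add_tail_rcu(&new->list, &doi_list);
    spin_unlock(&doi_list_lock);
    return 0;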
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 6b1a31a74cf2..cad4278025ad 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c | |||
@@ -3,7 +3,7 @@ | |||
3 | #include <net/ip.h> | 3 | #include <net/ip.h> |
4 | #include <net/xfrm.h> | 4 | #include <net/xfrm.h> |
5 | #include <net/esp.h> | 5 | #include <net/esp.h> |
6 | #include <asm/scatterlist.h> | 6 | #include <linux/scatterlist.h> |
7 | #include <linux/crypto.h> | 7 | #include <linux/crypto.h> |
8 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
9 | #include <linux/pfkeyv2.h> | 9 | #include <linux/pfkeyv2.h> |
@@ -110,7 +110,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) | |||
110 | if (!sg) | 110 | if (!sg) |
111 | goto unlock; | 111 | goto unlock; |
112 | } | 112 | } |
113 | skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen); | 113 | sg_init_table(sg, nfrags); |
114 | sg_mark_end(sg, skb_to_sgvec(skb, sg, esph->enc_data + | ||
115 | esp->conf.ivlen - | ||
116 | skb->data, clen)); | ||
114 | err = crypto_blkcipher_encrypt(&desc, sg, sg, clen); | 117 | err = crypto_blkcipher_encrypt(&desc, sg, sg, clen); |
115 | if (unlikely(sg != &esp->sgbuf[0])) | 118 | if (unlikely(sg != &esp->sgbuf[0])) |
116 | kfree(sg); | 119 | kfree(sg); |
@@ -201,7 +204,9 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) | |||
201 | if (!sg) | 204 | if (!sg) |
202 | goto out; | 205 | goto out; |
203 | } | 206 | } |
204 | skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, elen); | 207 | sg_init_table(sg, nfrags); |
208 | sg_mark_end(sg, skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, | ||
209 | elen)); | ||
205 | err = crypto_blkcipher_decrypt(&desc, sg, sg, elen); | 210 | err = crypto_blkcipher_decrypt(&desc, sg, sg, elen); |
206 | if (unlikely(sg != &esp->sgbuf[0])) | 211 | if (unlikely(sg != &esp->sgbuf[0])) |
207 | kfree(sg); | 212 | kfree(sg); |
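The esp_output()/esp_input() changes follow the 2.6.24 scatterlist rework: a table must be initialized with sg_init_table() and its last used entry marked before it is handed to the crypto layer, and skb_to_sgvec() returns the number of entries it filled, which feeds sg_mark_end() directly. A hedged sketch of the call shape used here (kernel-only API of that era, shown for structure rather than as runnable code):

    sg_init_table(sg, nfrags);
    sg_mark_end(sg, skb_to_sgvec(skb, sg, offset, len));    /* end = entries used */
    err = crypto_blkcipher_encrypt(&desc, sg, sg, len);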
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 78b514ba1414..60123905dbbf 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -128,13 +128,14 @@ struct net_device * ip_dev_find(__be32 addr) | |||
128 | struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } }; | 128 | struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } }; |
129 | struct fib_result res; | 129 | struct fib_result res; |
130 | struct net_device *dev = NULL; | 130 | struct net_device *dev = NULL; |
131 | struct fib_table *local_table; | ||
131 | 132 | ||
132 | #ifdef CONFIG_IP_MULTIPLE_TABLES | 133 | #ifdef CONFIG_IP_MULTIPLE_TABLES |
133 | res.r = NULL; | 134 | res.r = NULL; |
134 | #endif | 135 | #endif |
135 | 136 | ||
136 | if (!ip_fib_local_table || | 137 | local_table = fib_get_table(RT_TABLE_LOCAL); |
137 | ip_fib_local_table->tb_lookup(ip_fib_local_table, &fl, &res)) | 138 | if (!local_table || local_table->tb_lookup(local_table, &fl, &res)) |
138 | return NULL; | 139 | return NULL; |
139 | if (res.type != RTN_LOCAL) | 140 | if (res.type != RTN_LOCAL) |
140 | goto out; | 141 | goto out; |
@@ -152,6 +153,7 @@ unsigned inet_addr_type(__be32 addr) | |||
152 | struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } }; | 153 | struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } }; |
153 | struct fib_result res; | 154 | struct fib_result res; |
154 | unsigned ret = RTN_BROADCAST; | 155 | unsigned ret = RTN_BROADCAST; |
156 | struct fib_table *local_table; | ||
155 | 157 | ||
156 | if (ZERONET(addr) || BADCLASS(addr)) | 158 | if (ZERONET(addr) || BADCLASS(addr)) |
157 | return RTN_BROADCAST; | 159 | return RTN_BROADCAST; |
@@ -162,10 +164,10 @@ unsigned inet_addr_type(__be32 addr) | |||
162 | res.r = NULL; | 164 | res.r = NULL; |
163 | #endif | 165 | #endif |
164 | 166 | ||
165 | if (ip_fib_local_table) { | 167 | local_table = fib_get_table(RT_TABLE_LOCAL); |
168 | if (local_table) { | ||
166 | ret = RTN_UNICAST; | 169 | ret = RTN_UNICAST; |
167 | if (!ip_fib_local_table->tb_lookup(ip_fib_local_table, | 170 | if (!local_table->tb_lookup(local_table, &fl, &res)) { |
168 | &fl, &res)) { | ||
169 | ret = res.type; | 171 | ret = res.type; |
170 | fib_res_put(&res); | 172 | fib_res_put(&res); |
171 | } | 173 | } |
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 272c69e106e9..233de0634298 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
@@ -1104,5 +1104,4 @@ void __init icmp_init(struct net_proto_family *ops) | |||
1104 | EXPORT_SYMBOL(icmp_err_convert); | 1104 | EXPORT_SYMBOL(icmp_err_convert); |
1105 | EXPORT_SYMBOL(icmp_send); | 1105 | EXPORT_SYMBOL(icmp_send); |
1106 | EXPORT_SYMBOL(icmp_statistics); | 1106 | EXPORT_SYMBOL(icmp_statistics); |
1107 | EXPORT_SYMBOL(icmpmsg_statistics); | ||
1108 | EXPORT_SYMBOL(xrlim_allow); | 1107 | EXPORT_SYMBOL(xrlim_allow); |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index f151900efaf9..02b02a8d681c 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -674,7 +674,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
674 | struct rtable *rt; /* Route to the other host */ | 674 | struct rtable *rt; /* Route to the other host */ |
675 | struct net_device *tdev; /* Device to other host */ | 675 | struct net_device *tdev; /* Device to other host */ |
676 | struct iphdr *iph; /* Our new IP header */ | 676 | struct iphdr *iph; /* Our new IP header */ |
677 | int max_headroom; /* The extra header space needed */ | 677 | unsigned int max_headroom; /* The extra header space needed */ |
678 | int gre_hlen; | 678 | int gre_hlen; |
679 | __be32 dst; | 679 | __be32 dst; |
680 | int mtu; | 680 | int mtu; |
@@ -1033,7 +1033,6 @@ static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu) | |||
1033 | return 0; | 1033 | return 0; |
1034 | } | 1034 | } |
1035 | 1035 | ||
1036 | #ifdef CONFIG_NET_IPGRE_BROADCAST | ||
1037 | /* Nice toy. Unfortunately, useless in real life :-) | 1036 | /* Nice toy. Unfortunately, useless in real life :-) |
1038 | It allows to construct virtual multiprotocol broadcast "LAN" | 1037 | It allows to construct virtual multiprotocol broadcast "LAN" |
1039 | over the Internet, provided multicast routing is tuned. | 1038 | over the Internet, provided multicast routing is tuned. |
@@ -1092,10 +1091,19 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, | |||
1092 | return -t->hlen; | 1091 | return -t->hlen; |
1093 | } | 1092 | } |
1094 | 1093 | ||
1094 | static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr) | ||
1095 | { | ||
1096 | struct iphdr *iph = (struct iphdr*) skb_mac_header(skb); | ||
1097 | memcpy(haddr, &iph->saddr, 4); | ||
1098 | return 4; | ||
1099 | } | ||
1100 | |||
1095 | static const struct header_ops ipgre_header_ops = { | 1101 | static const struct header_ops ipgre_header_ops = { |
1096 | .create = ipgre_header, | 1102 | .create = ipgre_header, |
1103 | .parse = ipgre_header_parse, | ||
1097 | }; | 1104 | }; |
1098 | 1105 | ||
1106 | #ifdef CONFIG_NET_IPGRE_BROADCAST | ||
1099 | static int ipgre_open(struct net_device *dev) | 1107 | static int ipgre_open(struct net_device *dev) |
1100 | { | 1108 | { |
1101 | struct ip_tunnel *t = netdev_priv(dev); | 1109 | struct ip_tunnel *t = netdev_priv(dev); |
@@ -1197,6 +1205,8 @@ static int ipgre_tunnel_init(struct net_device *dev) | |||
1197 | dev->stop = ipgre_close; | 1205 | dev->stop = ipgre_close; |
1198 | } | 1206 | } |
1199 | #endif | 1207 | #endif |
1208 | } else { | ||
1209 | dev->header_ops = &ipgre_header_ops; | ||
1200 | } | 1210 | } |
1201 | 1211 | ||
1202 | if (!tdev && tunnel->parms.link) | 1212 | if (!tdev && tunnel->parms.link) |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index f508835ba713..e5f7dc2de303 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -161,7 +161,7 @@ static inline int ip_finish_output2(struct sk_buff *skb) | |||
161 | struct dst_entry *dst = skb->dst; | 161 | struct dst_entry *dst = skb->dst; |
162 | struct rtable *rt = (struct rtable *)dst; | 162 | struct rtable *rt = (struct rtable *)dst; |
163 | struct net_device *dev = dst->dev; | 163 | struct net_device *dev = dst->dev; |
164 | int hh_len = LL_RESERVED_SPACE(dev); | 164 | unsigned int hh_len = LL_RESERVED_SPACE(dev); |
165 | 165 | ||
166 | if (rt->rt_type == RTN_MULTICAST) | 166 | if (rt->rt_type == RTN_MULTICAST) |
167 | IP_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS); | 167 | IP_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS); |
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 5cd5bbe1379a..8c2b2b0741da 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
@@ -515,7 +515,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
515 | struct net_device *tdev; /* Device to other host */ | 515 | struct net_device *tdev; /* Device to other host */ |
516 | struct iphdr *old_iph = ip_hdr(skb); | 516 | struct iphdr *old_iph = ip_hdr(skb); |
517 | struct iphdr *iph; /* Our new IP header */ | 517 | struct iphdr *iph; /* Our new IP header */ |
518 | int max_headroom; /* The extra header space needed */ | 518 | unsigned int max_headroom; /* The extra header space needed */ |
519 | __be32 dst = tiph->daddr; | 519 | __be32 dst = tiph->daddr; |
520 | int mtu; | 520 | int mtu; |
521 | 521 | ||
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c index d0a92dec1050..7c074e386c17 100644 --- a/net/ipv4/ipvs/ip_vs_xmit.c +++ b/net/ipv4/ipvs/ip_vs_xmit.c | |||
@@ -325,7 +325,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
325 | __be16 df = old_iph->frag_off; | 325 | __be16 df = old_iph->frag_off; |
326 | sk_buff_data_t old_transport_header = skb->transport_header; | 326 | sk_buff_data_t old_transport_header = skb->transport_header; |
327 | struct iphdr *iph; /* Our new IP header */ | 327 | struct iphdr *iph; /* Our new IP header */ |
328 | int max_headroom; /* The extra header space needed */ | 328 | unsigned int max_headroom; /* The extra header space needed */ |
329 | int mtu; | 329 | int mtu; |
330 | 330 | ||
331 | EnterFunction(10); | 331 | EnterFunction(10); |
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index fd16cb8f8abe..9be0daa9c0ec 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c | |||
@@ -121,14 +121,6 @@ static const struct snmp_mib snmp4_ipextstats_list[] = { | |||
121 | SNMP_MIB_SENTINEL | 121 | SNMP_MIB_SENTINEL |
122 | }; | 122 | }; |
123 | 123 | ||
124 | static const struct snmp_mib snmp4_icmp_list[] = { | ||
125 | SNMP_MIB_ITEM("InMsgs", ICMP_MIB_INMSGS), | ||
126 | SNMP_MIB_ITEM("InErrors", ICMP_MIB_INERRORS), | ||
127 | SNMP_MIB_ITEM("OutMsgs", ICMP_MIB_OUTMSGS), | ||
128 | SNMP_MIB_ITEM("OutErrors", ICMP_MIB_OUTERRORS), | ||
129 | SNMP_MIB_SENTINEL | ||
130 | }; | ||
131 | |||
132 | static struct { | 124 | static struct { |
133 | char *name; | 125 | char *name; |
134 | int index; | 126 | int index; |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 9288220b73a8..69d8c38ccd39 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -103,7 +103,7 @@ int sysctl_tcp_abc __read_mostly; | |||
103 | #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/ | 103 | #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/ |
104 | #define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */ | 104 | #define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */ |
105 | #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ | 105 | #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ |
106 | #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained DSACK info */ | 106 | #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ |
107 | #define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */ | 107 | #define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */ |
108 | 108 | ||
109 | #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) | 109 | #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) |
@@ -866,7 +866,7 @@ static void tcp_disable_fack(struct tcp_sock *tp) | |||
866 | tp->rx_opt.sack_ok &= ~2; | 866 | tp->rx_opt.sack_ok &= ~2; |
867 | } | 867 | } |
868 | 868 | ||
869 | /* Take a notice that peer is sending DSACKs */ | 869 | /* Take a notice that peer is sending D-SACKs */ |
870 | static void tcp_dsack_seen(struct tcp_sock *tp) | 870 | static void tcp_dsack_seen(struct tcp_sock *tp) |
871 | { | 871 | { |
872 | tp->rx_opt.sack_ok |= 4; | 872 | tp->rx_opt.sack_ok |= 4; |
@@ -1058,7 +1058,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric, | |||
1058 | * | 1058 | * |
1059 | * With D-SACK the lower bound is extended to cover sequence space below | 1059 | * With D-SACK the lower bound is extended to cover sequence space below |
1060 | * SND.UNA down to undo_marker, which is the last point of interest. Yet | 1060 | * SND.UNA down to undo_marker, which is the last point of interest. Yet |
1061 | * again, DSACK block must not to go across snd_una (for the same reason as | 1061 | * again, D-SACK block must not to go across snd_una (for the same reason as |
1062 | * for the normal SACK blocks, explained above). But there all simplicity | 1062 | * for the normal SACK blocks, explained above). But there all simplicity |
1063 | * ends, TCP might receive valid D-SACKs below that. As long as they reside | 1063 | * ends, TCP might receive valid D-SACKs below that. As long as they reside |
1064 | * fully below undo_marker they do not affect behavior in anyway and can | 1064 | * fully below undo_marker they do not affect behavior in anyway and can |
@@ -1080,7 +1080,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack, | |||
1080 | if (!before(start_seq, tp->snd_nxt)) | 1080 | if (!before(start_seq, tp->snd_nxt)) |
1081 | return 0; | 1081 | return 0; |
1082 | 1082 | ||
1083 | /* In outstanding window? ...This is valid exit for DSACKs too. | 1083 | /* In outstanding window? ...This is valid exit for D-SACKs too. |
1084 | * start_seq == snd_una is non-sensical (see comments above) | 1084 | * start_seq == snd_una is non-sensical (see comments above) |
1085 | */ | 1085 | */ |
1086 | if (after(start_seq, tp->snd_una)) | 1086 | if (after(start_seq, tp->snd_una)) |
@@ -1204,8 +1204,8 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb, | |||
1204 | * which may fail and creates some hassle (caller must handle error case | 1204 | * which may fail and creates some hassle (caller must handle error case |
1205 | * returns). | 1205 | * returns). |
1206 | */ | 1206 | */ |
1207 | int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, | 1207 | static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, |
1208 | u32 start_seq, u32 end_seq) | 1208 | u32 start_seq, u32 end_seq) |
1209 | { | 1209 | { |
1210 | int in_sack, err; | 1210 | int in_sack, err; |
1211 | unsigned int pkt_len; | 1211 | unsigned int pkt_len; |
@@ -1248,6 +1248,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ | |||
1248 | int cached_fack_count; | 1248 | int cached_fack_count; |
1249 | int i; | 1249 | int i; |
1250 | int first_sack_index; | 1250 | int first_sack_index; |
1251 | int force_one_sack; | ||
1251 | 1252 | ||
1252 | if (!tp->sacked_out) { | 1253 | if (!tp->sacked_out) { |
1253 | if (WARN_ON(tp->fackets_out)) | 1254 | if (WARN_ON(tp->fackets_out)) |
@@ -1272,18 +1273,18 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ | |||
1272 | * if the only SACK change is the increase of the end_seq of | 1273 | * if the only SACK change is the increase of the end_seq of |
1273 | * the first block then only apply that SACK block | 1274 | * the first block then only apply that SACK block |
1274 | * and use retrans queue hinting otherwise slowpath */ | 1275 | * and use retrans queue hinting otherwise slowpath */ |
1275 | flag = 1; | 1276 | force_one_sack = 1; |
1276 | for (i = 0; i < num_sacks; i++) { | 1277 | for (i = 0; i < num_sacks; i++) { |
1277 | __be32 start_seq = sp[i].start_seq; | 1278 | __be32 start_seq = sp[i].start_seq; |
1278 | __be32 end_seq = sp[i].end_seq; | 1279 | __be32 end_seq = sp[i].end_seq; |
1279 | 1280 | ||
1280 | if (i == 0) { | 1281 | if (i == 0) { |
1281 | if (tp->recv_sack_cache[i].start_seq != start_seq) | 1282 | if (tp->recv_sack_cache[i].start_seq != start_seq) |
1282 | flag = 0; | 1283 | force_one_sack = 0; |
1283 | } else { | 1284 | } else { |
1284 | if ((tp->recv_sack_cache[i].start_seq != start_seq) || | 1285 | if ((tp->recv_sack_cache[i].start_seq != start_seq) || |
1285 | (tp->recv_sack_cache[i].end_seq != end_seq)) | 1286 | (tp->recv_sack_cache[i].end_seq != end_seq)) |
1286 | flag = 0; | 1287 | force_one_sack = 0; |
1287 | } | 1288 | } |
1288 | tp->recv_sack_cache[i].start_seq = start_seq; | 1289 | tp->recv_sack_cache[i].start_seq = start_seq; |
1289 | tp->recv_sack_cache[i].end_seq = end_seq; | 1290 | tp->recv_sack_cache[i].end_seq = end_seq; |
@@ -1295,7 +1296,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ | |||
1295 | } | 1296 | } |
1296 | 1297 | ||
1297 | first_sack_index = 0; | 1298 | first_sack_index = 0; |
1298 | if (flag) | 1299 | if (force_one_sack) |
1299 | num_sacks = 1; | 1300 | num_sacks = 1; |
1300 | else { | 1301 | else { |
1301 | int j; | 1302 | int j; |
@@ -1321,9 +1322,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ | |||
1321 | } | 1322 | } |
1322 | } | 1323 | } |
1323 | 1324 | ||
1324 | /* clear flag as used for different purpose in following code */ | ||
1325 | flag = 0; | ||
1326 | |||
1327 | /* Use SACK fastpath hint if valid */ | 1325 | /* Use SACK fastpath hint if valid */ |
1328 | cached_skb = tp->fastpath_skb_hint; | 1326 | cached_skb = tp->fastpath_skb_hint; |
1329 | cached_fack_count = tp->fastpath_cnt_hint; | 1327 | cached_fack_count = tp->fastpath_cnt_hint; |
@@ -1615,7 +1613,7 @@ void tcp_enter_frto(struct sock *sk) | |||
1615 | !icsk->icsk_retransmits)) { | 1613 | !icsk->icsk_retransmits)) { |
1616 | tp->prior_ssthresh = tcp_current_ssthresh(sk); | 1614 | tp->prior_ssthresh = tcp_current_ssthresh(sk); |
1617 | /* Our state is too optimistic in ssthresh() call because cwnd | 1615 | /* Our state is too optimistic in ssthresh() call because cwnd |
1618 | * is not reduced until tcp_enter_frto_loss() when previous FRTO | 1616 | * is not reduced until tcp_enter_frto_loss() when previous F-RTO |
1619 | * recovery has not yet completed. Pattern would be this: RTO, | 1617 | * recovery has not yet completed. Pattern would be this: RTO, |
1620 | * Cumulative ACK, RTO (2xRTO for the same segment does not end | 1618 | * Cumulative ACK, RTO (2xRTO for the same segment does not end |
1621 | * up here twice). | 1619 | * up here twice). |
@@ -1801,7 +1799,7 @@ void tcp_enter_loss(struct sock *sk, int how) | |||
1801 | tcp_set_ca_state(sk, TCP_CA_Loss); | 1799 | tcp_set_ca_state(sk, TCP_CA_Loss); |
1802 | tp->high_seq = tp->snd_nxt; | 1800 | tp->high_seq = tp->snd_nxt; |
1803 | TCP_ECN_queue_cwr(tp); | 1801 | TCP_ECN_queue_cwr(tp); |
1804 | /* Abort FRTO algorithm if one is in progress */ | 1802 | /* Abort F-RTO algorithm if one is in progress */ |
1805 | tp->frto_counter = 0; | 1803 | tp->frto_counter = 0; |
1806 | } | 1804 | } |
1807 | 1805 | ||
@@ -1946,7 +1944,7 @@ static int tcp_time_to_recover(struct sock *sk) | |||
1946 | struct tcp_sock *tp = tcp_sk(sk); | 1944 | struct tcp_sock *tp = tcp_sk(sk); |
1947 | __u32 packets_out; | 1945 | __u32 packets_out; |
1948 | 1946 | ||
1949 | /* Do not perform any recovery during FRTO algorithm */ | 1947 | /* Do not perform any recovery during F-RTO algorithm */ |
1950 | if (tp->frto_counter) | 1948 | if (tp->frto_counter) |
1951 | return 0; | 1949 | return 0; |
1952 | 1950 | ||
@@ -2962,7 +2960,7 @@ static int tcp_process_frto(struct sock *sk, int flag) | |||
2962 | } | 2960 | } |
2963 | 2961 | ||
2964 | if (tp->frto_counter == 1) { | 2962 | if (tp->frto_counter == 1) { |
2965 | /* Sending of the next skb must be allowed or no FRTO */ | 2963 | /* Sending of the next skb must be allowed or no F-RTO */ |
2966 | if (!tcp_send_head(sk) || | 2964 | if (!tcp_send_head(sk) || |
2967 | after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, | 2965 | after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, |
2968 | tp->snd_una + tp->snd_wnd)) { | 2966 | tp->snd_una + tp->snd_wnd)) { |
@@ -3909,7 +3907,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, | |||
3909 | 3907 | ||
3910 | while (before(start, end)) { | 3908 | while (before(start, end)) { |
3911 | struct sk_buff *nskb; | 3909 | struct sk_buff *nskb; |
3912 | int header = skb_headroom(skb); | 3910 | unsigned int header = skb_headroom(skb); |
3913 | int copy = SKB_MAX_ORDER(header, 0); | 3911 | int copy = SKB_MAX_ORDER(header, 0); |
3914 | 3912 | ||
3915 | /* Too big header? This can happen with IPv6. */ | 3913 | /* Too big header? This can happen with IPv6. */ |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 38cf73a56731..ad759f1c3777 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -1055,6 +1055,9 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | |||
1055 | bp->pad = 0; | 1055 | bp->pad = 0; |
1056 | bp->protocol = protocol; | 1056 | bp->protocol = protocol; |
1057 | bp->len = htons(tcplen); | 1057 | bp->len = htons(tcplen); |
1058 | |||
1059 | sg_init_table(sg, 4); | ||
1060 | |||
1058 | sg_set_buf(&sg[block++], bp, sizeof(*bp)); | 1061 | sg_set_buf(&sg[block++], bp, sizeof(*bp)); |
1059 | nbytes += sizeof(*bp); | 1062 | nbytes += sizeof(*bp); |
1060 | 1063 | ||
@@ -1080,6 +1083,8 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | |||
1080 | sg_set_buf(&sg[block++], key->key, key->keylen); | 1083 | sg_set_buf(&sg[block++], key->key, key->keylen); |
1081 | nbytes += key->keylen; | 1084 | nbytes += key->keylen; |
1082 | 1085 | ||
1086 | sg_mark_end(sg, block); | ||
1087 | |||
1083 | /* Now store the Hash into the packet */ | 1088 | /* Now store the Hash into the packet */ |
1084 | err = crypto_hash_init(desc); | 1089 | err = crypto_hash_init(desc); |
1085 | if (err) | 1090 | if (err) |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 35d2b0e9e10b..4bc25b46f33f 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -1152,7 +1152,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], | |||
1152 | return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable); | 1152 | return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable); |
1153 | 1153 | ||
1154 | sk = __udp4_lib_lookup(saddr, uh->source, daddr, uh->dest, | 1154 | sk = __udp4_lib_lookup(saddr, uh->source, daddr, uh->dest, |
1155 | skb->dev->ifindex, udptable ); | 1155 | inet_iif(skb), udptable); |
1156 | 1156 | ||
1157 | if (sk != NULL) { | 1157 | if (sk != NULL) { |
1158 | int ret = udp_queue_rcv_skb(sk, skb); | 1158 | int ret = udp_queue_rcv_skb(sk, skb); |
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 72a659806cad..ab17b5e62355 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #include <net/ip.h> | 29 | #include <net/ip.h> |
30 | #include <net/xfrm.h> | 30 | #include <net/xfrm.h> |
31 | #include <net/esp.h> | 31 | #include <net/esp.h> |
32 | #include <asm/scatterlist.h> | 32 | #include <linux/scatterlist.h> |
33 | #include <linux/crypto.h> | 33 | #include <linux/crypto.h> |
34 | #include <linux/kernel.h> | 34 | #include <linux/kernel.h> |
35 | #include <linux/pfkeyv2.h> | 35 | #include <linux/pfkeyv2.h> |
@@ -109,7 +109,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) | |||
109 | if (!sg) | 109 | if (!sg) |
110 | goto unlock; | 110 | goto unlock; |
111 | } | 111 | } |
112 | skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen); | 112 | sg_init_table(sg, nfrags); |
113 | sg_mark_end(sg, skb_to_sgvec(skb, sg, esph->enc_data + | ||
114 | esp->conf.ivlen - | ||
115 | skb->data, clen)); | ||
113 | err = crypto_blkcipher_encrypt(&desc, sg, sg, clen); | 116 | err = crypto_blkcipher_encrypt(&desc, sg, sg, clen); |
114 | if (unlikely(sg != &esp->sgbuf[0])) | 117 | if (unlikely(sg != &esp->sgbuf[0])) |
115 | kfree(sg); | 118 | kfree(sg); |
@@ -205,7 +208,10 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) | |||
205 | goto out; | 208 | goto out; |
206 | } | 209 | } |
207 | } | 210 | } |
208 | skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, elen); | 211 | sg_init_table(sg, nfrags); |
212 | sg_mark_end(sg, skb_to_sgvec(skb, sg, | ||
213 | sizeof(*esph) + esp->conf.ivlen, | ||
214 | elen)); | ||
209 | ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen); | 215 | ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen); |
210 | if (unlikely(sg != &esp->sgbuf[0])) | 216 | if (unlikely(sg != &esp->sgbuf[0])) |
211 | kfree(sg); | 217 | kfree(sg); |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 13565dfb1b45..653fc0a8235b 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -171,7 +171,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, | |||
171 | u32 mtu; | 171 | u32 mtu; |
172 | 172 | ||
173 | if (opt) { | 173 | if (opt) { |
174 | int head_room; | 174 | unsigned int head_room; |
175 | 175 | ||
176 | /* First: exthdrs may take lots of space (~8K for now) | 176 | /* First: exthdrs may take lots of space (~8K for now) |
177 | MAX_HEADER is not enough. | 177 | MAX_HEADER is not enough. |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 2320cc27ff9e..5383b33db8ca 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -838,7 +838,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, | |||
838 | struct dst_entry *dst; | 838 | struct dst_entry *dst; |
839 | struct net_device *tdev; | 839 | struct net_device *tdev; |
840 | int mtu; | 840 | int mtu; |
841 | int max_headroom = sizeof(struct ipv6hdr); | 841 | unsigned int max_headroom = sizeof(struct ipv6hdr); |
842 | u8 proto; | 842 | u8 proto; |
843 | int err = -1; | 843 | int err = -1; |
844 | int pkt_len; | 844 | int pkt_len; |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 466657a9a8bd..71433d29d884 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -430,7 +430,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
430 | struct rtable *rt; /* Route to the other host */ | 430 | struct rtable *rt; /* Route to the other host */ |
431 | struct net_device *tdev; /* Device to other host */ | 431 | struct net_device *tdev; /* Device to other host */ |
432 | struct iphdr *iph; /* Our new IP header */ | 432 | struct iphdr *iph; /* Our new IP header */ |
433 | int max_headroom; /* The extra header space needed */ | 433 | unsigned int max_headroom; /* The extra header space needed */ |
434 | __be32 dst = tiph->daddr; | 434 | __be32 dst = tiph->daddr; |
435 | int mtu; | 435 | int mtu; |
436 | struct in6_addr *addr6; | 436 | struct in6_addr *addr6; |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 737b755342bd..85208026278b 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -59,6 +59,7 @@ | |||
59 | #include <net/snmp.h> | 59 | #include <net/snmp.h> |
60 | #include <net/dsfield.h> | 60 | #include <net/dsfield.h> |
61 | #include <net/timewait_sock.h> | 61 | #include <net/timewait_sock.h> |
62 | #include <net/netdma.h> | ||
62 | 63 | ||
63 | #include <asm/uaccess.h> | 64 | #include <asm/uaccess.h> |
64 | 65 | ||
@@ -757,6 +758,8 @@ static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | |||
757 | bp->len = htonl(tcplen); | 758 | bp->len = htonl(tcplen); |
758 | bp->protocol = htonl(protocol); | 759 | bp->protocol = htonl(protocol); |
759 | 760 | ||
761 | sg_init_table(sg, 4); | ||
762 | |||
760 | sg_set_buf(&sg[block++], bp, sizeof(*bp)); | 763 | sg_set_buf(&sg[block++], bp, sizeof(*bp)); |
761 | nbytes += sizeof(*bp); | 764 | nbytes += sizeof(*bp); |
762 | 765 | ||
@@ -778,6 +781,8 @@ static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | |||
778 | sg_set_buf(&sg[block++], key->key, key->keylen); | 781 | sg_set_buf(&sg[block++], key->key, key->keylen); |
779 | nbytes += key->keylen; | 782 | nbytes += key->keylen; |
780 | 783 | ||
784 | sg_mark_end(sg, block); | ||
785 | |||
781 | /* Now store the hash into the packet */ | 786 | /* Now store the hash into the packet */ |
782 | err = crypto_hash_init(desc); | 787 | err = crypto_hash_init(desc); |
783 | if (err) { | 788 | if (err) { |
@@ -1728,6 +1733,8 @@ process: | |||
1728 | if (!sock_owned_by_user(sk)) { | 1733 | if (!sock_owned_by_user(sk)) { |
1729 | #ifdef CONFIG_NET_DMA | 1734 | #ifdef CONFIG_NET_DMA |
1730 | struct tcp_sock *tp = tcp_sk(sk); | 1735 | struct tcp_sock *tp = tcp_sk(sk); |
1736 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) | ||
1737 | tp->ucopy.dma_chan = get_softnet_dma(); | ||
1731 | if (tp->ucopy.dma_chan) | 1738 | if (tp->ucopy.dma_chan) |
1732 | ret = tcp_v6_do_rcv(sk, skb); | 1739 | ret = tcp_v6_do_rcv(sk, skb); |
1733 | else | 1740 | else |
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index 3d241e415a2a..1120b150e211 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c | |||
@@ -77,7 +77,7 @@ static int ircomm_tty_read_proc(char *buf, char **start, off_t offset, int len, | |||
77 | #endif /* CONFIG_PROC_FS */ | 77 | #endif /* CONFIG_PROC_FS */ |
78 | static struct tty_driver *driver; | 78 | static struct tty_driver *driver; |
79 | 79 | ||
80 | hashbin_t *ircomm_tty = NULL; | 80 | static hashbin_t *ircomm_tty = NULL; |
81 | 81 | ||
82 | static const struct tty_operations ops = { | 82 | static const struct tty_operations ops = { |
83 | .open = ircomm_tty_open, | 83 | .open = ircomm_tty_open, |
diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/ieee80211_sta.c index f7ffeec3913f..fda0e06453e8 100644 --- a/net/mac80211/ieee80211_sta.c +++ b/net/mac80211/ieee80211_sta.c | |||
@@ -1184,7 +1184,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct net_device *dev, | |||
1184 | printk(KERN_DEBUG "%s: RX %sssocResp from %s (capab=0x%x " | 1184 | printk(KERN_DEBUG "%s: RX %sssocResp from %s (capab=0x%x " |
1185 | "status=%d aid=%d)\n", | 1185 | "status=%d aid=%d)\n", |
1186 | dev->name, reassoc ? "Rea" : "A", print_mac(mac, mgmt->sa), | 1186 | dev->name, reassoc ? "Rea" : "A", print_mac(mac, mgmt->sa), |
1187 | capab_info, status_code, aid & ~(BIT(15) | BIT(14))); | 1187 | capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14)))); |
1188 | 1188 | ||
1189 | if (status_code != WLAN_STATUS_SUCCESS) { | 1189 | if (status_code != WLAN_STATUS_SUCCESS) { |
1190 | printk(KERN_DEBUG "%s: AP denied association (code=%d)\n", | 1190 | printk(KERN_DEBUG "%s: AP denied association (code=%d)\n", |
@@ -2096,7 +2096,8 @@ static int ieee80211_sta_match_ssid(struct ieee80211_if_sta *ifsta, | |||
2096 | { | 2096 | { |
2097 | int tmp, hidden_ssid; | 2097 | int tmp, hidden_ssid; |
2098 | 2098 | ||
2099 | if (!memcmp(ifsta->ssid, ssid, ssid_len)) | 2099 | if (ssid_len == ifsta->ssid_len && |
2100 | !memcmp(ifsta->ssid, ssid, ssid_len)) | ||
2100 | return 1; | 2101 | return 1; |
2101 | 2102 | ||
2102 | if (ifsta->flags & IEEE80211_STA_AUTO_BSSID_SEL) | 2103 | if (ifsta->flags & IEEE80211_STA_AUTO_BSSID_SEL) |
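The ieee80211_sta_match_ssid() fix above compares lengths before memcmp(), so a received SSID that is merely a prefix of the configured one no longer matches. The same length-then-bytes comparison as a standalone helper:

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    /* Two byte strings are equal only if both length and contents match. */
    static bool ssid_equal(const unsigned char *a, size_t alen,
                           const unsigned char *b, size_t blen)
    {
            return alen == blen && memcmp(a, b, alen) == 0;
    }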
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c index b6c844b7e1c1..b3675bd7db33 100644 --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c | |||
@@ -178,11 +178,9 @@ int netlbl_domhsh_init(u32 size) | |||
178 | for (iter = 0; iter < hsh_tbl->size; iter++) | 178 | for (iter = 0; iter < hsh_tbl->size; iter++) |
179 | INIT_LIST_HEAD(&hsh_tbl->tbl[iter]); | 179 | INIT_LIST_HEAD(&hsh_tbl->tbl[iter]); |
180 | 180 | ||
181 | rcu_read_lock(); | ||
182 | spin_lock(&netlbl_domhsh_lock); | 181 | spin_lock(&netlbl_domhsh_lock); |
183 | rcu_assign_pointer(netlbl_domhsh, hsh_tbl); | 182 | rcu_assign_pointer(netlbl_domhsh, hsh_tbl); |
184 | spin_unlock(&netlbl_domhsh_lock); | 183 | spin_unlock(&netlbl_domhsh_lock); |
185 | rcu_read_unlock(); | ||
186 | 184 | ||
187 | return 0; | 185 | return 0; |
188 | } | 186 | } |
@@ -222,7 +220,6 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, | |||
222 | entry->valid = 1; | 220 | entry->valid = 1; |
223 | INIT_RCU_HEAD(&entry->rcu); | 221 | INIT_RCU_HEAD(&entry->rcu); |
224 | 222 | ||
225 | ret_val = 0; | ||
226 | rcu_read_lock(); | 223 | rcu_read_lock(); |
227 | if (entry->domain != NULL) { | 224 | if (entry->domain != NULL) { |
228 | bkt = netlbl_domhsh_hash(entry->domain); | 225 | bkt = netlbl_domhsh_hash(entry->domain); |
@@ -233,7 +230,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, | |||
233 | else | 230 | else |
234 | ret_val = -EEXIST; | 231 | ret_val = -EEXIST; |
235 | spin_unlock(&netlbl_domhsh_lock); | 232 | spin_unlock(&netlbl_domhsh_lock); |
236 | } else if (entry->domain == NULL) { | 233 | } else { |
237 | INIT_LIST_HEAD(&entry->list); | 234 | INIT_LIST_HEAD(&entry->list); |
238 | spin_lock(&netlbl_domhsh_def_lock); | 235 | spin_lock(&netlbl_domhsh_def_lock); |
239 | if (rcu_dereference(netlbl_domhsh_def) == NULL) | 236 | if (rcu_dereference(netlbl_domhsh_def) == NULL) |
@@ -241,9 +238,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, | |||
241 | else | 238 | else |
242 | ret_val = -EEXIST; | 239 | ret_val = -EEXIST; |
243 | spin_unlock(&netlbl_domhsh_def_lock); | 240 | spin_unlock(&netlbl_domhsh_def_lock); |
244 | } else | 241 | } |
245 | ret_val = -EINVAL; | ||
246 | |||
247 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_ADD, audit_info); | 242 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_ADD, audit_info); |
248 | if (audit_buf != NULL) { | 243 | if (audit_buf != NULL) { |
249 | audit_log_format(audit_buf, | 244 | audit_log_format(audit_buf, |
@@ -262,7 +257,6 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, | |||
262 | audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0); | 257 | audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0); |
263 | audit_log_end(audit_buf); | 258 | audit_log_end(audit_buf); |
264 | } | 259 | } |
265 | |||
266 | rcu_read_unlock(); | 260 | rcu_read_unlock(); |
267 | 261 | ||
268 | if (ret_val != 0) { | 262 | if (ret_val != 0) { |
@@ -313,38 +307,30 @@ int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info) | |||
313 | struct audit_buffer *audit_buf; | 307 | struct audit_buffer *audit_buf; |
314 | 308 | ||
315 | rcu_read_lock(); | 309 | rcu_read_lock(); |
316 | if (domain != NULL) | 310 | entry = netlbl_domhsh_search(domain, (domain != NULL ? 0 : 1)); |
317 | entry = netlbl_domhsh_search(domain, 0); | ||
318 | else | ||
319 | entry = netlbl_domhsh_search(domain, 1); | ||
320 | if (entry == NULL) | 311 | if (entry == NULL) |
321 | goto remove_return; | 312 | goto remove_return; |
322 | switch (entry->type) { | 313 | switch (entry->type) { |
323 | case NETLBL_NLTYPE_UNLABELED: | ||
324 | break; | ||
325 | case NETLBL_NLTYPE_CIPSOV4: | 314 | case NETLBL_NLTYPE_CIPSOV4: |
326 | ret_val = cipso_v4_doi_domhsh_remove(entry->type_def.cipsov4, | 315 | cipso_v4_doi_domhsh_remove(entry->type_def.cipsov4, |
327 | entry->domain); | 316 | entry->domain); |
328 | if (ret_val != 0) | ||
329 | goto remove_return; | ||
330 | break; | 317 | break; |
331 | } | 318 | } |
332 | ret_val = 0; | ||
333 | if (entry != rcu_dereference(netlbl_domhsh_def)) { | 319 | if (entry != rcu_dereference(netlbl_domhsh_def)) { |
334 | spin_lock(&netlbl_domhsh_lock); | 320 | spin_lock(&netlbl_domhsh_lock); |
335 | if (entry->valid) { | 321 | if (entry->valid) { |
336 | entry->valid = 0; | 322 | entry->valid = 0; |
337 | list_del_rcu(&entry->list); | 323 | list_del_rcu(&entry->list); |
338 | } else | 324 | ret_val = 0; |
339 | ret_val = -ENOENT; | 325 | } |
340 | spin_unlock(&netlbl_domhsh_lock); | 326 | spin_unlock(&netlbl_domhsh_lock); |
341 | } else { | 327 | } else { |
342 | spin_lock(&netlbl_domhsh_def_lock); | 328 | spin_lock(&netlbl_domhsh_def_lock); |
343 | if (entry->valid) { | 329 | if (entry->valid) { |
344 | entry->valid = 0; | 330 | entry->valid = 0; |
345 | rcu_assign_pointer(netlbl_domhsh_def, NULL); | 331 | rcu_assign_pointer(netlbl_domhsh_def, NULL); |
346 | } else | 332 | ret_val = 0; |
347 | ret_val = -ENOENT; | 333 | } |
348 | spin_unlock(&netlbl_domhsh_def_lock); | 334 | spin_unlock(&netlbl_domhsh_def_lock); |
349 | } | 335 | } |
350 | 336 | ||
@@ -357,11 +343,10 @@ int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info) | |||
357 | audit_log_end(audit_buf); | 343 | audit_log_end(audit_buf); |
358 | } | 344 | } |
359 | 345 | ||
360 | if (ret_val == 0) | ||
361 | call_rcu(&entry->rcu, netlbl_domhsh_free_entry); | ||
362 | |||
363 | remove_return: | 346 | remove_return: |
364 | rcu_read_unlock(); | 347 | rcu_read_unlock(); |
348 | if (ret_val == 0) | ||
349 | call_rcu(&entry->rcu, netlbl_domhsh_free_entry); | ||
365 | return ret_val; | 350 | return ret_val; |
366 | } | 351 | } |
367 | 352 | ||
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c index 5315dacc5222..56483377997a 100644 --- a/net/netlabel/netlabel_mgmt.c +++ b/net/netlabel/netlabel_mgmt.c | |||
@@ -85,11 +85,9 @@ static const struct nla_policy netlbl_mgmt_genl_policy[NLBL_MGMT_A_MAX + 1] = { | |||
85 | */ | 85 | */ |
86 | void netlbl_mgmt_protocount_inc(void) | 86 | void netlbl_mgmt_protocount_inc(void) |
87 | { | 87 | { |
88 | rcu_read_lock(); | ||
89 | spin_lock(&netlabel_mgmt_protocount_lock); | 88 | spin_lock(&netlabel_mgmt_protocount_lock); |
90 | netlabel_mgmt_protocount++; | 89 | netlabel_mgmt_protocount++; |
91 | spin_unlock(&netlabel_mgmt_protocount_lock); | 90 | spin_unlock(&netlabel_mgmt_protocount_lock); |
92 | rcu_read_unlock(); | ||
93 | } | 91 | } |
94 | 92 | ||
95 | /** | 93 | /** |
@@ -103,12 +101,10 @@ void netlbl_mgmt_protocount_inc(void) | |||
103 | */ | 101 | */ |
104 | void netlbl_mgmt_protocount_dec(void) | 102 | void netlbl_mgmt_protocount_dec(void) |
105 | { | 103 | { |
106 | rcu_read_lock(); | ||
107 | spin_lock(&netlabel_mgmt_protocount_lock); | 104 | spin_lock(&netlabel_mgmt_protocount_lock); |
108 | if (netlabel_mgmt_protocount > 0) | 105 | if (netlabel_mgmt_protocount > 0) |
109 | netlabel_mgmt_protocount--; | 106 | netlabel_mgmt_protocount--; |
110 | spin_unlock(&netlabel_mgmt_protocount_lock); | 107 | spin_unlock(&netlabel_mgmt_protocount_lock); |
111 | rcu_read_unlock(); | ||
112 | } | 108 | } |
113 | 109 | ||
114 | /** | 110 | /** |
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index 5c303c68af1d..348292450deb 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c | |||
@@ -84,12 +84,10 @@ static void netlbl_unlabel_acceptflg_set(u8 value, | |||
84 | struct audit_buffer *audit_buf; | 84 | struct audit_buffer *audit_buf; |
85 | u8 old_val; | 85 | u8 old_val; |
86 | 86 | ||
87 | rcu_read_lock(); | ||
88 | old_val = netlabel_unlabel_acceptflg; | ||
89 | spin_lock(&netlabel_unlabel_acceptflg_lock); | 87 | spin_lock(&netlabel_unlabel_acceptflg_lock); |
88 | old_val = netlabel_unlabel_acceptflg; | ||
90 | netlabel_unlabel_acceptflg = value; | 89 | netlabel_unlabel_acceptflg = value; |
91 | spin_unlock(&netlabel_unlabel_acceptflg_lock); | 90 | spin_unlock(&netlabel_unlabel_acceptflg_lock); |
92 | rcu_read_unlock(); | ||
93 | 91 | ||
94 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_ALLOW, | 92 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_ALLOW, |
95 | audit_info); | 93 | audit_info); |
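In the unlabeled hunk the read of the old flag value moves inside the spinlock, so the saved old value and the newly written value are sampled and stored atomically with respect to other updaters. Sketch (illustrative names only):

    static DEFINE_SPINLOCK(flag_lock);
    static u8 flag;

    static void flag_set(u8 value, u8 *old_val)
    {
        spin_lock(&flag_lock);
        *old_val = flag;        /* read and write under the same lock */
        flag = value;
        spin_unlock(&flag_lock);
    }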
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 98e313e5e594..4f994c0fb3f8 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -1565,7 +1565,11 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | |||
1565 | 1565 | ||
1566 | netlink_dump(sk); | 1566 | netlink_dump(sk); |
1567 | sock_put(sk); | 1567 | sock_put(sk); |
1568 | return 0; | 1568 | |
1569 | /* We successfully started a dump, by returning -EINTR we | ||
1570 | * signal not to send ACK even if it was requested. | ||
1571 | */ | ||
1572 | return -EINTR; | ||
1569 | } | 1573 | } |
1570 | 1574 | ||
1571 | void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err) | 1575 | void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err) |
@@ -1619,17 +1623,21 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *, | |||
1619 | 1623 | ||
1620 | /* Only requests are handled by the kernel */ | 1624 | /* Only requests are handled by the kernel */ |
1621 | if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) | 1625 | if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) |
1622 | goto skip; | 1626 | goto ack; |
1623 | 1627 | ||
1624 | /* Skip control messages */ | 1628 | /* Skip control messages */ |
1625 | if (nlh->nlmsg_type < NLMSG_MIN_TYPE) | 1629 | if (nlh->nlmsg_type < NLMSG_MIN_TYPE) |
1626 | goto skip; | 1630 | goto ack; |
1627 | 1631 | ||
1628 | err = cb(skb, nlh); | 1632 | err = cb(skb, nlh); |
1629 | skip: | 1633 | if (err == -EINTR) |
1634 | goto skip; | ||
1635 | |||
1636 | ack: | ||
1630 | if (nlh->nlmsg_flags & NLM_F_ACK || err) | 1637 | if (nlh->nlmsg_flags & NLM_F_ACK || err) |
1631 | netlink_ack(skb, nlh, err); | 1638 | netlink_ack(skb, nlh, err); |
1632 | 1639 | ||
1640 | skip: | ||
1633 | msglen = NLMSG_ALIGN(nlh->nlmsg_len); | 1641 | msglen = NLMSG_ALIGN(nlh->nlmsg_len); |
1634 | if (msglen > skb->len) | 1642 | if (msglen > skb->len) |
1635 | msglen = skb->len; | 1643 | msglen = skb->len; |
@@ -1880,7 +1888,7 @@ static void __net_exit netlink_net_exit(struct net *net) | |||
1880 | #endif | 1888 | #endif |
1881 | } | 1889 | } |
1882 | 1890 | ||
1883 | static struct pernet_operations __net_initdata netlink_net_ops = { | 1891 | static struct pernet_operations netlink_net_ops = { |
1884 | .init = netlink_net_init, | 1892 | .init = netlink_net_init, |
1885 | .exit = netlink_net_exit, | 1893 | .exit = netlink_net_exit, |
1886 | }; | 1894 | }; |
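The af_netlink change establishes a convention: netlink_dump_start() now returns -EINTR on success, and netlink_rcv_skb() treats -EINTR from a handler as "a dump was started, do not send an ACK", since the dump machinery will generate its own replies. A subsystem handler can therefore just propagate the return value. Hypothetical sketch (the example_* names and socket pointer are assumptions, not real kernel symbols):

    static struct sock *example_nl_sock;

    static int example_dump(struct sk_buff *skb, struct netlink_callback *cb);
    static int example_done(struct netlink_callback *cb);
    static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh);

    static int example_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
    {
        if (nlh->nlmsg_flags & NLM_F_DUMP)
            /* Returns -EINTR on success; netlink_rcv_skb() then skips the
             * ACK and the dump code sends the real answer later. */
            return netlink_dump_start(example_nl_sock, skb, nlh,
                                      example_dump, example_done);

        return example_doit(skb, nlh);
    }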
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index ac3cabdca78c..eebefb6ef139 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c | |||
@@ -135,9 +135,8 @@ static void rxkad_prime_packet_security(struct rxrpc_connection *conn) | |||
135 | tmpbuf.x[2] = 0; | 135 | tmpbuf.x[2] = 0; |
136 | tmpbuf.x[3] = htonl(conn->security_ix); | 136 | tmpbuf.x[3] = htonl(conn->security_ix); |
137 | 137 | ||
138 | memset(sg, 0, sizeof(sg)); | 138 | sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf)); |
139 | sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf)); | 139 | sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf)); |
140 | sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf)); | ||
141 | crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); | 140 | crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); |
142 | 141 | ||
143 | memcpy(&conn->csum_iv, &tmpbuf.x[2], sizeof(conn->csum_iv)); | 142 | memcpy(&conn->csum_iv, &tmpbuf.x[2], sizeof(conn->csum_iv)); |
@@ -180,9 +179,8 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call, | |||
180 | desc.info = iv.x; | 179 | desc.info = iv.x; |
181 | desc.flags = 0; | 180 | desc.flags = 0; |
182 | 181 | ||
183 | memset(sg, 0, sizeof(sg)); | 182 | sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf)); |
184 | sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf)); | 183 | sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf)); |
185 | sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf)); | ||
186 | crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); | 184 | crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); |
187 | 185 | ||
188 | memcpy(sechdr, &tmpbuf, sizeof(tmpbuf)); | 186 | memcpy(sechdr, &tmpbuf, sizeof(tmpbuf)); |
@@ -227,9 +225,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, | |||
227 | desc.info = iv.x; | 225 | desc.info = iv.x; |
228 | desc.flags = 0; | 226 | desc.flags = 0; |
229 | 227 | ||
230 | memset(sg, 0, sizeof(sg[0]) * 2); | 228 | sg_init_one(&sg[0], sechdr, sizeof(rxkhdr)); |
231 | sg_set_buf(&sg[0], sechdr, sizeof(rxkhdr)); | 229 | sg_init_one(&sg[1], &rxkhdr, sizeof(rxkhdr)); |
232 | sg_set_buf(&sg[1], &rxkhdr, sizeof(rxkhdr)); | ||
233 | crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(rxkhdr)); | 230 | crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(rxkhdr)); |
234 | 231 | ||
235 | /* we want to encrypt the skbuff in-place */ | 232 | /* we want to encrypt the skbuff in-place */ |
@@ -240,7 +237,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, | |||
240 | len = data_size + call->conn->size_align - 1; | 237 | len = data_size + call->conn->size_align - 1; |
241 | len &= ~(call->conn->size_align - 1); | 238 | len &= ~(call->conn->size_align - 1); |
242 | 239 | ||
243 | skb_to_sgvec(skb, sg, 0, len); | 240 | sg_init_table(sg, skb_to_sgvec(skb, sg, 0, len)); |
244 | crypto_blkcipher_encrypt_iv(&desc, sg, sg, len); | 241 | crypto_blkcipher_encrypt_iv(&desc, sg, sg, len); |
245 | 242 | ||
246 | _leave(" = 0"); | 243 | _leave(" = 0"); |
@@ -290,9 +287,8 @@ static int rxkad_secure_packet(const struct rxrpc_call *call, | |||
290 | tmpbuf.x[0] = sp->hdr.callNumber; | 287 | tmpbuf.x[0] = sp->hdr.callNumber; |
291 | tmpbuf.x[1] = x; | 288 | tmpbuf.x[1] = x; |
292 | 289 | ||
293 | memset(&sg, 0, sizeof(sg)); | 290 | sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf)); |
294 | sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf)); | 291 | sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf)); |
295 | sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf)); | ||
296 | crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); | 292 | crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); |
297 | 293 | ||
298 | x = ntohl(tmpbuf.x[1]); | 294 | x = ntohl(tmpbuf.x[1]); |
@@ -332,20 +328,23 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call, | |||
332 | struct rxrpc_skb_priv *sp; | 328 | struct rxrpc_skb_priv *sp; |
333 | struct blkcipher_desc desc; | 329 | struct blkcipher_desc desc; |
334 | struct rxrpc_crypt iv; | 330 | struct rxrpc_crypt iv; |
335 | struct scatterlist sg[2]; | 331 | struct scatterlist sg[16]; |
336 | struct sk_buff *trailer; | 332 | struct sk_buff *trailer; |
337 | u32 data_size, buf; | 333 | u32 data_size, buf; |
338 | u16 check; | 334 | u16 check; |
335 | int nsg; | ||
339 | 336 | ||
340 | _enter(""); | 337 | _enter(""); |
341 | 338 | ||
342 | sp = rxrpc_skb(skb); | 339 | sp = rxrpc_skb(skb); |
343 | 340 | ||
344 | /* we want to decrypt the skbuff in-place */ | 341 | /* we want to decrypt the skbuff in-place */ |
345 | if (skb_cow_data(skb, 0, &trailer) < 0) | 342 | nsg = skb_cow_data(skb, 0, &trailer); |
343 | if (nsg < 0 || nsg > 16) | ||
346 | goto nomem; | 344 | goto nomem; |
347 | 345 | ||
348 | skb_to_sgvec(skb, sg, 0, 8); | 346 | sg_init_table(sg, nsg); |
347 | sg_mark_end(sg, skb_to_sgvec(skb, sg, 0, 8)); | ||
349 | 348 | ||
350 | /* start the decryption afresh */ | 349 | /* start the decryption afresh */ |
351 | memset(&iv, 0, sizeof(iv)); | 350 | memset(&iv, 0, sizeof(iv)); |
@@ -426,7 +425,8 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call, | |||
426 | goto nomem; | 425 | goto nomem; |
427 | } | 426 | } |
428 | 427 | ||
429 | skb_to_sgvec(skb, sg, 0, skb->len); | 428 | sg_init_table(sg, nsg); |
429 | sg_mark_end(sg, skb_to_sgvec(skb, sg, 0, skb->len)); | ||
430 | 430 | ||
431 | /* decrypt from the session key */ | 431 | /* decrypt from the session key */ |
432 | payload = call->conn->key->payload.data; | 432 | payload = call->conn->key->payload.data; |
@@ -521,9 +521,8 @@ static int rxkad_verify_packet(const struct rxrpc_call *call, | |||
521 | tmpbuf.x[0] = call->call_id; | 521 | tmpbuf.x[0] = call->call_id; |
522 | tmpbuf.x[1] = x; | 522 | tmpbuf.x[1] = x; |
523 | 523 | ||
524 | memset(&sg, 0, sizeof(sg)); | 524 | sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf)); |
525 | sg_set_buf(&sg[0], &tmpbuf, sizeof(tmpbuf)); | 525 | sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf)); |
526 | sg_set_buf(&sg[1], &tmpbuf, sizeof(tmpbuf)); | ||
527 | crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); | 526 | crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); |
528 | 527 | ||
529 | x = ntohl(tmpbuf.x[1]); | 528 | x = ntohl(tmpbuf.x[1]); |
@@ -690,16 +689,20 @@ static void rxkad_calc_response_checksum(struct rxkad_response *response) | |||
690 | static void rxkad_sg_set_buf2(struct scatterlist sg[2], | 689 | static void rxkad_sg_set_buf2(struct scatterlist sg[2], |
691 | void *buf, size_t buflen) | 690 | void *buf, size_t buflen) |
692 | { | 691 | { |
692 | int nsg = 1; | ||
693 | 693 | ||
694 | memset(sg, 0, sizeof(sg)); | 694 | sg_init_table(sg, 2); |
695 | 695 | ||
696 | sg_set_buf(&sg[0], buf, buflen); | 696 | sg_set_buf(&sg[0], buf, buflen); |
697 | if (sg[0].offset + buflen > PAGE_SIZE) { | 697 | if (sg[0].offset + buflen > PAGE_SIZE) { |
698 | /* the buffer was split over two pages */ | 698 | /* the buffer was split over two pages */ |
699 | sg[0].length = PAGE_SIZE - sg[0].offset; | 699 | sg[0].length = PAGE_SIZE - sg[0].offset; |
700 | sg_set_buf(&sg[1], buf + sg[0].length, buflen - sg[0].length); | 700 | sg_set_buf(&sg[1], buf + sg[0].length, buflen - sg[0].length); |
701 | nsg++; | ||
701 | } | 702 | } |
702 | 703 | ||
704 | sg_mark_end(sg, nsg); | ||
705 | |||
703 | ASSERTCMP(sg[0].length + sg[1].length, ==, buflen); | 706 | ASSERTCMP(sg[0].length + sg[1].length, ==, buflen); |
704 | } | 707 | } |
705 | 708 | ||
@@ -712,7 +715,7 @@ static void rxkad_encrypt_response(struct rxrpc_connection *conn, | |||
712 | { | 715 | { |
713 | struct blkcipher_desc desc; | 716 | struct blkcipher_desc desc; |
714 | struct rxrpc_crypt iv; | 717 | struct rxrpc_crypt iv; |
715 | struct scatterlist ssg[2], dsg[2]; | 718 | struct scatterlist sg[2]; |
716 | 719 | ||
717 | /* continue encrypting from where we left off */ | 720 | /* continue encrypting from where we left off */ |
718 | memcpy(&iv, s2->session_key, sizeof(iv)); | 721 | memcpy(&iv, s2->session_key, sizeof(iv)); |
@@ -720,9 +723,8 @@ static void rxkad_encrypt_response(struct rxrpc_connection *conn, | |||
720 | desc.info = iv.x; | 723 | desc.info = iv.x; |
721 | desc.flags = 0; | 724 | desc.flags = 0; |
722 | 725 | ||
723 | rxkad_sg_set_buf2(ssg, &resp->encrypted, sizeof(resp->encrypted)); | 726 | rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted)); |
724 | memcpy(dsg, ssg, sizeof(dsg)); | 727 | crypto_blkcipher_encrypt_iv(&desc, sg, sg, sizeof(resp->encrypted)); |
725 | crypto_blkcipher_encrypt_iv(&desc, dsg, ssg, sizeof(resp->encrypted)); | ||
726 | } | 728 | } |
727 | 729 | ||
728 | /* | 730 | /* |
@@ -817,7 +819,7 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn, | |||
817 | { | 819 | { |
818 | struct blkcipher_desc desc; | 820 | struct blkcipher_desc desc; |
819 | struct rxrpc_crypt iv, key; | 821 | struct rxrpc_crypt iv, key; |
820 | struct scatterlist ssg[1], dsg[1]; | 822 | struct scatterlist sg[1]; |
821 | struct in_addr addr; | 823 | struct in_addr addr; |
822 | unsigned life; | 824 | unsigned life; |
823 | time_t issue, now; | 825 | time_t issue, now; |
@@ -850,9 +852,8 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn, | |||
850 | desc.info = iv.x; | 852 | desc.info = iv.x; |
851 | desc.flags = 0; | 853 | desc.flags = 0; |
852 | 854 | ||
853 | sg_init_one(&ssg[0], ticket, ticket_len); | 855 | sg_init_one(&sg[0], ticket, ticket_len); |
854 | memcpy(dsg, ssg, sizeof(dsg)); | 856 | crypto_blkcipher_decrypt_iv(&desc, sg, sg, ticket_len); |
855 | crypto_blkcipher_decrypt_iv(&desc, dsg, ssg, ticket_len); | ||
856 | 857 | ||
857 | p = ticket; | 858 | p = ticket; |
858 | end = p + ticket_len; | 859 | end = p + ticket_len; |
@@ -961,7 +962,7 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn, | |||
961 | const struct rxrpc_crypt *session_key) | 962 | const struct rxrpc_crypt *session_key) |
962 | { | 963 | { |
963 | struct blkcipher_desc desc; | 964 | struct blkcipher_desc desc; |
964 | struct scatterlist ssg[2], dsg[2]; | 965 | struct scatterlist sg[2]; |
965 | struct rxrpc_crypt iv; | 966 | struct rxrpc_crypt iv; |
966 | 967 | ||
967 | _enter(",,%08x%08x", | 968 | _enter(",,%08x%08x", |
@@ -979,9 +980,8 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn, | |||
979 | desc.info = iv.x; | 980 | desc.info = iv.x; |
980 | desc.flags = 0; | 981 | desc.flags = 0; |
981 | 982 | ||
982 | rxkad_sg_set_buf2(ssg, &resp->encrypted, sizeof(resp->encrypted)); | 983 | rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted)); |
983 | memcpy(dsg, ssg, sizeof(dsg)); | 984 | crypto_blkcipher_decrypt_iv(&desc, sg, sg, sizeof(resp->encrypted)); |
984 | crypto_blkcipher_decrypt_iv(&desc, dsg, ssg, sizeof(resp->encrypted)); | ||
985 | mutex_unlock(&rxkad_ci_mutex); | 985 | mutex_unlock(&rxkad_ci_mutex); |
986 | 986 | ||
987 | _leave(""); | 987 | _leave(""); |
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index fd7bca4d5c20..c3fde9180f9d 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c | |||
@@ -166,7 +166,7 @@ bad_mirred: | |||
166 | return TC_ACT_SHOT; | 166 | return TC_ACT_SHOT; |
167 | } | 167 | } |
168 | 168 | ||
169 | skb2 = skb_clone(skb, GFP_ATOMIC); | 169 | skb2 = skb_act_clone(skb, GFP_ATOMIC); |
170 | if (skb2 == NULL) | 170 | if (skb2 == NULL) |
171 | goto bad_mirred; | 171 | goto bad_mirred; |
172 | if (m->tcfm_eaction != TCA_EGRESS_MIRROR && | 172 | if (m->tcfm_eaction != TCA_EGRESS_MIRROR && |
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index abd82fc3ec60..de894096e442 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
@@ -136,7 +136,7 @@ prio_dequeue(struct Qdisc* sch) | |||
136 | * pulling an skb. This way we avoid excessive requeues | 136 | * pulling an skb. This way we avoid excessive requeues |
137 | * for slower queues. | 137 | * for slower queues. |
138 | */ | 138 | */ |
139 | if (!netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) { | 139 | if (!__netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) { |
140 | qdisc = q->queues[prio]; | 140 | qdisc = q->queues[prio]; |
141 | skb = qdisc->dequeue(qdisc); | 141 | skb = qdisc->dequeue(qdisc); |
142 | if (skb) { | 142 | if (skb) { |
@@ -165,7 +165,7 @@ static struct sk_buff *rr_dequeue(struct Qdisc* sch) | |||
165 | * for slower queues. If the queue is stopped, try the | 165 | * for slower queues. If the queue is stopped, try the |
166 | * next queue. | 166 | * next queue. |
167 | */ | 167 | */ |
168 | if (!netif_subqueue_stopped(sch->dev, | 168 | if (!__netif_subqueue_stopped(sch->dev, |
169 | (q->mq ? q->curband : 0))) { | 169 | (q->mq ? q->curband : 0))) { |
170 | qdisc = q->queues[q->curband]; | 170 | qdisc = q->queues[q->curband]; |
171 | skb = qdisc->dequeue(qdisc); | 171 | skb = qdisc->dequeue(qdisc); |
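Both dequeue paths switch to __netif_subqueue_stopped(); the assumption here is that the plain netif_subqueue_stopped() helper now derives the queue from an skb (skb->queue_mapping), while the double-underscore variant takes the queue index directly, which is all a band-based qdisc has at this point. Sketch under that assumption:

    /* Only pull from a band whose hardware subqueue is not stopped. */
    if (!__netif_subqueue_stopped(sch->dev, q->mq ? prio : 0)) {
        qdisc = q->queues[prio];
        skb = qdisc->dequeue(qdisc);
        if (skb)
            return skb;
    }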
diff --git a/net/sctp/auth.c b/net/sctp/auth.c index cbd64b216cce..6d5fa6bb371b 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c | |||
@@ -107,7 +107,7 @@ struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp) | |||
107 | } | 107 | } |
108 | 108 | ||
109 | /* Free the shared key structure */ | 109 | /* Free the shared key structure */ |
110 | void sctp_auth_shkey_free(struct sctp_shared_key *sh_key) | 110 | static void sctp_auth_shkey_free(struct sctp_shared_key *sh_key) |
111 | { | 111 | { |
112 | BUG_ON(!list_empty(&sh_key->key_list)); | 112 | BUG_ON(!list_empty(&sh_key->key_list)); |
113 | sctp_auth_key_put(sh_key->key); | 113 | sctp_auth_key_put(sh_key->key); |
@@ -220,7 +220,7 @@ static struct sctp_auth_bytes *sctp_auth_make_key_vector( | |||
220 | 220 | ||
221 | 221 | ||
222 | /* Make a key vector based on our local parameters */ | 222 | /* Make a key vector based on our local parameters */ |
223 | struct sctp_auth_bytes *sctp_auth_make_local_vector( | 223 | static struct sctp_auth_bytes *sctp_auth_make_local_vector( |
224 | const struct sctp_association *asoc, | 224 | const struct sctp_association *asoc, |
225 | gfp_t gfp) | 225 | gfp_t gfp) |
226 | { | 226 | { |
@@ -232,7 +232,7 @@ struct sctp_auth_bytes *sctp_auth_make_local_vector( | |||
232 | } | 232 | } |
233 | 233 | ||
234 | /* Make a key vector based on peer's parameters */ | 234 | /* Make a key vector based on peer's parameters */ |
235 | struct sctp_auth_bytes *sctp_auth_make_peer_vector( | 235 | static struct sctp_auth_bytes *sctp_auth_make_peer_vector( |
236 | const struct sctp_association *asoc, | 236 | const struct sctp_association *asoc, |
237 | gfp_t gfp) | 237 | gfp_t gfp) |
238 | { | 238 | { |
@@ -556,7 +556,7 @@ struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc) | |||
556 | return &sctp_hmac_list[id]; | 556 | return &sctp_hmac_list[id]; |
557 | } | 557 | } |
558 | 558 | ||
559 | static int __sctp_auth_find_hmacid(__u16 *hmacs, int n_elts, __u16 hmac_id) | 559 | static int __sctp_auth_find_hmacid(__be16 *hmacs, int n_elts, __be16 hmac_id) |
560 | { | 560 | { |
561 | int found = 0; | 561 | int found = 0; |
562 | int i; | 562 | int i; |
@@ -573,7 +573,7 @@ static int __sctp_auth_find_hmacid(__u16 *hmacs, int n_elts, __u16 hmac_id) | |||
573 | 573 | ||
574 | /* See if the HMAC_ID is one that we claim as supported */ | 574 | /* See if the HMAC_ID is one that we claim as supported */ |
575 | int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc, | 575 | int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc, |
576 | __u16 hmac_id) | 576 | __be16 hmac_id) |
577 | { | 577 | { |
578 | struct sctp_hmac_algo_param *hmacs; | 578 | struct sctp_hmac_algo_param *hmacs; |
579 | __u16 n_elt; | 579 | __u16 n_elt; |
@@ -726,10 +726,7 @@ void sctp_auth_calculate_hmac(const struct sctp_association *asoc, | |||
726 | 726 | ||
727 | /* set up scatter list */ | 727 | /* set up scatter list */ |
728 | end = skb_tail_pointer(skb); | 728 | end = skb_tail_pointer(skb); |
729 | sg_init_table(&sg, 1); | 729 | sg_init_one(&sg, auth, end - (unsigned char *)auth); |
730 | sg_set_page(&sg, virt_to_page(auth)); | ||
731 | sg.offset = (unsigned long)(auth) % PAGE_SIZE; | ||
732 | sg.length = end - (unsigned char *)auth; | ||
733 | 730 | ||
734 | desc.tfm = asoc->ep->auth_hmacs[hmac_id]; | 731 | desc.tfm = asoc->ep->auth_hmacs[hmac_id]; |
735 | desc.flags = 0; | 732 | desc.flags = 0; |
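In sctp_auth_calculate_hmac() the open-coded sg_set_page()/offset/length triple collapses into sg_init_one(), which initializes the entry and points it at a kernel-virtual buffer in one call. An illustrative sketch of hashing a contiguous region this way ("buf", "buflen" and "tfm" are placeholders; the real code uses the association's HMAC transform with the key already set):

    struct scatterlist sg;
    struct hash_desc desc;
    u8 digest[64];          /* large enough for the digests in use */

    sg_init_one(&sg, buf, buflen);   /* init + point at buf in one call */
    desc.tfm = tfm;
    desc.flags = 0;
    crypto_hash_digest(&desc, &sg, buflen, digest);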
diff --git a/net/sctp/crc32c.c b/net/sctp/crc32c.c index 59cf7b06d216..181edabdb8ca 100644 --- a/net/sctp/crc32c.c +++ b/net/sctp/crc32c.c | |||
@@ -170,6 +170,7 @@ __u32 sctp_update_cksum(__u8 *buffer, __u16 length, __u32 crc32) | |||
170 | return crc32; | 170 | return crc32; |
171 | } | 171 | } |
172 | 172 | ||
173 | #if 0 | ||
173 | __u32 sctp_update_copy_cksum(__u8 *to, __u8 *from, __u16 length, __u32 crc32) | 174 | __u32 sctp_update_copy_cksum(__u8 *to, __u8 *from, __u16 length, __u32 crc32) |
174 | { | 175 | { |
175 | __u32 i; | 176 | __u32 i; |
@@ -186,6 +187,7 @@ __u32 sctp_update_copy_cksum(__u8 *to, __u8 *from, __u16 length, __u32 crc32) | |||
186 | 187 | ||
187 | return crc32; | 188 | return crc32; |
188 | } | 189 | } |
190 | #endif /* 0 */ | ||
189 | 191 | ||
190 | __u32 sctp_end_cksum(__u32 crc32) | 192 | __u32 sctp_end_cksum(__u32 crc32) |
191 | { | 193 | { |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 658476c4d587..c377e4e8f653 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -1513,10 +1513,7 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, | |||
1513 | struct hash_desc desc; | 1513 | struct hash_desc desc; |
1514 | 1514 | ||
1515 | /* Sign the message. */ | 1515 | /* Sign the message. */ |
1516 | sg_init_table(&sg, 1); | 1516 | sg_init_one(&sg, &cookie->c, bodysize); |
1517 | sg_set_page(&sg, virt_to_page(&cookie->c)); | ||
1518 | sg.offset = (unsigned long)(&cookie->c) % PAGE_SIZE; | ||
1519 | sg.length = bodysize; | ||
1520 | keylen = SCTP_SECRET_SIZE; | 1517 | keylen = SCTP_SECRET_SIZE; |
1521 | key = (char *)ep->secret_key[ep->current_key]; | 1518 | key = (char *)ep->secret_key[ep->current_key]; |
1522 | desc.tfm = sctp_sk(ep->base.sk)->hmac; | 1519 | desc.tfm = sctp_sk(ep->base.sk)->hmac; |
@@ -1586,10 +1583,7 @@ struct sctp_association *sctp_unpack_cookie( | |||
1586 | 1583 | ||
1587 | /* Check the signature. */ | 1584 | /* Check the signature. */ |
1588 | keylen = SCTP_SECRET_SIZE; | 1585 | keylen = SCTP_SECRET_SIZE; |
1589 | sg_init_table(&sg, 1); | 1586 | sg_init_one(&sg, bear_cookie, bodysize); |
1590 | sg_set_page(&sg, virt_to_page(bear_cookie)); | ||
1591 | sg.offset = (unsigned long)(bear_cookie) % PAGE_SIZE; | ||
1592 | sg.length = bodysize; | ||
1593 | key = (char *)ep->secret_key[ep->current_key]; | 1587 | key = (char *)ep->secret_key[ep->current_key]; |
1594 | desc.tfm = sctp_sk(ep->base.sk)->hmac; | 1588 | desc.tfm = sctp_sk(ep->base.sk)->hmac; |
1595 | desc.flags = 0; | 1589 | desc.flags = 0; |
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index b9370956b187..4be92d0a2cab 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c | |||
@@ -908,8 +908,8 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn) | |||
908 | return; | 908 | return; |
909 | } | 909 | } |
910 | 910 | ||
911 | /* Renege 'needed' bytes from the ordering queue. */ | 911 | static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, |
912 | static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed) | 912 | struct sk_buff_head *list, __u16 needed) |
913 | { | 913 | { |
914 | __u16 freed = 0; | 914 | __u16 freed = 0; |
915 | __u32 tsn; | 915 | __u32 tsn; |
@@ -919,7 +919,7 @@ static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed) | |||
919 | 919 | ||
920 | tsnmap = &ulpq->asoc->peer.tsn_map; | 920 | tsnmap = &ulpq->asoc->peer.tsn_map; |
921 | 921 | ||
922 | while ((skb = __skb_dequeue_tail(&ulpq->lobby)) != NULL) { | 922 | while ((skb = __skb_dequeue_tail(list)) != NULL) { |
923 | freed += skb_headlen(skb); | 923 | freed += skb_headlen(skb); |
924 | event = sctp_skb2event(skb); | 924 | event = sctp_skb2event(skb); |
925 | tsn = event->tsn; | 925 | tsn = event->tsn; |
@@ -933,30 +933,16 @@ static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed) | |||
933 | return freed; | 933 | return freed; |
934 | } | 934 | } |
935 | 935 | ||
936 | /* Renege 'needed' bytes from the ordering queue. */ | ||
937 | static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed) | ||
938 | { | ||
939 | return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed); | ||
940 | } | ||
941 | |||
936 | /* Renege 'needed' bytes from the reassembly queue. */ | 942 | /* Renege 'needed' bytes from the reassembly queue. */ |
937 | static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed) | 943 | static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed) |
938 | { | 944 | { |
939 | __u16 freed = 0; | 945 | return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed); |
940 | __u32 tsn; | ||
941 | struct sk_buff *skb; | ||
942 | struct sctp_ulpevent *event; | ||
943 | struct sctp_tsnmap *tsnmap; | ||
944 | |||
945 | tsnmap = &ulpq->asoc->peer.tsn_map; | ||
946 | |||
947 | /* Walk backwards through the list, reneges the newest tsns. */ | ||
948 | while ((skb = __skb_dequeue_tail(&ulpq->reasm)) != NULL) { | ||
949 | freed += skb_headlen(skb); | ||
950 | event = sctp_skb2event(skb); | ||
951 | tsn = event->tsn; | ||
952 | |||
953 | sctp_ulpevent_free(event); | ||
954 | sctp_tsnmap_renege(tsnmap, tsn); | ||
955 | if (freed >= needed) | ||
956 | return freed; | ||
957 | } | ||
958 | |||
959 | return freed; | ||
960 | } | 946 | } |
961 | 947 | ||
962 | /* Partial deliver the first message as there is pressure on rwnd. */ | 948 | /* Partial deliver the first message as there is pressure on rwnd. */ |
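The two renege functions differed only in which queue they walked, so the body is factored into sctp_ulpq_renege_list() and the old entry points become one-line wrappers. The shape of the refactor, reduced to its essentials (my_ulpq and kfree_skb() stand in for the real sctp_ulpq teardown, which frees the ulpevent and updates the TSN map):

    struct my_ulpq {
        struct sk_buff_head lobby;
        struct sk_buff_head reasm;
    };

    static __u16 renege_list(struct sk_buff_head *list, __u16 needed)
    {
        __u16 freed = 0;
        struct sk_buff *skb;

        /* Walk backwards, dropping the newest entries first, until
         * enough bytes have been reclaimed. */
        while ((skb = __skb_dequeue_tail(list)) != NULL) {
            freed += skb_headlen(skb);
            kfree_skb(skb);              /* stand-in for the real teardown */
            if (freed >= needed)
                break;
        }
        return freed;
    }

    static __u16 renege_order(struct my_ulpq *q, __u16 needed)
    {
        return renege_list(&q->lobby, needed);
    }

    static __u16 renege_frags(struct my_ulpq *q, __u16 needed)
    {
        return renege_list(&q->reasm, needed);
    }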
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index 32be431affcf..91cd8f0d1e10 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c | |||
@@ -75,7 +75,7 @@ krb5_encrypt( | |||
75 | memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm)); | 75 | memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm)); |
76 | 76 | ||
77 | memcpy(out, in, length); | 77 | memcpy(out, in, length); |
78 | sg_set_buf(sg, out, length); | 78 | sg_init_one(sg, out, length); |
79 | 79 | ||
80 | ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length); | 80 | ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length); |
81 | out: | 81 | out: |
@@ -110,7 +110,7 @@ krb5_decrypt( | |||
110 | memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm)); | 110 | memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm)); |
111 | 111 | ||
112 | memcpy(out, in, length); | 112 | memcpy(out, in, length); |
113 | sg_set_buf(sg, out, length); | 113 | sg_init_one(sg, out, length); |
114 | 114 | ||
115 | ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length); | 115 | ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length); |
116 | out: | 116 | out: |
@@ -146,7 +146,7 @@ make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body, | |||
146 | err = crypto_hash_init(&desc); | 146 | err = crypto_hash_init(&desc); |
147 | if (err) | 147 | if (err) |
148 | goto out; | 148 | goto out; |
149 | sg_set_buf(sg, header, hdrlen); | 149 | sg_init_one(sg, header, hdrlen); |
150 | err = crypto_hash_update(&desc, sg, hdrlen); | 150 | err = crypto_hash_update(&desc, sg, hdrlen); |
151 | if (err) | 151 | if (err) |
152 | goto out; | 152 | goto out; |
@@ -188,8 +188,6 @@ encryptor(struct scatterlist *sg, void *data) | |||
188 | /* Worst case is 4 fragments: head, end of page 1, start | 188 | /* Worst case is 4 fragments: head, end of page 1, start |
189 | * of page 2, tail. Anything more is a bug. */ | 189 | * of page 2, tail. Anything more is a bug. */ |
190 | BUG_ON(desc->fragno > 3); | 190 | BUG_ON(desc->fragno > 3); |
191 | desc->infrags[desc->fragno] = *sg; | ||
192 | desc->outfrags[desc->fragno] = *sg; | ||
193 | 191 | ||
194 | page_pos = desc->pos - outbuf->head[0].iov_len; | 192 | page_pos = desc->pos - outbuf->head[0].iov_len; |
195 | if (page_pos >= 0 && page_pos < outbuf->page_len) { | 193 | if (page_pos >= 0 && page_pos < outbuf->page_len) { |
@@ -199,7 +197,10 @@ encryptor(struct scatterlist *sg, void *data) | |||
199 | } else { | 197 | } else { |
200 | in_page = sg_page(sg); | 198 | in_page = sg_page(sg); |
201 | } | 199 | } |
202 | sg_set_page(&desc->infrags[desc->fragno], in_page); | 200 | sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length, |
201 | sg->offset); | ||
202 | sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length, | ||
203 | sg->offset); | ||
203 | desc->fragno++; | 204 | desc->fragno++; |
204 | desc->fraglen += sg->length; | 205 | desc->fraglen += sg->length; |
205 | desc->pos += sg->length; | 206 | desc->pos += sg->length; |
@@ -210,16 +211,22 @@ encryptor(struct scatterlist *sg, void *data) | |||
210 | if (thislen == 0) | 211 | if (thislen == 0) |
211 | return 0; | 212 | return 0; |
212 | 213 | ||
214 | sg_mark_end(desc->infrags, desc->fragno); | ||
215 | sg_mark_end(desc->outfrags, desc->fragno); | ||
216 | |||
213 | ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags, | 217 | ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags, |
214 | desc->infrags, thislen); | 218 | desc->infrags, thislen); |
215 | if (ret) | 219 | if (ret) |
216 | return ret; | 220 | return ret; |
221 | |||
222 | sg_init_table(desc->infrags, 4); | ||
223 | sg_init_table(desc->outfrags, 4); | ||
224 | |||
217 | if (fraglen) { | 225 | if (fraglen) { |
218 | sg_set_page(&desc->outfrags[0], sg_page(sg)); | 226 | sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen, |
219 | desc->outfrags[0].offset = sg->offset + sg->length - fraglen; | 227 | sg->offset + sg->length - fraglen); |
220 | desc->outfrags[0].length = fraglen; | ||
221 | desc->infrags[0] = desc->outfrags[0]; | 228 | desc->infrags[0] = desc->outfrags[0]; |
222 | sg_set_page(&desc->infrags[0], in_page); | 229 | sg_assign_page(&desc->infrags[0], in_page); |
223 | desc->fragno = 1; | 230 | desc->fragno = 1; |
224 | desc->fraglen = fraglen; | 231 | desc->fraglen = fraglen; |
225 | } else { | 232 | } else { |
@@ -248,6 +255,9 @@ gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf, | |||
248 | desc.fragno = 0; | 255 | desc.fragno = 0; |
249 | desc.fraglen = 0; | 256 | desc.fraglen = 0; |
250 | 257 | ||
258 | sg_init_table(desc.infrags, 4); | ||
259 | sg_init_table(desc.outfrags, 4); | ||
260 | |||
251 | ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc); | 261 | ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc); |
252 | return ret; | 262 | return ret; |
253 | } | 263 | } |
@@ -272,7 +282,8 @@ decryptor(struct scatterlist *sg, void *data) | |||
272 | /* Worst case is 4 fragments: head, end of page 1, start | 282 | /* Worst case is 4 fragments: head, end of page 1, start |
273 | * of page 2, tail. Anything more is a bug. */ | 283 | * of page 2, tail. Anything more is a bug. */ |
274 | BUG_ON(desc->fragno > 3); | 284 | BUG_ON(desc->fragno > 3); |
275 | desc->frags[desc->fragno] = *sg; | 285 | sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length, |
286 | sg->offset); | ||
276 | desc->fragno++; | 287 | desc->fragno++; |
277 | desc->fraglen += sg->length; | 288 | desc->fraglen += sg->length; |
278 | 289 | ||
@@ -282,14 +293,18 @@ decryptor(struct scatterlist *sg, void *data) | |||
282 | if (thislen == 0) | 293 | if (thislen == 0) |
283 | return 0; | 294 | return 0; |
284 | 295 | ||
296 | sg_mark_end(desc->frags, desc->fragno); | ||
297 | |||
285 | ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags, | 298 | ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags, |
286 | desc->frags, thislen); | 299 | desc->frags, thislen); |
287 | if (ret) | 300 | if (ret) |
288 | return ret; | 301 | return ret; |
302 | |||
303 | sg_init_table(desc->frags, 4); | ||
304 | |||
289 | if (fraglen) { | 305 | if (fraglen) { |
290 | sg_set_page(&desc->frags[0], sg_page(sg)); | 306 | sg_set_page(&desc->frags[0], sg_page(sg), fraglen, |
291 | desc->frags[0].offset = sg->offset + sg->length - fraglen; | 307 | sg->offset + sg->length - fraglen); |
292 | desc->frags[0].length = fraglen; | ||
293 | desc->fragno = 1; | 308 | desc->fragno = 1; |
294 | desc->fraglen = fraglen; | 309 | desc->fraglen = fraglen; |
295 | } else { | 310 | } else { |
@@ -314,6 +329,9 @@ gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf, | |||
314 | desc.desc.flags = 0; | 329 | desc.desc.flags = 0; |
315 | desc.fragno = 0; | 330 | desc.fragno = 0; |
316 | desc.fraglen = 0; | 331 | desc.fraglen = 0; |
332 | |||
333 | sg_init_table(desc.frags, 4); | ||
334 | |||
317 | return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc); | 335 | return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc); |
318 | } | 336 | } |
319 | 337 | ||
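The gss_krb5 encryptor/decryptor changes follow the same scatterlist rules: the fragment arrays are initialized with sg_init_table() before any entry is filled, sg_set_page() now takes the length and offset as arguments, the used prefix is terminated with sg_mark_end() before the crypto call, and the array is re-initialized before being reused for the next batch. Condensed per-batch sketch under those assumptions ("sg", "desc" and "thislen" come from the surrounding walk; the real code keeps separate infrags/outfrags arrays, one array is shown for brevity):

    struct scatterlist frags[4];
    int fragno = 0;

    sg_init_table(frags, 4);                 /* before filling any entry */

    /* accumulate up to four fragments of the xdr_buf ... */
    sg_set_page(&frags[fragno], sg_page(sg), sg->length, sg->offset);
    fragno++;

    /* ... then run the cipher over the block-aligned prefix */
    sg_mark_end(frags, fragno);
    crypto_blkcipher_encrypt_iv(&desc->desc, frags, frags, thislen);

    sg_init_table(frags, 4);                 /* reset before reusing */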
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c index d158635de6c0..abf17ce2e3b1 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_seal.c +++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c | |||
@@ -173,7 +173,7 @@ make_spkm3_checksum(s32 cksumtype, struct xdr_netobj *key, char *header, | |||
173 | if (err) | 173 | if (err) |
174 | goto out; | 174 | goto out; |
175 | 175 | ||
176 | sg_set_buf(sg, header, hdrlen); | 176 | sg_init_one(sg, header, hdrlen); |
177 | crypto_hash_update(&desc, sg, sg->length); | 177 | crypto_hash_update(&desc, sg, sg->length); |
178 | 178 | ||
179 | xdr_process_buf(body, body_offset, body->len - body_offset, | 179 | xdr_process_buf(body, body_offset, body->len - body_offset, |
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 3d1f7cdf9dd0..fdc5e6d7562b 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c | |||
@@ -1030,6 +1030,8 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, | |||
1030 | unsigned page_len, thislen, page_offset; | 1030 | unsigned page_len, thislen, page_offset; |
1031 | struct scatterlist sg[1]; | 1031 | struct scatterlist sg[1]; |
1032 | 1032 | ||
1033 | sg_init_table(sg, 1); | ||
1034 | |||
1033 | if (offset >= buf->head[0].iov_len) { | 1035 | if (offset >= buf->head[0].iov_len) { |
1034 | offset -= buf->head[0].iov_len; | 1036 | offset -= buf->head[0].iov_len; |
1035 | } else { | 1037 | } else { |
@@ -1059,9 +1061,7 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, | |||
1059 | do { | 1061 | do { |
1060 | if (thislen > page_len) | 1062 | if (thislen > page_len) |
1061 | thislen = page_len; | 1063 | thislen = page_len; |
1062 | sg_set_page(sg, buf->pages[i]); | 1064 | sg_set_page(sg, buf->pages[i], thislen, page_offset); |
1063 | sg->offset = page_offset; | ||
1064 | sg->length = thislen; | ||
1065 | ret = actor(sg, data); | 1065 | ret = actor(sg, data); |
1066 | if (ret) | 1066 | if (ret) |
1067 | goto out; | 1067 | goto out; |
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 12db63580427..f877b88091ce 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c | |||
@@ -181,7 +181,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target, | |||
181 | struct rpcrdma_read_chunk *cur_rchunk = NULL; | 181 | struct rpcrdma_read_chunk *cur_rchunk = NULL; |
182 | struct rpcrdma_write_array *warray = NULL; | 182 | struct rpcrdma_write_array *warray = NULL; |
183 | struct rpcrdma_write_chunk *cur_wchunk = NULL; | 183 | struct rpcrdma_write_chunk *cur_wchunk = NULL; |
184 | u32 *iptr = headerp->rm_body.rm_chunks; | 184 | __be32 *iptr = headerp->rm_body.rm_chunks; |
185 | 185 | ||
186 | if (type == rpcrdma_readch || type == rpcrdma_areadch) { | 186 | if (type == rpcrdma_readch || type == rpcrdma_areadch) { |
187 | /* a read chunk - server will RDMA Read our memory */ | 187 | /* a read chunk - server will RDMA Read our memory */ |
@@ -217,7 +217,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target, | |||
217 | cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey); | 217 | cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey); |
218 | cur_rchunk->rc_target.rs_length = htonl(seg->mr_len); | 218 | cur_rchunk->rc_target.rs_length = htonl(seg->mr_len); |
219 | xdr_encode_hyper( | 219 | xdr_encode_hyper( |
220 | (u32 *)&cur_rchunk->rc_target.rs_offset, | 220 | (__be32 *)&cur_rchunk->rc_target.rs_offset, |
221 | seg->mr_base); | 221 | seg->mr_base); |
222 | dprintk("RPC: %s: read chunk " | 222 | dprintk("RPC: %s: read chunk " |
223 | "elem %d@0x%llx:0x%x pos %d (%s)\n", __func__, | 223 | "elem %d@0x%llx:0x%x pos %d (%s)\n", __func__, |
@@ -229,7 +229,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target, | |||
229 | cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey); | 229 | cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey); |
230 | cur_wchunk->wc_target.rs_length = htonl(seg->mr_len); | 230 | cur_wchunk->wc_target.rs_length = htonl(seg->mr_len); |
231 | xdr_encode_hyper( | 231 | xdr_encode_hyper( |
232 | (u32 *)&cur_wchunk->wc_target.rs_offset, | 232 | (__be32 *)&cur_wchunk->wc_target.rs_offset, |
233 | seg->mr_base); | 233 | seg->mr_base); |
234 | dprintk("RPC: %s: %s chunk " | 234 | dprintk("RPC: %s: %s chunk " |
235 | "elem %d@0x%llx:0x%x (%s)\n", __func__, | 235 | "elem %d@0x%llx:0x%x (%s)\n", __func__, |
@@ -257,14 +257,14 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target, | |||
257 | * finish off header. If write, marshal discrim and nchunks. | 257 | * finish off header. If write, marshal discrim and nchunks. |
258 | */ | 258 | */ |
259 | if (cur_rchunk) { | 259 | if (cur_rchunk) { |
260 | iptr = (u32 *) cur_rchunk; | 260 | iptr = (__be32 *) cur_rchunk; |
261 | *iptr++ = xdr_zero; /* finish the read chunk list */ | 261 | *iptr++ = xdr_zero; /* finish the read chunk list */ |
262 | *iptr++ = xdr_zero; /* encode a NULL write chunk list */ | 262 | *iptr++ = xdr_zero; /* encode a NULL write chunk list */ |
263 | *iptr++ = xdr_zero; /* encode a NULL reply chunk */ | 263 | *iptr++ = xdr_zero; /* encode a NULL reply chunk */ |
264 | } else { | 264 | } else { |
265 | warray->wc_discrim = xdr_one; | 265 | warray->wc_discrim = xdr_one; |
266 | warray->wc_nchunks = htonl(nchunks); | 266 | warray->wc_nchunks = htonl(nchunks); |
267 | iptr = (u32 *) cur_wchunk; | 267 | iptr = (__be32 *) cur_wchunk; |
268 | if (type == rpcrdma_writech) { | 268 | if (type == rpcrdma_writech) { |
269 | *iptr++ = xdr_zero; /* finish the write chunk list */ | 269 | *iptr++ = xdr_zero; /* finish the write chunk list */ |
270 | *iptr++ = xdr_zero; /* encode a NULL reply chunk */ | 270 | *iptr++ = xdr_zero; /* encode a NULL reply chunk */ |
@@ -559,7 +559,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst) | |||
559 | * RDMA'd by server. See map at rpcrdma_create_chunks()! :-) | 559 | * RDMA'd by server. See map at rpcrdma_create_chunks()! :-) |
560 | */ | 560 | */ |
561 | static int | 561 | static int |
562 | rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp) | 562 | rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, __be32 **iptrp) |
563 | { | 563 | { |
564 | unsigned int i, total_len; | 564 | unsigned int i, total_len; |
565 | struct rpcrdma_write_chunk *cur_wchunk; | 565 | struct rpcrdma_write_chunk *cur_wchunk; |
@@ -573,7 +573,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp) | |||
573 | struct rpcrdma_segment *seg = &cur_wchunk->wc_target; | 573 | struct rpcrdma_segment *seg = &cur_wchunk->wc_target; |
574 | ifdebug(FACILITY) { | 574 | ifdebug(FACILITY) { |
575 | u64 off; | 575 | u64 off; |
576 | xdr_decode_hyper((u32 *)&seg->rs_offset, &off); | 576 | xdr_decode_hyper((__be32 *)&seg->rs_offset, &off); |
577 | dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n", | 577 | dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n", |
578 | __func__, | 578 | __func__, |
579 | ntohl(seg->rs_length), | 579 | ntohl(seg->rs_length), |
@@ -585,7 +585,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp) | |||
585 | } | 585 | } |
586 | /* check and adjust for properly terminated write chunk */ | 586 | /* check and adjust for properly terminated write chunk */ |
587 | if (wrchunk) { | 587 | if (wrchunk) { |
588 | u32 *w = (u32 *) cur_wchunk; | 588 | __be32 *w = (__be32 *) cur_wchunk; |
589 | if (*w++ != xdr_zero) | 589 | if (*w++ != xdr_zero) |
590 | return -1; | 590 | return -1; |
591 | cur_wchunk = (struct rpcrdma_write_chunk *) w; | 591 | cur_wchunk = (struct rpcrdma_write_chunk *) w; |
@@ -593,7 +593,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp) | |||
593 | if ((char *) cur_wchunk > rep->rr_base + rep->rr_len) | 593 | if ((char *) cur_wchunk > rep->rr_base + rep->rr_len) |
594 | return -1; | 594 | return -1; |
595 | 595 | ||
596 | *iptrp = (u32 *) cur_wchunk; | 596 | *iptrp = (__be32 *) cur_wchunk; |
597 | return total_len; | 597 | return total_len; |
598 | } | 598 | } |
599 | 599 | ||
@@ -721,7 +721,7 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep) | |||
721 | struct rpc_rqst *rqst; | 721 | struct rpc_rqst *rqst; |
722 | struct rpc_xprt *xprt = rep->rr_xprt; | 722 | struct rpc_xprt *xprt = rep->rr_xprt; |
723 | struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); | 723 | struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); |
724 | u32 *iptr; | 724 | __be32 *iptr; |
725 | int i, rdmalen, status; | 725 | int i, rdmalen, status; |
726 | 726 | ||
727 | /* Check status. If bad, signal disconnect and return rep to pool */ | 727 | /* Check status. If bad, signal disconnect and return rep to pool */ |
@@ -801,7 +801,7 @@ repost: | |||
801 | r_xprt->rx_stats.total_rdma_reply += rdmalen; | 801 | r_xprt->rx_stats.total_rdma_reply += rdmalen; |
802 | } else { | 802 | } else { |
803 | /* else ordinary inline */ | 803 | /* else ordinary inline */ |
804 | iptr = (u32 *)((unsigned char *)headerp + 28); | 804 | iptr = (__be32 *)((unsigned char *)headerp + 28); |
805 | rep->rr_len -= 28; /*sizeof *headerp;*/ | 805 | rep->rr_len -= 28; /*sizeof *headerp;*/ |
806 | status = rep->rr_len; | 806 | status = rep->rr_len; |
807 | } | 807 | } |
@@ -816,7 +816,7 @@ repost: | |||
816 | headerp->rm_body.rm_chunks[2] != xdr_one || | 816 | headerp->rm_body.rm_chunks[2] != xdr_one || |
817 | req->rl_nchunks == 0) | 817 | req->rl_nchunks == 0) |
818 | goto badheader; | 818 | goto badheader; |
819 | iptr = (u32 *)((unsigned char *)headerp + 28); | 819 | iptr = (__be32 *)((unsigned char *)headerp + 28); |
820 | rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr); | 820 | rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr); |
821 | if (rdmalen < 0) | 821 | if (rdmalen < 0) |
822 | goto badheader; | 822 | goto badheader; |
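The rpc_rdma changes are pure endianness-annotation work: values that live on the wire in network byte order are declared __be32 instead of u32, so conversions must go through htonl()/ntohl() (or the xdr_* helpers) and any mix-up becomes a sparse warning instead of a silent bug. Minimal illustration with made-up names:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct example_hdr {
        __be32 handle;          /* big-endian on the wire */
        __be32 length;
    };

    static void example_fill(struct example_hdr *hdr, u32 rkey, u32 len)
    {
        hdr->handle = htonl(rkey);   /* cpu -> wire */
        hdr->length = htonl(len);
    }

    static u32 example_len(const struct example_hdr *hdr)
    {
        return ntohl(hdr->length);   /* wire -> cpu */
    }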
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c index 313d4bed3aa9..0426388d351d 100644 --- a/net/xfrm/xfrm_algo.c +++ b/net/xfrm/xfrm_algo.c | |||
@@ -553,9 +553,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc, | |||
553 | if (copy > len) | 553 | if (copy > len) |
554 | copy = len; | 554 | copy = len; |
555 | 555 | ||
556 | sg_set_page(&sg, virt_to_page(skb->data + offset)); | 556 | sg_init_one(&sg, skb->data + offset, copy); |
557 | sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE; | ||
558 | sg.length = copy; | ||
559 | 557 | ||
560 | err = icv_update(desc, &sg, copy); | 558 | err = icv_update(desc, &sg, copy); |
561 | if (unlikely(err)) | 559 | if (unlikely(err)) |
@@ -578,9 +576,9 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc, | |||
578 | if (copy > len) | 576 | if (copy > len) |
579 | copy = len; | 577 | copy = len; |
580 | 578 | ||
581 | sg_set_page(&sg, frag->page); | 579 | sg_init_table(&sg, 1); |
582 | sg.offset = frag->page_offset + offset-start; | 580 | sg_set_page(&sg, frag->page, copy, |
583 | sg.length = copy; | 581 | frag->page_offset + offset-start); |
584 | 582 | ||
585 | err = icv_update(desc, &sg, copy); | 583 | err = icv_update(desc, &sg, copy); |
586 | if (unlikely(err)) | 584 | if (unlikely(err)) |
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile index 83c5e76414ce..59594126e8b6 100644 --- a/scripts/kconfig/Makefile +++ b/scripts/kconfig/Makefile | |||
@@ -4,23 +4,30 @@ | |||
4 | 4 | ||
5 | PHONY += oldconfig xconfig gconfig menuconfig config silentoldconfig update-po-config | 5 | PHONY += oldconfig xconfig gconfig menuconfig config silentoldconfig update-po-config |
6 | 6 | ||
7 | # If a arch/$(SRCARCH)/Kconfig.$(ARCH) file exist use it | ||
8 | ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/Kconfig.$(ARCH)),) | ||
9 | Kconfig := arch/$(SRCARCH)/Kconfig.$(ARCH) | ||
10 | else | ||
11 | Kconfig := arch/$(SRCARCH)/Kconfig | ||
12 | endif | ||
13 | |||
7 | xconfig: $(obj)/qconf | 14 | xconfig: $(obj)/qconf |
8 | $< arch/$(ARCH)/Kconfig | 15 | $< $(Kconfig) |
9 | 16 | ||
10 | gconfig: $(obj)/gconf | 17 | gconfig: $(obj)/gconf |
11 | $< arch/$(ARCH)/Kconfig | 18 | $< $(Kconfig) |
12 | 19 | ||
13 | menuconfig: $(obj)/mconf | 20 | menuconfig: $(obj)/mconf |
14 | $< arch/$(ARCH)/Kconfig | 21 | $< $(Kconfig) |
15 | 22 | ||
16 | config: $(obj)/conf | 23 | config: $(obj)/conf |
17 | $< arch/$(ARCH)/Kconfig | 24 | $< $(Kconfig) |
18 | 25 | ||
19 | oldconfig: $(obj)/conf | 26 | oldconfig: $(obj)/conf |
20 | $< -o arch/$(ARCH)/Kconfig | 27 | $< -o $(Kconfig) |
21 | 28 | ||
22 | silentoldconfig: $(obj)/conf | 29 | silentoldconfig: $(obj)/conf |
23 | $< -s arch/$(ARCH)/Kconfig | 30 | $< -s $(Kconfig) |
24 | 31 | ||
25 | # Create new linux.po file | 32 | # Create new linux.po file |
26 | # Adjust charset to UTF-8 in .po file to accept UTF-8 in Kconfig files | 33 | # Adjust charset to UTF-8 in .po file to accept UTF-8 in Kconfig files |
@@ -45,27 +52,27 @@ update-po-config: $(obj)/kxgettext | |||
45 | PHONY += randconfig allyesconfig allnoconfig allmodconfig defconfig | 52 | PHONY += randconfig allyesconfig allnoconfig allmodconfig defconfig |
46 | 53 | ||
47 | randconfig: $(obj)/conf | 54 | randconfig: $(obj)/conf |
48 | $< -r arch/$(ARCH)/Kconfig | 55 | $< -r $(Kconfig) |
49 | 56 | ||
50 | allyesconfig: $(obj)/conf | 57 | allyesconfig: $(obj)/conf |
51 | $< -y arch/$(ARCH)/Kconfig | 58 | $< -y $(Kconfig) |
52 | 59 | ||
53 | allnoconfig: $(obj)/conf | 60 | allnoconfig: $(obj)/conf |
54 | $< -n arch/$(ARCH)/Kconfig | 61 | $< -n $(Kconfig) |
55 | 62 | ||
56 | allmodconfig: $(obj)/conf | 63 | allmodconfig: $(obj)/conf |
57 | $< -m arch/$(ARCH)/Kconfig | 64 | $< -m $(Kconfig) |
58 | 65 | ||
59 | defconfig: $(obj)/conf | 66 | defconfig: $(obj)/conf |
60 | ifeq ($(KBUILD_DEFCONFIG),) | 67 | ifeq ($(KBUILD_DEFCONFIG),) |
61 | $< -d arch/$(ARCH)/Kconfig | 68 | $< -d $(Kconfig) |
62 | else | 69 | else |
63 | @echo *** Default configuration is based on '$(KBUILD_DEFCONFIG)' | 70 | @echo "*** Default configuration is based on '$(KBUILD_DEFCONFIG)'" |
64 | $(Q)$< -D arch/$(ARCH)/configs/$(KBUILD_DEFCONFIG) arch/$(ARCH)/Kconfig | 71 | $(Q)$< -D arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG) $(Kconfig) |
65 | endif | 72 | endif |
66 | 73 | ||
67 | %_defconfig: $(obj)/conf | 74 | %_defconfig: $(obj)/conf |
68 | $(Q)$< -D arch/$(ARCH)/configs/$@ arch/$(ARCH)/Kconfig | 75 | $(Q)$< -D arch/$(SRCARCH)/configs/$@ $(Kconfig) |
69 | 76 | ||
70 | # Help text used by make help | 77 | # Help text used by make help |
71 | help: | 78 | help: |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index d9f78c809ee9..1c502789cc1e 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -9299,7 +9299,6 @@ static struct alc_config_preset alc268_presets[] = { | |||
9299 | .num_channel_mode = ARRAY_SIZE(alc268_modes), | 9299 | .num_channel_mode = ARRAY_SIZE(alc268_modes), |
9300 | .channel_mode = alc268_modes, | 9300 | .channel_mode = alc268_modes, |
9301 | .input_mux = &alc268_capture_source, | 9301 | .input_mux = &alc268_capture_source, |
9302 | .input_mux = &alc268_capture_source, | ||
9303 | .unsol_event = alc268_toshiba_unsol_event, | 9302 | .unsol_event = alc268_toshiba_unsol_event, |
9304 | .init_hook = alc268_toshiba_automute, | 9303 | .init_hook = alc268_toshiba_automute, |
9305 | }, | 9304 | }, |