175 files changed, 10125 insertions, 8146 deletions
@@ -2006,6 +2006,9 @@ E: paul@laufernet.com | |||
2006 | D: Soundblaster driver fixes, ISAPnP quirk | 2006 | D: Soundblaster driver fixes, ISAPnP quirk |
2007 | S: California, USA | 2007 | S: California, USA |
2008 | 2008 | ||
2009 | N: Jonathan Layes | ||
2010 | D: ARPD support | ||
2011 | |||
2009 | N: Tom Lees | 2012 | N: Tom Lees |
2010 | E: tom@lpsg.demon.co.uk | 2013 | E: tom@lpsg.demon.co.uk |
2011 | W: http://www.lpsg.demon.co.uk/ | 2014 | W: http://www.lpsg.demon.co.uk/ |
@@ -3802,6 +3805,9 @@ S: van Bronckhorststraat 12 | |||
3802 | S: 2612 XV Delft | 3805 | S: 2612 XV Delft |
3803 | S: The Netherlands | 3806 | S: The Netherlands |
3804 | 3807 | ||
3808 | N: Thomas Woller | ||
3809 | D: CS461x Cirrus Logic sound driver | ||
3810 | |||
3805 | N: David Woodhouse | 3811 | N: David Woodhouse |
3806 | E: dwmw2@infradead.org | 3812 | E: dwmw2@infradead.org |
3807 | D: JFFS2 file system, Memory Technology Device subsystem, | 3813 | D: JFFS2 file system, Memory Technology Device subsystem, |
diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt index 7e81e37c0b1e..b245d524d568 100644 --- a/Documentation/filesystems/sysfs.txt +++ b/Documentation/filesystems/sysfs.txt | |||
@@ -23,7 +23,8 @@ interface. | |||
23 | Using sysfs | 23 | Using sysfs |
24 | ~~~~~~~~~~~ | 24 | ~~~~~~~~~~~ |
25 | 25 | ||
26 | sysfs is always compiled in. You can access it by doing: | 26 | sysfs is always compiled in if CONFIG_SYSFS is defined. You can access |
27 | it by doing: | ||
27 | 28 | ||
28 | mount -t sysfs sysfs /sys | 29 | mount -t sysfs sysfs /sys |
29 | 30 | ||
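Once sysfs is mounted, its attributes are ordinary small text files, one value per file. A minimal sketch of reading one from C; the path /sys/class/net/lo/mtu is only an example and depends on the running system:

#include <stdio.h>

int main(void)
{
	/* Example attribute path; substitute any file under /sys. */
	FILE *f = fopen("/sys/class/net/lo/mtu", "r");
	char buf[64];

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* sysfs attributes are one value per file, newline-terminated. */
	if (fgets(buf, sizeof(buf), f))
		printf("mtu = %s", buf);
	fclose(f);
	return 0;
}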
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c index 9ebcd6ef361b..950cde6d6e58 100644 --- a/Documentation/lguest/lguest.c +++ b/Documentation/lguest/lguest.c | |||
@@ -1,7 +1,9 @@ | |||
1 | /*P:100 This is the Launcher code, a simple program which lays out the | 1 | /*P:100 |
2 | * "physical" memory for the new Guest by mapping the kernel image and | 2 | * This is the Launcher code, a simple program which lays out the "physical" |
3 | * the virtual devices, then opens /dev/lguest to tell the kernel | 3 | * memory for the new Guest by mapping the kernel image and the virtual |
4 | * about the Guest and control it. :*/ | 4 | * devices, then opens /dev/lguest to tell the kernel about the Guest and |
5 | * control it. | ||
6 | :*/ | ||
5 | #define _LARGEFILE64_SOURCE | 7 | #define _LARGEFILE64_SOURCE |
6 | #define _GNU_SOURCE | 8 | #define _GNU_SOURCE |
7 | #include <stdio.h> | 9 | #include <stdio.h> |
@@ -46,13 +48,15 @@ | |||
46 | #include "linux/virtio_rng.h" | 48 | #include "linux/virtio_rng.h" |
47 | #include "linux/virtio_ring.h" | 49 | #include "linux/virtio_ring.h" |
48 | #include "asm/bootparam.h" | 50 | #include "asm/bootparam.h" |
49 | /*L:110 We can ignore the 39 include files we need for this program, but I do | 51 | /*L:110 |
50 | * want to draw attention to the use of kernel-style types. | 52 | * We can ignore the 42 include files we need for this program, but I do want |
53 | * to draw attention to the use of kernel-style types. | ||
51 | * | 54 | * |
52 | * As Linus said, "C is a Spartan language, and so should your naming be." I | 55 | * As Linus said, "C is a Spartan language, and so should your naming be." I |
53 | * like these abbreviations, so we define them here. Note that u64 is always | 56 | * like these abbreviations, so we define them here. Note that u64 is always |
54 | * unsigned long long, which works on all Linux systems: this means that we can | 57 | * unsigned long long, which works on all Linux systems: this means that we can |
55 | * use %llu in printf for any u64. */ | 58 | * use %llu in printf for any u64. |
59 | */ | ||
56 | typedef unsigned long long u64; | 60 | typedef unsigned long long u64; |
57 | typedef uint32_t u32; | 61 | typedef uint32_t u32; |
58 | typedef uint16_t u16; | 62 | typedef uint16_t u16; |
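The point about u64 being unsigned long long is what makes one printf format work everywhere; with uint64_t you would need the PRIu64 macro instead. A small standalone sketch, not taken from the patch:

#include <stdio.h>
#include <inttypes.h>

typedef unsigned long long u64;

int main(void)
{
	u64 a = 1ULL << 40;
	uint64_t b = 1ULL << 40;

	printf("%llu\n", a);		/* always right for unsigned long long */
	printf("%" PRIu64 "\n", b);	/* needed for uint64_t portability */
	return 0;
}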
@@ -69,8 +73,10 @@ typedef uint8_t u8; | |||
69 | /* This will occupy 3 pages: it must be a power of 2. */ | 73 | /* This will occupy 3 pages: it must be a power of 2. */ |
70 | #define VIRTQUEUE_NUM 256 | 74 | #define VIRTQUEUE_NUM 256 |
71 | 75 | ||
72 | /*L:120 verbose is both a global flag and a macro. The C preprocessor allows | 76 | /*L:120 |
73 | * this, and although I wouldn't recommend it, it works quite nicely here. */ | 77 | * verbose is both a global flag and a macro. The C preprocessor allows |
78 | * this, and although I wouldn't recommend it, it works quite nicely here. | ||
79 | */ | ||
74 | static bool verbose; | 80 | static bool verbose; |
75 | #define verbose(args...) \ | 81 | #define verbose(args...) \ |
76 | do { if (verbose) printf(args); } while(0) | 82 | do { if (verbose) printf(args); } while(0) |
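The flag-and-macro trick works because the preprocessor only expands a function-like macro when the name is followed by a '('; the bare identifier still refers to the bool. A standalone sketch of the same pattern, assuming nothing beyond GNU C:

#include <stdbool.h>
#include <stdio.h>

static bool verbose;
/* Function-like macro: only expands when written as "verbose(...)". */
#define verbose(args...) \
	do { if (verbose) printf(args); } while (0)

int main(int argc, char *argv[])
{
	verbose = (argc > 1);		/* plain variable use: no expansion */
	verbose("got %d args\n", argc);	/* macro use: expands to the if () */
	return 0;
}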
@@ -87,8 +93,7 @@ static int lguest_fd; | |||
87 | static unsigned int __thread cpu_id; | 93 | static unsigned int __thread cpu_id; |
88 | 94 | ||
89 | /* This is our list of devices. */ | 95 | /* This is our list of devices. */ |
90 | struct device_list | 96 | struct device_list { |
91 | { | ||
92 | /* Counter to assign interrupt numbers. */ | 97 | /* Counter to assign interrupt numbers. */ |
93 | unsigned int next_irq; | 98 | unsigned int next_irq; |
94 | 99 | ||
@@ -100,8 +105,7 @@ struct device_list | |||
100 | 105 | ||
101 | /* A single linked list of devices. */ | 106 | /* A single linked list of devices. */ |
102 | struct device *dev; | 107 | struct device *dev; |
103 | /* And a pointer to the last device for easy append and also for | 108 | /* And a pointer to the last device for easy append. */ |
104 | * configuration appending. */ | ||
105 | struct device *lastdev; | 109 | struct device *lastdev; |
106 | }; | 110 | }; |
107 | 111 | ||
@@ -109,8 +113,7 @@ struct device_list | |||
109 | static struct device_list devices; | 113 | static struct device_list devices; |
110 | 114 | ||
111 | /* The device structure describes a single device. */ | 115 | /* The device structure describes a single device. */ |
112 | struct device | 116 | struct device { |
113 | { | ||
114 | /* The linked-list pointer. */ | 117 | /* The linked-list pointer. */ |
115 | struct device *next; | 118 | struct device *next; |
116 | 119 | ||
@@ -135,8 +138,7 @@ struct device | |||
135 | }; | 138 | }; |
136 | 139 | ||
137 | /* The virtqueue structure describes a queue attached to a device. */ | 140 | /* The virtqueue structure describes a queue attached to a device. */ |
138 | struct virtqueue | 141 | struct virtqueue { |
139 | { | ||
140 | struct virtqueue *next; | 142 | struct virtqueue *next; |
141 | 143 | ||
142 | /* Which device owns me. */ | 144 | /* Which device owns me. */ |
@@ -168,20 +170,24 @@ static char **main_args; | |||
168 | /* The original tty settings to restore on exit. */ | 170 | /* The original tty settings to restore on exit. */ |
169 | static struct termios orig_term; | 171 | static struct termios orig_term; |
170 | 172 | ||
171 | /* We have to be careful with barriers: our devices are all run in separate | 173 | /* |
174 | * We have to be careful with barriers: our devices are all run in separate | ||
172 | * threads and so we need to make sure that changes visible to the Guest happen | 175 | * threads and so we need to make sure that changes visible to the Guest happen |
173 | * in precise order. */ | 176 | * in precise order. |
177 | */ | ||
174 | #define wmb() __asm__ __volatile__("" : : : "memory") | 178 | #define wmb() __asm__ __volatile__("" : : : "memory") |
175 | #define mb() __asm__ __volatile__("" : : : "memory") | 179 | #define mb() __asm__ __volatile__("" : : : "memory") |
176 | 180 | ||
177 | /* Convert an iovec element to the given type. | 181 | /* |
182 | * Convert an iovec element to the given type. | ||
178 | * | 183 | * |
179 | * This is a fairly ugly trick: we need to know the size of the type and | 184 | * This is a fairly ugly trick: we need to know the size of the type and |
180 | * alignment requirement to check the pointer is kosher. It's also nice to | 185 | * alignment requirement to check the pointer is kosher. It's also nice to |
181 | * have the name of the type in case we report failure. | 186 | * have the name of the type in case we report failure. |
182 | * | 187 | * |
183 | * Typing those three things all the time is cumbersome and error prone, so we | 188 | * Typing those three things all the time is cumbersome and error prone, so we |
184 | * have a macro which sets them all up and passes to the real function. */ | 189 | * have a macro which sets them all up and passes to the real function. |
190 | */ | ||
185 | #define convert(iov, type) \ | 191 | #define convert(iov, type) \ |
186 | ((type *)_convert((iov), sizeof(type), __alignof__(type), #type)) | 192 | ((type *)_convert((iov), sizeof(type), __alignof__(type), #type)) |
187 | 193 | ||
@@ -198,8 +204,10 @@ static void *_convert(struct iovec *iov, size_t size, size_t align, | |||
198 | /* Wrapper for the last available index. Makes it easier to change. */ | 204 | /* Wrapper for the last available index. Makes it easier to change. */ |
199 | #define lg_last_avail(vq) ((vq)->last_avail_idx) | 205 | #define lg_last_avail(vq) ((vq)->last_avail_idx) |
200 | 206 | ||
201 | /* The virtio configuration space is defined to be little-endian. x86 is | 207 | /* |
202 | * little-endian too, but it's nice to be explicit so we have these helpers. */ | 208 | * The virtio configuration space is defined to be little-endian. x86 is |
209 | * little-endian too, but it's nice to be explicit so we have these helpers. | ||
210 | */ | ||
203 | #define cpu_to_le16(v16) (v16) | 211 | #define cpu_to_le16(v16) (v16) |
204 | #define cpu_to_le32(v32) (v32) | 212 | #define cpu_to_le32(v32) (v32) |
205 | #define cpu_to_le64(v64) (v64) | 213 | #define cpu_to_le64(v64) (v64) |
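On a big-endian host these helpers could not be simple identities; a portable version has to byte-swap. A sketch of what a generic little-endian store might look like (not part of the patch, which only targets x86):

#include <stdint.h>

/* Write a 32-bit value as little-endian, whatever the host byte order. */
static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}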
@@ -241,11 +249,12 @@ static u8 *get_feature_bits(struct device *dev) | |||
241 | + dev->num_vq * sizeof(struct lguest_vqconfig); | 249 | + dev->num_vq * sizeof(struct lguest_vqconfig); |
242 | } | 250 | } |
243 | 251 | ||
244 | /*L:100 The Launcher code itself takes us out into userspace, that scary place | 252 | /*L:100 |
245 | * where pointers run wild and free! Unfortunately, like most userspace | 253 | * The Launcher code itself takes us out into userspace, that scary place where |
246 | * programs, it's quite boring (which is why everyone likes to hack on the | 254 | * pointers run wild and free! Unfortunately, like most userspace programs, |
247 | * kernel!). Perhaps if you make up an Lguest Drinking Game at this point, it | 255 | * it's quite boring (which is why everyone likes to hack on the kernel!). |
248 | * will get you through this section. Or, maybe not. | 256 | * Perhaps if you make up an Lguest Drinking Game at this point, it will get |
257 | * you through this section. Or, maybe not. | ||
249 | * | 258 | * |
250 | * The Launcher sets up a big chunk of memory to be the Guest's "physical" | 259 | * The Launcher sets up a big chunk of memory to be the Guest's "physical" |
251 | * memory and stores it in "guest_base". In other words, Guest physical == | 260 | * memory and stores it in "guest_base". In other words, Guest physical == |
@@ -253,7 +262,8 @@ static u8 *get_feature_bits(struct device *dev) | |||
253 | * | 262 | * |
254 | * This can be tough to get your head around, but usually it just means that we | 263 | * This can be tough to get your head around, but usually it just means that we |
255 | * use these trivial conversion functions when the Guest gives us it's | 264 | * use these trivial conversion functions when the Guest gives us it's |
256 | * "physical" addresses: */ | 265 | * "physical" addresses: |
266 | */ | ||
257 | static void *from_guest_phys(unsigned long addr) | 267 | static void *from_guest_phys(unsigned long addr) |
258 | { | 268 | { |
259 | return guest_base + addr; | 269 | return guest_base + addr; |
@@ -268,7 +278,8 @@ static unsigned long to_guest_phys(const void *addr) | |||
268 | * Loading the Kernel. | 278 | * Loading the Kernel. |
269 | * | 279 | * |
270 | * We start with couple of simple helper routines. open_or_die() avoids | 280 | * We start with couple of simple helper routines. open_or_die() avoids |
271 | * error-checking code cluttering the callers: */ | 281 | * error-checking code cluttering the callers: |
282 | */ | ||
272 | static int open_or_die(const char *name, int flags) | 283 | static int open_or_die(const char *name, int flags) |
273 | { | 284 | { |
274 | int fd = open(name, flags); | 285 | int fd = open(name, flags); |
@@ -283,12 +294,19 @@ static void *map_zeroed_pages(unsigned int num) | |||
283 | int fd = open_or_die("/dev/zero", O_RDONLY); | 294 | int fd = open_or_die("/dev/zero", O_RDONLY); |
284 | void *addr; | 295 | void *addr; |
285 | 296 | ||
286 | /* We use a private mapping (ie. if we write to the page, it will be | 297 | /* |
287 | * copied). */ | 298 | * We use a private mapping (ie. if we write to the page, it will be |
299 | * copied). | ||
300 | */ | ||
288 | addr = mmap(NULL, getpagesize() * num, | 301 | addr = mmap(NULL, getpagesize() * num, |
289 | PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, fd, 0); | 302 | PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, fd, 0); |
290 | if (addr == MAP_FAILED) | 303 | if (addr == MAP_FAILED) |
291 | err(1, "Mmaping %u pages of /dev/zero", num); | 304 | err(1, "Mmaping %u pages of /dev/zero", num); |
305 | |||
306 | /* | ||
307 | * One neat mmap feature is that you can close the fd, and it | ||
308 | * stays mapped. | ||
309 | */ | ||
292 | close(fd); | 310 | close(fd); |
293 | 311 | ||
294 | return addr; | 312 | return addr; |
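The "mmap /dev/zero, then close the fd" idiom predates MAP_ANONYMOUS; the same private, zero-filled pages can be had without any file at all. A minimal alternative sketch, assuming a Linux/glibc environment:

#include <err.h>
#include <sys/mman.h>
#include <unistd.h>

static void *map_zeroed_pages_anon(unsigned int num)
{
	/* MAP_ANONYMOUS gives private, zeroed pages with no fd to close. */
	void *addr = mmap(NULL, getpagesize() * num,
			  PROT_READ|PROT_WRITE|PROT_EXEC,
			  MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED)
		err(1, "Mmaping %u anonymous pages", num);
	return addr;
}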
@@ -305,20 +323,24 @@ static void *get_pages(unsigned int num) | |||
305 | return addr; | 323 | return addr; |
306 | } | 324 | } |
307 | 325 | ||
308 | /* This routine is used to load the kernel or initrd. It tries mmap, but if | 326 | /* |
327 | * This routine is used to load the kernel or initrd. It tries mmap, but if | ||
309 | * that fails (Plan 9's kernel file isn't nicely aligned on page boundaries), | 328 | * that fails (Plan 9's kernel file isn't nicely aligned on page boundaries), |
310 | * it falls back to reading the memory in. */ | 329 | * it falls back to reading the memory in. |
330 | */ | ||
311 | static void map_at(int fd, void *addr, unsigned long offset, unsigned long len) | 331 | static void map_at(int fd, void *addr, unsigned long offset, unsigned long len) |
312 | { | 332 | { |
313 | ssize_t r; | 333 | ssize_t r; |
314 | 334 | ||
315 | /* We map writable even though for some segments are marked read-only. | 335 | /* |
336 | * We map writable even though for some segments are marked read-only. | ||
316 | * The kernel really wants to be writable: it patches its own | 337 | * The kernel really wants to be writable: it patches its own |
317 | * instructions. | 338 | * instructions. |
318 | * | 339 | * |
319 | * MAP_PRIVATE means that the page won't be copied until a write is | 340 | * MAP_PRIVATE means that the page won't be copied until a write is |
320 | * done to it. This allows us to share untouched memory between | 341 | * done to it. This allows us to share untouched memory between |
321 | * Guests. */ | 342 | * Guests. |
343 | */ | ||
322 | if (mmap(addr, len, PROT_READ|PROT_WRITE|PROT_EXEC, | 344 | if (mmap(addr, len, PROT_READ|PROT_WRITE|PROT_EXEC, |
323 | MAP_FIXED|MAP_PRIVATE, fd, offset) != MAP_FAILED) | 345 | MAP_FIXED|MAP_PRIVATE, fd, offset) != MAP_FAILED) |
324 | return; | 346 | return; |
@@ -329,7 +351,8 @@ static void map_at(int fd, void *addr, unsigned long offset, unsigned long len) | |||
329 | err(1, "Reading offset %lu len %lu gave %zi", offset, len, r); | 351 | err(1, "Reading offset %lu len %lu gave %zi", offset, len, r); |
330 | } | 352 | } |
331 | 353 | ||
332 | /* This routine takes an open vmlinux image, which is in ELF, and maps it into | 354 | /* |
355 | * This routine takes an open vmlinux image, which is in ELF, and maps it into | ||
333 | * the Guest memory. ELF = Embedded Linking Format, which is the format used | 356 | * the Guest memory. ELF = Embedded Linking Format, which is the format used |
334 | * by all modern binaries on Linux including the kernel. | 357 | * by all modern binaries on Linux including the kernel. |
335 | * | 358 | * |
@@ -337,23 +360,28 @@ static void map_at(int fd, void *addr, unsigned long offset, unsigned long len) | |||
337 | * address. We use the physical address; the Guest will map itself to the | 360 | * address. We use the physical address; the Guest will map itself to the |
338 | * virtual address. | 361 | * virtual address. |
339 | * | 362 | * |
340 | * We return the starting address. */ | 363 | * We return the starting address. |
364 | */ | ||
341 | static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr) | 365 | static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr) |
342 | { | 366 | { |
343 | Elf32_Phdr phdr[ehdr->e_phnum]; | 367 | Elf32_Phdr phdr[ehdr->e_phnum]; |
344 | unsigned int i; | 368 | unsigned int i; |
345 | 369 | ||
346 | /* Sanity checks on the main ELF header: an x86 executable with a | 370 | /* |
347 | * reasonable number of correctly-sized program headers. */ | 371 | * Sanity checks on the main ELF header: an x86 executable with a |
372 | * reasonable number of correctly-sized program headers. | ||
373 | */ | ||
348 | if (ehdr->e_type != ET_EXEC | 374 | if (ehdr->e_type != ET_EXEC |
349 | || ehdr->e_machine != EM_386 | 375 | || ehdr->e_machine != EM_386 |
350 | || ehdr->e_phentsize != sizeof(Elf32_Phdr) | 376 | || ehdr->e_phentsize != sizeof(Elf32_Phdr) |
351 | || ehdr->e_phnum < 1 || ehdr->e_phnum > 65536U/sizeof(Elf32_Phdr)) | 377 | || ehdr->e_phnum < 1 || ehdr->e_phnum > 65536U/sizeof(Elf32_Phdr)) |
352 | errx(1, "Malformed elf header"); | 378 | errx(1, "Malformed elf header"); |
353 | 379 | ||
354 | /* An ELF executable contains an ELF header and a number of "program" | 380 | /* |
381 | * An ELF executable contains an ELF header and a number of "program" | ||
355 | * headers which indicate which parts ("segments") of the program to | 382 | * headers which indicate which parts ("segments") of the program to |
356 | * load where. */ | 383 | * load where. |
384 | */ | ||
357 | 385 | ||
358 | /* We read in all the program headers at once: */ | 386 | /* We read in all the program headers at once: */ |
359 | if (lseek(elf_fd, ehdr->e_phoff, SEEK_SET) < 0) | 387 | if (lseek(elf_fd, ehdr->e_phoff, SEEK_SET) < 0) |
@@ -361,8 +389,10 @@ static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr) | |||
361 | if (read(elf_fd, phdr, sizeof(phdr)) != sizeof(phdr)) | 389 | if (read(elf_fd, phdr, sizeof(phdr)) != sizeof(phdr)) |
362 | err(1, "Reading program headers"); | 390 | err(1, "Reading program headers"); |
363 | 391 | ||
364 | /* Try all the headers: there are usually only three. A read-only one, | 392 | /* |
365 | * a read-write one, and a "note" section which we don't load. */ | 393 | * Try all the headers: there are usually only three. A read-only one, |
394 | * a read-write one, and a "note" section which we don't load. | ||
395 | */ | ||
366 | for (i = 0; i < ehdr->e_phnum; i++) { | 396 | for (i = 0; i < ehdr->e_phnum; i++) { |
367 | /* If this isn't a loadable segment, we ignore it */ | 397 | /* If this isn't a loadable segment, we ignore it */ |
368 | if (phdr[i].p_type != PT_LOAD) | 398 | if (phdr[i].p_type != PT_LOAD) |
@@ -380,13 +410,15 @@ static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr) | |||
380 | return ehdr->e_entry; | 410 | return ehdr->e_entry; |
381 | } | 411 | } |
382 | 412 | ||
383 | /*L:150 A bzImage, unlike an ELF file, is not meant to be loaded. You're | 413 | /*L:150 |
384 | * supposed to jump into it and it will unpack itself. We used to have to | 414 | * A bzImage, unlike an ELF file, is not meant to be loaded. You're supposed |
385 | * perform some hairy magic because the unpacking code scared me. | 415 | * to jump into it and it will unpack itself. We used to have to perform some |
416 | * hairy magic because the unpacking code scared me. | ||
386 | * | 417 | * |
387 | * Fortunately, Jeremy Fitzhardinge convinced me it wasn't that hard and wrote | 418 | * Fortunately, Jeremy Fitzhardinge convinced me it wasn't that hard and wrote |
388 | * a small patch to jump over the tricky bits in the Guest, so now we just read | 419 | * a small patch to jump over the tricky bits in the Guest, so now we just read |
389 | * the funky header so we know where in the file to load, and away we go! */ | 420 | * the funky header so we know where in the file to load, and away we go! |
421 | */ | ||
390 | static unsigned long load_bzimage(int fd) | 422 | static unsigned long load_bzimage(int fd) |
391 | { | 423 | { |
392 | struct boot_params boot; | 424 | struct boot_params boot; |
@@ -394,8 +426,10 @@ static unsigned long load_bzimage(int fd) | |||
394 | /* Modern bzImages get loaded at 1M. */ | 426 | /* Modern bzImages get loaded at 1M. */ |
395 | void *p = from_guest_phys(0x100000); | 427 | void *p = from_guest_phys(0x100000); |
396 | 428 | ||
397 | /* Go back to the start of the file and read the header. It should be | 429 | /* |
398 | * a Linux boot header (see Documentation/x86/i386/boot.txt) */ | 430 | * Go back to the start of the file and read the header. It should be |
431 | * a Linux boot header (see Documentation/x86/i386/boot.txt) | ||
432 | */ | ||
399 | lseek(fd, 0, SEEK_SET); | 433 | lseek(fd, 0, SEEK_SET); |
400 | read(fd, &boot, sizeof(boot)); | 434 | read(fd, &boot, sizeof(boot)); |
401 | 435 | ||
@@ -414,9 +448,11 @@ static unsigned long load_bzimage(int fd) | |||
414 | return boot.hdr.code32_start; | 448 | return boot.hdr.code32_start; |
415 | } | 449 | } |
416 | 450 | ||
417 | /*L:140 Loading the kernel is easy when it's a "vmlinux", but most kernels | 451 | /*L:140 |
452 | * Loading the kernel is easy when it's a "vmlinux", but most kernels | ||
418 | * come wrapped up in the self-decompressing "bzImage" format. With a little | 453 | * come wrapped up in the self-decompressing "bzImage" format. With a little |
419 | * work, we can load those, too. */ | 454 | * work, we can load those, too. |
455 | */ | ||
420 | static unsigned long load_kernel(int fd) | 456 | static unsigned long load_kernel(int fd) |
421 | { | 457 | { |
422 | Elf32_Ehdr hdr; | 458 | Elf32_Ehdr hdr; |
@@ -433,24 +469,28 @@ static unsigned long load_kernel(int fd) | |||
433 | return load_bzimage(fd); | 469 | return load_bzimage(fd); |
434 | } | 470 | } |
435 | 471 | ||
436 | /* This is a trivial little helper to align pages. Andi Kleen hated it because | 472 | /* |
473 | * This is a trivial little helper to align pages. Andi Kleen hated it because | ||
437 | * it calls getpagesize() twice: "it's dumb code." | 474 | * it calls getpagesize() twice: "it's dumb code." |
438 | * | 475 | * |
439 | * Kernel guys get really het up about optimization, even when it's not | 476 | * Kernel guys get really het up about optimization, even when it's not |
440 | * necessary. I leave this code as a reaction against that. */ | 477 | * necessary. I leave this code as a reaction against that. |
478 | */ | ||
441 | static inline unsigned long page_align(unsigned long addr) | 479 | static inline unsigned long page_align(unsigned long addr) |
442 | { | 480 | { |
443 | /* Add upwards and truncate downwards. */ | 481 | /* Add upwards and truncate downwards. */ |
444 | return ((addr + getpagesize()-1) & ~(getpagesize()-1)); | 482 | return ((addr + getpagesize()-1) & ~(getpagesize()-1)); |
445 | } | 483 | } |
446 | 484 | ||
447 | /*L:180 An "initial ram disk" is a disk image loaded into memory along with | 485 | /*L:180 |
448 | * the kernel which the kernel can use to boot from without needing any | 486 | * An "initial ram disk" is a disk image loaded into memory along with the |
449 | * drivers. Most distributions now use this as standard: the initrd contains | 487 | * kernel which the kernel can use to boot from without needing any drivers. |
450 | * the code to load the appropriate driver modules for the current machine. | 488 | * Most distributions now use this as standard: the initrd contains the code to |
489 | * load the appropriate driver modules for the current machine. | ||
451 | * | 490 | * |
452 | * Importantly, James Morris works for RedHat, and Fedora uses initrds for its | 491 | * Importantly, James Morris works for RedHat, and Fedora uses initrds for its |
453 | * kernels. He sent me this (and tells me when I break it). */ | 492 | * kernels. He sent me this (and tells me when I break it). |
493 | */ | ||
454 | static unsigned long load_initrd(const char *name, unsigned long mem) | 494 | static unsigned long load_initrd(const char *name, unsigned long mem) |
455 | { | 495 | { |
456 | int ifd; | 496 | int ifd; |
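The page_align() helper above is the usual round-up-to-a-power-of-two trick: adding pagesize-1 pushes any unaligned address past the next boundary, and the mask truncates back down, while an already-aligned address is unchanged. A tiny check of that arithmetic, assuming 4096-byte pages for the example:

#include <assert.h>

#define PAGE_SIZE 4096UL	/* assumed page size for the example */

static unsigned long page_align(unsigned long addr)
{
	return (addr + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

int main(void)
{
	assert(page_align(1) == 4096);
	assert(page_align(5000) == 8192);	/* 5000+4095=9095, masked down */
	assert(page_align(8192) == 8192);	/* already aligned: unchanged */
	return 0;
}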
@@ -462,12 +502,16 @@ static unsigned long load_initrd(const char *name, unsigned long mem) | |||
462 | if (fstat(ifd, &st) < 0) | 502 | if (fstat(ifd, &st) < 0) |
463 | err(1, "fstat() on initrd '%s'", name); | 503 | err(1, "fstat() on initrd '%s'", name); |
464 | 504 | ||
465 | /* We map the initrd at the top of memory, but mmap wants it to be | 505 | /* |
466 | * page-aligned, so we round the size up for that. */ | 506 | * We map the initrd at the top of memory, but mmap wants it to be |
507 | * page-aligned, so we round the size up for that. | ||
508 | */ | ||
467 | len = page_align(st.st_size); | 509 | len = page_align(st.st_size); |
468 | map_at(ifd, from_guest_phys(mem - len), 0, st.st_size); | 510 | map_at(ifd, from_guest_phys(mem - len), 0, st.st_size); |
469 | /* Once a file is mapped, you can close the file descriptor. It's a | 511 | /* |
470 | * little odd, but quite useful. */ | 512 | * Once a file is mapped, you can close the file descriptor. It's a |
513 | * little odd, but quite useful. | ||
514 | */ | ||
471 | close(ifd); | 515 | close(ifd); |
472 | verbose("mapped initrd %s size=%lu @ %p\n", name, len, (void*)mem-len); | 516 | verbose("mapped initrd %s size=%lu @ %p\n", name, len, (void*)mem-len); |
473 | 517 | ||
@@ -476,8 +520,10 @@ static unsigned long load_initrd(const char *name, unsigned long mem) | |||
476 | } | 520 | } |
477 | /*:*/ | 521 | /*:*/ |
478 | 522 | ||
479 | /* Simple routine to roll all the commandline arguments together with spaces | 523 | /* |
480 | * between them. */ | 524 | * Simple routine to roll all the commandline arguments together with spaces |
525 | * between them. | ||
526 | */ | ||
481 | static void concat(char *dst, char *args[]) | 527 | static void concat(char *dst, char *args[]) |
482 | { | 528 | { |
483 | unsigned int i, len = 0; | 529 | unsigned int i, len = 0; |
@@ -494,10 +540,12 @@ static void concat(char *dst, char *args[]) | |||
494 | dst[len] = '\0'; | 540 | dst[len] = '\0'; |
495 | } | 541 | } |
496 | 542 | ||
497 | /*L:185 This is where we actually tell the kernel to initialize the Guest. We | 543 | /*L:185 |
544 | * This is where we actually tell the kernel to initialize the Guest. We | ||
498 | * saw the arguments it expects when we looked at initialize() in lguest_user.c: | 545 | * saw the arguments it expects when we looked at initialize() in lguest_user.c: |
499 | * the base of Guest "physical" memory, the top physical page to allow and the | 546 | * the base of Guest "physical" memory, the top physical page to allow and the |
500 | * entry point for the Guest. */ | 547 | * entry point for the Guest. |
548 | */ | ||
501 | static void tell_kernel(unsigned long start) | 549 | static void tell_kernel(unsigned long start) |
502 | { | 550 | { |
503 | unsigned long args[] = { LHREQ_INITIALIZE, | 551 | unsigned long args[] = { LHREQ_INITIALIZE, |
@@ -511,7 +559,7 @@ static void tell_kernel(unsigned long start) | |||
511 | } | 559 | } |
512 | /*:*/ | 560 | /*:*/ |
513 | 561 | ||
514 | /* | 562 | /*L:200 |
515 | * Device Handling. | 563 | * Device Handling. |
516 | * | 564 | * |
517 | * When the Guest gives us a buffer, it sends an array of addresses and sizes. | 565 | * When the Guest gives us a buffer, it sends an array of addresses and sizes. |
@@ -522,20 +570,26 @@ static void tell_kernel(unsigned long start) | |||
522 | static void *_check_pointer(unsigned long addr, unsigned int size, | 570 | static void *_check_pointer(unsigned long addr, unsigned int size, |
523 | unsigned int line) | 571 | unsigned int line) |
524 | { | 572 | { |
525 | /* We have to separately check addr and addr+size, because size could | 573 | /* |
526 | * be huge and addr + size might wrap around. */ | 574 | * We have to separately check addr and addr+size, because size could |
575 | * be huge and addr + size might wrap around. | ||
576 | */ | ||
527 | if (addr >= guest_limit || addr + size >= guest_limit) | 577 | if (addr >= guest_limit || addr + size >= guest_limit) |
528 | errx(1, "%s:%i: Invalid address %#lx", __FILE__, line, addr); | 578 | errx(1, "%s:%i: Invalid address %#lx", __FILE__, line, addr); |
529 | /* We return a pointer for the caller's convenience, now we know it's | 579 | /* |
530 | * safe to use. */ | 580 | * We return a pointer for the caller's convenience, now we know it's |
581 | * safe to use. | ||
582 | */ | ||
531 | return from_guest_phys(addr); | 583 | return from_guest_phys(addr); |
532 | } | 584 | } |
533 | /* A macro which transparently hands the line number to the real function. */ | 585 | /* A macro which transparently hands the line number to the real function. */ |
534 | #define check_pointer(addr,size) _check_pointer(addr, size, __LINE__) | 586 | #define check_pointer(addr,size) _check_pointer(addr, size, __LINE__) |
535 | 587 | ||
536 | /* Each buffer in the virtqueues is actually a chain of descriptors. This | 588 | /* |
589 | * Each buffer in the virtqueues is actually a chain of descriptors. This | ||
537 | * function returns the next descriptor in the chain, or vq->vring.num if we're | 590 | * function returns the next descriptor in the chain, or vq->vring.num if we're |
538 | * at the end. */ | 591 | * at the end. |
592 | */ | ||
539 | static unsigned next_desc(struct vring_desc *desc, | 593 | static unsigned next_desc(struct vring_desc *desc, |
540 | unsigned int i, unsigned int max) | 594 | unsigned int i, unsigned int max) |
541 | { | 595 | { |
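The wrap-around point in _check_pointer() is easy to see with concrete numbers: an address far beyond the limit plus a size can overflow and land back below it, so a single "is the end below guest_limit?" comparison is not enough and addr has to be tested on its own as well. A small illustration with made-up values (guest_limit here is an assumption for the example):

#include <stdio.h>

int main(void)
{
	unsigned long guest_limit = 0x4000000;		/* pretend 64MB of guest RAM */
	unsigned long addr = (unsigned long)-0x100;	/* far outside the guest */
	unsigned int size = 0x200;

	/* The end of the range wraps around to a tiny number... */
	printf("addr + size = %#lx\n", addr + size);
	/* ...so it compares as "inside" even though addr itself is not. */
	printf("end below limit:  %d\n", addr + size < guest_limit);
	printf("addr below limit: %d\n", addr < guest_limit);
	return 0;
}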
@@ -556,7 +610,10 @@ static unsigned next_desc(struct vring_desc *desc, | |||
556 | return next; | 610 | return next; |
557 | } | 611 | } |
558 | 612 | ||
559 | /* This actually sends the interrupt for this virtqueue */ | 613 | /* |
614 | * This actually sends the interrupt for this virtqueue, if we've used a | ||
615 | * buffer. | ||
616 | */ | ||
560 | static void trigger_irq(struct virtqueue *vq) | 617 | static void trigger_irq(struct virtqueue *vq) |
561 | { | 618 | { |
562 | unsigned long buf[] = { LHREQ_IRQ, vq->config.irq }; | 619 | unsigned long buf[] = { LHREQ_IRQ, vq->config.irq }; |
@@ -576,12 +633,14 @@ static void trigger_irq(struct virtqueue *vq) | |||
576 | err(1, "Triggering irq %i", vq->config.irq); | 633 | err(1, "Triggering irq %i", vq->config.irq); |
577 | } | 634 | } |
578 | 635 | ||
579 | /* This looks in the virtqueue and for the first available buffer, and converts | 636 | /* |
637 | * This looks in the virtqueue for the first available buffer, and converts | ||
580 | * it to an iovec for convenient access. Since descriptors consist of some | 638 | * it to an iovec for convenient access. Since descriptors consist of some |
581 | * number of output then some number of input descriptors, it's actually two | 639 | * number of output then some number of input descriptors, it's actually two |
582 | * iovecs, but we pack them into one and note how many of each there were. | 640 | * iovecs, but we pack them into one and note how many of each there were. |
583 | * | 641 | * |
584 | * This function returns the descriptor number found. */ | 642 | * This function waits if necessary, and returns the descriptor number found. |
643 | */ | ||
585 | static unsigned wait_for_vq_desc(struct virtqueue *vq, | 644 | static unsigned wait_for_vq_desc(struct virtqueue *vq, |
586 | struct iovec iov[], | 645 | struct iovec iov[], |
587 | unsigned int *out_num, unsigned int *in_num) | 646 | unsigned int *out_num, unsigned int *in_num) |
@@ -590,17 +649,23 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, | |||
590 | struct vring_desc *desc; | 649 | struct vring_desc *desc; |
591 | u16 last_avail = lg_last_avail(vq); | 650 | u16 last_avail = lg_last_avail(vq); |
592 | 651 | ||
652 | /* There's nothing available? */ | ||
593 | while (last_avail == vq->vring.avail->idx) { | 653 | while (last_avail == vq->vring.avail->idx) { |
594 | u64 event; | 654 | u64 event; |
595 | 655 | ||
596 | /* OK, tell Guest about progress up to now. */ | 656 | /* |
657 | * Since we're about to sleep, now is a good time to tell the | ||
658 | * Guest about what we've used up to now. | ||
659 | */ | ||
597 | trigger_irq(vq); | 660 | trigger_irq(vq); |
598 | 661 | ||
599 | /* OK, now we need to know about added descriptors. */ | 662 | /* OK, now we need to know about added descriptors. */ |
600 | vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY; | 663 | vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY; |
601 | 664 | ||
602 | /* They could have slipped one in as we were doing that: make | 665 | /* |
603 | * sure it's written, then check again. */ | 666 | * They could have slipped one in as we were doing that: make |
667 | * sure it's written, then check again. | ||
668 | */ | ||
604 | mb(); | 669 | mb(); |
605 | if (last_avail != vq->vring.avail->idx) { | 670 | if (last_avail != vq->vring.avail->idx) { |
606 | vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY; | 671 | vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY; |
@@ -620,8 +685,10 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, | |||
620 | errx(1, "Guest moved used index from %u to %u", | 685 | errx(1, "Guest moved used index from %u to %u", |
621 | last_avail, vq->vring.avail->idx); | 686 | last_avail, vq->vring.avail->idx); |
622 | 687 | ||
623 | /* Grab the next descriptor number they're advertising, and increment | 688 | /* |
624 | * the index we've seen. */ | 689 | * Grab the next descriptor number they're advertising, and increment |
690 | * the index we've seen. | ||
691 | */ | ||
625 | head = vq->vring.avail->ring[last_avail % vq->vring.num]; | 692 | head = vq->vring.avail->ring[last_avail % vq->vring.num]; |
626 | lg_last_avail(vq)++; | 693 | lg_last_avail(vq)++; |
627 | 694 | ||
@@ -636,8 +703,10 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, | |||
636 | desc = vq->vring.desc; | 703 | desc = vq->vring.desc; |
637 | i = head; | 704 | i = head; |
638 | 705 | ||
639 | /* If this is an indirect entry, then this buffer contains a descriptor | 706 | /* |
640 | * table which we handle as if it's any normal descriptor chain. */ | 707 | * If this is an indirect entry, then this buffer contains a descriptor |
708 | * table which we handle as if it's any normal descriptor chain. | ||
709 | */ | ||
641 | if (desc[i].flags & VRING_DESC_F_INDIRECT) { | 710 | if (desc[i].flags & VRING_DESC_F_INDIRECT) { |
642 | if (desc[i].len % sizeof(struct vring_desc)) | 711 | if (desc[i].len % sizeof(struct vring_desc)) |
643 | errx(1, "Invalid size for indirect buffer table"); | 712 | errx(1, "Invalid size for indirect buffer table"); |
@@ -656,8 +725,10 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, | |||
656 | if (desc[i].flags & VRING_DESC_F_WRITE) | 725 | if (desc[i].flags & VRING_DESC_F_WRITE) |
657 | (*in_num)++; | 726 | (*in_num)++; |
658 | else { | 727 | else { |
659 | /* If it's an output descriptor, they're all supposed | 728 | /* |
660 | * to come before any input descriptors. */ | 729 | * If it's an output descriptor, they're all supposed |
730 | * to come before any input descriptors. | ||
731 | */ | ||
661 | if (*in_num) | 732 | if (*in_num) |
662 | errx(1, "Descriptor has out after in"); | 733 | errx(1, "Descriptor has out after in"); |
663 | (*out_num)++; | 734 | (*out_num)++; |
@@ -671,14 +742,19 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, | |||
671 | return head; | 742 | return head; |
672 | } | 743 | } |
673 | 744 | ||
674 | /* After we've used one of their buffers, we tell them about it. We'll then | 745 | /* |
675 | * want to send them an interrupt, using trigger_irq(). */ | 746 | * After we've used one of their buffers, we tell the Guest about it. Sometime |
747 | * later we'll want to send them an interrupt using trigger_irq(); note that | ||
748 | * wait_for_vq_desc() does that for us if it has to wait. | ||
749 | */ | ||
676 | static void add_used(struct virtqueue *vq, unsigned int head, int len) | 750 | static void add_used(struct virtqueue *vq, unsigned int head, int len) |
677 | { | 751 | { |
678 | struct vring_used_elem *used; | 752 | struct vring_used_elem *used; |
679 | 753 | ||
680 | /* The virtqueue contains a ring of used buffers. Get a pointer to the | 754 | /* |
681 | * next entry in that used ring. */ | 755 | * The virtqueue contains a ring of used buffers. Get a pointer to the |
756 | * next entry in that used ring. | ||
757 | */ | ||
682 | used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num]; | 758 | used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num]; |
683 | used->id = head; | 759 | used->id = head; |
684 | used->len = len; | 760 | used->len = len; |
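The used ring follows the publish pattern the barrier comment earlier alluded to: fill in the entry first, then make the index increment visible, so the Guest never reads a half-written entry. A hedged sketch of that ordering, leaning on the declarations above (the real add_used() also keeps count of pending buffers for the interrupt logic):

/* Sketch of the publish order, assuming the vring layout used above. */
static void publish_used(struct virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem *used;

	/* 1: write the entry the Guest will read... */
	used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num];
	used->id = head;
	used->len = len;
	/* 2: ...make sure those stores are emitted... */
	wmb();
	/* 3: ...only then bump the index that tells the Guest it's there. */
	vq->vring.used->idx++;
}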
@@ -698,9 +774,9 @@ static void add_used_and_trigger(struct virtqueue *vq, unsigned head, int len) | |||
698 | /* | 774 | /* |
699 | * The Console | 775 | * The Console |
700 | * | 776 | * |
701 | * We associate some data with the console for our exit hack. */ | 777 | * We associate some data with the console for our exit hack. |
702 | struct console_abort | 778 | */ |
703 | { | 779 | struct console_abort { |
704 | /* How many times have they hit ^C? */ | 780 | /* How many times have they hit ^C? */ |
705 | int count; | 781 | int count; |
706 | /* When did they start? */ | 782 | /* When did they start? */ |
@@ -715,30 +791,35 @@ static void console_input(struct virtqueue *vq) | |||
715 | struct console_abort *abort = vq->dev->priv; | 791 | struct console_abort *abort = vq->dev->priv; |
716 | struct iovec iov[vq->vring.num]; | 792 | struct iovec iov[vq->vring.num]; |
717 | 793 | ||
718 | /* Make sure there's a descriptor waiting. */ | 794 | /* Make sure there's a descriptor available. */ |
719 | head = wait_for_vq_desc(vq, iov, &out_num, &in_num); | 795 | head = wait_for_vq_desc(vq, iov, &out_num, &in_num); |
720 | if (out_num) | 796 | if (out_num) |
721 | errx(1, "Output buffers in console in queue?"); | 797 | errx(1, "Output buffers in console in queue?"); |
722 | 798 | ||
723 | /* Read it in. */ | 799 | /* Read into it. This is where we usually wait. */ |
724 | len = readv(STDIN_FILENO, iov, in_num); | 800 | len = readv(STDIN_FILENO, iov, in_num); |
725 | if (len <= 0) { | 801 | if (len <= 0) { |
726 | /* Ran out of input? */ | 802 | /* Ran out of input? */ |
727 | warnx("Failed to get console input, ignoring console."); | 803 | warnx("Failed to get console input, ignoring console."); |
728 | /* For simplicity, dying threads kill the whole Launcher. So | 804 | /* |
729 | * just nap here. */ | 805 | * For simplicity, dying threads kill the whole Launcher. So |
806 | * just nap here. | ||
807 | */ | ||
730 | for (;;) | 808 | for (;;) |
731 | pause(); | 809 | pause(); |
732 | } | 810 | } |
733 | 811 | ||
812 | /* Tell the Guest we used a buffer. */ | ||
734 | add_used_and_trigger(vq, head, len); | 813 | add_used_and_trigger(vq, head, len); |
735 | 814 | ||
736 | /* Three ^C within one second? Exit. | 815 | /* |
816 | * Three ^C within one second? Exit. | ||
737 | * | 817 | * |
738 | * This is such a hack, but works surprisingly well. Each ^C has to | 818 | * This is such a hack, but works surprisingly well. Each ^C has to |
739 | * be in a buffer by itself, so they can't be too fast. But we check | 819 | * be in a buffer by itself, so they can't be too fast. But we check |
740 | * that we get three within about a second, so they can't be too | 820 | * that we get three within about a second, so they can't be too |
741 | * slow. */ | 821 | * slow. |
822 | */ | ||
742 | if (len != 1 || ((char *)iov[0].iov_base)[0] != 3) { | 823 | if (len != 1 || ((char *)iov[0].iov_base)[0] != 3) { |
743 | abort->count = 0; | 824 | abort->count = 0; |
744 | return; | 825 | return; |
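The "three ^C within about a second" rule needs only a counter and a timestamp: remember when the first ^C of a streak arrived, and give up on the streak if the third one comes too late. A standalone sketch of that check, with assumed names rather than the Launcher's exact code:

#include <stdbool.h>
#include <sys/time.h>

/* Returns true once three ^C have arrived within roughly one second. */
static bool third_interrupt_in_a_second(void)
{
	static int count;
	static struct timeval start;
	struct timeval now;

	gettimeofday(&now, NULL);
	if (count == 0)
		start = now;
	if (++count == 3) {
		count = 0;
		return now.tv_sec <= start.tv_sec + 1;
	}
	return false;
}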
@@ -763,15 +844,23 @@ static void console_output(struct virtqueue *vq) | |||
763 | unsigned int head, out, in; | 844 | unsigned int head, out, in; |
764 | struct iovec iov[vq->vring.num]; | 845 | struct iovec iov[vq->vring.num]; |
765 | 846 | ||
847 | /* We usually wait in here, for the Guest to give us something. */ | ||
766 | head = wait_for_vq_desc(vq, iov, &out, &in); | 848 | head = wait_for_vq_desc(vq, iov, &out, &in); |
767 | if (in) | 849 | if (in) |
768 | errx(1, "Input buffers in console output queue?"); | 850 | errx(1, "Input buffers in console output queue?"); |
851 | |||
852 | /* writev can return a partial write, so we loop here. */ | ||
769 | while (!iov_empty(iov, out)) { | 853 | while (!iov_empty(iov, out)) { |
770 | int len = writev(STDOUT_FILENO, iov, out); | 854 | int len = writev(STDOUT_FILENO, iov, out); |
771 | if (len <= 0) | 855 | if (len <= 0) |
772 | err(1, "Write to stdout gave %i", len); | 856 | err(1, "Write to stdout gave %i", len); |
773 | iov_consume(iov, out, len); | 857 | iov_consume(iov, out, len); |
774 | } | 858 | } |
859 | |||
860 | /* | ||
861 | * We're finished with that buffer: if we're going to sleep, | ||
862 | * wait_for_vq_desc() will prod the Guest with an interrupt. | ||
863 | */ | ||
775 | add_used(vq, head, 0); | 864 | add_used(vq, head, 0); |
776 | } | 865 | } |
777 | 866 | ||
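iov_consume() itself is not shown in this hunk; the idea is simply to advance the iovec array past however many bytes writev() managed to push out, so the loop can retry with what is left. A hedged sketch of such a helper (name and behaviour assumed, not copied from the file):

#include <stddef.h>
#include <sys/uio.h>

/* Skip over "consumed" bytes of the iovec array, shrinking finished entries. */
static void iov_consume_sketch(struct iovec iov[], unsigned num, size_t consumed)
{
	unsigned i;

	for (i = 0; i < num && consumed; i++) {
		size_t step = consumed < iov[i].iov_len
			      ? consumed : iov[i].iov_len;

		iov[i].iov_base = (char *)iov[i].iov_base + step;
		iov[i].iov_len -= step;
		consumed -= step;
	}
}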
@@ -791,15 +880,30 @@ static void net_output(struct virtqueue *vq) | |||
791 | unsigned int head, out, in; | 880 | unsigned int head, out, in; |
792 | struct iovec iov[vq->vring.num]; | 881 | struct iovec iov[vq->vring.num]; |
793 | 882 | ||
883 | /* We usually wait in here for the Guest to give us a packet. */ | ||
794 | head = wait_for_vq_desc(vq, iov, &out, &in); | 884 | head = wait_for_vq_desc(vq, iov, &out, &in); |
795 | if (in) | 885 | if (in) |
796 | errx(1, "Input buffers in net output queue?"); | 886 | errx(1, "Input buffers in net output queue?"); |
887 | /* | ||
888 | * Send the whole thing through to /dev/net/tun. It expects the exact | ||
889 | * same format: what a coincidence! | ||
890 | */ | ||
797 | if (writev(net_info->tunfd, iov, out) < 0) | 891 | if (writev(net_info->tunfd, iov, out) < 0) |
798 | errx(1, "Write to tun failed?"); | 892 | errx(1, "Write to tun failed?"); |
893 | |||
894 | /* | ||
895 | * Done with that one; wait_for_vq_desc() will send the interrupt if | ||
896 | * all packets are processed. | ||
897 | */ | ||
799 | add_used(vq, head, 0); | 898 | add_used(vq, head, 0); |
800 | } | 899 | } |
801 | 900 | ||
802 | /* Will reading from this file descriptor block? */ | 901 | /* |
902 | * Handling network input is a bit trickier, because I've tried to optimize it. | ||
903 | * | ||
904 | * First we have a helper routine which tells is if from this file descriptor | ||
905 | * (ie. the /dev/net/tun device) will block: | ||
906 | */ | ||
803 | static bool will_block(int fd) | 907 | static bool will_block(int fd) |
804 | { | 908 | { |
805 | fd_set fdset; | 909 | fd_set fdset; |
@@ -809,8 +913,11 @@ static bool will_block(int fd) | |||
809 | return select(fd+1, &fdset, NULL, NULL, &zero) != 1; | 913 | return select(fd+1, &fdset, NULL, NULL, &zero) != 1; |
810 | } | 914 | } |
811 | 915 | ||
812 | /* This is where we handle packets coming in from the tun device to our | 916 | /* |
813 | * Guest. */ | 917 | * This handles packets coming in from the tun device to our Guest. Like all |
918 | * service routines, it gets called again as soon as it returns, so you don't | ||
919 | * see a while(1) loop here. | ||
920 | */ | ||
814 | static void net_input(struct virtqueue *vq) | 921 | static void net_input(struct virtqueue *vq) |
815 | { | 922 | { |
816 | int len; | 923 | int len; |
@@ -818,21 +925,38 @@ static void net_input(struct virtqueue *vq) | |||
818 | struct iovec iov[vq->vring.num]; | 925 | struct iovec iov[vq->vring.num]; |
819 | struct net_info *net_info = vq->dev->priv; | 926 | struct net_info *net_info = vq->dev->priv; |
820 | 927 | ||
928 | /* | ||
929 | * Get a descriptor to write an incoming packet into. This will also | ||
930 | * send an interrupt if they're out of descriptors. | ||
931 | */ | ||
821 | head = wait_for_vq_desc(vq, iov, &out, &in); | 932 | head = wait_for_vq_desc(vq, iov, &out, &in); |
822 | if (out) | 933 | if (out) |
823 | errx(1, "Output buffers in net input queue?"); | 934 | errx(1, "Output buffers in net input queue?"); |
824 | 935 | ||
825 | /* Deliver interrupt now, since we're about to sleep. */ | 936 | /* |
937 | * If it looks like we'll block reading from the tun device, send them | ||
938 | * an interrupt. | ||
939 | */ | ||
826 | if (vq->pending_used && will_block(net_info->tunfd)) | 940 | if (vq->pending_used && will_block(net_info->tunfd)) |
827 | trigger_irq(vq); | 941 | trigger_irq(vq); |
828 | 942 | ||
943 | /* | ||
944 | * Read in the packet. This is where we normally wait (when there's no | ||
945 | * incoming network traffic). | ||
946 | */ | ||
829 | len = readv(net_info->tunfd, iov, in); | 947 | len = readv(net_info->tunfd, iov, in); |
830 | if (len <= 0) | 948 | if (len <= 0) |
831 | err(1, "Failed to read from tun."); | 949 | err(1, "Failed to read from tun."); |
950 | |||
951 | /* | ||
952 | * Mark that packet buffer as used, but don't interrupt here. We want | ||
953 | * to wait until we've done as much work as we can. | ||
954 | */ | ||
832 | add_used(vq, head, len); | 955 | add_used(vq, head, len); |
833 | } | 956 | } |
957 | /*:*/ | ||
834 | 958 | ||
835 | /* This is the helper to create threads. */ | 959 | /* This is the helper to create threads: run the service routine in a loop. */ |
836 | static int do_thread(void *_vq) | 960 | static int do_thread(void *_vq) |
837 | { | 961 | { |
838 | struct virtqueue *vq = _vq; | 962 | struct virtqueue *vq = _vq; |
@@ -842,8 +966,10 @@ static int do_thread(void *_vq) | |||
842 | return 0; | 966 | return 0; |
843 | } | 967 | } |
844 | 968 | ||
845 | /* When a child dies, we kill our entire process group with SIGTERM. This | 969 | /* |
846 | * also has the side effect that the shell restores the console for us! */ | 970 | * When a child dies, we kill our entire process group with SIGTERM. This |
971 | * also has the side effect that the shell restores the console for us! | ||
972 | */ | ||
847 | static void kill_launcher(int signal) | 973 | static void kill_launcher(int signal) |
848 | { | 974 | { |
849 | kill(0, SIGTERM); | 975 | kill(0, SIGTERM); |
@@ -878,11 +1004,15 @@ static void reset_device(struct device *dev) | |||
878 | signal(SIGCHLD, (void *)kill_launcher); | 1004 | signal(SIGCHLD, (void *)kill_launcher); |
879 | } | 1005 | } |
880 | 1006 | ||
1007 | /*L:216 | ||
1008 | * This actually creates the thread which services the virtqueue for a device. | ||
1009 | */ | ||
881 | static void create_thread(struct virtqueue *vq) | 1010 | static void create_thread(struct virtqueue *vq) |
882 | { | 1011 | { |
883 | /* Create stack for thread and run it. Since stack grows | 1012 | /* |
884 | * upwards, we point the stack pointer to the end of this | 1013 | * Create stack for thread. Since the stack grows upwards, we point |
885 | * region. */ | 1014 | * the stack pointer to the end of this region. |
1015 | */ | ||
886 | char *stack = malloc(32768); | 1016 | char *stack = malloc(32768); |
887 | unsigned long args[] = { LHREQ_EVENTFD, | 1017 | unsigned long args[] = { LHREQ_EVENTFD, |
888 | vq->config.pfn*getpagesize(), 0 }; | 1018 | vq->config.pfn*getpagesize(), 0 }; |
@@ -893,17 +1023,22 @@ static void create_thread(struct virtqueue *vq) | |||
893 | err(1, "Creating eventfd"); | 1023 | err(1, "Creating eventfd"); |
894 | args[2] = vq->eventfd; | 1024 | args[2] = vq->eventfd; |
895 | 1025 | ||
896 | /* Attach an eventfd to this virtqueue: it will go off | 1026 | /* |
897 | * when the Guest does an LHCALL_NOTIFY for this vq. */ | 1027 | * Attach an eventfd to this virtqueue: it will go off when the Guest |
1028 | * does an LHCALL_NOTIFY for this vq. | ||
1029 | */ | ||
898 | if (write(lguest_fd, &args, sizeof(args)) != 0) | 1030 | if (write(lguest_fd, &args, sizeof(args)) != 0) |
899 | err(1, "Attaching eventfd"); | 1031 | err(1, "Attaching eventfd"); |
900 | 1032 | ||
901 | /* CLONE_VM: because it has to access the Guest memory, and | 1033 | /* |
902 | * SIGCHLD so we get a signal if it dies. */ | 1034 | * CLONE_VM: because it has to access the Guest memory, and SIGCHLD so |
1035 | * we get a signal if it dies. | ||
1036 | */ | ||
903 | vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq); | 1037 | vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq); |
904 | if (vq->thread == (pid_t)-1) | 1038 | if (vq->thread == (pid_t)-1) |
905 | err(1, "Creating clone"); | 1039 | err(1, "Creating clone"); |
906 | /* We close our local copy, now the child has it. */ | 1040 | |
1041 | /* We close our local copy now the child has it. */ | ||
907 | close(vq->eventfd); | 1042 | close(vq->eventfd); |
908 | } | 1043 | } |
909 | 1044 | ||
@@ -955,7 +1090,10 @@ static void update_device_status(struct device *dev) | |||
955 | } | 1090 | } |
956 | } | 1091 | } |
957 | 1092 | ||
958 | /* This is the generic routine we call when the Guest uses LHCALL_NOTIFY. */ | 1093 | /*L:215 |
1094 | * This is the generic routine we call when the Guest uses LHCALL_NOTIFY. In | ||
1095 | * particular, it's used to notify us of device status changes during boot. | ||
1096 | */ | ||
959 | static void handle_output(unsigned long addr) | 1097 | static void handle_output(unsigned long addr) |
960 | { | 1098 | { |
961 | struct device *i; | 1099 | struct device *i; |
@@ -964,25 +1102,42 @@ static void handle_output(unsigned long addr) | |||
964 | for (i = devices.dev; i; i = i->next) { | 1102 | for (i = devices.dev; i; i = i->next) { |
965 | struct virtqueue *vq; | 1103 | struct virtqueue *vq; |
966 | 1104 | ||
967 | /* Notifications to device descriptors update device status. */ | 1105 | /* |
1106 | * Notifications to device descriptors mean they updated the | ||
1107 | * device status. | ||
1108 | */ | ||
968 | if (from_guest_phys(addr) == i->desc) { | 1109 | if (from_guest_phys(addr) == i->desc) { |
969 | update_device_status(i); | 1110 | update_device_status(i); |
970 | return; | 1111 | return; |
971 | } | 1112 | } |
972 | 1113 | ||
973 | /* Devices *can* be used before status is set to DRIVER_OK. */ | 1114 | /* |
1115 | * Devices *can* be used before status is set to DRIVER_OK. | ||
1116 | * The original plan was that they would never do this: they | ||
1117 | * would always finish setting up their status bits before | ||
1118 | * actually touching the virtqueues. In practice, we allowed | ||
1119 | * them to, and they do (eg. the disk probes for partition | ||
1120 | * tables as part of initialization). | ||
1121 | * | ||
1122 | * If we see this, we start the device: once it's running, we | ||
1123 | * expect the device to catch all the notifications. | ||
1124 | */ | ||
974 | for (vq = i->vq; vq; vq = vq->next) { | 1125 | for (vq = i->vq; vq; vq = vq->next) { |
975 | if (addr != vq->config.pfn*getpagesize()) | 1126 | if (addr != vq->config.pfn*getpagesize()) |
976 | continue; | 1127 | continue; |
977 | if (i->running) | 1128 | if (i->running) |
978 | errx(1, "Notification on running %s", i->name); | 1129 | errx(1, "Notification on running %s", i->name); |
1130 | /* This just calls create_thread() for each virtqueue */ | ||
979 | start_device(i); | 1131 | start_device(i); |
980 | return; | 1132 | return; |
981 | } | 1133 | } |
982 | } | 1134 | } |
983 | 1135 | ||
984 | /* Early console write is done using notify on a nul-terminated string | 1136 | /* |
985 | * in Guest memory. */ | 1137 | * Early console write is done using notify on a nul-terminated string |
1138 | * in Guest memory. It's also great for hacking debugging messages | ||
1139 | * into a Guest. | ||
1140 | */ | ||
986 | if (addr >= guest_limit) | 1141 | if (addr >= guest_limit) |
987 | errx(1, "Bad NOTIFY %#lx", addr); | 1142 | errx(1, "Bad NOTIFY %#lx", addr); |
988 | 1143 | ||
@@ -998,10 +1153,12 @@ static void handle_output(unsigned long addr) | |||
998 | * routines to allocate and manage them. | 1153 | * routines to allocate and manage them. |
999 | */ | 1154 | */ |
1000 | 1155 | ||
1001 | /* The layout of the device page is a "struct lguest_device_desc" followed by a | 1156 | /* |
1157 | * The layout of the device page is a "struct lguest_device_desc" followed by a | ||
1002 | * number of virtqueue descriptors, then two sets of feature bits, then an | 1158 | * number of virtqueue descriptors, then two sets of feature bits, then an |
1003 | * array of configuration bytes. This routine returns the configuration | 1159 | * array of configuration bytes. This routine returns the configuration |
1004 | * pointer. */ | 1160 | * pointer. |
1161 | */ | ||
1005 | static u8 *device_config(const struct device *dev) | 1162 | static u8 *device_config(const struct device *dev) |
1006 | { | 1163 | { |
1007 | return (void *)(dev->desc + 1) | 1164 | return (void *)(dev->desc + 1) |
@@ -1009,9 +1166,11 @@ static u8 *device_config(const struct device *dev) | |||
1009 | + dev->feature_len * 2; | 1166 | + dev->feature_len * 2; |
1010 | } | 1167 | } |
1011 | 1168 | ||
1012 | /* This routine allocates a new "struct lguest_device_desc" from descriptor | 1169 | /* |
1170 | * This routine allocates a new "struct lguest_device_desc" from descriptor | ||
1013 | * table page just above the Guest's normal memory. It returns a pointer to | 1171 | * table page just above the Guest's normal memory. It returns a pointer to |
1014 | * that descriptor. */ | 1172 | * that descriptor. |
1173 | */ | ||
1015 | static struct lguest_device_desc *new_dev_desc(u16 type) | 1174 | static struct lguest_device_desc *new_dev_desc(u16 type) |
1016 | { | 1175 | { |
1017 | struct lguest_device_desc d = { .type = type }; | 1176 | struct lguest_device_desc d = { .type = type }; |
@@ -1032,8 +1191,10 @@ static struct lguest_device_desc *new_dev_desc(u16 type) | |||
1032 | return memcpy(p, &d, sizeof(d)); | 1191 | return memcpy(p, &d, sizeof(d)); |
1033 | } | 1192 | } |
1034 | 1193 | ||
1035 | /* Each device descriptor is followed by the description of its virtqueues. We | 1194 | /* |
1036 | * specify how many descriptors the virtqueue is to have. */ | 1195 | * Each device descriptor is followed by the description of its virtqueues. We |
1196 | * specify how many descriptors the virtqueue is to have. | ||
1197 | */ | ||
1037 | static void add_virtqueue(struct device *dev, unsigned int num_descs, | 1198 | static void add_virtqueue(struct device *dev, unsigned int num_descs, |
1038 | void (*service)(struct virtqueue *)) | 1199 | void (*service)(struct virtqueue *)) |
1039 | { | 1200 | { |
@@ -1050,6 +1211,11 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs, | |||
1050 | vq->next = NULL; | 1211 | vq->next = NULL; |
1051 | vq->last_avail_idx = 0; | 1212 | vq->last_avail_idx = 0; |
1052 | vq->dev = dev; | 1213 | vq->dev = dev; |
1214 | |||
1215 | /* | ||
1216 | * This is the routine the service thread will run, and its Process ID | ||
1217 | * once it's running. | ||
1218 | */ | ||
1053 | vq->service = service; | 1219 | vq->service = service; |
1054 | vq->thread = (pid_t)-1; | 1220 | vq->thread = (pid_t)-1; |
1055 | 1221 | ||
@@ -1061,10 +1227,12 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs, | |||
1061 | /* Initialize the vring. */ | 1227 | /* Initialize the vring. */ |
1062 | vring_init(&vq->vring, num_descs, p, LGUEST_VRING_ALIGN); | 1228 | vring_init(&vq->vring, num_descs, p, LGUEST_VRING_ALIGN); |
1063 | 1229 | ||
1064 | /* Append virtqueue to this device's descriptor. We use | 1230 | /* |
1231 | * Append virtqueue to this device's descriptor. We use | ||
1065 | * device_config() to get the end of the device's current virtqueues; | 1232 | * device_config() to get the end of the device's current virtqueues; |
1066 | * we check that we haven't added any config or feature information | 1233 | * we check that we haven't added any config or feature information |
1067 | * yet, otherwise we'd be overwriting them. */ | 1234 | * yet, otherwise we'd be overwriting them. |
1235 | */ | ||
1068 | assert(dev->desc->config_len == 0 && dev->desc->feature_len == 0); | 1236 | assert(dev->desc->config_len == 0 && dev->desc->feature_len == 0); |
1069 | memcpy(device_config(dev), &vq->config, sizeof(vq->config)); | 1237 | memcpy(device_config(dev), &vq->config, sizeof(vq->config)); |
1070 | dev->num_vq++; | 1238 | dev->num_vq++; |
@@ -1072,14 +1240,18 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs, | |||
1072 | 1240 | ||
1073 | verbose("Virtqueue page %#lx\n", to_guest_phys(p)); | 1241 | verbose("Virtqueue page %#lx\n", to_guest_phys(p)); |
1074 | 1242 | ||
1075 | /* Add to tail of list, so dev->vq is first vq, dev->vq->next is | 1243 | /* |
1076 | * second. */ | 1244 | * Add to tail of list, so dev->vq is first vq, dev->vq->next is |
1245 | * second. | ||
1246 | */ | ||
1077 | for (i = &dev->vq; *i; i = &(*i)->next); | 1247 | for (i = &dev->vq; *i; i = &(*i)->next); |
1078 | *i = vq; | 1248 | *i = vq; |
1079 | } | 1249 | } |
1080 | 1250 | ||
1081 | /* The first half of the feature bitmask is for us to advertise features. The | 1251 | /* |
1082 | * second half is for the Guest to accept features. */ | 1252 | * The first half of the feature bitmask is for us to advertise features. The |
1253 | * second half is for the Guest to accept features. | ||
1254 | */ | ||
1083 | static void add_feature(struct device *dev, unsigned bit) | 1255 | static void add_feature(struct device *dev, unsigned bit) |
1084 | { | 1256 | { |
1085 | u8 *features = get_feature_bits(dev); | 1257 | u8 *features = get_feature_bits(dev); |
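
The comment above describes the feature area as two halves: the first feature_len bytes hold the bits the Launcher advertises, the next feature_len bytes hold the bits the Guest acknowledges. Here is a standalone sketch of that byte-and-bit arithmetic; the feature_len of 2 and bit number 9 are illustrative values only.

    /*
     * Standalone sketch of the feature bitmask layout: our advertised
     * bits first, the Guest's acknowledgment bits feature_len bytes later.
     */
    #include <stdio.h>
    #include <string.h>
    #include <limits.h>

    int main(void)
    {
        unsigned char features[4];              /* 2 bytes ours + 2 bytes Guest's */
        unsigned int feature_len = 2, bit = 9;

        memset(features, 0, sizeof(features));
        /* Advertise the feature, exactly as add_feature() does. */
        features[bit / CHAR_BIT] |= (1 << (bit % CHAR_BIT));
        /* The Guest's acknowledgment of the same bit lives one half later. */
        printf("host byte %u, mask 0x%02x, guest ack byte %u\n",
               bit / CHAR_BIT, 1 << (bit % CHAR_BIT),
               feature_len + bit / CHAR_BIT);
        return 0;
    }
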
@@ -1093,9 +1265,11 @@ static void add_feature(struct device *dev, unsigned bit) | |||
1093 | features[bit / CHAR_BIT] |= (1 << (bit % CHAR_BIT)); | 1265 | features[bit / CHAR_BIT] |= (1 << (bit % CHAR_BIT)); |
1094 | } | 1266 | } |
1095 | 1267 | ||
1096 | /* This routine sets the configuration fields for an existing device's | 1268 | /* |
1269 | * This routine sets the configuration fields for an existing device's | ||
1097 | * descriptor. It only works for the last device, but that's OK because that's | 1270 | * descriptor. It only works for the last device, but that's OK because that's |
1098 | * how we use it. */ | 1271 | * how we use it. |
1272 | */ | ||
1099 | static void set_config(struct device *dev, unsigned len, const void *conf) | 1273 | static void set_config(struct device *dev, unsigned len, const void *conf) |
1100 | { | 1274 | { |
1101 | /* Check we haven't overflowed our single page. */ | 1275 | /* Check we haven't overflowed our single page. */ |
@@ -1105,12 +1279,18 @@ static void set_config(struct device *dev, unsigned len, const void *conf) | |||
1105 | /* Copy in the config information, and store the length. */ | 1279 | /* Copy in the config information, and store the length. */ |
1106 | memcpy(device_config(dev), conf, len); | 1280 | memcpy(device_config(dev), conf, len); |
1107 | dev->desc->config_len = len; | 1281 | dev->desc->config_len = len; |
1282 | |||
1283 | /* Size must fit in config_len field (8 bits)! */ | ||
1284 | assert(dev->desc->config_len == len); | ||
1108 | } | 1285 | } |
1109 | 1286 | ||
1110 | /* This routine does all the creation and setup of a new device, including | 1287 | /* |
1111 | * calling new_dev_desc() to allocate the descriptor and device memory. | 1288 | * This routine does all the creation and setup of a new device, including |
1289 | * calling new_dev_desc() to allocate the descriptor and device memory. We | ||
1290 | * don't actually start the service threads until later. | ||
1112 | * | 1291 | * |
1113 | * See what I mean about userspace being boring? */ | 1292 | * See what I mean about userspace being boring? |
1293 | */ | ||
1114 | static struct device *new_device(const char *name, u16 type) | 1294 | static struct device *new_device(const char *name, u16 type) |
1115 | { | 1295 | { |
1116 | struct device *dev = malloc(sizeof(*dev)); | 1296 | struct device *dev = malloc(sizeof(*dev)); |
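
The new assert added to set_config() exists because config_len in the descriptor is a single byte. This standalone sketch shows the failure mode it guards against: a length over 255 silently wraps when stored, and comparing the stored value back against the requested one catches it. The 300-byte length is purely illustrative.

    /*
     * Standalone sketch: why comparing the stored u8 back against the
     * requested length catches silent truncation.
     */
    #include <stdio.h>
    #include <assert.h>

    int main(void)
    {
        unsigned char config_len;       /* same width as the descriptor field */
        unsigned int len = 300;         /* illustrative oversized config */

        config_len = len;               /* silently wraps to 300 - 256 = 44 */
        printf("wanted %u, stored %u\n", len, (unsigned)config_len);
        assert(config_len == len);      /* aborts here, flagging the bug */
        return 0;
    }
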
@@ -1123,10 +1303,12 @@ static struct device *new_device(const char *name, u16 type) | |||
1123 | dev->num_vq = 0; | 1303 | dev->num_vq = 0; |
1124 | dev->running = false; | 1304 | dev->running = false; |
1125 | 1305 | ||
1126 | /* Append to device list. Prepending to a single-linked list is | 1306 | /* |
1307 | * Append to device list. Prepending to a single-linked list is | ||
1127 | * easier, but the user expects the devices to be arranged on the bus | 1308 | * easier, but the user expects the devices to be arranged on the bus |
1128 | * in command-line order. The first network device on the command line | 1309 | * in command-line order. The first network device on the command line |
1129 | * is eth0, the first block device /dev/vda, etc. */ | 1310 | * is eth0, the first block device /dev/vda, etc. |
1311 | */ | ||
1130 | if (devices.lastdev) | 1312 | if (devices.lastdev) |
1131 | devices.lastdev->next = dev; | 1313 | devices.lastdev->next = dev; |
1132 | else | 1314 | else |
@@ -1136,8 +1318,10 @@ static struct device *new_device(const char *name, u16 type) | |||
1136 | return dev; | 1318 | return dev; |
1137 | } | 1319 | } |
1138 | 1320 | ||
1139 | /* Our first setup routine is the console. It's a fairly simple device, but | 1321 | /* |
1140 | * UNIX tty handling makes it uglier than it could be. */ | 1322 | * Our first setup routine is the console. It's a fairly simple device, but |
1323 | * UNIX tty handling makes it uglier than it could be. | ||
1324 | */ | ||
1141 | static void setup_console(void) | 1325 | static void setup_console(void) |
1142 | { | 1326 | { |
1143 | struct device *dev; | 1327 | struct device *dev; |
@@ -1145,8 +1329,10 @@ static void setup_console(void) | |||
1145 | /* If we can save the initial standard input settings... */ | 1329 | /* If we can save the initial standard input settings... */ |
1146 | if (tcgetattr(STDIN_FILENO, &orig_term) == 0) { | 1330 | if (tcgetattr(STDIN_FILENO, &orig_term) == 0) { |
1147 | struct termios term = orig_term; | 1331 | struct termios term = orig_term; |
1148 | /* Then we turn off echo, line buffering and ^C etc. We want a | 1332 | /* |
1149 | * raw input stream to the Guest. */ | 1333 | * Then we turn off echo, line buffering and ^C etc: We want a |
1334 | * raw input stream to the Guest. | ||
1335 | */ | ||
1150 | term.c_lflag &= ~(ISIG|ICANON|ECHO); | 1336 | term.c_lflag &= ~(ISIG|ICANON|ECHO); |
1151 | tcsetattr(STDIN_FILENO, TCSANOW, &term); | 1337 | tcsetattr(STDIN_FILENO, TCSANOW, &term); |
1152 | } | 1338 | } |
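
For anyone who wants to try the tty dance above without the rest of the Launcher, this is a minimal standalone version: save the settings, drop echo, line buffering and the ^C signal, and restore everything at exit. Nothing in it is lguest-specific.

    /* Standalone sketch of the raw-ish console mode used above. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>
    #include <termios.h>

    static struct termios orig_term;

    static void restore_term(void)
    {
        tcsetattr(STDIN_FILENO, TCSANOW, &orig_term);
    }

    int main(void)
    {
        struct termios term;

        if (tcgetattr(STDIN_FILENO, &orig_term) != 0)
            return 1;                   /* stdin isn't a tty: nothing to do */
        atexit(restore_term);

        term = orig_term;
        term.c_lflag &= ~(ISIG | ICANON | ECHO);
        tcsetattr(STDIN_FILENO, TCSANOW, &term);

        printf("raw mode on; press a key\n");
        getchar();                      /* returns per keystroke, unechoed */
        return 0;
    }
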
@@ -1157,10 +1343,12 @@ static void setup_console(void) | |||
1157 | dev->priv = malloc(sizeof(struct console_abort)); | 1343 | dev->priv = malloc(sizeof(struct console_abort)); |
1158 | ((struct console_abort *)dev->priv)->count = 0; | 1344 | ((struct console_abort *)dev->priv)->count = 0; |
1159 | 1345 | ||
1160 | /* The console needs two virtqueues: the input then the output. When | 1346 | /* |
1347 | * The console needs two virtqueues: the input then the output. When | ||
1161 | * they put something in the input queue, we make sure we're listening to | 1348 | * they put something in the input queue, we make sure we're listening to |
1162 | * stdin. When they put something in the output queue, we write it to | 1349 | * stdin. When they put something in the output queue, we write it to |
1163 | * stdout. */ | 1350 | * stdout. |
1351 | */ | ||
1164 | add_virtqueue(dev, VIRTQUEUE_NUM, console_input); | 1352 | add_virtqueue(dev, VIRTQUEUE_NUM, console_input); |
1165 | add_virtqueue(dev, VIRTQUEUE_NUM, console_output); | 1353 | add_virtqueue(dev, VIRTQUEUE_NUM, console_output); |
1166 | 1354 | ||
@@ -1168,7 +1356,8 @@ static void setup_console(void) | |||
1168 | } | 1356 | } |
1169 | /*:*/ | 1357 | /*:*/ |
1170 | 1358 | ||
1171 | /*M:010 Inter-guest networking is an interesting area. Simplest is to have a | 1359 | /*M:010 |
1360 | * Inter-guest networking is an interesting area. Simplest is to have a | ||
1172 | * --sharenet=<name> option which opens or creates a named pipe. This can be | 1361 | * --sharenet=<name> option which opens or creates a named pipe. This can be |
1173 | * used to send packets to another guest in a 1:1 manner. | 1362 | * used to send packets to another guest in a 1:1 manner. |
1174 | * | 1363 | * |
@@ -1182,7 +1371,8 @@ static void setup_console(void) | |||
1182 | * multiple inter-guest channels behind one interface, although it would | 1371 | * multiple inter-guest channels behind one interface, although it would |
1183 | * require some manner of hotplugging new virtio channels. | 1372 | * require some manner of hotplugging new virtio channels. |
1184 | * | 1373 | * |
1185 | * Finally, we could implement a virtio network switch in the kernel. :*/ | 1374 | * Finally, we could implement a virtio network switch in the kernel. |
1375 | :*/ | ||
1186 | 1376 | ||
1187 | static u32 str2ip(const char *ipaddr) | 1377 | static u32 str2ip(const char *ipaddr) |
1188 | { | 1378 | { |
@@ -1207,11 +1397,13 @@ static void str2mac(const char *macaddr, unsigned char mac[6]) | |||
1207 | mac[5] = m[5]; | 1397 | mac[5] = m[5]; |
1208 | } | 1398 | } |
1209 | 1399 | ||
1210 | /* This code is "adapted" from libbridge: it attaches the Host end of the | 1400 | /* |
1401 | * This code is "adapted" from libbridge: it attaches the Host end of the | ||
1211 | * network device to the bridge device specified by the command line. | 1402 | * network device to the bridge device specified by the command line. |
1212 | * | 1403 | * |
1213 | * This is yet another James Morris contribution (I'm an IP-level guy, so I | 1404 | * This is yet another James Morris contribution (I'm an IP-level guy, so I |
1214 | * dislike bridging), and I just try not to break it. */ | 1405 | * dislike bridging), and I just try not to break it. |
1406 | */ | ||
1215 | static void add_to_bridge(int fd, const char *if_name, const char *br_name) | 1407 | static void add_to_bridge(int fd, const char *if_name, const char *br_name) |
1216 | { | 1408 | { |
1217 | int ifidx; | 1409 | int ifidx; |
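
The body of add_to_bridge() is mostly elided by this hunk, so here is a hedged standalone sketch of the libbridge-style attach it describes: look up the interface index and hand it to the bridge with SIOCBRADDIF. The names "tap0" and "br0" are illustrative, both devices must already exist, and it needs root.

    /* Standalone sketch: add an existing interface to an existing bridge. */
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <net/if.h>
    #include <linux/sockios.h>
    #include <err.h>

    int main(void)
    {
        const char *if_name = "tap0", *br_name = "br0";   /* illustrative */
        struct ifreq ifr;
        unsigned int ifidx;
        int fd;

        fd = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);     /* any socket will do */
        if (fd < 0)
            err(1, "socket");

        ifidx = if_nametoindex(if_name);
        if (!ifidx)
            errx(1, "interface %s does not exist", if_name);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, br_name, IFNAMSIZ);
        ifr.ifr_name[IFNAMSIZ - 1] = '\0';
        ifr.ifr_ifindex = ifidx;
        if (ioctl(fd, SIOCBRADDIF, &ifr) < 0)
            err(1, "can't add %s to bridge %s", if_name, br_name);

        close(fd);
        return 0;
    }
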
@@ -1231,9 +1423,11 @@ static void add_to_bridge(int fd, const char *if_name, const char *br_name) | |||
1231 | err(1, "can't add %s to bridge %s", if_name, br_name); | 1423 | err(1, "can't add %s to bridge %s", if_name, br_name); |
1232 | } | 1424 | } |
1233 | 1425 | ||
1234 | /* This sets up the Host end of the network device with an IP address, brings | 1426 | /* |
1427 | * This sets up the Host end of the network device with an IP address, brings | ||
1235 | * it up so packets will flow, then copies the MAC address into the hwaddr | 1428 | * it up so packets will flow, then copies the MAC address into the hwaddr |
1236 | * pointer. */ | 1429 | * pointer. |
1430 | */ | ||
1237 | static void configure_device(int fd, const char *tapif, u32 ipaddr) | 1431 | static void configure_device(int fd, const char *tapif, u32 ipaddr) |
1238 | { | 1432 | { |
1239 | struct ifreq ifr; | 1433 | struct ifreq ifr; |
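
As a companion to the comment above, this standalone sketch gives an existing interface an IPv4 address and brings it up, the same pair of ioctls configure_device() leans on. The interface name "tap0" and the address 192.168.19.1 are assumptions for illustration; run it as root.

    /* Standalone sketch: set an IPv4 address on an interface and bring it up. */
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <net/if.h>
    #include <err.h>

    int main(void)
    {
        const char *tapif = "tap0";                   /* illustrative */
        struct ifreq ifr;
        struct sockaddr_in sin;
        int fd;

        fd = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
        if (fd < 0)
            err(1, "opening IP socket");

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, tapif, IFNAMSIZ);
        ifr.ifr_name[IFNAMSIZ - 1] = '\0';

        /* Set the address. */
        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;
        sin.sin_addr.s_addr = inet_addr("192.168.19.1");
        memcpy(&ifr.ifr_addr, &sin, sizeof(sin));
        if (ioctl(fd, SIOCSIFADDR, &ifr) < 0)
            err(1, "setting %s address", tapif);

        /* Bring it up so packets flow. */
        if (ioctl(fd, SIOCGIFFLAGS, &ifr) < 0)
            err(1, "getting %s flags", tapif);
        ifr.ifr_flags |= IFF_UP;
        if (ioctl(fd, SIOCSIFFLAGS, &ifr) < 0)
            err(1, "bringing up %s", tapif);

        close(fd);
        return 0;
    }
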
@@ -1260,10 +1454,12 @@ static int get_tun_device(char tapif[IFNAMSIZ]) | |||
1260 | /* Start with this zeroed. Messy but sure. */ | 1454 | /* Start with this zeroed. Messy but sure. */ |
1261 | memset(&ifr, 0, sizeof(ifr)); | 1455 | memset(&ifr, 0, sizeof(ifr)); |
1262 | 1456 | ||
1263 | /* We open the /dev/net/tun device and tell it we want a tap device. A | 1457 | /* |
1458 | * We open the /dev/net/tun device and tell it we want a tap device. A | ||
1264 | * tap device is like a tun device, only somehow different. To tell | 1459 | * tap device is like a tun device, only somehow different. To tell |
1265 | * the truth, I completely blundered my way through this code, but it | 1460 | * the truth, I completely blundered my way through this code, but it |
1266 | * works now! */ | 1461 | * works now! |
1462 | */ | ||
1267 | netfd = open_or_die("/dev/net/tun", O_RDWR); | 1463 | netfd = open_or_die("/dev/net/tun", O_RDWR); |
1268 | ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR; | 1464 | ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR; |
1269 | strcpy(ifr.ifr_name, "tap%d"); | 1465 | strcpy(ifr.ifr_name, "tap%d"); |
@@ -1274,18 +1470,22 @@ static int get_tun_device(char tapif[IFNAMSIZ]) | |||
1274 | TUN_F_CSUM|TUN_F_TSO4|TUN_F_TSO6|TUN_F_TSO_ECN) != 0) | 1470 | TUN_F_CSUM|TUN_F_TSO4|TUN_F_TSO6|TUN_F_TSO_ECN) != 0) |
1275 | err(1, "Could not set features for tun device"); | 1471 | err(1, "Could not set features for tun device"); |
1276 | 1472 | ||
1277 | /* We don't need checksums calculated for packets coming in this | 1473 | /* |
1278 | * device: trust us! */ | 1474 | * We don't need checksums calculated for packets coming in this |
1475 | * device: trust us! | ||
1476 | */ | ||
1279 | ioctl(netfd, TUNSETNOCSUM, 1); | 1477 | ioctl(netfd, TUNSETNOCSUM, 1); |
1280 | 1478 | ||
1281 | memcpy(tapif, ifr.ifr_name, IFNAMSIZ); | 1479 | memcpy(tapif, ifr.ifr_name, IFNAMSIZ); |
1282 | return netfd; | 1480 | return netfd; |
1283 | } | 1481 | } |
1284 | 1482 | ||
1285 | /*L:195 Our network is a Host<->Guest network. This can either use bridging or | 1483 | /*L:195 |
1484 | * Our network is a Host<->Guest network. This can either use bridging or | ||
1286 | * routing, but the principle is the same: it uses the "tun" device to inject | 1485 | * routing, but the principle is the same: it uses the "tun" device to inject |
1287 | * packets into the Host as if they came in from a normal network card. We | 1486 | * packets into the Host as if they came in from a normal network card. We |
1288 | * just shunt packets between the Guest and the tun device. */ | 1487 | * just shunt packets between the Guest and the tun device. |
1488 | */ | ||
1289 | static void setup_tun_net(char *arg) | 1489 | static void setup_tun_net(char *arg) |
1290 | { | 1490 | { |
1291 | struct device *dev; | 1491 | struct device *dev; |
@@ -1302,13 +1502,14 @@ static void setup_tun_net(char *arg) | |||
1302 | dev = new_device("net", VIRTIO_ID_NET); | 1502 | dev = new_device("net", VIRTIO_ID_NET); |
1303 | dev->priv = net_info; | 1503 | dev->priv = net_info; |
1304 | 1504 | ||
1305 | /* Network devices need a receive and a send queue, just like | 1505 | /* Network devices need a recv and a send queue, just like console. */ |
1306 | * console. */ | ||
1307 | add_virtqueue(dev, VIRTQUEUE_NUM, net_input); | 1506 | add_virtqueue(dev, VIRTQUEUE_NUM, net_input); |
1308 | add_virtqueue(dev, VIRTQUEUE_NUM, net_output); | 1507 | add_virtqueue(dev, VIRTQUEUE_NUM, net_output); |
1309 | 1508 | ||
1310 | /* We need a socket to perform the magic network ioctls to bring up the | 1509 | /* |
1311 | * tap interface, connect to the bridge etc. Any socket will do! */ | 1510 | * We need a socket to perform the magic network ioctls to bring up the |
1511 | * tap interface, connect to the bridge etc. Any socket will do! | ||
1512 | */ | ||
1312 | ipfd = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); | 1513 | ipfd = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); |
1313 | if (ipfd < 0) | 1514 | if (ipfd < 0) |
1314 | err(1, "opening IP socket"); | 1515 | err(1, "opening IP socket"); |
@@ -1362,39 +1563,31 @@ static void setup_tun_net(char *arg) | |||
1362 | verbose("device %u: tun %s: %s\n", | 1563 | verbose("device %u: tun %s: %s\n", |
1363 | devices.device_num, tapif, arg); | 1564 | devices.device_num, tapif, arg); |
1364 | } | 1565 | } |
1365 | 1566 | /*:*/ | |
1366 | /* Our block (disk) device should be really simple: the Guest asks for a block | ||
1367 | * number and we read or write that position in the file. Unfortunately, that | ||
1368 | * was amazingly slow: the Guest waits until the read is finished before | ||
1369 | * running anything else, even if it could have been doing useful work. | ||
1370 | * | ||
1371 | * We could use async I/O, except it's reputed to suck so hard that characters | ||
1372 | * actually go missing from your code when you try to use it. | ||
1373 | * | ||
1374 | * So we farm the I/O out to thread, and communicate with it via a pipe. */ | ||
1375 | 1567 | ||
1376 | /* This hangs off device->priv. */ | 1568 | /* This hangs off device->priv. */ |
1377 | struct vblk_info | 1569 | struct vblk_info { |
1378 | { | ||
1379 | /* The size of the file. */ | 1570 | /* The size of the file. */ |
1380 | off64_t len; | 1571 | off64_t len; |
1381 | 1572 | ||
1382 | /* The file descriptor for the file. */ | 1573 | /* The file descriptor for the file. */ |
1383 | int fd; | 1574 | int fd; |
1384 | 1575 | ||
1385 | /* IO thread listens on this file descriptor [0]. */ | ||
1386 | int workpipe[2]; | ||
1387 | |||
1388 | /* IO thread writes to this file descriptor to mark it done, then | ||
1389 | * Launcher triggers interrupt to Guest. */ | ||
1390 | int done_fd; | ||
1391 | }; | 1576 | }; |
1392 | 1577 | ||
1393 | /*L:210 | 1578 | /*L:210 |
1394 | * The Disk | 1579 | * The Disk |
1395 | * | 1580 | * |
1396 | * Remember that the block device is handled by a separate I/O thread. We head | 1581 | * The disk only has one virtqueue, so it only has one thread. It is really |
1397 | * straight into the core of that thread here: | 1582 | * simple: the Guest asks for a block number and we read or write that position |
1583 | * in the file. | ||
1584 | * | ||
1585 | * Before we serviced each virtqueue in a separate thread, this was unacceptably | ||
1586 | * slow: the Guest waited until the read finished before running anything | ||
1587 | * else, even if it could have been doing useful work. | ||
1588 | * | ||
1589 | * We could have used async I/O, except it's reputed to suck so hard that | ||
1590 | * characters actually go missing from your code when you try to use it. | ||
1398 | */ | 1591 | */ |
1399 | static void blk_request(struct virtqueue *vq) | 1592 | static void blk_request(struct virtqueue *vq) |
1400 | { | 1593 | { |
@@ -1406,47 +1599,64 @@ static void blk_request(struct virtqueue *vq) | |||
1406 | struct iovec iov[vq->vring.num]; | 1599 | struct iovec iov[vq->vring.num]; |
1407 | off64_t off; | 1600 | off64_t off; |
1408 | 1601 | ||
1409 | /* Get the next request. */ | 1602 | /* |
1603 | * Get the next request, where we normally wait. It triggers the | ||
1604 | * interrupt to acknowledge previously serviced requests (if any). | ||
1605 | */ | ||
1410 | head = wait_for_vq_desc(vq, iov, &out_num, &in_num); | 1606 | head = wait_for_vq_desc(vq, iov, &out_num, &in_num); |
1411 | 1607 | ||
1412 | /* Every block request should contain at least one output buffer | 1608 | /* |
1609 | * Every block request should contain at least one output buffer | ||
1413 | * (detailing the location on disk and the type of request) and one | 1610 | * (detailing the location on disk and the type of request) and one |
1414 | * input buffer (to hold the result). */ | 1611 | * input buffer (to hold the result). |
1612 | */ | ||
1415 | if (out_num == 0 || in_num == 0) | 1613 | if (out_num == 0 || in_num == 0) |
1416 | errx(1, "Bad virtblk cmd %u out=%u in=%u", | 1614 | errx(1, "Bad virtblk cmd %u out=%u in=%u", |
1417 | head, out_num, in_num); | 1615 | head, out_num, in_num); |
1418 | 1616 | ||
1419 | out = convert(&iov[0], struct virtio_blk_outhdr); | 1617 | out = convert(&iov[0], struct virtio_blk_outhdr); |
1420 | in = convert(&iov[out_num+in_num-1], u8); | 1618 | in = convert(&iov[out_num+in_num-1], u8); |
1619 | /* | ||
1620 | * For historical reasons, block operations are expressed in 512 byte | ||
1621 | * "sectors". | ||
1622 | */ | ||
1421 | off = out->sector * 512; | 1623 | off = out->sector * 512; |
1422 | 1624 | ||
1423 | /* The block device implements "barriers", where the Guest indicates | 1625 | /* |
1626 | * The block device implements "barriers", where the Guest indicates | ||
1424 | * that it wants all previous writes to occur before this write. We | 1627 | * that it wants all previous writes to occur before this write. We |
1425 | * don't have a way of asking our kernel to do a barrier, so we just | 1628 | * don't have a way of asking our kernel to do a barrier, so we just |
1426 | * synchronize all the data in the file. Pretty poor, no? */ | 1629 | * synchronize all the data in the file. Pretty poor, no? |
1630 | */ | ||
1427 | if (out->type & VIRTIO_BLK_T_BARRIER) | 1631 | if (out->type & VIRTIO_BLK_T_BARRIER) |
1428 | fdatasync(vblk->fd); | 1632 | fdatasync(vblk->fd); |
1429 | 1633 | ||
1430 | /* In general the virtio block driver is allowed to try SCSI commands. | 1634 | /* |
1431 | * It'd be nice if we supported eject, for example, but we don't. */ | 1635 | * In general the virtio block driver is allowed to try SCSI commands. |
1636 | * It'd be nice if we supported eject, for example, but we don't. | ||
1637 | */ | ||
1432 | if (out->type & VIRTIO_BLK_T_SCSI_CMD) { | 1638 | if (out->type & VIRTIO_BLK_T_SCSI_CMD) { |
1433 | fprintf(stderr, "Scsi commands unsupported\n"); | 1639 | fprintf(stderr, "Scsi commands unsupported\n"); |
1434 | *in = VIRTIO_BLK_S_UNSUPP; | 1640 | *in = VIRTIO_BLK_S_UNSUPP; |
1435 | wlen = sizeof(*in); | 1641 | wlen = sizeof(*in); |
1436 | } else if (out->type & VIRTIO_BLK_T_OUT) { | 1642 | } else if (out->type & VIRTIO_BLK_T_OUT) { |
1437 | /* Write */ | 1643 | /* |
1438 | 1644 | * Write | |
1439 | /* Move to the right location in the block file. This can fail | 1645 | * |
1440 | * if they try to write past end. */ | 1646 | * Move to the right location in the block file. This can fail |
1647 | * if they try to write past end. | ||
1648 | */ | ||
1441 | if (lseek64(vblk->fd, off, SEEK_SET) != off) | 1649 | if (lseek64(vblk->fd, off, SEEK_SET) != off) |
1442 | err(1, "Bad seek to sector %llu", out->sector); | 1650 | err(1, "Bad seek to sector %llu", out->sector); |
1443 | 1651 | ||
1444 | ret = writev(vblk->fd, iov+1, out_num-1); | 1652 | ret = writev(vblk->fd, iov+1, out_num-1); |
1445 | verbose("WRITE to sector %llu: %i\n", out->sector, ret); | 1653 | verbose("WRITE to sector %llu: %i\n", out->sector, ret); |
1446 | 1654 | ||
1447 | /* Grr... Now we know how long the descriptor they sent was, we | 1655 | /* |
1656 | * Grr... Now we know how long the descriptor they sent was, we | ||
1448 | * make sure they didn't try to write over the end of the block | 1657 | * make sure they didn't try to write over the end of the block |
1449 | * file (possibly extending it). */ | 1658 | * file (possibly extending it). |
1659 | */ | ||
1450 | if (ret > 0 && off + ret > vblk->len) { | 1660 | if (ret > 0 && off + ret > vblk->len) { |
1451 | /* Trim it back to the correct length */ | 1661 | /* Trim it back to the correct length */ |
1452 | ftruncate64(vblk->fd, vblk->len); | 1662 | ftruncate64(vblk->fd, vblk->len); |
@@ -1456,10 +1666,12 @@ static void blk_request(struct virtqueue *vq) | |||
1456 | wlen = sizeof(*in); | 1666 | wlen = sizeof(*in); |
1457 | *in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR); | 1667 | *in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR); |
1458 | } else { | 1668 | } else { |
1459 | /* Read */ | 1669 | /* |
1460 | 1670 | * Read | |
1461 | /* Move to the right location in the block file. This can fail | 1671 | * |
1462 | * if they try to read past end. */ | 1672 | * Move to the right location in the block file. This can fail |
1673 | * if they try to read past end. | ||
1674 | */ | ||
1463 | if (lseek64(vblk->fd, off, SEEK_SET) != off) | 1675 | if (lseek64(vblk->fd, off, SEEK_SET) != off) |
1464 | err(1, "Bad seek to sector %llu", out->sector); | 1676 | err(1, "Bad seek to sector %llu", out->sector); |
1465 | 1677 | ||
@@ -1474,13 +1686,16 @@ static void blk_request(struct virtqueue *vq) | |||
1474 | } | 1686 | } |
1475 | } | 1687 | } |
1476 | 1688 | ||
1477 | /* OK, so we noted that it was pretty poor to use an fdatasync as a | 1689 | /* |
1690 | * OK, so we noted that it was pretty poor to use an fdatasync as a | ||
1478 | * barrier. But Christoph Hellwig points out that we need a sync | 1691 | * barrier. But Christoph Hellwig points out that we need a sync |
1479 | * *afterwards* as well: "Barriers specify no reordering to the front | 1692 | * *afterwards* as well: "Barriers specify no reordering to the front |
1480 | * or the back." And Jens Axboe confirmed it, so here we are: */ | 1693 | * or the back." And Jens Axboe confirmed it, so here we are: |
1694 | */ | ||
1481 | if (out->type & VIRTIO_BLK_T_BARRIER) | 1695 | if (out->type & VIRTIO_BLK_T_BARRIER) |
1482 | fdatasync(vblk->fd); | 1696 | fdatasync(vblk->fd); |
1483 | 1697 | ||
1698 | /* Finished that request. */ | ||
1484 | add_used(vq, head, wlen); | 1699 | add_used(vq, head, wlen); |
1485 | } | 1700 | } |
1486 | 1701 | ||
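
The before-and-after fdatasync pattern above is easy to lift out on its own. A minimal standalone sketch, assuming an ordinary file stands in for the block backing store and the payload is illustrative:

    /*
     * Standalone sketch of the poor-man's barrier: when a write is flagged
     * as a barrier, sync the file both before and after it, since barriers
     * specify no reordering to the front or the back.
     */
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <err.h>

    static void barrier_write(int fd, const void *buf, size_t len, int barrier)
    {
        if (barrier)
            fdatasync(fd);              /* flush everything queued before us */
        if (write(fd, buf, len) != (ssize_t)len)
            err(1, "write");
        if (barrier)
            fdatasync(fd);              /* and make sure we hit disk too */
    }

    int main(void)
    {
        const char msg[] = "journal record\n";
        int fd = open("barrier-demo.img", O_WRONLY | O_CREAT | O_APPEND, 0644);

        if (fd < 0)
            err(1, "open");
        barrier_write(fd, msg, strlen(msg), 1);
        close(fd);
        return 0;
    }
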
@@ -1491,7 +1706,7 @@ static void setup_block_file(const char *filename) | |||
1491 | struct vblk_info *vblk; | 1706 | struct vblk_info *vblk; |
1492 | struct virtio_blk_config conf; | 1707 | struct virtio_blk_config conf; |
1493 | 1708 | ||
1494 | /* The device responds to return from I/O thread. */ | 1709 | /* Create the device. */ |
1495 | dev = new_device("block", VIRTIO_ID_BLOCK); | 1710 | dev = new_device("block", VIRTIO_ID_BLOCK); |
1496 | 1711 | ||
1497 | /* The device has one virtqueue, where the Guest places requests. */ | 1712 | /* The device has one virtqueue, where the Guest places requests. */ |
@@ -1510,27 +1725,32 @@ static void setup_block_file(const char *filename) | |||
1510 | /* Tell Guest how many sectors this device has. */ | 1725 | /* Tell Guest how many sectors this device has. */ |
1511 | conf.capacity = cpu_to_le64(vblk->len / 512); | 1726 | conf.capacity = cpu_to_le64(vblk->len / 512); |
1512 | 1727 | ||
1513 | /* Tell Guest not to put in too many descriptors at once: two are used | 1728 | /* |
1514 | * for the in and out elements. */ | 1729 | * Tell Guest not to put in too many descriptors at once: two are used |
1730 | * for the in and out elements. | ||
1731 | */ | ||
1515 | add_feature(dev, VIRTIO_BLK_F_SEG_MAX); | 1732 | add_feature(dev, VIRTIO_BLK_F_SEG_MAX); |
1516 | conf.seg_max = cpu_to_le32(VIRTQUEUE_NUM - 2); | 1733 | conf.seg_max = cpu_to_le32(VIRTQUEUE_NUM - 2); |
1517 | 1734 | ||
1518 | set_config(dev, sizeof(conf), &conf); | 1735 | /* Don't try to put in the whole struct: we have an 8-bit limit. */ |
1736 | set_config(dev, offsetof(struct virtio_blk_config, geometry), &conf); | ||
1519 | 1737 | ||
1520 | verbose("device %u: virtblock %llu sectors\n", | 1738 | verbose("device %u: virtblock %llu sectors\n", |
1521 | ++devices.device_num, le64_to_cpu(conf.capacity)); | 1739 | ++devices.device_num, le64_to_cpu(conf.capacity)); |
1522 | } | 1740 | } |
1523 | 1741 | ||
1524 | struct rng_info { | 1742 | /*L:211 |
1525 | int rfd; | 1743 | * Our random number generator device reads from /dev/random into the Guest's |
1526 | }; | ||
1527 | |||
1528 | /* Our random number generator device reads from /dev/random into the Guest's | ||
1529 | * input buffers. The usual case is that the Guest doesn't want random numbers | 1744 | * input buffers. The usual case is that the Guest doesn't want random numbers |
1530 | * and so has no buffers although /dev/random is still readable, whereas | 1745 | * and so has no buffers although /dev/random is still readable, whereas |
1531 | * console is the reverse. | 1746 | * console is the reverse. |
1532 | * | 1747 | * |
1533 | * The same logic applies, however. */ | 1748 | * The same logic applies, however. |
1749 | */ | ||
1750 | struct rng_info { | ||
1751 | int rfd; | ||
1752 | }; | ||
1753 | |||
1534 | static void rng_input(struct virtqueue *vq) | 1754 | static void rng_input(struct virtqueue *vq) |
1535 | { | 1755 | { |
1536 | int len; | 1756 | int len; |
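
The offsetof() call above is the whole trick: copy only the leading fields of the config structure so the length stays under the 8-bit limit. Here is a standalone sketch using a made-up stand-in struct, not the real virtio_blk_config:

    /*
     * Standalone sketch of the offsetof() prefix-copy: when a config
     * struct has grown a large tail, copy only the leading fields so the
     * length still fits in an 8-bit config_len.
     */
    #include <stdio.h>
    #include <string.h>
    #include <stddef.h>

    struct fake_blk_config {                   /* illustrative stand-in */
        unsigned long long capacity;
        unsigned int size_max;
        unsigned int seg_max;
        unsigned short geometry[3];
        unsigned char identify[1024];          /* the part that would not fit */
    };

    int main(void)
    {
        struct fake_blk_config conf;
        unsigned char page[256];
        size_t len = offsetof(struct fake_blk_config, geometry);

        memset(&conf, 0, sizeof(conf));
        conf.capacity = 2048;                  /* sectors: a 1MB toy disk */

        printf("whole struct: %zu bytes, prefix: %zu bytes\n",
               sizeof(conf), len);
        memcpy(page, &conf, len);              /* safe: len is well under 256 */
        return 0;
    }
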
@@ -1543,9 +1763,10 @@ static void rng_input(struct virtqueue *vq) | |||
1543 | if (out_num) | 1763 | if (out_num) |
1544 | errx(1, "Output buffers in rng?"); | 1764 | errx(1, "Output buffers in rng?"); |
1545 | 1765 | ||
1546 | /* This is why we convert to iovecs: the readv() call uses them, and so | 1766 | /* |
1547 | * it reads straight into the Guest's buffer. We loop to make sure we | 1767 | * Just like the console write, we loop to cover the whole iovec. |
1548 | * fill it. */ | 1768 | * In this case, short reads actually happen quite a bit. |
1769 | */ | ||
1549 | while (!iov_empty(iov, in_num)) { | 1770 | while (!iov_empty(iov, in_num)) { |
1550 | len = readv(rng_info->rfd, iov, in_num); | 1771 | len = readv(rng_info->rfd, iov, in_num); |
1551 | if (len <= 0) | 1772 | if (len <= 0) |
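
The loop above is the standard answer to short reads from /dev/random. Below is a standalone version with small, illustrative buffers and hand-rolled iov_consume()/iov_empty() helpers (the Launcher has its own); note it may block while the kernel gathers entropy.

    /* Standalone sketch: fill a whole iovec from /dev/random despite short reads. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/uio.h>
    #include <err.h>

    /* Drop "used" bytes from the front of the iovec array. */
    static void iov_consume(struct iovec iov[], unsigned num, size_t used)
    {
        unsigned i;

        for (i = 0; i < num && used; i++) {
            size_t chunk = used < iov[i].iov_len ? used : iov[i].iov_len;

            iov[i].iov_base = (char *)iov[i].iov_base + chunk;
            iov[i].iov_len -= chunk;
            used -= chunk;
        }
    }

    static int iov_empty(const struct iovec iov[], unsigned num)
    {
        unsigned i;

        for (i = 0; i < num; i++)
            if (iov[i].iov_len)
                return 0;
        return 1;
    }

    int main(void)
    {
        char a[16], b[16];
        struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
        int fd = open("/dev/random", O_RDONLY);
        size_t total = 0;

        if (fd < 0)
            err(1, "opening /dev/random");

        while (!iov_empty(iov, 2)) {
            ssize_t len = readv(fd, iov, 2);

            if (len <= 0)
                err(1, "reading /dev/random");
            total += len;
            iov_consume(iov, 2, len);
        }
        printf("collected %zu random bytes\n", total);
        close(fd);
        return 0;
    }
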
@@ -1558,15 +1779,18 @@ static void rng_input(struct virtqueue *vq) | |||
1558 | add_used(vq, head, totlen); | 1779 | add_used(vq, head, totlen); |
1559 | } | 1780 | } |
1560 | 1781 | ||
1561 | /* And this creates a "hardware" random number device for the Guest. */ | 1782 | /*L:199 |
1783 | * This creates a "hardware" random number device for the Guest. | ||
1784 | */ | ||
1562 | static void setup_rng(void) | 1785 | static void setup_rng(void) |
1563 | { | 1786 | { |
1564 | struct device *dev; | 1787 | struct device *dev; |
1565 | struct rng_info *rng_info = malloc(sizeof(*rng_info)); | 1788 | struct rng_info *rng_info = malloc(sizeof(*rng_info)); |
1566 | 1789 | ||
1790 | /* Our device's private info simply contains the /dev/random fd. */ | ||
1567 | rng_info->rfd = open_or_die("/dev/random", O_RDONLY); | 1791 | rng_info->rfd = open_or_die("/dev/random", O_RDONLY); |
1568 | 1792 | ||
1569 | /* The device responds to return from I/O thread. */ | 1793 | /* Create the new device. */ |
1570 | dev = new_device("rng", VIRTIO_ID_RNG); | 1794 | dev = new_device("rng", VIRTIO_ID_RNG); |
1571 | dev->priv = rng_info; | 1795 | dev->priv = rng_info; |
1572 | 1796 | ||
@@ -1582,8 +1806,10 @@ static void __attribute__((noreturn)) restart_guest(void) | |||
1582 | { | 1806 | { |
1583 | unsigned int i; | 1807 | unsigned int i; |
1584 | 1808 | ||
1585 | /* Since we don't track all open fds, we simply close everything beyond | 1809 | /* |
1586 | * stderr. */ | 1810 | * Since we don't track all open fds, we simply close everything beyond |
1811 | * stderr. | ||
1812 | */ | ||
1587 | for (i = 3; i < FD_SETSIZE; i++) | 1813 | for (i = 3; i < FD_SETSIZE; i++) |
1588 | close(i); | 1814 | close(i); |
1589 | 1815 | ||
@@ -1594,8 +1820,10 @@ static void __attribute__((noreturn)) restart_guest(void) | |||
1594 | err(1, "Could not exec %s", main_args[0]); | 1820 | err(1, "Could not exec %s", main_args[0]); |
1595 | } | 1821 | } |
1596 | 1822 | ||
1597 | /*L:220 Finally we reach the core of the Launcher which runs the Guest, serves | 1823 | /*L:220 |
1598 | * its input and output, and finally, lays it to rest. */ | 1824 | * Finally we reach the core of the Launcher which runs the Guest, serves |
1825 | * its input and output, and finally, lays it to rest. | ||
1826 | */ | ||
1599 | static void __attribute__((noreturn)) run_guest(void) | 1827 | static void __attribute__((noreturn)) run_guest(void) |
1600 | { | 1828 | { |
1601 | for (;;) { | 1829 | for (;;) { |
@@ -1630,7 +1858,7 @@ static void __attribute__((noreturn)) run_guest(void) | |||
1630 | * | 1858 | * |
1631 | * Are you ready? Take a deep breath and join me in the core of the Host, in | 1859 | * Are you ready? Take a deep breath and join me in the core of the Host, in |
1632 | * "make Host". | 1860 | * "make Host". |
1633 | :*/ | 1861 | :*/ |
1634 | 1862 | ||
1635 | static struct option opts[] = { | 1863 | static struct option opts[] = { |
1636 | { "verbose", 0, NULL, 'v' }, | 1864 | { "verbose", 0, NULL, 'v' }, |
@@ -1651,8 +1879,7 @@ static void usage(void) | |||
1651 | /*L:105 The main routine is where the real work begins: */ | 1879 | /*L:105 The main routine is where the real work begins: */ |
1652 | int main(int argc, char *argv[]) | 1880 | int main(int argc, char *argv[]) |
1653 | { | 1881 | { |
1654 | /* Memory, top-level pagetable, code startpoint and size of the | 1882 | /* Memory, code startpoint and size of the (optional) initrd. */ |
1655 | * (optional) initrd. */ | ||
1656 | unsigned long mem = 0, start, initrd_size = 0; | 1883 | unsigned long mem = 0, start, initrd_size = 0; |
1657 | /* Two temporaries. */ | 1884 | /* Two temporaries. */ |
1658 | int i, c; | 1885 | int i, c; |
@@ -1664,24 +1891,32 @@ int main(int argc, char *argv[]) | |||
1664 | /* Save the args: we "reboot" by execing ourselves again. */ | 1891 | /* Save the args: we "reboot" by execing ourselves again. */ |
1665 | main_args = argv; | 1892 | main_args = argv; |
1666 | 1893 | ||
1667 | /* First we initialize the device list. We keep a pointer to the last | 1894 | /* |
1895 | * First we initialize the device list. We keep a pointer to the last | ||
1668 | * device, and the next interrupt number to use for devices (1: | 1896 | * device, and the next interrupt number to use for devices (1: |
1669 | * remember that 0 is used by the timer). */ | 1897 | * remember that 0 is used by the timer). |
1898 | */ | ||
1670 | devices.lastdev = NULL; | 1899 | devices.lastdev = NULL; |
1671 | devices.next_irq = 1; | 1900 | devices.next_irq = 1; |
1672 | 1901 | ||
1902 | /* We're CPU 0. In fact, that's the only CPU possible right now. */ | ||
1673 | cpu_id = 0; | 1903 | cpu_id = 0; |
1674 | /* We need to know how much memory so we can set up the device | 1904 | |
1905 | /* | ||
1906 | * We need to know how much memory so we can set up the device | ||
1675 | * descriptor and memory pages for the devices as we parse the command | 1907 | * descriptor and memory pages for the devices as we parse the command |
1676 | * line. So we quickly look through the arguments to find the amount | 1908 | * line. So we quickly look through the arguments to find the amount |
1677 | * of memory now. */ | 1909 | * of memory now. |
1910 | */ | ||
1678 | for (i = 1; i < argc; i++) { | 1911 | for (i = 1; i < argc; i++) { |
1679 | if (argv[i][0] != '-') { | 1912 | if (argv[i][0] != '-') { |
1680 | mem = atoi(argv[i]) * 1024 * 1024; | 1913 | mem = atoi(argv[i]) * 1024 * 1024; |
1681 | /* We start by mapping anonymous pages over all of | 1914 | /* |
1915 | * We start by mapping anonymous pages over all of | ||
1682 | * the guest-physical memory range. This fills it with 0, | 1916 | * the guest-physical memory range. This fills it with 0, |
1683 | * and ensures that the Guest won't be killed when it | 1917 | * and ensures that the Guest won't be killed when it |
1684 | * tries to access it. */ | 1918 | * tries to access it. |
1919 | */ | ||
1685 | guest_base = map_zeroed_pages(mem / getpagesize() | 1920 | guest_base = map_zeroed_pages(mem / getpagesize() |
1686 | + DEVICE_PAGES); | 1921 | + DEVICE_PAGES); |
1687 | guest_limit = mem; | 1922 | guest_limit = mem; |
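
map_zeroed_pages() itself is defined earlier in lguest.c and isn't shown in this hunk, so the sketch below only illustrates the idea the comment describes: a private anonymous mapping is zero-filled and big enough for the Guest's RAM plus a few device pages. The 32MB size and 4 extra pages are illustrative, and the real helper may differ in detail.

    /* Standalone sketch: reserve zeroed "guest physical" memory with mmap. */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/mman.h>
    #include <err.h>

    int main(void)
    {
        unsigned long mem = 32 * 1024 * 1024;      /* illustrative Guest RAM */
        unsigned int device_pages = 4;             /* illustrative extra pages */
        size_t len = mem + device_pages * getpagesize();
        unsigned char *guest_base;

        guest_base = mmap(NULL, len, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
        if (guest_base == MAP_FAILED)
            err(1, "mapping %zu bytes", len);

        /* Anonymous mappings start out zeroed, so this always prints 0. */
        printf("guest_base %p, first byte %u\n", guest_base, guest_base[0]);
        munmap(guest_base, len);
        return 0;
    }
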
@@ -1714,8 +1949,10 @@ int main(int argc, char *argv[]) | |||
1714 | usage(); | 1949 | usage(); |
1715 | } | 1950 | } |
1716 | } | 1951 | } |
1717 | /* After the other arguments we expect memory and kernel image name, | 1952 | /* |
1718 | * followed by command line arguments for the kernel. */ | 1953 | * After the other arguments we expect memory and kernel image name, |
1954 | * followed by command line arguments for the kernel. | ||
1955 | */ | ||
1719 | if (optind + 2 > argc) | 1956 | if (optind + 2 > argc) |
1720 | usage(); | 1957 | usage(); |
1721 | 1958 | ||
@@ -1733,20 +1970,26 @@ int main(int argc, char *argv[]) | |||
1733 | /* Map the initrd image if requested (at top of physical memory) */ | 1970 | /* Map the initrd image if requested (at top of physical memory) */ |
1734 | if (initrd_name) { | 1971 | if (initrd_name) { |
1735 | initrd_size = load_initrd(initrd_name, mem); | 1972 | initrd_size = load_initrd(initrd_name, mem); |
1736 | /* These are the locations in the Linux boot header where the | 1973 | /* |
1737 | * start and size of the initrd are expected to be found. | 1974 | * These are the locations in the Linux boot header where the |
1975 | * start and size of the initrd are expected to be found. | ||
1976 | */ | ||
1738 | boot->hdr.ramdisk_image = mem - initrd_size; | 1977 | boot->hdr.ramdisk_image = mem - initrd_size; |
1739 | boot->hdr.ramdisk_size = initrd_size; | 1978 | boot->hdr.ramdisk_size = initrd_size; |
1740 | /* The bootloader type 0xFF means "unknown"; that's OK. */ | 1979 | /* The bootloader type 0xFF means "unknown"; that's OK. */ |
1741 | boot->hdr.type_of_loader = 0xFF; | 1980 | boot->hdr.type_of_loader = 0xFF; |
1742 | } | 1981 | } |
1743 | 1982 | ||
1744 | /* The Linux boot header contains an "E820" memory map: ours is a | 1983 | /* |
1745 | * simple, single region. */ | 1984 | * The Linux boot header contains an "E820" memory map: ours is a |
1985 | * simple, single region. | ||
1986 | */ | ||
1746 | boot->e820_entries = 1; | 1987 | boot->e820_entries = 1; |
1747 | boot->e820_map[0] = ((struct e820entry) { 0, mem, E820_RAM }); | 1988 | boot->e820_map[0] = ((struct e820entry) { 0, mem, E820_RAM }); |
1748 | /* The boot header contains a command line pointer: we put the command | 1989 | /* |
1749 | * line after the boot header. */ | 1990 | * The boot header contains a command line pointer: we put the command |
1991 | * line after the boot header. | ||
1992 | */ | ||
1750 | boot->hdr.cmd_line_ptr = to_guest_phys(boot + 1); | 1993 | boot->hdr.cmd_line_ptr = to_guest_phys(boot + 1); |
1751 | /* We use a simple helper to copy the arguments separated by spaces. */ | 1994 | /* We use a simple helper to copy the arguments separated by spaces. */ |
1752 | concat((char *)(boot + 1), argv+optind+2); | 1995 | concat((char *)(boot + 1), argv+optind+2); |
@@ -1760,11 +2003,13 @@ int main(int argc, char *argv[]) | |||
1760 | /* Tell the entry path not to try to reload segment registers. */ | 2003 | /* Tell the entry path not to try to reload segment registers. */ |
1761 | boot->hdr.loadflags |= KEEP_SEGMENTS; | 2004 | boot->hdr.loadflags |= KEEP_SEGMENTS; |
1762 | 2005 | ||
1763 | /* We tell the kernel to initialize the Guest: this returns the open | 2006 | /* |
1764 | * /dev/lguest file descriptor. */ | 2007 | * We tell the kernel to initialize the Guest: this returns the open |
2008 | * /dev/lguest file descriptor. | ||
2009 | */ | ||
1765 | tell_kernel(start); | 2010 | tell_kernel(start); |
1766 | 2011 | ||
1767 | /* Ensure that we terminate if a child dies. */ | 2012 | /* Ensure that we terminate if a device-servicing child dies. */ |
1768 | signal(SIGCHLD, kill_launcher); | 2013 | signal(SIGCHLD, kill_launcher); |
1769 | 2014 | ||
1770 | /* If we exit via err(), this kills all the threads, restores tty. */ | 2015 | /* If we exit via err(), this kills all the threads, restores tty. */ |
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt index cf42b820ff9d..d56a01775423 100644 --- a/Documentation/sysrq.txt +++ b/Documentation/sysrq.txt | |||
@@ -66,7 +66,8 @@ On all - write a character to /proc/sysrq-trigger. e.g.: | |||
66 | 'b' - Will immediately reboot the system without syncing or unmounting | 66 | 'b' - Will immediately reboot the system without syncing or unmounting |
67 | your disks. | 67 | your disks. |
68 | 68 | ||
69 | 'c' - Will perform a kexec reboot in order to take a crashdump. | 69 | 'c' - Will perform a system crash by a NULL pointer dereference. |
70 | A crashdump will be taken if configured. | ||
70 | 71 | ||
71 | 'd' - Shows all locks that are held. | 72 | 'd' - Shows all locks that are held. |
72 | 73 | ||
@@ -141,8 +142,8 @@ useful when you want to exit a program that will not let you switch consoles. | |||
141 | re'B'oot is good when you're unable to shut down. But you should also 'S'ync | 142 | re'B'oot is good when you're unable to shut down. But you should also 'S'ync |
142 | and 'U'mount first. | 143 | and 'U'mount first. |
143 | 144 | ||
144 | 'C'rashdump can be used to manually trigger a crashdump when the system is hung. | 145 | 'C'rash can be used to manually trigger a crashdump when the system is hung. |
145 | The kernel needs to have been built with CONFIG_KEXEC enabled. | 146 | Note that this just triggers a crash if there is no dump mechanism available. |
146 | 147 | ||
147 | 'S'ync is great when your system is locked up, it allows you to sync your | 148 | 'S'ync is great when your system is locked up, it allows you to sync your |
148 | disks and will certainly lessen the chance of data loss and fscking. Note | 149 | disks and will certainly lessen the chance of data loss and fscking. Note |
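
Since the text above notes that any of these keys can also be driven by writing the character to /proc/sysrq-trigger, here is a minimal C sketch for the 'c' case. Treat it as destructive: it crashes the kernel on the spot, and you only get a dump if a dump mechanism such as kdump/kexec is configured.

    /* Standalone sketch: trigger the sysrq 'c' action from userspace.
     * WARNING: run as root on a throwaway or kdump-configured machine only. */
    #include <fcntl.h>
    #include <unistd.h>
    #include <err.h>

    int main(void)
    {
        int fd = open("/proc/sysrq-trigger", O_WRONLY);

        if (fd < 0)
            err(1, "opening /proc/sysrq-trigger (are you root?)");
        if (write(fd, "c", 1) != 1)
            err(1, "writing 'c'");
        /* If we get here at all, the crash did not happen. */
        close(fd);
        return 0;
    }
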
diff --git a/MAINTAINERS b/MAINTAINERS index ebc269152faf..79471ba4981b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -73,8 +73,8 @@ Note: For the hard of thinking, this list is meant to remain in alphabetical | |||
73 | order. If you could add yourselves to it in alphabetical order that would be | 73 | order. If you could add yourselves to it in alphabetical order that would be |
74 | so much easier [Ed] | 74 | so much easier [Ed] |
75 | 75 | ||
76 | P: Person | 76 | P: Person (obsolete) |
77 | M: Mail patches to | 77 | M: Mail patches to: FullName <address@domain> |
78 | L: Mailing list that is relevant to this area | 78 | L: Mailing list that is relevant to this area |
79 | W: Web-page with status/info | 79 | W: Web-page with status/info |
80 | T: SCM tree type and location. Type is one of: git, hg, quilt, stgit. | 80 | T: SCM tree type and location. Type is one of: git, hg, quilt, stgit. |
@@ -104,88 +104,74 @@ X: Files and directories that are NOT maintained, same rules as F: | |||
104 | matches all files in and below net excluding net/ipv6/ | 104 | matches all files in and below net excluding net/ipv6/ |
105 | 105 | ||
106 | 3C505 NETWORK DRIVER | 106 | 3C505 NETWORK DRIVER |
107 | P: Philip Blundell | 107 | M: Philip Blundell <philb@gnu.org> |
108 | M: philb@gnu.org | ||
109 | L: netdev@vger.kernel.org | 108 | L: netdev@vger.kernel.org |
110 | S: Maintained | 109 | S: Maintained |
111 | F: drivers/net/3c505* | 110 | F: drivers/net/3c505* |
112 | 111 | ||
113 | 3C59X NETWORK DRIVER | 112 | 3C59X NETWORK DRIVER |
114 | P: Steffen Klassert | 113 | M: Steffen Klassert <klassert@mathematik.tu-chemnitz.de> |
115 | M: klassert@mathematik.tu-chemnitz.de | ||
116 | L: netdev@vger.kernel.org | 114 | L: netdev@vger.kernel.org |
117 | S: Maintained | 115 | S: Maintained |
118 | F: Documentation/networking/vortex.txt | 116 | F: Documentation/networking/vortex.txt |
119 | F: drivers/net/3c59x.c | 117 | F: drivers/net/3c59x.c |
120 | 118 | ||
121 | 3CR990 NETWORK DRIVER | 119 | 3CR990 NETWORK DRIVER |
122 | P: David Dillow | 120 | M: David Dillow <dave@thedillows.org> |
123 | M: dave@thedillows.org | ||
124 | L: netdev@vger.kernel.org | 121 | L: netdev@vger.kernel.org |
125 | S: Maintained | 122 | S: Maintained |
126 | F: drivers/net/typhoon* | 123 | F: drivers/net/typhoon* |
127 | 124 | ||
128 | 3W-9XXX SATA-RAID CONTROLLER DRIVER | 125 | 3W-9XXX SATA-RAID CONTROLLER DRIVER |
129 | P: Adam Radford | 126 | M: Adam Radford <linuxraid@amcc.com> |
130 | M: linuxraid@amcc.com | ||
131 | L: linux-scsi@vger.kernel.org | 127 | L: linux-scsi@vger.kernel.org |
132 | W: http://www.amcc.com | 128 | W: http://www.amcc.com |
133 | S: Supported | 129 | S: Supported |
134 | F: drivers/scsi/3w-9xxx* | 130 | F: drivers/scsi/3w-9xxx* |
135 | 131 | ||
136 | 3W-XXXX ATA-RAID CONTROLLER DRIVER | 132 | 3W-XXXX ATA-RAID CONTROLLER DRIVER |
137 | P: Adam Radford | 133 | M: Adam Radford <linuxraid@amcc.com> |
138 | M: linuxraid@amcc.com | ||
139 | L: linux-scsi@vger.kernel.org | 134 | L: linux-scsi@vger.kernel.org |
140 | W: http://www.amcc.com | 135 | W: http://www.amcc.com |
141 | S: Supported | 136 | S: Supported |
142 | F: drivers/scsi/3w-xxxx* | 137 | F: drivers/scsi/3w-xxxx* |
143 | 138 | ||
144 | 53C700 AND 53C700-66 SCSI DRIVER | 139 | 53C700 AND 53C700-66 SCSI DRIVER |
145 | P: James E.J. Bottomley | 140 | M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com> |
146 | M: James.Bottomley@HansenPartnership.com | ||
147 | L: linux-scsi@vger.kernel.org | 141 | L: linux-scsi@vger.kernel.org |
148 | S: Maintained | 142 | S: Maintained |
149 | F: drivers/scsi/53c700* | 143 | F: drivers/scsi/53c700* |
150 | 144 | ||
151 | 6PACK NETWORK DRIVER FOR AX.25 | 145 | 6PACK NETWORK DRIVER FOR AX.25 |
152 | P: Andreas Koensgen | 146 | M: Andreas Koensgen <ajk@comnets.uni-bremen.de> |
153 | M: ajk@comnets.uni-bremen.de | ||
154 | L: linux-hams@vger.kernel.org | 147 | L: linux-hams@vger.kernel.org |
155 | S: Maintained | 148 | S: Maintained |
156 | F: drivers/net/hamradio/6pack.c | 149 | F: drivers/net/hamradio/6pack.c |
157 | 150 | ||
158 | 8169 10/100/1000 GIGABIT ETHERNET DRIVER | 151 | 8169 10/100/1000 GIGABIT ETHERNET DRIVER |
159 | P: Francois Romieu | 152 | M: Francois Romieu <romieu@fr.zoreil.com> |
160 | M: romieu@fr.zoreil.com | ||
161 | L: netdev@vger.kernel.org | 153 | L: netdev@vger.kernel.org |
162 | S: Maintained | 154 | S: Maintained |
163 | F: drivers/net/r8169.c | 155 | F: drivers/net/r8169.c |
164 | 156 | ||
165 | 8250/16?50 (AND CLONE UARTS) SERIAL DRIVER | 157 | 8250/16?50 (AND CLONE UARTS) SERIAL DRIVER |
166 | P: Alan Cox | ||
167 | M: alan@lxorguk.ukuu.org.uk | ||
168 | L: linux-serial@vger.kernel.org | 158 | L: linux-serial@vger.kernel.org |
169 | W: http://serial.sourceforge.net | 159 | W: http://serial.sourceforge.net |
170 | S: Odd Fixes | 160 | S: Orphan |
171 | F: drivers/serial/8250* | 161 | F: drivers/serial/8250* |
172 | F: include/linux/serial_8250.h | 162 | F: include/linux/serial_8250.h |
173 | 163 | ||
174 | 8390 NETWORK DRIVERS [WD80x3/SMC-ELITE, SMC-ULTRA, NE2000, 3C503, etc.] | 164 | 8390 NETWORK DRIVERS [WD80x3/SMC-ELITE, SMC-ULTRA, NE2000, 3C503, etc.] |
175 | P: Paul Gortmaker | 165 | M: Paul Gortmaker <p_gortmaker@yahoo.com> |
176 | M: p_gortmaker@yahoo.com | ||
177 | L: netdev@vger.kernel.org | 166 | L: netdev@vger.kernel.org |
178 | S: Maintained | 167 | S: Maintained |
179 | F: drivers/net/*8390* | 168 | F: drivers/net/*8390* |
180 | F: drivers/net/ax88796.c | 169 | F: drivers/net/ax88796.c |
181 | 170 | ||
182 | 9P FILE SYSTEM | 171 | 9P FILE SYSTEM |
183 | P: Eric Van Hensbergen | 172 | M: Eric Van Hensbergen <ericvh@gmail.com> |
184 | M: ericvh@gmail.com | 173 | M: Ron Minnich <rminnich@sandia.gov> |
185 | P: Ron Minnich | 174 | M: Latchesar Ionkov <lucho@ionkov.net> |
186 | M: rminnich@sandia.gov | ||
187 | P: Latchesar Ionkov | ||
188 | M: lucho@ionkov.net | ||
189 | L: v9fs-developer@lists.sourceforge.net | 175 | L: v9fs-developer@lists.sourceforge.net |
190 | W: http://swik.net/v9fs | 176 | W: http://swik.net/v9fs |
191 | T: git git://git.kernel.org/pub/scm/linux/kernel/ericvh/v9fs.git | 177 | T: git git://git.kernel.org/pub/scm/linux/kernel/ericvh/v9fs.git |
@@ -194,15 +180,13 @@ F: Documentation/filesystems/9p.txt | |||
194 | F: fs/9p/ | 180 | F: fs/9p/ |
195 | 181 | ||
196 | A2232 SERIAL BOARD DRIVER | 182 | A2232 SERIAL BOARD DRIVER |
197 | P: Enver Haase | 183 | M: Enver Haase <A2232@gmx.net> |
198 | M: A2232@gmx.net | ||
199 | L: linux-m68k@lists.linux-m68k.org | 184 | L: linux-m68k@lists.linux-m68k.org |
200 | S: Maintained | 185 | S: Maintained |
201 | F: drivers/char/ser_a2232* | 186 | F: drivers/char/ser_a2232* |
202 | 187 | ||
203 | AACRAID SCSI RAID DRIVER | 188 | AACRAID SCSI RAID DRIVER |
204 | P: Adaptec OEM Raid Solutions | 189 | M: Adaptec OEM Raid Solutions <aacraid@adaptec.com> |
205 | M: aacraid@adaptec.com | ||
206 | L: linux-scsi@vger.kernel.org | 190 | L: linux-scsi@vger.kernel.org |
207 | W: http://www.adaptec.com/ | 191 | W: http://www.adaptec.com/ |
208 | S: Supported | 192 | S: Supported |
@@ -210,44 +194,38 @@ F: Documentation/scsi/aacraid.txt | |||
210 | F: drivers/scsi/aacraid/ | 194 | F: drivers/scsi/aacraid/ |
211 | 195 | ||
212 | ABIT UGURU 1,2 HARDWARE MONITOR DRIVER | 196 | ABIT UGURU 1,2 HARDWARE MONITOR DRIVER |
213 | P: Hans de Goede | 197 | M: Hans de Goede <j.w.r.degoede@hhs.nl> |
214 | M: j.w.r.degoede@hhs.nl | ||
215 | L: lm-sensors@lm-sensors.org | 198 | L: lm-sensors@lm-sensors.org |
216 | S: Maintained | 199 | S: Maintained |
217 | F: drivers/hwmon/abituguru.c | 200 | F: drivers/hwmon/abituguru.c |
218 | 201 | ||
219 | ABIT UGURU 3 HARDWARE MONITOR DRIVER | 202 | ABIT UGURU 3 HARDWARE MONITOR DRIVER |
220 | P: Alistair John Strachan | 203 | M: Alistair John Strachan <alistair@devzero.co.uk> |
221 | M: alistair@devzero.co.uk | ||
222 | L: lm-sensors@lm-sensors.org | 204 | L: lm-sensors@lm-sensors.org |
223 | S: Maintained | 205 | S: Maintained |
224 | F: drivers/hwmon/abituguru3.c | 206 | F: drivers/hwmon/abituguru3.c |
225 | 207 | ||
226 | ACENIC DRIVER | 208 | ACENIC DRIVER |
227 | P: Jes Sorensen | 209 | M: Jes Sorensen <jes@trained-monkey.org> |
228 | M: jes@trained-monkey.org | ||
229 | L: linux-acenic@sunsite.dk | 210 | L: linux-acenic@sunsite.dk |
230 | S: Maintained | 211 | S: Maintained |
231 | F: drivers/net/acenic* | 212 | F: drivers/net/acenic* |
232 | 213 | ||
233 | ACER ASPIRE ONE TEMPERATURE AND FAN DRIVER | 214 | ACER ASPIRE ONE TEMPERATURE AND FAN DRIVER |
234 | P: Peter Feuerer | 215 | M: Peter Feuerer <peter@piie.net> |
235 | M: peter@piie.net | 216 | W: http://piie.net/?section=acerhdf |
236 | W: http://piie.net/?section=acerhdf | 217 | S: Maintained |
237 | S: Maintained | 218 | F: drivers/platform/x86/acerhdf.c |
238 | F: drivers/platform/x86/acerhdf.c | ||
239 | 219 | ||
240 | ACER WMI LAPTOP EXTRAS | 220 | ACER WMI LAPTOP EXTRAS |
241 | P: Carlos Corbacho | 221 | M: Carlos Corbacho <carlos@strangeworlds.co.uk> |
242 | M: carlos@strangeworlds.co.uk | ||
243 | L: aceracpi@googlegroups.com (subscribers-only) | 222 | L: aceracpi@googlegroups.com (subscribers-only) |
244 | W: http://code.google.com/p/aceracpi | 223 | W: http://code.google.com/p/aceracpi |
245 | S: Maintained | 224 | S: Maintained |
246 | F: drivers/platform/x86/acer-wmi.c | 225 | F: drivers/platform/x86/acer-wmi.c |
247 | 226 | ||
248 | ACPI | 227 | ACPI |
249 | P: Len Brown | 228 | M: Len Brown <lenb@kernel.org> |
250 | M: lenb@kernel.org | ||
251 | L: linux-acpi@vger.kernel.org | 229 | L: linux-acpi@vger.kernel.org |
252 | W: http://www.lesswatts.org/projects/acpi/ | 230 | W: http://www.lesswatts.org/projects/acpi/ |
253 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git | 231 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git |
@@ -257,8 +235,7 @@ F: drivers/pnp/pnpacpi/ | |||
257 | F: include/linux/acpi.h | 235 | F: include/linux/acpi.h |
258 | 236 | ||
259 | ACPI BATTERY DRIVERS | 237 | ACPI BATTERY DRIVERS |
260 | P: Alexey Starikovskiy | 238 | M: Alexey Starikovskiy <astarikovskiy@suse.de> |
261 | M: astarikovskiy@suse.de | ||
262 | L: linux-acpi@vger.kernel.org | 239 | L: linux-acpi@vger.kernel.org |
263 | W: http://www.lesswatts.org/projects/acpi/ | 240 | W: http://www.lesswatts.org/projects/acpi/ |
264 | S: Supported | 241 | S: Supported |
@@ -266,80 +243,69 @@ F: drivers/acpi/battery.c | |||
266 | F: drivers/acpi/*sbs* | 243 | F: drivers/acpi/*sbs* |
267 | 244 | ||
268 | ACPI EC DRIVER | 245 | ACPI EC DRIVER |
269 | P: Alexey Starikovskiy | 246 | M: Alexey Starikovskiy <astarikovskiy@suse.de> |
270 | M: astarikovskiy@suse.de | ||
271 | L: linux-acpi@vger.kernel.org | 247 | L: linux-acpi@vger.kernel.org |
272 | W: http://www.lesswatts.org/projects/acpi/ | 248 | W: http://www.lesswatts.org/projects/acpi/ |
273 | S: Supported | 249 | S: Supported |
274 | F: drivers/acpi/ec.c | 250 | F: drivers/acpi/ec.c |
275 | 251 | ||
276 | ACPI FAN DRIVER | 252 | ACPI FAN DRIVER |
277 | P: Zhang Rui | 253 | M: Zhang Rui <rui.zhang@intel.com> |
278 | M: rui.zhang@intel.com | ||
279 | L: linux-acpi@vger.kernel.org | 254 | L: linux-acpi@vger.kernel.org |
280 | W: http://www.lesswatts.org/projects/acpi/ | 255 | W: http://www.lesswatts.org/projects/acpi/ |
281 | S: Supported | 256 | S: Supported |
282 | F: drivers/acpi/fan.c | 257 | F: drivers/acpi/fan.c |
283 | 258 | ||
284 | ACPI PCI HOTPLUG DRIVER | 259 | ACPI PCI HOTPLUG DRIVER |
285 | P: Kristen Carlson Accardi | 260 | M: Kristen Carlson Accardi <kristen.c.accardi@intel.com> |
286 | M: kristen.c.accardi@intel.com | ||
287 | L: linux-pci@vger.kernel.org | 261 | L: linux-pci@vger.kernel.org |
288 | S: Supported | 262 | S: Supported |
289 | F: drivers/pci/hotplug/acpi* | 263 | F: drivers/pci/hotplug/acpi* |
290 | 264 | ||
291 | ACPI THERMAL DRIVER | 265 | ACPI THERMAL DRIVER |
292 | P: Zhang Rui | 266 | M: Zhang Rui <rui.zhang@intel.com> |
293 | M: rui.zhang@intel.com | ||
294 | L: linux-acpi@vger.kernel.org | 267 | L: linux-acpi@vger.kernel.org |
295 | W: http://www.lesswatts.org/projects/acpi/ | 268 | W: http://www.lesswatts.org/projects/acpi/ |
296 | S: Supported | 269 | S: Supported |
297 | F: drivers/acpi/*thermal* | 270 | F: drivers/acpi/*thermal* |
298 | 271 | ||
299 | ACPI VIDEO DRIVER | 272 | ACPI VIDEO DRIVER |
300 | P: Zhang Rui | 273 | M: Zhang Rui <rui.zhang@intel.com> |
301 | M: rui.zhang@intel.com | ||
302 | L: linux-acpi@vger.kernel.org | 274 | L: linux-acpi@vger.kernel.org |
303 | W: http://www.lesswatts.org/projects/acpi/ | 275 | W: http://www.lesswatts.org/projects/acpi/ |
304 | S: Supported | 276 | S: Supported |
305 | F: drivers/acpi/video.c | 277 | F: drivers/acpi/video.c |
306 | 278 | ||
307 | ACPI WMI DRIVER | 279 | ACPI WMI DRIVER |
308 | P: Carlos Corbacho | 280 | M: Carlos Corbacho <carlos@strangeworlds.co.uk> |
309 | M: carlos@strangeworlds.co.uk | ||
310 | L: linux-acpi@vger.kernel.org | 281 | L: linux-acpi@vger.kernel.org |
311 | W: http://www.lesswatts.org/projects/acpi/ | 282 | W: http://www.lesswatts.org/projects/acpi/ |
312 | S: Maintained | 283 | S: Maintained |
313 | F: drivers/platform/x86/wmi.c | 284 | F: drivers/platform/x86/wmi.c |
314 | 285 | ||
315 | AD1889 ALSA SOUND DRIVER | 286 | AD1889 ALSA SOUND DRIVER |
316 | P: Kyle McMartin | 287 | M: Kyle McMartin <kyle@mcmartin.ca> |
317 | M: kyle@mcmartin.ca | 288 | M: Thibaut Varene <T-Bone@parisc-linux.org> |
318 | P: Thibaut Varene | ||
319 | M: T-Bone@parisc-linux.org | ||
320 | W: http://wiki.parisc-linux.org/AD1889 | 289 | W: http://wiki.parisc-linux.org/AD1889 |
321 | L: linux-parisc@vger.kernel.org | 290 | L: linux-parisc@vger.kernel.org |
322 | S: Maintained | 291 | S: Maintained |
323 | F: sound/pci/ad1889.* | 292 | F: sound/pci/ad1889.* |
324 | 293 | ||
325 | ADM1025 HARDWARE MONITOR DRIVER | 294 | ADM1025 HARDWARE MONITOR DRIVER |
326 | P: Jean Delvare | 295 | M: Jean Delvare <khali@linux-fr.org> |
327 | M: khali@linux-fr.org | ||
328 | L: lm-sensors@lm-sensors.org | 296 | L: lm-sensors@lm-sensors.org |
329 | S: Maintained | 297 | S: Maintained |
330 | F: Documentation/hwmon/adm1025 | 298 | F: Documentation/hwmon/adm1025 |
331 | F: drivers/hwmon/adm1025.c | 299 | F: drivers/hwmon/adm1025.c |
332 | 300 | ||
333 | ADM1029 HARDWARE MONITOR DRIVER | 301 | ADM1029 HARDWARE MONITOR DRIVER |
334 | P: Corentin Labbe | 302 | M: Corentin Labbe <corentin.labbe@geomatys.fr> |
335 | M: corentin.labbe@geomatys.fr | ||
336 | L: lm-sensors@lm-sensors.org | 303 | L: lm-sensors@lm-sensors.org |
337 | S: Maintained | 304 | S: Maintained |
338 | F: drivers/hwmon/adm1029.c | 305 | F: drivers/hwmon/adm1029.c |
339 | 306 | ||
340 | ADM8211 WIRELESS DRIVER | 307 | ADM8211 WIRELESS DRIVER |
341 | P: Michael Wu | 308 | M: Michael Wu <flamingice@sourmilk.net> |
342 | M: flamingice@sourmilk.net | ||
343 | L: linux-wireless@vger.kernel.org | 309 | L: linux-wireless@vger.kernel.org |
344 | W: http://linuxwireless.org/ | 310 | W: http://linuxwireless.org/ |
345 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mwu/mac80211-drivers.git | 311 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mwu/mac80211-drivers.git |
@@ -347,35 +313,30 @@ S: Maintained | |||
347 | F: drivers/net/wireless/adm8211.* | 313 | F: drivers/net/wireless/adm8211.* |
348 | 314 | ||
349 | ADT746X FAN DRIVER | 315 | ADT746X FAN DRIVER |
350 | P: Colin Leroy | 316 | M: Colin Leroy <colin@colino.net> |
351 | M: colin@colino.net | ||
352 | S: Maintained | 317 | S: Maintained |
353 | F: drivers/macintosh/therm_adt746x.c | 318 | F: drivers/macintosh/therm_adt746x.c |
354 | 319 | ||
355 | ADVANSYS SCSI DRIVER | 320 | ADVANSYS SCSI DRIVER |
356 | P: Matthew Wilcox | 321 | M: Matthew Wilcox <matthew@wil.cx> |
357 | M: matthew@wil.cx | ||
358 | L: linux-scsi@vger.kernel.org | 322 | L: linux-scsi@vger.kernel.org |
359 | S: Maintained | 323 | S: Maintained |
360 | F: Documentation/scsi/advansys.txt | 324 | F: Documentation/scsi/advansys.txt |
361 | F: drivers/scsi/advansys.c | 325 | F: drivers/scsi/advansys.c |
362 | 326 | ||
363 | AEDSP16 DRIVER | 327 | AEDSP16 DRIVER |
364 | P: Riccardo Facchetti | 328 | M: Riccardo Facchetti <fizban@tin.it> |
365 | M: fizban@tin.it | ||
366 | S: Maintained | 329 | S: Maintained |
367 | F: sound/oss/aedsp16.c | 330 | F: sound/oss/aedsp16.c |
368 | 331 | ||
369 | AFFS FILE SYSTEM | 332 | AFFS FILE SYSTEM |
370 | P: Roman Zippel | 333 | M: Roman Zippel <zippel@linux-m68k.org> |
371 | M: zippel@linux-m68k.org | ||
372 | S: Maintained | 334 | S: Maintained |
373 | F: Documentation/filesystems/affs.txt | 335 | F: Documentation/filesystems/affs.txt |
374 | F: fs/affs/ | 336 | F: fs/affs/ |
375 | 337 | ||
376 | AFS FILESYSTEM & AF_RXRPC SOCKET DOMAIN | 338 | AFS FILESYSTEM & AF_RXRPC SOCKET DOMAIN |
377 | P: David Howells | 339 | M: David Howells <dhowells@redhat.com> |
378 | M: dhowells@redhat.com | ||
379 | L: linux-afs@lists.infradead.org | 340 | L: linux-afs@lists.infradead.org |
380 | S: Supported | 341 | S: Supported |
381 | F: fs/afs/ | 342 | F: fs/afs/ |
@@ -383,40 +344,35 @@ F: include/net/af_rxrpc.h | |||
383 | F: net/rxrpc/af_rxrpc.c | 344 | F: net/rxrpc/af_rxrpc.c |
384 | 345 | ||
385 | AGPGART DRIVER | 346 | AGPGART DRIVER |
386 | P: David Airlie | 347 | M: David Airlie <airlied@linux.ie> |
387 | M: airlied@linux.ie | ||
388 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git | 348 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git |
389 | S: Maintained | 349 | S: Maintained |
390 | F: drivers/char/agp/ | 350 | F: drivers/char/agp/ |
391 | F: include/linux/agp* | 351 | F: include/linux/agp* |
392 | 352 | ||
393 | AHA152X SCSI DRIVER | 353 | AHA152X SCSI DRIVER |
394 | P: Juergen E. Fischer | 354 | M: "Juergen E. Fischer" <fischer@norbit.de> |
395 | M: fischer@norbit.de | ||
396 | L: linux-scsi@vger.kernel.org | 355 | L: linux-scsi@vger.kernel.org |
397 | S: Maintained | 356 | S: Maintained |
398 | F: drivers/scsi/aha152x* | 357 | F: drivers/scsi/aha152x* |
399 | F: drivers/scsi/pcmcia/aha152x* | 358 | F: drivers/scsi/pcmcia/aha152x* |
400 | 359 | ||
401 | AIC7XXX / AIC79XX SCSI DRIVER | 360 | AIC7XXX / AIC79XX SCSI DRIVER |
402 | P: Hannes Reinecke | 361 | M: Hannes Reinecke <hare@suse.de> |
403 | M: hare@suse.de | ||
404 | L: linux-scsi@vger.kernel.org | 362 | L: linux-scsi@vger.kernel.org |
405 | S: Maintained | 363 | S: Maintained |
406 | F: drivers/scsi/aic7xxx/ | 364 | F: drivers/scsi/aic7xxx/ |
407 | F: drivers/scsi/aic7xxx_old/ | 365 | F: drivers/scsi/aic7xxx_old/ |
408 | 366 | ||
409 | AIO | 367 | AIO |
410 | P: Benjamin LaHaise | 368 | M: Benjamin LaHaise <bcrl@kvack.org> |
411 | M: bcrl@kvack.org | ||
412 | L: linux-aio@kvack.org | 369 | L: linux-aio@kvack.org |
413 | S: Supported | 370 | S: Supported |
414 | F: fs/aio.c | 371 | F: fs/aio.c |
415 | F: include/linux/*aio*.h | 372 | F: include/linux/*aio*.h |
416 | 373 | ||
417 | ALCATEL SPEEDTOUCH USB DRIVER | 374 | ALCATEL SPEEDTOUCH USB DRIVER |
418 | P: Duncan Sands | 375 | M: Duncan Sands <duncan.sands@free.fr> |
419 | M: duncan.sands@free.fr | ||
420 | L: linux-usb@vger.kernel.org | 376 | L: linux-usb@vger.kernel.org |
421 | W: http://www.linux-usb.org/SpeedTouch/ | 377 | W: http://www.linux-usb.org/SpeedTouch/ |
422 | S: Maintained | 378 | S: Maintained |
@@ -424,32 +380,27 @@ F: drivers/usb/atm/speedtch.c | |||
424 | F: drivers/usb/atm/usbatm.c | 380 | F: drivers/usb/atm/usbatm.c |
425 | 381 | ||
426 | ALCHEMY AU1XX0 MMC DRIVER | 382 | ALCHEMY AU1XX0 MMC DRIVER |
427 | P: Manuel Lauss | 383 | M: Manuel Lauss <manuel.lauss@gmail.com> |
428 | M: manuel.lauss@gmail.com | ||
429 | S: Maintained | 384 | S: Maintained |
430 | F: drivers/mmc/host/au1xmmc.c | 385 | F: drivers/mmc/host/au1xmmc.c |
431 | 386 | ||
432 | ALI1563 I2C DRIVER | 387 | ALI1563 I2C DRIVER |
433 | P: Rudolf Marek | 388 | M: Rudolf Marek <r.marek@assembler.cz> |
434 | M: r.marek@assembler.cz | ||
435 | L: linux-i2c@vger.kernel.org | 389 | L: linux-i2c@vger.kernel.org |
436 | S: Maintained | 390 | S: Maintained |
437 | F: Documentation/i2c/busses/i2c-ali1563 | 391 | F: Documentation/i2c/busses/i2c-ali1563 |
438 | F: drivers/i2c/busses/i2c-ali1563.c | 392 | F: drivers/i2c/busses/i2c-ali1563.c |
439 | 393 | ||
440 | ALPHA PORT | 394 | ALPHA PORT |
441 | P: Richard Henderson | 395 | M: Richard Henderson <rth@twiddle.net> |
442 | M: rth@twiddle.net | ||
443 | S: Odd Fixes for 2.4; Maintained for 2.6. | 396 | S: Odd Fixes for 2.4; Maintained for 2.6. |
444 | P: Ivan Kokshaysky | 397 | M: Ivan Kokshaysky <ink@jurassic.park.msu.ru> |
445 | M: ink@jurassic.park.msu.ru | ||
446 | S: Maintained for 2.4; PCI support for 2.6. | 398 | S: Maintained for 2.4; PCI support for 2.6. |
447 | L: linux-alpha@vger.kernel.org | 399 | L: linux-alpha@vger.kernel.org |
448 | F: arch/alpha/ | 400 | F: arch/alpha/ |
449 | 401 | ||
450 | AMD GEODE CS5536 USB DEVICE CONTROLLER DRIVER | 402 | AMD GEODE CS5536 USB DEVICE CONTROLLER DRIVER |
451 | P: Thomas Dahlmann | 403 | M: Thomas Dahlmann <dahlmann.thomas@arcor.de> |
452 | M: dahlmann.thomas@arcor.de | ||
453 | L: linux-geode@lists.infradead.org (moderated for non-subscribers) | 404 | L: linux-geode@lists.infradead.org (moderated for non-subscribers) |
454 | S: Supported | 405 | S: Supported |
455 | F: drivers/usb/gadget/amd5536udc.* | 406 | F: drivers/usb/gadget/amd5536udc.* |
@@ -466,8 +417,7 @@ F: drivers/video/geode/ | |||
466 | F: arch/x86/include/asm/geode.h | 417 | F: arch/x86/include/asm/geode.h |
467 | 418 | ||
468 | AMD IOMMU (AMD-VI) | 419 | AMD IOMMU (AMD-VI) |
469 | P: Joerg Roedel | 420 | M: Joerg Roedel <joerg.roedel@amd.com> |
470 | M: joerg.roedel@amd.com | ||
471 | L: iommu@lists.linux-foundation.org | 421 | L: iommu@lists.linux-foundation.org |
472 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu.git | 422 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu.git |
473 | S: Supported | 423 | S: Supported |
@@ -475,40 +425,33 @@ F: arch/x86/kernel/amd_iommu*.c | |||
475 | F: arch/x86/include/asm/amd_iommu*.h | 425 | F: arch/x86/include/asm/amd_iommu*.h |
476 | 426 | ||
477 | AMD MICROCODE UPDATE SUPPORT | 427 | AMD MICROCODE UPDATE SUPPORT |
478 | P: Andreas Herrmann | 428 | M: Andreas Herrmann <andreas.herrmann3@amd.com> |
479 | M: andreas.herrmann3@amd.com | ||
480 | L: amd64-microcode@amd64.org | 429 | L: amd64-microcode@amd64.org |
481 | S: Supported | 430 | S: Supported |
482 | F: arch/x86/kernel/microcode_amd.c | 431 | F: arch/x86/kernel/microcode_amd.c |
483 | 432 | ||
484 | AMS (Apple Motion Sensor) DRIVER | 433 | AMS (Apple Motion Sensor) DRIVER |
485 | P: Stelian Pop | 434 | M: Stelian Pop <stelian@popies.net> |
486 | M: stelian@popies.net | 435 | M: Michael Hanselmann <linux-kernel@hansmi.ch> |
487 | P: Michael Hanselmann | ||
488 | M: linux-kernel@hansmi.ch | ||
489 | S: Supported | 436 | S: Supported |
490 | F: drivers/hwmon/ams/ | 437 | F: drivers/hwmon/ams/ |
491 | 438 | ||
492 | AMSO1100 RNIC DRIVER | 439 | AMSO1100 RNIC DRIVER |
493 | P: Tom Tucker | 440 | M: Tom Tucker <tom@opengridcomputing.com> |
494 | M: tom@opengridcomputing.com | 441 | M: Steve Wise <swise@opengridcomputing.com> |
495 | P: Steve Wise | ||
496 | M: swise@opengridcomputing.com | ||
497 | L: general@lists.openfabrics.org | 442 | L: general@lists.openfabrics.org |
498 | S: Maintained | 443 | S: Maintained |
499 | F: drivers/infiniband/hw/amso1100/ | 444 | F: drivers/infiniband/hw/amso1100/ |
500 | 445 | ||
501 | AOA (Apple Onboard Audio) ALSA DRIVER | 446 | AOA (Apple Onboard Audio) ALSA DRIVER |
502 | P: Johannes Berg | 447 | M: Johannes Berg <johannes@sipsolutions.net> |
503 | M: johannes@sipsolutions.net | ||
504 | L: linuxppc-dev@ozlabs.org | 448 | L: linuxppc-dev@ozlabs.org |
505 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | 449 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) |
506 | S: Maintained | 450 | S: Maintained |
507 | F: sound/aoa/ | 451 | F: sound/aoa/ |
508 | 452 | ||
509 | APM DRIVER | 453 | APM DRIVER |
510 | P: Stephen Rothwell | 454 | M: Stephen Rothwell <sfr@canb.auug.org.au> |
511 | M: sfr@canb.auug.org.au | ||
512 | L: linux-laptop@vger.kernel.org | 455 | L: linux-laptop@vger.kernel.org |
513 | W: http://www.canb.auug.org.au/~sfr/ | 456 | W: http://www.canb.auug.org.au/~sfr/ |
514 | S: Supported | 457 | S: Supported |
@@ -516,51 +459,44 @@ F: arch/x86/kernel/apm_32.c | |||
516 | F: include/linux/apm_bios.h | 459 | F: include/linux/apm_bios.h |
517 | 460 | ||
518 | APPLE BCM5974 MULTITOUCH DRIVER | 461 | APPLE BCM5974 MULTITOUCH DRIVER |
519 | P: Henrik Rydberg | 462 | M: Henrik Rydberg <rydberg@euromail.se> |
520 | M: rydberg@euromail.se | ||
521 | L: linux-input@vger.kernel.org | 463 | L: linux-input@vger.kernel.org |
522 | S: Maintained | 464 | S: Maintained |
523 | F: drivers/input/mouse/bcm5974.c | 465 | F: drivers/input/mouse/bcm5974.c |
524 | 466 | ||
525 | APPLE SMC DRIVER | 467 | APPLE SMC DRIVER |
526 | P: Nicolas Boichat | 468 | M: Nicolas Boichat <nicolas@boichat.ch> |
527 | M: nicolas@boichat.ch | ||
528 | L: mactel-linux-devel@lists.sourceforge.net | 469 | L: mactel-linux-devel@lists.sourceforge.net |
529 | S: Maintained | 470 | S: Maintained |
530 | F: drivers/hwmon/applesmc.c | 471 | F: drivers/hwmon/applesmc.c |
531 | 472 | ||
532 | APPLETALK NETWORK LAYER | 473 | APPLETALK NETWORK LAYER |
533 | P: Arnaldo Carvalho de Melo | 474 | M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> |
534 | M: acme@ghostprotocols.net | ||
535 | S: Maintained | 475 | S: Maintained |
536 | F: drivers/net/appletalk/ | 476 | F: drivers/net/appletalk/ |
537 | F: net/appletalk/ | 477 | F: net/appletalk/ |
538 | 478 | ||
539 | APPLETOUCH TOUCHPAD DRIVER | 479 | APPLETOUCH TOUCHPAD DRIVER |
540 | P: Johannes Berg | 480 | M: Johannes Berg <johannes@sipsolutions.net> |
541 | M: johannes@sipsolutions.net | ||
542 | L: linux-input@vger.kernel.org | 481 | L: linux-input@vger.kernel.org |
543 | S: Maintained | 482 | S: Maintained |
544 | F: Documentation/input/appletouch.txt | 483 | F: Documentation/input/appletouch.txt |
545 | F: drivers/input/mouse/appletouch.c | 484 | F: drivers/input/mouse/appletouch.c |
546 | 485 | ||
547 | ARC FRAMEBUFFER DRIVER | 486 | ARC FRAMEBUFFER DRIVER |
548 | P: Jaya Kumar | 487 | M: Jaya Kumar <jayalk@intworks.biz> |
549 | M: jayalk@intworks.biz | ||
550 | S: Maintained | 488 | S: Maintained |
551 | F: drivers/video/arcfb.c | 489 | F: drivers/video/arcfb.c |
552 | F: drivers/video/fb_defio.c | 490 | F: drivers/video/fb_defio.c |
553 | 491 | ||
554 | ARM MFM AND FLOPPY DRIVERS | 492 | ARM MFM AND FLOPPY DRIVERS |
555 | P: Ian Molton | 493 | M: Ian Molton <spyro@f2s.com> |
556 | M: spyro@f2s.com | ||
557 | S: Maintained | 494 | S: Maintained |
558 | F: arch/arm/lib/floppydma.S | 495 | F: arch/arm/lib/floppydma.S |
559 | F: arch/arm/include/asm/floppy.h | 496 | F: arch/arm/include/asm/floppy.h |
560 | 497 | ||
561 | ARM PORT | 498 | ARM PORT |
562 | P: Russell King | 499 | M: Russell King <linux@arm.linux.org.uk> |
563 | M: linux@arm.linux.org.uk | ||
564 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 500 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
565 | W: http://www.arm.linux.org.uk/ | 501 | W: http://www.arm.linux.org.uk/ |
566 | S: Maintained | 502 | S: Maintained |
@@ -571,79 +507,67 @@ S: Orphan | |||
571 | F: drivers/mmc/host/mmci.* | 507 | F: drivers/mmc/host/mmci.* |
572 | 508 | ||
573 | ARM/ADI ROADRUNNER MACHINE SUPPORT | 509 | ARM/ADI ROADRUNNER MACHINE SUPPORT |
574 | P: Lennert Buytenhek | 510 | M: Lennert Buytenhek <kernel@wantstofly.org> |
575 | M: kernel@wantstofly.org | ||
576 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 511 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
577 | S: Maintained | 512 | S: Maintained |
578 | F: arch/arm/mach-ixp23xx/ | 513 | F: arch/arm/mach-ixp23xx/ |
579 | F: arch/arm/mach-ixp23xx/include/mach/ | 514 | F: arch/arm/mach-ixp23xx/include/mach/ |
580 | 515 | ||
581 | ARM/ADS SPHERE MACHINE SUPPORT | 516 | ARM/ADS SPHERE MACHINE SUPPORT |
582 | P: Lennert Buytenhek | 517 | M: Lennert Buytenhek <kernel@wantstofly.org> |
583 | M: kernel@wantstofly.org | ||
584 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 518 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
585 | S: Maintained | 519 | S: Maintained |
586 | 520 | ||
587 | ARM/AFEB9260 MACHINE SUPPORT | 521 | ARM/AFEB9260 MACHINE SUPPORT |
588 | P: Sergey Lapin | 522 | M: Sergey Lapin <slapin@ossfans.org> |
589 | M: slapin@ossfans.org | ||
590 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 523 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
591 | S: Maintained | 524 | S: Maintained |
592 | 525 | ||
593 | ARM/AJECO 1ARM MACHINE SUPPORT | 526 | ARM/AJECO 1ARM MACHINE SUPPORT |
594 | P: Lennert Buytenhek | 527 | M: Lennert Buytenhek <kernel@wantstofly.org> |
595 | M: kernel@wantstofly.org | ||
596 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 528 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
597 | S: Maintained | 529 | S: Maintained |
598 | 530 | ||
599 | ARM/ATMEL AT91RM9200 ARM ARCHITECTURE | 531 | ARM/ATMEL AT91RM9200 ARM ARCHITECTURE |
600 | P: Andrew Victor | 532 | M: Andrew Victor <linux@maxim.org.za> |
601 | M: linux@maxim.org.za | ||
602 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 533 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
603 | W: http://maxim.org.za/at91_26.html | 534 | W: http://maxim.org.za/at91_26.html |
604 | S: Maintained | 535 | S: Maintained |
605 | 536 | ||
606 | ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE | 537 | ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE |
607 | P: Lennert Buytenhek | 538 | M: Lennert Buytenhek <kernel@wantstofly.org> |
608 | M: kernel@wantstofly.org | ||
609 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 539 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
610 | S: Maintained | 540 | S: Maintained |
611 | 541 | ||
612 | ARM/CIRRUS LOGIC EDB9315A MACHINE SUPPORT | 542 | ARM/CIRRUS LOGIC EDB9315A MACHINE SUPPORT |
613 | P: Lennert Buytenhek | 543 | M: Lennert Buytenhek <kernel@wantstofly.org> |
614 | M: kernel@wantstofly.org | ||
615 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 544 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
616 | S: Maintained | 545 | S: Maintained |
617 | 546 | ||
618 | ARM/CLKDEV SUPPORT | 547 | ARM/CLKDEV SUPPORT |
619 | P: Russell King | 548 | M: Russell King <linux@arm.linux.org.uk> |
620 | M: linux@arm.linux.org.uk | ||
621 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 549 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
622 | F: arch/arm/common/clkdev.c | 550 | F: arch/arm/common/clkdev.c |
623 | F: arch/arm/include/asm/clkdev.h | 551 | F: arch/arm/include/asm/clkdev.h |
624 | 552 | ||
625 | ARM/COMPULAB CM-X270/EM-X270 and CM-X300 MACHINE SUPPORT | 553 | ARM/COMPULAB CM-X270/EM-X270 and CM-X300 MACHINE SUPPORT |
626 | P: Mike Rapoport | 554 | M: Mike Rapoport <mike@compulab.co.il> |
627 | M: mike@compulab.co.il | ||
628 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 555 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
629 | S: Maintained | 556 | S: Maintained |
630 | 557 | ||
631 | ARM/CORGI MACHINE SUPPORT | 558 | ARM/CORGI MACHINE SUPPORT |
632 | P: Richard Purdie | 559 | M: Richard Purdie <rpurdie@rpsys.net> |
633 | M: rpurdie@rpsys.net | ||
634 | S: Maintained | 560 | S: Maintained |
635 | 561 | ||
636 | ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE | 562 | ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE |
637 | P: Paulius Zaleckas | 563 | M: Paulius Zaleckas <paulius.zaleckas@teltonika.lt> |
638 | M: paulius.zaleckas@teltonika.lt | ||
639 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 564 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
640 | T: git git://gitorious.org/linux-gemini/mainline.git | 565 | T: git git://gitorious.org/linux-gemini/mainline.git |
641 | S: Maintained | 566 | S: Maintained |
642 | F: arch/arm/mach-gemini/ | 567 | F: arch/arm/mach-gemini/ |
643 | 568 | ||
644 | ARM/EBSA110 MACHINE SUPPORT | 569 | ARM/EBSA110 MACHINE SUPPORT |
645 | P: Russell King | 570 | M: Russell King <linux@arm.linux.org.uk> |
646 | M: linux@arm.linux.org.uk | ||
647 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 571 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
648 | W: http://www.arm.linux.org.uk/ | 572 | W: http://www.arm.linux.org.uk/ |
649 | S: Maintained | 573 | S: Maintained |
@@ -651,12 +575,9 @@ F: arch/arm/mach-ebsa110/ | |||
651 | F: drivers/net/arm/am79c961a.* | 575 | F: drivers/net/arm/am79c961a.* |
652 | 576 | ||
653 | ARM/EZX SMARTPHONES (A780, A910, A1200, E680, ROKR E2 and ROKR E6) | 577 | ARM/EZX SMARTPHONES (A780, A910, A1200, E680, ROKR E2 and ROKR E6) |
654 | P: Daniel Ribeiro | 578 | M: Daniel Ribeiro <drwyrm@gmail.com> |
655 | M: drwyrm@gmail.com | 579 | M: Stefan Schmidt <stefan@openezx.org> |
656 | P: Stefan Schmidt | 580 | M: Harald Welte <laforge@openezx.org> |
657 | M: stefan@openezx.org | ||
658 | P: Harald Welte | ||
659 | M: laforge@openezx.org | ||
660 | L: openezx-devel@lists.openezx.org (subscribers-only) | 581 | L: openezx-devel@lists.openezx.org (subscribers-only) |
661 | W: http://www.openezx.org/ | 582 | W: http://www.openezx.org/ |
662 | S: Maintained | 583 | S: Maintained |
@@ -664,15 +585,13 @@ T: topgit git://git.openezx.org/openezx.git | |||
664 | F: arch/arm/mach-pxa/ezx.c | 585 | F: arch/arm/mach-pxa/ezx.c |
665 | 586 | ||
666 | ARM/FARADAY FA526 PORT | 587 | ARM/FARADAY FA526 PORT |
667 | P: Paulius Zaleckas | 588 | M: Paulius Zaleckas <paulius.zaleckas@teltonika.lt> |
668 | M: paulius.zaleckas@teltonika.lt | ||
669 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 589 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
670 | S: Maintained | 590 | S: Maintained |
671 | F: arch/arm/mm/*-fa* | 591 | F: arch/arm/mm/*-fa* |
672 | 592 | ||
673 | ARM/FOOTBRIDGE ARCHITECTURE | 593 | ARM/FOOTBRIDGE ARCHITECTURE |
674 | P: Russell King | 594 | M: Russell King <linux@arm.linux.org.uk> |
675 | M: linux@arm.linux.org.uk | ||
676 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 595 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
677 | W: http://www.arm.linux.org.uk/ | 596 | W: http://www.arm.linux.org.uk/ |
678 | S: Maintained | 597 | S: Maintained |
@@ -680,175 +599,146 @@ F: arch/arm/include/asm/hardware/dec21285.h | |||
680 | F: arch/arm/mach-footbridge/ | 599 | F: arch/arm/mach-footbridge/ |
681 | 600 | ||
682 | ARM/FREESCALE IMX / MXC ARM ARCHITECTURE | 601 | ARM/FREESCALE IMX / MXC ARM ARCHITECTURE |
683 | P: Sascha Hauer | 602 | M: Sascha Hauer <kernel@pengutronix.de> |
684 | M: kernel@pengutronix.de | ||
685 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 603 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
686 | S: Maintained | 604 | S: Maintained |
687 | 605 | ||
688 | ARM/GLOMATION GESBC9312SX MACHINE SUPPORT | 606 | ARM/GLOMATION GESBC9312SX MACHINE SUPPORT |
689 | P: Lennert Buytenhek | 607 | M: Lennert Buytenhek <kernel@wantstofly.org> |
690 | M: kernel@wantstofly.org | ||
691 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 608 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
692 | S: Maintained | 609 | S: Maintained |
693 | 610 | ||
694 | ARM/GUMSTIX MACHINE SUPPORT | 611 | ARM/GUMSTIX MACHINE SUPPORT |
695 | P: Steve Sakoman | 612 | M: Steve Sakoman <sakoman@gmail.com> |
696 | M: sakoman@gmail.com | ||
697 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 613 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
698 | S: Maintained | 614 | S: Maintained |
699 | 615 | ||
700 | ARM/H4700 (HP IPAQ HX4700) MACHINE SUPPORT | 616 | ARM/H4700 (HP IPAQ HX4700) MACHINE SUPPORT |
701 | P: Philipp Zabel | 617 | M: Philipp Zabel <philipp.zabel@gmail.com> |
702 | M: philipp.zabel@gmail.com | ||
703 | S: Maintained | 618 | S: Maintained |
704 | F: arch/arm/mach-pxa/hx4700.c | 619 | F: arch/arm/mach-pxa/hx4700.c |
705 | F: arch/arm/mach-pxa/include/mach/hx4700.h | 620 | F: arch/arm/mach-pxa/include/mach/hx4700.h |
706 | 621 | ||
707 | ARM/HP JORNADA 7XX MACHINE SUPPORT | 622 | ARM/HP JORNADA 7XX MACHINE SUPPORT |
708 | P: Kristoffer Ericson | 623 | M: Kristoffer Ericson <kristoffer.ericson@gmail.com> |
709 | M: kristoffer.ericson@gmail.com | ||
710 | W: www.jlime.com | 624 | W: www.jlime.com |
711 | S: Maintained | 625 | S: Maintained |
626 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git | ||
627 | F: arch/arm/mach-sa1100/jornada720.c | ||
628 | F: arch/arm/mach-sa1100/include/mach/jornada720.h | ||
712 | 629 | ||
713 | ARM/INTEL IOP32X ARM ARCHITECTURE | 630 | ARM/INTEL IOP32X ARM ARCHITECTURE |
714 | P: Lennert Buytenhek | 631 | M: Lennert Buytenhek <kernel@wantstofly.org> |
715 | M: kernel@wantstofly.org | 632 | M: Dan Williams <dan.j.williams@intel.com> |
716 | P: Dan Williams | ||
717 | M: dan.j.williams@intel.com | ||
718 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 633 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
719 | S: Supported | 634 | S: Supported |
720 | 635 | ||
721 | ARM/INTEL IOP33X ARM ARCHITECTURE | 636 | ARM/INTEL IOP33X ARM ARCHITECTURE |
722 | P: Dan Williams | 637 | M: Dan Williams <dan.j.williams@intel.com> |
723 | M: dan.j.williams@intel.com | ||
724 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 638 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
725 | S: Supported | 639 | S: Supported |
726 | 640 | ||
727 | ARM/INTEL IOP13XX ARM ARCHITECTURE | 641 | ARM/INTEL IOP13XX ARM ARCHITECTURE |
728 | P: Lennert Buytenhek | 642 | M: Lennert Buytenhek <kernel@wantstofly.org> |
729 | M: kernel@wantstofly.org | 643 | M: Dan Williams <dan.j.williams@intel.com> |
730 | P: Dan Williams | ||
731 | M: dan.j.williams@intel.com | ||
732 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 644 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
733 | S: Supported | 645 | S: Supported |
734 | 646 | ||
735 | ARM/INTEL IQ81342EX MACHINE SUPPORT | 647 | ARM/INTEL IQ81342EX MACHINE SUPPORT |
736 | P: Lennert Buytenhek | 648 | M: Lennert Buytenhek <kernel@wantstofly.org> |
737 | M: kernel@wantstofly.org | 649 | M: Dan Williams <dan.j.williams@intel.com> |
738 | P: Dan Williams | ||
739 | M: dan.j.williams@intel.com | ||
740 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 650 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
741 | S: Supported | 651 | S: Supported |
742 | 652 | ||
743 | ARM/INTEL IXP2000 ARM ARCHITECTURE | 653 | ARM/INTEL IXP2000 ARM ARCHITECTURE |
744 | P: Lennert Buytenhek | 654 | M: Lennert Buytenhek <kernel@wantstofly.org> |
745 | M: kernel@wantstofly.org | ||
746 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 655 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
747 | S: Maintained | 656 | S: Maintained |
748 | 657 | ||
749 | ARM/INTEL IXDP2850 MACHINE SUPPORT | 658 | ARM/INTEL IXDP2850 MACHINE SUPPORT |
750 | P: Lennert Buytenhek | 659 | M: Lennert Buytenhek <kernel@wantstofly.org> |
751 | M: kernel@wantstofly.org | ||
752 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 660 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
753 | S: Maintained | 661 | S: Maintained |
754 | 662 | ||
755 | ARM/INTEL IXP23XX ARM ARCHITECTURE | 663 | ARM/INTEL IXP23XX ARM ARCHITECTURE |
756 | P: Lennert Buytenhek | 664 | M: Lennert Buytenhek <kernel@wantstofly.org> |
757 | M: kernel@wantstofly.org | ||
758 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 665 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
759 | S: Maintained | 666 | S: Maintained |
760 | 667 | ||
761 | ARM/INTEL XSC3 (MANZANO) ARM CORE | 668 | ARM/INTEL XSC3 (MANZANO) ARM CORE |
762 | P: Lennert Buytenhek | 669 | M: Lennert Buytenhek <kernel@wantstofly.org> |
763 | M: kernel@wantstofly.org | 670 | M: Dan Williams <dan.j.williams@intel.com> |
764 | P: Dan Williams | ||
765 | M: dan.j.williams@intel.com | ||
766 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 671 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
767 | S: Supported | 672 | S: Supported |
768 | 673 | ||
769 | ARM/IP FABRICS DOUBLE ESPRESSO MACHINE SUPPORT | 674 | ARM/IP FABRICS DOUBLE ESPRESSO MACHINE SUPPORT |
770 | P: Lennert Buytenhek | 675 | M: Lennert Buytenhek <kernel@wantstofly.org> |
771 | M: kernel@wantstofly.org | ||
772 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 676 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
773 | S: Maintained | 677 | S: Maintained |
774 | 678 | ||
775 | ARM/LOGICPD PXA270 MACHINE SUPPORT | 679 | ARM/LOGICPD PXA270 MACHINE SUPPORT |
776 | P: Lennert Buytenhek | 680 | M: Lennert Buytenhek <kernel@wantstofly.org> |
777 | M: kernel@wantstofly.org | ||
778 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 681 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
779 | S: Maintained | 682 | S: Maintained |
780 | 683 | ||
781 | ARM/MAGICIAN MACHINE SUPPORT | 684 | ARM/MAGICIAN MACHINE SUPPORT |
782 | P: Philipp Zabel | 685 | M: Philipp Zabel <philipp.zabel@gmail.com> |
783 | M: philipp.zabel@gmail.com | ||
784 | S: Maintained | 686 | S: Maintained |
785 | 687 | ||
786 | ARM/MIOA701 MACHINE SUPPORT | 688 | ARM/MIOA701 MACHINE SUPPORT |
787 | P: Robert Jarzmik | 689 | M: Robert Jarzmik <robert.jarzmik@free.fr> |
788 | M: robert.jarzmik@free.fr | ||
789 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 690 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
790 | F: arch/arm/mach-pxa/mioa701.c | 691 | F: arch/arm/mach-pxa/mioa701.c |
791 | S: Maintained | 692 | S: Maintained |
792 | 693 | ||
793 | ARM/NEC MOBILEPRO 900/c MACHINE SUPPORT | 694 | ARM/NEC MOBILEPRO 900/c MACHINE SUPPORT |
794 | P: Michael Petchkovsky | 695 | M: Michael Petchkovsky <mkpetch@internode.on.net> |
795 | M: mkpetch@internode.on.net | ||
796 | S: Maintained | 696 | S: Maintained |
797 | 697 | ||
798 | ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT | 698 | ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT |
799 | P: Nelson Castillo | 699 | M: Nelson Castillo <arhuaco@freaks-unidos.net> |
800 | M: arhuaco@freaks-unidos.net | ||
801 | L: openmoko-kernel@lists.openmoko.org (subscribers-only) | 700 | L: openmoko-kernel@lists.openmoko.org (subscribers-only) |
802 | W: http://wiki.openmoko.org/wiki/Neo_FreeRunner | 701 | W: http://wiki.openmoko.org/wiki/Neo_FreeRunner |
803 | S: Supported | 702 | S: Supported |
804 | 703 | ||
805 | ARM/TOSA MACHINE SUPPORT | 704 | ARM/TOSA MACHINE SUPPORT |
806 | P: Dmitry Eremin-Solenikov | 705 | M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> |
807 | M: dbaryshkov@gmail.com | 706 | M: Dirk Opfer <dirk@opfer-online.de> |
808 | P: Dirk Opfer | ||
809 | M: dirk@opfer-online.de | ||
810 | S: Maintained | 707 | S: Maintained |
811 | 708 | ||
812 | ARM/PALMTX,PALMT5,PALMLD,PALMTE2 SUPPORT | 709 | ARM/PALMTX,PALMT5,PALMLD,PALMTE2 SUPPORT |
813 | P: Marek Vasut | 710 | M: Marek Vasut <marek.vasut@gmail.com> |
814 | M: marek.vasut@gmail.com | ||
815 | W: http://hackndev.com | 711 | W: http://hackndev.com |
816 | S: Maintained | 712 | S: Maintained |
817 | 713 | ||
818 | ARM/PALM TREO 680 SUPPORT | 714 | ARM/PALM TREO 680 SUPPORT |
819 | P: Tomas Cech | 715 | M: Tomas Cech <sleep_walker@suse.cz> |
820 | M: sleep_walker@suse.cz | ||
821 | W: http://hackndev.com | 716 | W: http://hackndev.com |
822 | S: Maintained | 717 | S: Maintained |
823 | 718 | ||
824 | ARM/PALMZ72 SUPPORT | 719 | ARM/PALMZ72 SUPPORT |
825 | P: Sergey Lapin | 720 | M: Sergey Lapin <slapin@ossfans.org> |
826 | M: slapin@ossfans.org | ||
827 | W: http://hackndev.com | 721 | W: http://hackndev.com |
828 | S: Maintained | 722 | S: Maintained |
829 | 723 | ||
830 | ARM/PLEB SUPPORT | 724 | ARM/PLEB SUPPORT |
831 | P: Peter Chubb | 725 | M: Peter Chubb <pleb@gelato.unsw.edu.au> |
832 | M: pleb@gelato.unsw.edu.au | ||
833 | W: http://www.disy.cse.unsw.edu.au/Hardware/PLEB | 726 | W: http://www.disy.cse.unsw.edu.au/Hardware/PLEB |
834 | S: Maintained | 727 | S: Maintained |
835 | 728 | ||
836 | ARM/PT DIGITAL BOARD PORT | 729 | ARM/PT DIGITAL BOARD PORT |
837 | P: Stefan Eletzhofer | 730 | M: Stefan Eletzhofer <stefan.eletzhofer@eletztrick.de> |
838 | M: stefan.eletzhofer@eletztrick.de | ||
839 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 731 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
840 | W: http://www.arm.linux.org.uk/ | 732 | W: http://www.arm.linux.org.uk/ |
841 | S: Maintained | 733 | S: Maintained |
842 | 734 | ||
843 | ARM/RADISYS ENP2611 MACHINE SUPPORT | 735 | ARM/RADISYS ENP2611 MACHINE SUPPORT |
844 | P: Lennert Buytenhek | 736 | M: Lennert Buytenhek <kernel@wantstofly.org> |
845 | M: kernel@wantstofly.org | ||
846 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 737 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
847 | S: Maintained | 738 | S: Maintained |
848 | 739 | ||
849 | ARM/RISCPC ARCHITECTURE | 740 | ARM/RISCPC ARCHITECTURE |
850 | P: Russell King | 741 | M: Russell King <linux@arm.linux.org.uk> |
851 | M: linux@arm.linux.org.uk | ||
852 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 742 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
853 | W: http://www.arm.linux.org.uk/ | 743 | W: http://www.arm.linux.org.uk/ |
854 | S: Maintained | 744 | S: Maintained |
@@ -862,14 +752,12 @@ F: drivers/net/arm/ether* | |||
862 | F: drivers/scsi/arm/ | 752 | F: drivers/scsi/arm/ |
863 | 753 | ||
864 | ARM/SHARK MACHINE SUPPORT | 754 | ARM/SHARK MACHINE SUPPORT |
865 | P: Alexander Schulz | 755 | M: Alexander Schulz <alex@shark-linux.de> |
866 | M: alex@shark-linux.de | ||
867 | W: http://www.shark-linux.de/shark.html | 756 | W: http://www.shark-linux.de/shark.html |
868 | S: Maintained | 757 | S: Maintained |
869 | 758 | ||
870 | ARM/SAMSUNG ARM ARCHITECTURES | 759 | ARM/SAMSUNG ARM ARCHITECTURES |
871 | P: Ben Dooks | 760 | M: Ben Dooks <ben-linux@fluff.org> |
872 | M: ben-linux@fluff.org | ||
873 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 761 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
874 | W: http://www.fluff.org/ben/linux/ | 762 | W: http://www.fluff.org/ben/linux/ |
875 | S: Maintained | 763 | S: Maintained |
@@ -877,91 +765,73 @@ F: arch/arm/plat-s3c/ | |||
877 | F: arch/arm/plat-s3c24xx/ | 765 | F: arch/arm/plat-s3c24xx/ |
878 | 766 | ||
879 | ARM/S3C2410 ARM ARCHITECTURE | 767 | ARM/S3C2410 ARM ARCHITECTURE |
880 | P: Ben Dooks | 768 | M: Ben Dooks <ben-linux@fluff.org> |
881 | M: ben-linux@fluff.org | ||
882 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 769 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
883 | W: http://www.fluff.org/ben/linux/ | 770 | W: http://www.fluff.org/ben/linux/ |
884 | S: Maintained | 771 | S: Maintained |
885 | F: arch/arm/mach-s3c2410/ | 772 | F: arch/arm/mach-s3c2410/ |
886 | 773 | ||
887 | ARM/S3C2440 ARM ARCHITECTURE | 774 | ARM/S3C2440 ARM ARCHITECTURE |
888 | P: Ben Dooks | 775 | M: Ben Dooks <ben-linux@fluff.org> |
889 | M: ben-linux@fluff.org | ||
890 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 776 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
891 | W: http://www.fluff.org/ben/linux/ | 777 | W: http://www.fluff.org/ben/linux/ |
892 | S: Maintained | 778 | S: Maintained |
893 | F: arch/arm/mach-s3c2440/ | 779 | F: arch/arm/mach-s3c2440/ |
894 | 780 | ||
895 | ARM/S3C2442 ARM ARCHITECTURE | 781 | ARM/S3C2442 ARM ARCHITECTURE |
896 | P: Ben Dooks | 782 | M: Ben Dooks <ben-linux@fluff.org> |
897 | M: ben-linux@fluff.org | ||
898 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 783 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
899 | W: http://www.fluff.org/ben/linux/ | 784 | W: http://www.fluff.org/ben/linux/ |
900 | S: Maintained | 785 | S: Maintained |
901 | F: arch/arm/mach-s3c2442/ | 786 | F: arch/arm/mach-s3c2442/ |
902 | 787 | ||
903 | ARM/S3C2443 ARM ARCHITECTURE | 788 | ARM/S3C2443 ARM ARCHITECTURE |
904 | P: Ben Dooks | 789 | M: Ben Dooks <ben-linux@fluff.org> |
905 | M: ben-linux@fluff.org | ||
906 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 790 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
907 | W: http://www.fluff.org/ben/linux/ | 791 | W: http://www.fluff.org/ben/linux/ |
908 | S: Maintained | 792 | S: Maintained |
909 | F: arch/arm/mach-s3c2443/ | 793 | F: arch/arm/mach-s3c2443/ |
910 | 794 | ||
911 | ARM/S3C6400 ARM ARCHITECTURE | 795 | ARM/S3C6400 ARM ARCHITECTURE |
912 | P: Ben Dooks | 796 | M: Ben Dooks <ben-linux@fluff.org> |
913 | M: ben-linux@fluff.org | ||
914 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 797 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
915 | W: http://www.fluff.org/ben/linux/ | 798 | W: http://www.fluff.org/ben/linux/ |
916 | S: Maintained | 799 | S: Maintained |
917 | F: arch/arm/mach-s3c6400/ | 800 | F: arch/arm/mach-s3c6400/ |
918 | 801 | ||
919 | ARM/S3C6410 ARM ARCHITECTURE | 802 | ARM/S3C6410 ARM ARCHITECTURE |
920 | P: Ben Dooks | 803 | M: Ben Dooks <ben-linux@fluff.org> |
921 | M: ben-linux@fluff.org | ||
922 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 804 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
923 | W: http://www.fluff.org/ben/linux/ | 805 | W: http://www.fluff.org/ben/linux/ |
924 | S: Maintained | 806 | S: Maintained |
925 | F: arch/arm/mach-s3c6410/ | 807 | F: arch/arm/mach-s3c6410/ |
926 | 808 | ||
927 | ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT | 809 | ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT |
928 | P: Lennert Buytenhek | 810 | M: Lennert Buytenhek <kernel@wantstofly.org> |
929 | M: kernel@wantstofly.org | ||
930 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 811 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
931 | S: Maintained | 812 | S: Maintained |
932 | 813 | ||
933 | ARM/THECUS N2100 MACHINE SUPPORT | 814 | ARM/THECUS N2100 MACHINE SUPPORT |
934 | P: Lennert Buytenhek | 815 | M: Lennert Buytenhek <kernel@wantstofly.org> |
935 | M: kernel@wantstofly.org | ||
936 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 816 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
937 | S: Maintained | 817 | S: Maintained |
938 | 818 | ||
939 | ARM/NUVOTON W90X900 ARM ARCHITECTURE | 819 | ARM/NUVOTON W90X900 ARM ARCHITECTURE |
940 | P: Wan ZongShun | 820 | M: Wan ZongShun <mcuos.com@gmail.com> |
941 | M: mcuos.com@gmail.com | ||
942 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 821 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
943 | W: http://www.mcuos.com | 822 | W: http://www.mcuos.com |
944 | S: Maintained | 823 | S: Maintained |
945 | 824 | ||
946 | ARM/VFP SUPPORT | 825 | ARM/VFP SUPPORT |
947 | P: Russell King | 826 | M: Russell King <linux@arm.linux.org.uk> |
948 | M: linux@arm.linux.org.uk | ||
949 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 827 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
950 | W: http://www.arm.linux.org.uk/ | 828 | W: http://www.arm.linux.org.uk/ |
951 | S: Maintained | 829 | S: Maintained |
952 | F: arch/arm/vfp/ | 830 | F: arch/arm/vfp/ |
953 | 831 | ||
954 | ARPD SUPPORT | ||
955 | P: Jonathan Layes | ||
956 | L: netdev@vger.kernel.org | ||
957 | S: Maintained | ||
958 | F: net/ipv4/arp.c | ||
959 | |||
960 | ASUS ACPI EXTRAS DRIVER | 832 | ASUS ACPI EXTRAS DRIVER |
961 | P: Corentin Chary | 833 | M: Corentin Chary <corentincj@iksaif.net> |
962 | M: corentincj@iksaif.net | 834 | M: Karol Kozimor <sziwan@users.sourceforge.net> |
963 | P: Karol Kozimor | ||
964 | M: sziwan@users.sourceforge.net | ||
965 | L: acpi4asus-user@lists.sourceforge.net | 835 | L: acpi4asus-user@lists.sourceforge.net |
966 | W: http://acpi4asus.sf.net | 836 | W: http://acpi4asus.sf.net |
967 | S: Maintained | 837 | S: Maintained |
@@ -969,25 +839,21 @@ F: arch/x86/kernel/acpi/boot.c | |||
969 | F: drivers/platform/x86/asus_acpi.c | 839 | F: drivers/platform/x86/asus_acpi.c |
970 | 840 | ||
971 | ASUS ASB100 HARDWARE MONITOR DRIVER | 841 | ASUS ASB100 HARDWARE MONITOR DRIVER |
972 | P: Mark M. Hoffman | 842 | M: "Mark M. Hoffman" <mhoffman@lightlink.com> |
973 | M: mhoffman@lightlink.com | ||
974 | L: lm-sensors@lm-sensors.org | 843 | L: lm-sensors@lm-sensors.org |
975 | S: Maintained | 844 | S: Maintained |
976 | F: drivers/hwmon/asb100.c | 845 | F: drivers/hwmon/asb100.c |
977 | 846 | ||
978 | ASUS LAPTOP EXTRAS DRIVER | 847 | ASUS LAPTOP EXTRAS DRIVER |
979 | P: Corentin Chary | 848 | M: Corentin Chary <corentincj@iksaif.net> |
980 | M: corentincj@iksaif.net | ||
981 | L: acpi4asus-user@lists.sourceforge.net | 849 | L: acpi4asus-user@lists.sourceforge.net |
982 | W: http://acpi4asus.sf.net | 850 | W: http://acpi4asus.sf.net |
983 | S: Maintained | 851 | S: Maintained |
984 | F: drivers/platform/x86/asus-laptop.c | 852 | F: drivers/platform/x86/asus-laptop.c |
985 | 853 | ||
986 | ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API | 854 | ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API |
987 | P: Dan Williams | 855 | M: Dan Williams <dan.j.williams@intel.com> |
988 | M: dan.j.williams@intel.com | 856 | M: Maciej Sosnowski <maciej.sosnowski@intel.com> |
989 | P: Maciej Sosnowski | ||
990 | M: maciej.sosnowski@intel.com | ||
991 | W: http://sourceforge.net/projects/xscaleiop | 857 | W: http://sourceforge.net/projects/xscaleiop |
992 | S: Supported | 858 | S: Supported |
993 | F: Documentation/crypto/async-tx-api.txt | 859 | F: Documentation/crypto/async-tx-api.txt |
@@ -997,64 +863,49 @@ F: include/linux/dmaengine.h | |||
997 | F: include/linux/async_tx.h | 863 | F: include/linux/async_tx.h |
998 | 864 | ||
999 | ATA OVER ETHERNET (AOE) DRIVER | 865 | ATA OVER ETHERNET (AOE) DRIVER |
1000 | P: Ed L. Cashin | 866 | M: "Ed L. Cashin" <ecashin@coraid.com> |
1001 | M: ecashin@coraid.com | ||
1002 | W: http://www.coraid.com/support/linux | 867 | W: http://www.coraid.com/support/linux |
1003 | S: Supported | 868 | S: Supported |
1004 | F: Documentation/aoe/ | 869 | F: Documentation/aoe/ |
1005 | F: drivers/block/aoe/ | 870 | F: drivers/block/aoe/ |
1006 | 871 | ||
1007 | ATHEROS ATH5K WIRELESS DRIVER | 872 | ATHEROS ATH5K WIRELESS DRIVER |
1008 | P: Jiri Slaby | 873 | M: Jiri Slaby <jirislaby@gmail.com> |
1009 | M: jirislaby@gmail.com | 874 | M: Nick Kossifidis <mickflemm@gmail.com> |
1010 | P: Nick Kossifidis | 875 | M: "Luis R. Rodriguez" <lrodriguez@atheros.com> |
1011 | M: mickflemm@gmail.com | 876 | M: Bob Copeland <me@bobcopeland.com> |
1012 | P: Luis R. Rodriguez | ||
1013 | M: lrodriguez@atheros.com | ||
1014 | P: Bob Copeland | ||
1015 | M: me@bobcopeland.com | ||
1016 | L: linux-wireless@vger.kernel.org | 877 | L: linux-wireless@vger.kernel.org |
1017 | L: ath5k-devel@lists.ath5k.org | 878 | L: ath5k-devel@lists.ath5k.org |
1018 | S: Maintained | 879 | S: Maintained |
1019 | F: drivers/net/wireless/ath/ath5k/ | 880 | F: drivers/net/wireless/ath/ath5k/ |
1020 | 881 | ||
1021 | ATHEROS ATH9K WIRELESS DRIVER | 882 | ATHEROS ATH9K WIRELESS DRIVER |
1022 | P: Luis R. Rodriguez | 883 | M: "Luis R. Rodriguez" <lrodriguez@atheros.com> |
1023 | M: lrodriguez@atheros.com | 884 | M: Jouni Malinen <jmalinen@atheros.com> |
1024 | P: Jouni Malinen | 885 | M: Sujith Manoharan <Sujith.Manoharan@atheros.com> |
1025 | M: jmalinen@atheros.com | 886 | M: Vasanthakumar Thiagarajan <vasanth@atheros.com> |
1026 | P: Sujith Manoharan | 887 | M: Senthil Balasubramanian <senthilkumar@atheros.com> |
1027 | M: Sujith.Manoharan@atheros.com | ||
1028 | P: Vasanthakumar Thiagarajan | ||
1029 | M: vasanth@atheros.com | ||
1030 | P: Senthil Balasubramanian | ||
1031 | M: senthilkumar@atheros.com | ||
1032 | L: linux-wireless@vger.kernel.org | 888 | L: linux-wireless@vger.kernel.org |
1033 | L: ath9k-devel@lists.ath9k.org | 889 | L: ath9k-devel@lists.ath9k.org |
1034 | S: Supported | 890 | S: Supported |
1035 | F: drivers/net/wireless/ath/ath9k/ | 891 | F: drivers/net/wireless/ath/ath9k/ |
1036 | 892 | ||
1037 | ATHEROS AR9170 WIRELESS DRIVER | 893 | ATHEROS AR9170 WIRELESS DRIVER |
1038 | P: Christian Lamparter | 894 | M: Christian Lamparter <chunkeey@web.de> |
1039 | M: chunkeey@web.de | ||
1040 | L: linux-wireless@vger.kernel.org | 895 | L: linux-wireless@vger.kernel.org |
1041 | W: http://wireless.kernel.org/en/users/Drivers/ar9170 | 896 | W: http://wireless.kernel.org/en/users/Drivers/ar9170 |
1042 | S: Maintained | 897 | S: Maintained |
1043 | F: drivers/net/wireless/ath/ar9170/ | 898 | F: drivers/net/wireless/ath/ar9170/ |
1044 | 899 | ||
1045 | ATI_REMOTE2 DRIVER | 900 | ATI_REMOTE2 DRIVER |
1046 | P: Ville Syrjala | 901 | M: Ville Syrjala <syrjala@sci.fi> |
1047 | M: syrjala@sci.fi | ||
1048 | S: Maintained | 902 | S: Maintained |
1049 | F: drivers/input/misc/ati_remote2.c | 903 | F: drivers/input/misc/ati_remote2.c |
1050 | 904 | ||
1051 | ATLX ETHERNET DRIVERS | 905 | ATLX ETHERNET DRIVERS |
1052 | P: Jay Cliburn | 906 | M: Jay Cliburn <jcliburn@gmail.com> |
1053 | M: jcliburn@gmail.com | 907 | M: Chris Snook <csnook@redhat.com> |
1054 | P: Chris Snook | 908 | M: Jie Yang <jie.yang@atheros.com> |
1055 | M: csnook@redhat.com | ||
1056 | P: Jie Yang | ||
1057 | M: jie.yang@atheros.com | ||
1058 | L: atl1-devel@lists.sourceforge.net | 909 | L: atl1-devel@lists.sourceforge.net |
1059 | W: http://sourceforge.net/projects/atl1 | 910 | W: http://sourceforge.net/projects/atl1 |
1060 | W: http://atl1.sourceforge.net | 911 | W: http://atl1.sourceforge.net |
@@ -1062,8 +913,7 @@ S: Maintained | |||
1062 | F: drivers/net/atlx/ | 913 | F: drivers/net/atlx/ |
1063 | 914 | ||
1064 | ATM | 915 | ATM |
1065 | P: Chas Williams | 916 | M: Chas Williams <chas@cmf.nrl.navy.mil> |
1066 | M: chas@cmf.nrl.navy.mil | ||
1067 | L: linux-atm-general@lists.sourceforge.net (subscribers-only) | 917 | L: linux-atm-general@lists.sourceforge.net (subscribers-only) |
1068 | L: netdev@vger.kernel.org | 918 | L: netdev@vger.kernel.org |
1069 | W: http://linux-atm.sourceforge.net | 919 | W: http://linux-atm.sourceforge.net |
@@ -1072,8 +922,7 @@ F: drivers/atm/ | |||
1072 | F: include/linux/atm* | 922 | F: include/linux/atm* |
1073 | 923 | ||
1074 | ATMEL AT91 MCI DRIVER | 924 | ATMEL AT91 MCI DRIVER |
1075 | P: Nicolas Ferre | 925 | M: Nicolas Ferre <nicolas.ferre@atmel.com> |
1076 | M: nicolas.ferre@atmel.com | ||
1077 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 926 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
1078 | W: http://www.atmel.com/products/AT91/ | 927 | W: http://www.atmel.com/products/AT91/ |
1079 | W: http://www.at91.com/ | 928 | W: http://www.at91.com/ |
@@ -1081,49 +930,42 @@ S: Maintained | |||
1081 | F: drivers/mmc/host/at91_mci.c | 930 | F: drivers/mmc/host/at91_mci.c |
1082 | 931 | ||
1083 | ATMEL AT91 / AT32 MCI DRIVER | 932 | ATMEL AT91 / AT32 MCI DRIVER |
1084 | P: Nicolas Ferre | 933 | M: Nicolas Ferre <nicolas.ferre@atmel.com> |
1085 | M: nicolas.ferre@atmel.com | ||
1086 | S: Maintained | 934 | S: Maintained |
1087 | F: drivers/mmc/host/atmel-mci.c | 935 | F: drivers/mmc/host/atmel-mci.c |
1088 | F: drivers/mmc/host/atmel-mci-regs.h | 936 | F: drivers/mmc/host/atmel-mci-regs.h |
1089 | 937 | ||
1090 | ATMEL AT91 / AT32 SERIAL DRIVER | 938 | ATMEL AT91 / AT32 SERIAL DRIVER |
1091 | P: Haavard Skinnemoen | 939 | M: Haavard Skinnemoen <hskinnemoen@atmel.com> |
1092 | M: hskinnemoen@atmel.com | ||
1093 | S: Supported | 940 | S: Supported |
1094 | F: drivers/serial/atmel_serial.c | 941 | F: drivers/serial/atmel_serial.c |
1095 | 942 | ||
1096 | ATMEL LCDFB DRIVER | 943 | ATMEL LCDFB DRIVER |
1097 | P: Nicolas Ferre | 944 | M: Nicolas Ferre <nicolas.ferre@atmel.com> |
1098 | M: nicolas.ferre@atmel.com | ||
1099 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) | 945 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) |
1100 | S: Maintained | 946 | S: Maintained |
1101 | F: drivers/video/atmel_lcdfb.c | 947 | F: drivers/video/atmel_lcdfb.c |
1102 | F: include/video/atmel_lcdc.h | 948 | F: include/video/atmel_lcdc.h |
1103 | 949 | ||
1104 | ATMEL MACB ETHERNET DRIVER | 950 | ATMEL MACB ETHERNET DRIVER |
1105 | P: Haavard Skinnemoen | 951 | M: Haavard Skinnemoen <hskinnemoen@atmel.com> |
1106 | M: hskinnemoen@atmel.com | ||
1107 | S: Supported | 952 | S: Supported |
1108 | F: drivers/net/macb.* | 953 | F: drivers/net/macb.* |
1109 | 954 | ||
1110 | ATMEL SPI DRIVER | 955 | ATMEL SPI DRIVER |
1111 | P: Haavard Skinnemoen | 956 | M: Haavard Skinnemoen <hskinnemoen@atmel.com> |
1112 | M: hskinnemoen@atmel.com | ||
1113 | S: Supported | 957 | S: Supported |
1114 | F: drivers/spi/atmel_spi.* | 958 | F: drivers/spi/atmel_spi.* |
1115 | 959 | ||
1116 | ATMEL USBA UDC DRIVER | 960 | ATMEL USBA UDC DRIVER |
1117 | P: Haavard Skinnemoen | 961 | M: Haavard Skinnemoen <hskinnemoen@atmel.com> |
1118 | M: hskinnemoen@atmel.com | ||
1119 | L: kernel@avr32linux.org | 962 | L: kernel@avr32linux.org |
1120 | W: http://avr32linux.org/twiki/bin/view/Main/AtmelUsbDeviceDriver | 963 | W: http://avr32linux.org/twiki/bin/view/Main/AtmelUsbDeviceDriver |
1121 | S: Supported | 964 | S: Supported |
1122 | F: drivers/usb/gadget/atmel_usba_udc.* | 965 | F: drivers/usb/gadget/atmel_usba_udc.* |
1123 | 966 | ||
1124 | ATMEL WIRELESS DRIVER | 967 | ATMEL WIRELESS DRIVER |
1125 | P: Simon Kelley | 968 | M: Simon Kelley <simon@thekelleys.org.uk> |
1126 | M: simon@thekelleys.org.uk | ||
1127 | L: linux-wireless@vger.kernel.org | 969 | L: linux-wireless@vger.kernel.org |
1128 | W: http://www.thekelleys.org.uk/atmel | 970 | W: http://www.thekelleys.org.uk/atmel |
1129 | W: http://atmelwlandriver.sourceforge.net/ | 971 | W: http://atmelwlandriver.sourceforge.net/ |
@@ -1131,10 +973,8 @@ S: Maintained | |||
1131 | F: drivers/net/wireless/atmel* | 973 | F: drivers/net/wireless/atmel* |
1132 | 974 | ||
1133 | AUDIT SUBSYSTEM | 975 | AUDIT SUBSYSTEM |
1134 | P: Al Viro | 976 | M: Al Viro <viro@zeniv.linux.org.uk> |
1135 | M: viro@zeniv.linux.org.uk | 977 | M: Eric Paris <eparis@redhat.com> |
1136 | P: Eric Paris | ||
1137 | M: eparis@redhat.com | ||
1138 | L: linux-audit@redhat.com (subscribers-only) | 978 | L: linux-audit@redhat.com (subscribers-only) |
1139 | W: http://people.redhat.com/sgrubb/audit/ | 979 | W: http://people.redhat.com/sgrubb/audit/ |
1140 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/viro/audit-current.git | 980 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/viro/audit-current.git |
@@ -1143,8 +983,7 @@ F: include/linux/audit.h | |||
1143 | F: kernel/audit* | 983 | F: kernel/audit* |
1144 | 984 | ||
1145 | AUXILIARY DISPLAY DRIVERS | 985 | AUXILIARY DISPLAY DRIVERS |
1146 | P: Miguel Ojeda Sandonis | 986 | M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com> |
1147 | M: miguel.ojeda.sandonis@gmail.com | ||
1148 | W: http://miguelojeda.es/auxdisplay.htm | 987 | W: http://miguelojeda.es/auxdisplay.htm |
1149 | W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm | 988 | W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm |
1150 | S: Maintained | 989 | S: Maintained |
@@ -1152,8 +991,7 @@ F: drivers/auxdisplay/ | |||
1152 | F: include/linux/cfag12864b.h | 991 | F: include/linux/cfag12864b.h |
1153 | 992 | ||
1154 | AVR32 ARCHITECTURE | 993 | AVR32 ARCHITECTURE |
1155 | P: Haavard Skinnemoen | 994 | M: Haavard Skinnemoen <hskinnemoen@atmel.com> |
1156 | M: hskinnemoen@atmel.com | ||
1157 | W: http://www.atmel.com/products/AVR32/ | 995 | W: http://www.atmel.com/products/AVR32/ |
1158 | W: http://avr32linux.org/ | 996 | W: http://avr32linux.org/ |
1159 | W: http://avrfreaks.net/ | 997 | W: http://avrfreaks.net/ |
@@ -1161,14 +999,12 @@ S: Supported | |||
1161 | F: arch/avr32/ | 999 | F: arch/avr32/ |
1162 | 1000 | ||
1163 | AVR32/AT32AP MACHINE SUPPORT | 1001 | AVR32/AT32AP MACHINE SUPPORT |
1164 | P: Haavard Skinnemoen | 1002 | M: Haavard Skinnemoen <hskinnemoen@atmel.com> |
1165 | M: hskinnemoen@atmel.com | ||
1166 | S: Supported | 1003 | S: Supported |
1167 | F: arch/avr32/mach-at32ap/ | 1004 | F: arch/avr32/mach-at32ap/ |
1168 | 1005 | ||
1169 | AX.25 NETWORK LAYER | 1006 | AX.25 NETWORK LAYER |
1170 | P: Ralf Baechle | 1007 | M: Ralf Baechle <ralf@linux-mips.org> |
1171 | M: ralf@linux-mips.org | ||
1172 | L: linux-hams@vger.kernel.org | 1008 | L: linux-hams@vger.kernel.org |
1173 | W: http://www.linux-ax25.org/ | 1009 | W: http://www.linux-ax25.org/ |
1174 | S: Maintained | 1010 | S: Maintained |
@@ -1177,128 +1013,110 @@ F: include/net/ax25.h | |||
1177 | F: net/ax25/ | 1013 | F: net/ax25/ |
1178 | 1014 | ||
1179 | B43 WIRELESS DRIVER | 1015 | B43 WIRELESS DRIVER |
1180 | P: Michael Buesch | 1016 | M: Michael Buesch <mb@bu3sch.de> |
1181 | M: mb@bu3sch.de | 1017 | M: Stefano Brivio <stefano.brivio@polimi.it> |
1182 | P: Stefano Brivio | ||
1183 | M: stefano.brivio@polimi.it | ||
1184 | L: linux-wireless@vger.kernel.org | 1018 | L: linux-wireless@vger.kernel.org |
1185 | W: http://linuxwireless.org/en/users/Drivers/b43 | 1019 | W: http://linuxwireless.org/en/users/Drivers/b43 |
1186 | S: Maintained | 1020 | S: Maintained |
1187 | F: drivers/net/wireless/b43/ | 1021 | F: drivers/net/wireless/b43/ |
1188 | 1022 | ||
1189 | B43LEGACY WIRELESS DRIVER | 1023 | B43LEGACY WIRELESS DRIVER |
1190 | P: Larry Finger | 1024 | M: Larry Finger <Larry.Finger@lwfinger.net> |
1191 | M: Larry.Finger@lwfinger.net | 1025 | M: Stefano Brivio <stefano.brivio@polimi.it> |
1192 | P: Stefano Brivio | ||
1193 | M: stefano.brivio@polimi.it | ||
1194 | L: linux-wireless@vger.kernel.org | 1026 | L: linux-wireless@vger.kernel.org |
1195 | W: http://linuxwireless.org/en/users/Drivers/b43 | 1027 | W: http://linuxwireless.org/en/users/Drivers/b43 |
1196 | S: Maintained | 1028 | S: Maintained |
1197 | F: drivers/net/wireless/b43legacy/ | 1029 | F: drivers/net/wireless/b43legacy/ |
1198 | 1030 | ||
1199 | BACKLIGHT CLASS/SUBSYSTEM | 1031 | BACKLIGHT CLASS/SUBSYSTEM |
1200 | P: Richard Purdie | 1032 | M: Richard Purdie <rpurdie@rpsys.net> |
1201 | M: rpurdie@rpsys.net | ||
1202 | S: Maintained | 1033 | S: Maintained |
1203 | F: drivers/video/backlight/ | 1034 | F: drivers/video/backlight/ |
1204 | F: include/linux/backlight.h | 1035 | F: include/linux/backlight.h |
1205 | 1036 | ||
1206 | BAYCOM/HDLCDRV DRIVERS FOR AX.25 | 1037 | BAYCOM/HDLCDRV DRIVERS FOR AX.25 |
1207 | P: Thomas Sailer | 1038 | M: Thomas Sailer <t.sailer@alumni.ethz.ch> |
1208 | M: t.sailer@alumni.ethz.ch | ||
1209 | L: linux-hams@vger.kernel.org | 1039 | L: linux-hams@vger.kernel.org |
1210 | W: http://www.baycom.org/~tom/ham/ham.html | 1040 | W: http://www.baycom.org/~tom/ham/ham.html |
1211 | S: Maintained | 1041 | S: Maintained |
1212 | F: drivers/net/hamradio/baycom* | 1042 | F: drivers/net/hamradio/baycom* |
1213 | 1043 | ||
1214 | BEFS FILE SYSTEM | 1044 | BEFS FILE SYSTEM |
1215 | P: Sergey S. Kostyliov | 1045 | M: "Sergey S. Kostyliov" <rathamahata@php4.ru> |
1216 | M: rathamahata@php4.ru | ||
1217 | S: Maintained | 1046 | S: Maintained |
1218 | F: Documentation/filesystems/befs.txt | 1047 | F: Documentation/filesystems/befs.txt |
1219 | F: fs/befs/ | 1048 | F: fs/befs/ |
1220 | 1049 | ||
1221 | BFS FILE SYSTEM | 1050 | BFS FILE SYSTEM |
1222 | P: Tigran A. Aivazian | 1051 | M: "Tigran A. Aivazian" <tigran@aivazian.fsnet.co.uk> |
1223 | M: tigran@aivazian.fsnet.co.uk | ||
1224 | S: Maintained | 1052 | S: Maintained |
1225 | F: Documentation/filesystems/bfs.txt | 1053 | F: Documentation/filesystems/bfs.txt |
1226 | F: fs/bfs/ | 1054 | F: fs/bfs/ |
1227 | F: include/linux/bfs_fs.h | 1055 | F: include/linux/bfs_fs.h |
1228 | 1056 | ||
1229 | BLACKFIN ARCHITECTURE | 1057 | BLACKFIN ARCHITECTURE |
1230 | P: Mike Frysinger | 1058 | M: Mike Frysinger <vapier@gentoo.org> |
1231 | M: vapier@gentoo.org | ||
1232 | L: uclinux-dist-devel@blackfin.uclinux.org | 1059 | L: uclinux-dist-devel@blackfin.uclinux.org |
1233 | W: http://blackfin.uclinux.org | 1060 | W: http://blackfin.uclinux.org |
1234 | S: Supported | 1061 | S: Supported |
1235 | F: arch/blackfin/ | 1062 | F: arch/blackfin/ |
1236 | 1063 | ||
1237 | BLACKFIN EMAC DRIVER | 1064 | BLACKFIN EMAC DRIVER |
1238 | P: Michael Hennerich | 1065 | M: Michael Hennerich <michael.hennerich@analog.com> |
1239 | M: michael.hennerich@analog.com | ||
1240 | L: uclinux-dist-devel@blackfin.uclinux.org | 1066 | L: uclinux-dist-devel@blackfin.uclinux.org |
1241 | W: http://blackfin.uclinux.org | 1067 | W: http://blackfin.uclinux.org |
1242 | S: Supported | 1068 | S: Supported |
1243 | F: drivers/net/bfin_mac.* | 1069 | F: drivers/net/bfin_mac.* |
1244 | 1070 | ||
1245 | BLACKFIN RTC DRIVER | 1071 | BLACKFIN RTC DRIVER |
1246 | P: Mike Frysinger | 1072 | M: Mike Frysinger <vapier.adi@gmail.com> |
1247 | M: vapier.adi@gmail.com | ||
1248 | L: uclinux-dist-devel@blackfin.uclinux.org | 1073 | L: uclinux-dist-devel@blackfin.uclinux.org |
1249 | W: http://blackfin.uclinux.org | 1074 | W: http://blackfin.uclinux.org |
1250 | S: Supported | 1075 | S: Supported |
1251 | F: drivers/rtc/rtc-bfin.c | 1076 | F: drivers/rtc/rtc-bfin.c |
1252 | 1077 | ||
1253 | BLACKFIN SERIAL DRIVER | 1078 | BLACKFIN SERIAL DRIVER |
1254 | P: Sonic Zhang | 1079 | M: Sonic Zhang <sonic.zhang@analog.com> |
1255 | M: sonic.zhang@analog.com | ||
1256 | L: uclinux-dist-devel@blackfin.uclinux.org | 1080 | L: uclinux-dist-devel@blackfin.uclinux.org |
1257 | W: http://blackfin.uclinux.org | 1081 | W: http://blackfin.uclinux.org |
1258 | S: Supported | 1082 | S: Supported |
1259 | F: drivers/serial/bfin_5xx.c | 1083 | F: drivers/serial/bfin_5xx.c |
1260 | 1084 | ||
1261 | BLACKFIN WATCHDOG DRIVER | 1085 | BLACKFIN WATCHDOG DRIVER |
1262 | P: Mike Frysinger | 1086 | M: Mike Frysinger <vapier.adi@gmail.com> |
1263 | M: vapier.adi@gmail.com | ||
1264 | L: uclinux-dist-devel@blackfin.uclinux.org | 1087 | L: uclinux-dist-devel@blackfin.uclinux.org |
1265 | W: http://blackfin.uclinux.org | 1088 | W: http://blackfin.uclinux.org |
1266 | S: Supported | 1089 | S: Supported |
1267 | F: drivers/watchdog/bfin_wdt.c | 1090 | F: drivers/watchdog/bfin_wdt.c |
1268 | 1091 | ||
1269 | BLACKFIN I2C TWI DRIVER | 1092 | BLACKFIN I2C TWI DRIVER |
1270 | P: Sonic Zhang | 1093 | M: Sonic Zhang <sonic.zhang@analog.com> |
1271 | M: sonic.zhang@analog.com | ||
1272 | L: uclinux-dist-devel@blackfin.uclinux.org | 1094 | L: uclinux-dist-devel@blackfin.uclinux.org |
1273 | W: http://blackfin.uclinux.org/ | 1095 | W: http://blackfin.uclinux.org/ |
1274 | S: Supported | 1096 | S: Supported |
1275 | F: drivers/i2c/busses/i2c-bfin-twi.c | 1097 | F: drivers/i2c/busses/i2c-bfin-twi.c |
1276 | 1098 | ||
1277 | BLOCK LAYER | 1099 | BLOCK LAYER |
1278 | P: Jens Axboe | 1100 | M: Jens Axboe <axboe@kernel.dk> |
1279 | M: axboe@kernel.dk | ||
1280 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-2.6-block.git | 1101 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-2.6-block.git |
1281 | S: Maintained | 1102 | S: Maintained |
1282 | F: block/ | 1103 | F: block/ |
1283 | 1104 | ||
1284 | BLOCK2MTD DRIVER | 1105 | BLOCK2MTD DRIVER |
1285 | P: Joern Engel | 1106 | M: Joern Engel <joern@lazybastard.org> |
1286 | M: joern@lazybastard.org | ||
1287 | L: linux-mtd@lists.infradead.org | 1107 | L: linux-mtd@lists.infradead.org |
1288 | S: Maintained | 1108 | S: Maintained |
1289 | F: drivers/mtd/devices/block2mtd.c | 1109 | F: drivers/mtd/devices/block2mtd.c |
1290 | 1110 | ||
1291 | BLUETOOTH DRIVERS | 1111 | BLUETOOTH DRIVERS |
1292 | P: Marcel Holtmann | 1112 | M: Marcel Holtmann <marcel@holtmann.org> |
1293 | M: marcel@holtmann.org | ||
1294 | L: linux-bluetooth@vger.kernel.org | 1113 | L: linux-bluetooth@vger.kernel.org |
1295 | W: http://www.bluez.org/ | 1114 | W: http://www.bluez.org/ |
1296 | S: Maintained | 1115 | S: Maintained |
1297 | F: drivers/bluetooth/ | 1116 | F: drivers/bluetooth/ |
1298 | 1117 | ||
1299 | BLUETOOTH SUBSYSTEM | 1118 | BLUETOOTH SUBSYSTEM |
1300 | P: Marcel Holtmann | 1119 | M: Marcel Holtmann <marcel@holtmann.org> |
1301 | M: marcel@holtmann.org | ||
1302 | L: linux-bluetooth@vger.kernel.org | 1120 | L: linux-bluetooth@vger.kernel.org |
1303 | W: http://www.bluez.org/ | 1121 | W: http://www.bluez.org/ |
1304 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/holtmann/bluetooth-2.6.git | 1122 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/holtmann/bluetooth-2.6.git |
@@ -1307,8 +1125,7 @@ F: net/bluetooth/ | |||
1307 | F: include/net/bluetooth/ | 1125 | F: include/net/bluetooth/ |
1308 | 1126 | ||
1309 | BONDING DRIVER | 1127 | BONDING DRIVER |
1310 | P: Jay Vosburgh | 1128 | M: Jay Vosburgh <fubar@us.ibm.com> |
1311 | M: fubar@us.ibm.com | ||
1312 | L: bonding-devel@lists.sourceforge.net | 1129 | L: bonding-devel@lists.sourceforge.net |
1313 | W: http://sourceforge.net/projects/bonding/ | 1130 | W: http://sourceforge.net/projects/bonding/ |
1314 | S: Supported | 1131 | S: Supported |
@@ -1316,54 +1133,46 @@ F: drivers/net/bonding/ | |||
1316 | F: include/linux/if_bonding.h | 1133 | F: include/linux/if_bonding.h |
1317 | 1134 | ||
1318 | BROADCOM B44 10/100 ETHERNET DRIVER | 1135 | BROADCOM B44 10/100 ETHERNET DRIVER |
1319 | P: Gary Zambrano | 1136 | M: Gary Zambrano <zambrano@broadcom.com> |
1320 | M: zambrano@broadcom.com | ||
1321 | L: netdev@vger.kernel.org | 1137 | L: netdev@vger.kernel.org |
1322 | S: Supported | 1138 | S: Supported |
1323 | F: drivers/net/b44.* | 1139 | F: drivers/net/b44.* |
1324 | 1140 | ||
1325 | BROADCOM BNX2 GIGABIT ETHERNET DRIVER | 1141 | BROADCOM BNX2 GIGABIT ETHERNET DRIVER |
1326 | P: Michael Chan | 1142 | M: Michael Chan <mchan@broadcom.com> |
1327 | M: mchan@broadcom.com | ||
1328 | L: netdev@vger.kernel.org | 1143 | L: netdev@vger.kernel.org |
1329 | S: Supported | 1144 | S: Supported |
1330 | F: drivers/net/bnx2.* | 1145 | F: drivers/net/bnx2.* |
1331 | F: drivers/net/bnx2_* | 1146 | F: drivers/net/bnx2_* |
1332 | 1147 | ||
1333 | BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER | 1148 | BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER |
1334 | P: Eilon Greenstein | 1149 | M: Eilon Greenstein <eilong@broadcom.com> |
1335 | M: eilong@broadcom.com | ||
1336 | L: netdev@vger.kernel.org | 1150 | L: netdev@vger.kernel.org |
1337 | S: Supported | 1151 | S: Supported |
1338 | F: drivers/net/bnx2x* | 1152 | F: drivers/net/bnx2x* |
1339 | 1153 | ||
1340 | BROADCOM TG3 GIGABIT ETHERNET DRIVER | 1154 | BROADCOM TG3 GIGABIT ETHERNET DRIVER |
1341 | P: Matt Carlson | 1155 | M: Matt Carlson <mcarlson@broadcom.com> |
1342 | M: mcarlson@broadcom.com | 1156 | M: Michael Chan <mchan@broadcom.com> |
1343 | P: Michael Chan | ||
1344 | M: mchan@broadcom.com | ||
1345 | L: netdev@vger.kernel.org | 1157 | L: netdev@vger.kernel.org |
1346 | S: Supported | 1158 | S: Supported |
1347 | F: drivers/net/tg3.* | 1159 | F: drivers/net/tg3.* |
1348 | 1160 | ||
1349 | BSG (block layer generic sg v4 driver) | 1161 | BSG (block layer generic sg v4 driver) |
1350 | P: FUJITA Tomonori | 1162 | M: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> |
1351 | M: fujita.tomonori@lab.ntt.co.jp | ||
1352 | L: linux-scsi@vger.kernel.org | 1163 | L: linux-scsi@vger.kernel.org |
1353 | S: Supported | 1164 | S: Supported |
1354 | F: block/bsg.c | 1165 | F: block/bsg.c |
1355 | F: include/linux/bsg.h | 1166 | F: include/linux/bsg.h |
1356 | 1167 | ||
1357 | BT8XXGPIO DRIVER | 1168 | BT8XXGPIO DRIVER |
1358 | P: Michael Buesch | 1169 | M: Michael Buesch <mb@bu3sch.de> |
1359 | M: mb@bu3sch.de | ||
1360 | W: http://bu3sch.de/btgpio.php | 1170 | W: http://bu3sch.de/btgpio.php |
1361 | S: Maintained | 1171 | S: Maintained |
1362 | F: drivers/gpio/bt8xxgpio.c | 1172 | F: drivers/gpio/bt8xxgpio.c |
1363 | 1173 | ||
1364 | BTRFS FILE SYSTEM | 1174 | BTRFS FILE SYSTEM |
1365 | P: Chris Mason | 1175 | M: Chris Mason <chris.mason@oracle.com> |
1366 | M: chris.mason@oracle.com | ||
1367 | L: linux-btrfs@vger.kernel.org | 1176 | L: linux-btrfs@vger.kernel.org |
1368 | W: http://btrfs.wiki.kernel.org/ | 1177 | W: http://btrfs.wiki.kernel.org/ |
1369 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable.git | 1178 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable.git |
@@ -1372,8 +1181,7 @@ F: Documentation/filesystems/btrfs.txt | |||
1372 | F: fs/btrfs/ | 1181 | F: fs/btrfs/ |
1373 | 1182 | ||
1374 | BTTV VIDEO4LINUX DRIVER | 1183 | BTTV VIDEO4LINUX DRIVER |
1375 | P: Mauro Carvalho Chehab | 1184 | M: Mauro Carvalho Chehab <mchehab@infradead.org> |
1376 | M: mchehab@infradead.org | ||
1377 | L: linux-media@vger.kernel.org | 1185 | L: linux-media@vger.kernel.org |
1378 | W: http://linuxtv.org | 1186 | W: http://linuxtv.org |
1379 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git | 1187 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git |
@@ -1382,16 +1190,14 @@ F: Documentation/video4linux/bttv/ | |||
1382 | F: drivers/media/video/bt8xx/bttv* | 1190 | F: drivers/media/video/bt8xx/bttv* |
1383 | 1191 | ||
1384 | CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS | 1192 | CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS |
1385 | P: David Howells | 1193 | M: David Howells <dhowells@redhat.com> |
1386 | M: dhowells@redhat.com | ||
1387 | L: linux-cachefs@redhat.com | 1194 | L: linux-cachefs@redhat.com |
1388 | S: Supported | 1195 | S: Supported |
1389 | F: Documentation/filesystems/caching/cachefiles.txt | 1196 | F: Documentation/filesystems/caching/cachefiles.txt |
1390 | F: fs/cachefiles/ | 1197 | F: fs/cachefiles/ |
1391 | 1198 | ||
1392 | CAFE CMOS INTEGRATED CAMERA CONTROLLER DRIVER | 1199 | CAFE CMOS INTEGRATED CAMERA CONTROLLER DRIVER |
1393 | P: Jonathan Corbet | 1200 | M: Jonathan Corbet <corbet@lwn.net> |
1394 | M: corbet@lwn.net | ||
1395 | L: linux-media@vger.kernel.org | 1201 | L: linux-media@vger.kernel.org |
1396 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git | 1202 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git |
1397 | S: Maintained | 1203 | S: Maintained |
@@ -1399,10 +1205,8 @@ F: Documentation/video4linux/cafe_ccic | |||
1399 | F: drivers/media/video/cafe_ccic* | 1205 | F: drivers/media/video/cafe_ccic* |
1400 | 1206 | ||
1401 | CALGARY x86-64 IOMMU | 1207 | CALGARY x86-64 IOMMU |
1402 | P: Muli Ben-Yehuda | 1208 | M: Muli Ben-Yehuda <muli@il.ibm.com> |
1403 | M: muli@il.ibm.com | 1209 | M: "Jon D. Mason" <jdmason@kudzu.us> |
1404 | P: Jon D. Mason | ||
1405 | M: jdmason@kudzu.us | ||
1406 | L: discuss@x86-64.org | 1210 | L: discuss@x86-64.org |
1407 | S: Maintained | 1211 | S: Maintained |
1408 | F: arch/x86/kernel/pci-calgary_64.c | 1212 | F: arch/x86/kernel/pci-calgary_64.c |
@@ -1411,10 +1215,8 @@ F: arch/x86/include/asm/calgary.h | |||
1411 | F: arch/x86/include/asm/tce.h | 1215 | F: arch/x86/include/asm/tce.h |
1412 | 1216 | ||
1413 | CAN NETWORK LAYER | 1217 | CAN NETWORK LAYER |
1414 | P: Urs Thuermann | 1218 | M: Urs Thuermann <urs.thuermann@volkswagen.de> |
1415 | M: urs.thuermann@volkswagen.de | 1219 | M: Oliver Hartkopp <oliver.hartkopp@volkswagen.de> |
1416 | P: Oliver Hartkopp | ||
1417 | M: oliver.hartkopp@volkswagen.de | ||
1418 | L: socketcan-core@lists.berlios.de (subscribers-only) | 1220 | L: socketcan-core@lists.berlios.de (subscribers-only) |
1419 | W: http://developer.berlios.de/projects/socketcan/ | 1221 | W: http://developer.berlios.de/projects/socketcan/ |
1420 | S: Maintained | 1222 | S: Maintained |
@@ -1423,15 +1225,13 @@ F: include/linux/can/ | |||
1423 | F: include/linux/can.h | 1225 | F: include/linux/can.h |
1424 | 1226 | ||
1425 | CAN NETWORK DRIVERS | 1227 | CAN NETWORK DRIVERS |
1426 | P: Wolfgang Grandegger | 1228 | M: Wolfgang Grandegger <wg@grandegger.com> |
1427 | M: wg@grandegger.com | ||
1428 | L: socketcan-core@lists.berlios.de (subscribers-only) | 1229 | L: socketcan-core@lists.berlios.de (subscribers-only) |
1429 | W: http://developer.berlios.de/projects/socketcan/ | 1230 | W: http://developer.berlios.de/projects/socketcan/ |
1430 | S: Maintained | 1231 | S: Maintained |
1431 | 1232 | ||
1432 | CELL BROADBAND ENGINE ARCHITECTURE | 1233 | CELL BROADBAND ENGINE ARCHITECTURE |
1433 | P: Arnd Bergmann | 1234 | M: Arnd Bergmann <arnd@arndb.de> |
1434 | M: arnd@arndb.de | ||
1435 | L: linuxppc-dev@ozlabs.org | 1235 | L: linuxppc-dev@ozlabs.org |
1436 | L: cbe-oss-dev@ozlabs.org | 1236 | L: cbe-oss-dev@ozlabs.org |
1437 | W: http://www.ibm.com/developerworks/power/cell/ | 1237 | W: http://www.ibm.com/developerworks/power/cell/ |
@@ -1442,8 +1242,7 @@ F: arch/powerpc/oprofile/*cell* | |||
1442 | F: arch/powerpc/platforms/cell/ | 1242 | F: arch/powerpc/platforms/cell/ |
1443 | 1243 | ||
1444 | CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM: | 1244 | CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM: |
1445 | P: David Vrabel | 1245 | M: David Vrabel <david.vrabel@csr.com> |
1446 | M: david.vrabel@csr.com | ||
1447 | L: linux-usb@vger.kernel.org | 1246 | L: linux-usb@vger.kernel.org |
1448 | S: Supported | 1247 | S: Supported |
1449 | F: Documentation/usb/WUSB-Design-overview.txt | 1248 | F: Documentation/usb/WUSB-Design-overview.txt |
@@ -1452,8 +1251,7 @@ F: drivers/usb/wusbcore/ | |||
1452 | F: include/linux/usb/wusb* | 1251 | F: include/linux/usb/wusb* |
1453 | 1252 | ||
1454 | CFAG12864B LCD DRIVER | 1253 | CFAG12864B LCD DRIVER |
1455 | P: Miguel Ojeda Sandonis | 1254 | M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com> |
1456 | M: miguel.ojeda.sandonis@gmail.com | ||
1457 | W: http://miguelojeda.es/auxdisplay.htm | 1255 | W: http://miguelojeda.es/auxdisplay.htm |
1458 | W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm | 1256 | W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm |
1459 | S: Maintained | 1257 | S: Maintained |
@@ -1461,8 +1259,7 @@ F: drivers/auxdisplay/cfag12864b.c | |||
1461 | F: include/linux/cfag12864b.h | 1259 | F: include/linux/cfag12864b.h |
1462 | 1260 | ||
1463 | CFAG12864BFB LCD FRAMEBUFFER DRIVER | 1261 | CFAG12864BFB LCD FRAMEBUFFER DRIVER |
1464 | P: Miguel Ojeda Sandonis | 1262 | M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com> |
1465 | M: miguel.ojeda.sandonis@gmail.com | ||
1466 | W: http://miguelojeda.es/auxdisplay.htm | 1263 | W: http://miguelojeda.es/auxdisplay.htm |
1467 | W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm | 1264 | W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm |
1468 | S: Maintained | 1265 | S: Maintained |
@@ -1470,8 +1267,7 @@ F: drivers/auxdisplay/cfag12864bfb.c | |||
1470 | F: include/linux/cfag12864b.h | 1267 | F: include/linux/cfag12864b.h |
1471 | 1268 | ||
1472 | CFG80211 and NL80211 | 1269 | CFG80211 and NL80211 |
1473 | P: Johannes Berg | 1270 | M: Johannes Berg <johannes@sipsolutions.net> |
1474 | M: johannes@sipsolutions.net | ||
1475 | L: linux-wireless@vger.kernel.org | 1271 | L: linux-wireless@vger.kernel.org |
1476 | S: Maintained | 1272 | S: Maintained |
1477 | F: include/linux/nl80211.h | 1273 | F: include/linux/nl80211.h |
@@ -1480,66 +1276,47 @@ F: net/wireless/* | |||
1480 | X: net/wireless/wext* | 1276 | X: net/wireless/wext* |
1481 | 1277 | ||
1482 | CHECKPATCH | 1278 | CHECKPATCH |
1483 | P: Andy Whitcroft | 1279 | M: Andy Whitcroft <apw@canonical.com> |
1484 | M: apw@canonical.com | ||
1485 | S: Supported | 1280 | S: Supported |
1486 | F: scripts/checkpatch.pl | 1281 | F: scripts/checkpatch.pl |
1487 | 1282 | ||
1488 | CISCO 10G ETHERNET DRIVER | 1283 | CISCO 10G ETHERNET DRIVER |
1489 | P: Scott Feldman | 1284 | M: Scott Feldman <scofeldm@cisco.com> |
1490 | M: scofeldm@cisco.com | 1285 | M: Joe Eykholt <jeykholt@cisco.com> |
1491 | P: Joe Eykholt | ||
1492 | M: jeykholt@cisco.com | ||
1493 | S: Supported | 1286 | S: Supported |
1494 | F: drivers/net/enic/ | 1287 | F: drivers/net/enic/ |
1495 | 1288 | ||
1496 | CIRRUS LOGIC EP93XX ETHERNET DRIVER | 1289 | CIRRUS LOGIC EP93XX ETHERNET DRIVER |
1497 | P: Lennert Buytenhek | 1290 | M: Lennert Buytenhek <kernel@wantstofly.org> |
1498 | M: kernel@wantstofly.org | ||
1499 | L: netdev@vger.kernel.org | 1291 | L: netdev@vger.kernel.org |
1500 | S: Maintained | 1292 | S: Maintained |
1501 | F: drivers/net/arm/ep93xx_eth.c | 1293 | F: drivers/net/arm/ep93xx_eth.c |
1502 | 1294 | ||
1503 | CIRRUS LOGIC EP93XX OHCI USB HOST DRIVER | 1295 | CIRRUS LOGIC EP93XX OHCI USB HOST DRIVER |
1504 | P: Lennert Buytenhek | 1296 | M: Lennert Buytenhek <kernel@wantstofly.org> |
1505 | M: kernel@wantstofly.org | ||
1506 | L: linux-usb@vger.kernel.org | 1297 | L: linux-usb@vger.kernel.org |
1507 | S: Maintained | 1298 | S: Maintained |
1508 | F: drivers/usb/host/ohci-ep93xx.c | 1299 | F: drivers/usb/host/ohci-ep93xx.c |
1509 | 1300 | ||
1510 | CIRRUS LOGIC CS4270 SOUND DRIVER | 1301 | CIRRUS LOGIC CS4270 SOUND DRIVER |
1511 | P: Timur Tabi | 1302 | M: Timur Tabi <timur@freescale.com> |
1512 | M: timur@freescale.com | ||
1513 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | 1303 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) |
1514 | S: Supported | 1304 | S: Supported |
1515 | F: sound/soc/codecs/cs4270* | 1305 | F: sound/soc/codecs/cs4270* |
1516 | 1306 | ||
1517 | CIRRUS LOGIC CS4280/CS461x SOUNDDRIVER | ||
1518 | P: Cirrus Logic Corporation (kernel 2.2 driver) | ||
1519 | M: Cirrus Logic Corporation, Thomas Woller <twoller@crystal.cirrus.com> | ||
1520 | P: Nils Faerber (port to kernel 2.4) | ||
1521 | M: Nils Faerber <nils@kernelconcepts.de> | ||
1522 | S: Maintained | ||
1523 | F: Documentation/input/cs461x.txt | ||
1524 | F: sound/pci/cs46xx/ | ||
1525 | |||
1526 | CLK API | 1307 | CLK API |
1527 | P: Russell King | 1308 | M: Russell King <linux@arm.linux.org.uk> |
1528 | M: linux@arm.linux.org.uk | ||
1529 | F: include/linux/clk.h | 1309 | F: include/linux/clk.h |
1530 | 1310 | ||
1531 | CISCO FCOE HBA DRIVER | 1311 | CISCO FCOE HBA DRIVER |
1532 | P: Abhijeet Joglekar | 1312 | M: Abhijeet Joglekar <abjoglek@cisco.com> |
1533 | M: abjoglek@cisco.com | 1313 | M: Joe Eykholt <jeykholt@cisco.com> |
1534 | P: Joe Eykholt | ||
1535 | M: jeykholt@cisco.com | ||
1536 | L: linux-scsi@vger.kernel.org | 1314 | L: linux-scsi@vger.kernel.org |
1537 | S: Supported | 1315 | S: Supported |
1538 | F: drivers/scsi/fnic/ | 1316 | F: drivers/scsi/fnic/ |
1539 | 1317 | ||
1540 | CODA FILE SYSTEM | 1318 | CODA FILE SYSTEM |
1541 | P: Jan Harkes | 1319 | M: Jan Harkes <jaharkes@cs.cmu.edu> |
1542 | M: jaharkes@cs.cmu.edu | ||
1543 | M: coda@cs.cmu.edu | 1320 | M: coda@cs.cmu.edu |
1544 | L: codalist@coda.cs.cmu.edu | 1321 | L: codalist@coda.cs.cmu.edu |
1545 | W: http://www.coda.cs.cmu.edu/ | 1322 | W: http://www.coda.cs.cmu.edu/ |
@@ -1549,8 +1326,7 @@ F: fs/coda/ | |||
1549 | F: include/linux/coda*.h | 1326 | F: include/linux/coda*.h |
1550 | 1327 | ||
1551 | COMMON INTERNET FILE SYSTEM (CIFS) | 1328 | COMMON INTERNET FILE SYSTEM (CIFS) |
1552 | P: Steve French | 1329 | M: Steve French <sfrench@samba.org> |
1553 | M: sfrench@samba.org | ||
1554 | L: linux-cifs-client@lists.samba.org | 1330 | L: linux-cifs-client@lists.samba.org |
1555 | L: samba-technical@lists.samba.org | 1331 | L: samba-technical@lists.samba.org |
1556 | W: http://linux-cifs.samba.org/ | 1332 | W: http://linux-cifs.samba.org/ |
@@ -1560,70 +1336,57 @@ F: Documentation/filesystems/cifs.txt | |||
1560 | F: fs/cifs/ | 1336 | F: fs/cifs/ |
1561 | 1337 | ||
1562 | COMPACTPCI HOTPLUG CORE | 1338 | COMPACTPCI HOTPLUG CORE |
1563 | P: Scott Murray | 1339 | M: Scott Murray <scott@spiteful.org> |
1564 | M: scottm@somanetworks.com | ||
1565 | M: scott@spiteful.org | ||
1566 | L: linux-pci@vger.kernel.org | 1340 | L: linux-pci@vger.kernel.org |
1567 | S: Supported | 1341 | S: Maintained |
1568 | F: drivers/pci/hotplug/cpci_hotplug* | 1342 | F: drivers/pci/hotplug/cpci_hotplug* |
1569 | 1343 | ||
1570 | COMPACTPCI HOTPLUG ZIATECH ZT5550 DRIVER | 1344 | COMPACTPCI HOTPLUG ZIATECH ZT5550 DRIVER |
1571 | P: Scott Murray | 1345 | M: Scott Murray <scott@spiteful.org> |
1572 | M: scottm@somanetworks.com | ||
1573 | M: scott@spiteful.org | ||
1574 | L: linux-pci@vger.kernel.org | 1346 | L: linux-pci@vger.kernel.org |
1575 | S: Supported | 1347 | S: Maintained |
1576 | F: drivers/pci/hotplug/cpcihp_zt5550.* | 1348 | F: drivers/pci/hotplug/cpcihp_zt5550.* |
1577 | 1349 | ||
1578 | COMPACTPCI HOTPLUG GENERIC DRIVER | 1350 | COMPACTPCI HOTPLUG GENERIC DRIVER |
1579 | P: Scott Murray | 1351 | M: Scott Murray <scott@spiteful.org> |
1580 | M: scottm@somanetworks.com | ||
1581 | M: scott@spiteful.org | ||
1582 | L: linux-pci@vger.kernel.org | 1352 | L: linux-pci@vger.kernel.org |
1583 | S: Supported | 1353 | S: Maintained |
1584 | F: drivers/pci/hotplug/cpcihp_generic.c | 1354 | F: drivers/pci/hotplug/cpcihp_generic.c |
1585 | 1355 | ||
1586 | COMPAL LAPTOP SUPPORT | 1356 | COMPAL LAPTOP SUPPORT |
1587 | P: Cezary Jackiewicz | 1357 | M: Cezary Jackiewicz <cezary.jackiewicz@gmail.com> |
1588 | M: cezary.jackiewicz@gmail.com | ||
1589 | S: Maintained | 1358 | S: Maintained |
1590 | F: drivers/platform/x86/compal-laptop.c | 1359 | F: drivers/platform/x86/compal-laptop.c |
1591 | 1360 | ||
1592 | COMPUTONE INTELLIPORT MULTIPORT CARD | 1361 | COMPUTONE INTELLIPORT MULTIPORT CARD |
1593 | P: Michael H. Warfield | 1362 | M: "Michael H. Warfield" <mhw@wittsend.com> |
1594 | M: mhw@wittsend.com | ||
1595 | W: http://www.wittsend.com/computone.html | 1363 | W: http://www.wittsend.com/computone.html |
1596 | S: Maintained | 1364 | S: Maintained |
1597 | F: Documentation/serial/computone.txt | 1365 | F: Documentation/serial/computone.txt |
1598 | F: drivers/char/ip2/ | 1366 | F: drivers/char/ip2/ |
1599 | 1367 | ||
1600 | CONEXANT ACCESSRUNNER USB DRIVER | 1368 | CONEXANT ACCESSRUNNER USB DRIVER |
1601 | P: Simon Arlott | 1369 | M: Simon Arlott <cxacru@fire.lp0.eu> |
1602 | M: cxacru@fire.lp0.eu | ||
1603 | L: accessrunner-general@lists.sourceforge.net | 1370 | L: accessrunner-general@lists.sourceforge.net |
1604 | W: http://accessrunner.sourceforge.net/ | 1371 | W: http://accessrunner.sourceforge.net/ |
1605 | S: Maintained | 1372 | S: Maintained |
1606 | F: drivers/usb/atm/cxacru.c | 1373 | F: drivers/usb/atm/cxacru.c |
1607 | 1374 | ||
1608 | CONFIGFS | 1375 | CONFIGFS |
1609 | P: Joel Becker | 1376 | M: Joel Becker <joel.becker@oracle.com> |
1610 | M: joel.becker@oracle.com | ||
1611 | S: Supported | 1377 | S: Supported |
1612 | F: fs/configfs/ | 1378 | F: fs/configfs/ |
1613 | F: include/linux/configfs.h | 1379 | F: include/linux/configfs.h |
1614 | 1380 | ||
1615 | CONNECTOR | 1381 | CONNECTOR |
1616 | P: Evgeniy Polyakov | 1382 | M: Evgeniy Polyakov <zbr@ioremap.net> |
1617 | M: zbr@ioremap.net | ||
1618 | L: netdev@vger.kernel.org | 1383 | L: netdev@vger.kernel.org |
1619 | S: Maintained | 1384 | S: Maintained |
1620 | F: drivers/connector/ | 1385 | F: drivers/connector/ |
1621 | 1386 | ||
1622 | CONTROL GROUPS (CGROUPS) | 1387 | CONTROL GROUPS (CGROUPS) |
1623 | P: Paul Menage | 1388 | M: Paul Menage <menage@google.com> |
1624 | M: menage@google.com | 1389 | M: Li Zefan <lizf@cn.fujitsu.com> |
1625 | P: Li Zefan | ||
1626 | M: lizf@cn.fujitsu.com | ||
1627 | L: containers@lists.linux-foundation.org | 1390 | L: containers@lists.linux-foundation.org |
1628 | S: Maintained | 1391 | S: Maintained |
1629 | F: include/linux/cgroup* | 1392 | F: include/linux/cgroup* |
@@ -1631,30 +1394,26 @@ F: kernel/cgroup* | |||
1631 | F: mm/*cgroup* | 1394 | F: mm/*cgroup* |
1632 | 1395 | ||
1633 | CORETEMP HARDWARE MONITORING DRIVER | 1396 | CORETEMP HARDWARE MONITORING DRIVER |
1634 | P: Rudolf Marek | 1397 | M: Rudolf Marek <r.marek@assembler.cz> |
1635 | M: r.marek@assembler.cz | ||
1636 | L: lm-sensors@lm-sensors.org | 1398 | L: lm-sensors@lm-sensors.org |
1637 | S: Maintained | 1399 | S: Maintained |
1638 | F: Documentation/hwmon/coretemp | 1400 | F: Documentation/hwmon/coretemp |
1639 | F: drivers/hwmon/coretemp.c | 1401 | F: drivers/hwmon/coretemp.c |
1640 | 1402 | ||
1641 | COSA/SRP SYNC SERIAL DRIVER | 1403 | COSA/SRP SYNC SERIAL DRIVER |
1642 | P: Jan "Yenya" Kasprzak | 1404 | M: Jan "Yenya" Kasprzak <kas@fi.muni.cz> |
1643 | M: kas@fi.muni.cz | ||
1644 | W: http://www.fi.muni.cz/~kas/cosa/ | 1405 | W: http://www.fi.muni.cz/~kas/cosa/ |
1645 | S: Maintained | 1406 | S: Maintained |
1646 | F: drivers/net/wan/cosa* | 1407 | F: drivers/net/wan/cosa* |
1647 | 1408 | ||
1648 | CPMAC ETHERNET DRIVER | 1409 | CPMAC ETHERNET DRIVER |
1649 | P: Florian Fainelli | 1410 | M: Florian Fainelli <florian@openwrt.org> |
1650 | M: florian@openwrt.org | ||
1651 | L: netdev@vger.kernel.org | 1411 | L: netdev@vger.kernel.org |
1652 | S: Maintained | 1412 | S: Maintained |
1653 | F: drivers/net/cpmac.c | 1413 | F: drivers/net/cpmac.c |
1654 | 1414 | ||
1655 | CPU FREQUENCY DRIVERS | 1415 | CPU FREQUENCY DRIVERS |
1656 | P: Dave Jones | 1416 | M: Dave Jones <davej@redhat.com> |
1657 | M: davej@redhat.com | ||
1658 | L: cpufreq@vger.kernel.org | 1417 | L: cpufreq@vger.kernel.org |
1659 | W: http://www.codemonkey.org.uk/projects/cpufreq/ | 1418 | W: http://www.codemonkey.org.uk/projects/cpufreq/ |
1660 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git | 1419 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git |
@@ -1664,15 +1423,13 @@ F: drivers/cpufreq/ | |||
1664 | F: include/linux/cpufreq.h | 1423 | F: include/linux/cpufreq.h |
1665 | 1424 | ||
1666 | CPUID/MSR DRIVER | 1425 | CPUID/MSR DRIVER |
1667 | P: H. Peter Anvin | 1426 | M: "H. Peter Anvin" <hpa@zytor.com> |
1668 | M: hpa@zytor.com | ||
1669 | S: Maintained | 1427 | S: Maintained |
1670 | F: arch/x86/kernel/cpuid.c | 1428 | F: arch/x86/kernel/cpuid.c |
1671 | F: arch/x86/kernel/msr.c | 1429 | F: arch/x86/kernel/msr.c |
1672 | 1430 | ||
1673 | CPUSETS | 1431 | CPUSETS |
1674 | P: Paul Menage | 1432 | M: Paul Menage <menage@google.com> |
1675 | M: menage@google.com | ||
1676 | W: http://www.bullopensource.org/cpuset/ | 1433 | W: http://www.bullopensource.org/cpuset/ |
1677 | W: http://oss.sgi.com/projects/cpusets/ | 1434 | W: http://oss.sgi.com/projects/cpusets/ |
1678 | S: Supported | 1435 | S: Supported |
@@ -1687,20 +1444,16 @@ F: Documentation/filesystems/cramfs.txt | |||
1687 | F: fs/cramfs/ | 1444 | F: fs/cramfs/ |
1688 | 1445 | ||
1689 | CRIS PORT | 1446 | CRIS PORT |
1690 | P: Mikael Starvik | 1447 | M: Mikael Starvik <starvik@axis.com> |
1691 | M: starvik@axis.com | 1448 | M: Jesper Nilsson <jesper.nilsson@axis.com> |
1692 | P: Jesper Nilsson | ||
1693 | M: jesper.nilsson@axis.com | ||
1694 | L: linux-cris-kernel@axis.com | 1449 | L: linux-cris-kernel@axis.com |
1695 | W: http://developer.axis.com | 1450 | W: http://developer.axis.com |
1696 | S: Maintained | 1451 | S: Maintained |
1697 | F: arch/cris/ | 1452 | F: arch/cris/ |
1698 | 1453 | ||
1699 | CRYPTO API | 1454 | CRYPTO API |
1700 | P: Herbert Xu | 1455 | M: Herbert Xu <herbert@gondor.apana.org.au> |
1701 | M: herbert@gondor.apana.org.au | 1456 | M: "David S. Miller" <davem@davemloft.net> |
1702 | P: David S. Miller | ||
1703 | M: davem@davemloft.net | ||
1704 | L: linux-crypto@vger.kernel.org | 1457 | L: linux-crypto@vger.kernel.org |
1705 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6.git | 1458 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6.git |
1706 | S: Maintained | 1459 | S: Maintained |
@@ -1711,58 +1464,50 @@ F: drivers/crypto/ | |||
1711 | F: include/crypto/ | 1464 | F: include/crypto/ |
1712 | 1465 | ||
1713 | CRYPTOGRAPHIC RANDOM NUMBER GENERATOR | 1466 | CRYPTOGRAPHIC RANDOM NUMBER GENERATOR |
1714 | P: Neil Horman | 1467 | M: Neil Horman <nhorman@tuxdriver.com> |
1715 | M: nhorman@tuxdriver.com | ||
1716 | L: linux-crypto@vger.kernel.org | 1468 | L: linux-crypto@vger.kernel.org |
1717 | S: Maintained | 1469 | S: Maintained |
1718 | 1470 | ||
1719 | CS5535 Audio ALSA driver | 1471 | CS5535 Audio ALSA driver |
1720 | P: Jaya Kumar | 1472 | M: Jaya Kumar <jayakumar.alsa@gmail.com> |
1721 | M: jayakumar.alsa@gmail.com | ||
1722 | S: Maintained | 1473 | S: Maintained |
1723 | F: sound/pci/cs5535audio/ | 1474 | F: sound/pci/cs5535audio/ |
1724 | 1475 | ||
1725 | CX18 VIDEO4LINUX DRIVER | 1476 | CX18 VIDEO4LINUX DRIVER |
1726 | P: Hans Verkuil | 1477 | M: Hans Verkuil <hverkuil@xs4all.nl> |
1727 | M: hverkuil@xs4all.nl | 1478 | M: Andy Walls <awalls@radix.net> |
1728 | P: Andy Walls | ||
1729 | M: awalls@radix.net | ||
1730 | L: ivtv-devel@ivtvdriver.org | 1479 | L: ivtv-devel@ivtvdriver.org |
1731 | L: ivtv-users@ivtvdriver.org | ||
1732 | L: linux-media@vger.kernel.org | 1480 | L: linux-media@vger.kernel.org |
1733 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git | 1481 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git |
1734 | W: http://linuxtv.org | 1482 | W: http://linuxtv.org |
1483 | W: http://www.ivtvdriver.org/index.php/Cx18 | ||
1735 | S: Maintained | 1484 | S: Maintained |
1736 | F: Documentation/video4linux/cx18.txt | 1485 | F: Documentation/video4linux/cx18.txt |
1737 | F: drivers/media/video/cx18/ | 1486 | F: drivers/media/video/cx18/ |
1738 | 1487 | ||
1739 | CXGB3 ETHERNET DRIVER (CXGB3) | 1488 | CXGB3 ETHERNET DRIVER (CXGB3) |
1740 | P: Divy Le Ray | 1489 | M: Divy Le Ray <divy@chelsio.com> |
1741 | M: divy@chelsio.com | ||
1742 | L: netdev@vger.kernel.org | 1490 | L: netdev@vger.kernel.org |
1743 | W: http://www.chelsio.com | 1491 | W: http://www.chelsio.com |
1744 | S: Supported | 1492 | S: Supported |
1745 | F: drivers/net/cxgb3/ | 1493 | F: drivers/net/cxgb3/ |
1746 | 1494 | ||
1747 | CXGB3 IWARP RNIC DRIVER (IW_CXGB3) | 1495 | CXGB3 IWARP RNIC DRIVER (IW_CXGB3) |
1748 | P: Steve Wise | 1496 | M: Steve Wise <swise@chelsio.com> |
1749 | M: swise@chelsio.com | ||
1750 | L: general@lists.openfabrics.org | 1497 | L: general@lists.openfabrics.org |
1751 | W: http://www.openfabrics.org | 1498 | W: http://www.openfabrics.org |
1752 | S: Supported | 1499 | S: Supported |
1753 | F: drivers/infiniband/hw/cxgb3/ | 1500 | F: drivers/infiniband/hw/cxgb3/ |
1754 | 1501 | ||
1755 | CYBERPRO FB DRIVER | 1502 | CYBERPRO FB DRIVER |
1756 | P: Russell King | 1503 | M: Russell King <linux@arm.linux.org.uk> |
1757 | M: linux@arm.linux.org.uk | ||
1758 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 1504 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
1759 | W: http://www.arm.linux.org.uk/ | 1505 | W: http://www.arm.linux.org.uk/ |
1760 | S: Maintained | 1506 | S: Maintained |
1761 | F: drivers/video/cyber2000fb.* | 1507 | F: drivers/video/cyber2000fb.* |
1762 | 1508 | ||
1763 | CYCLADES 2X SYNC CARD DRIVER | 1509 | CYCLADES 2X SYNC CARD DRIVER |
1764 | P: Arnaldo Carvalho de Melo | 1510 | M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> |
1765 | M: acme@ghostprotocols.net | ||
1766 | W: http://oops.ghostprotocols.net:81/blog | 1511 | W: http://oops.ghostprotocols.net:81/blog |
1767 | S: Maintained | 1512 | S: Maintained |
1768 | F: drivers/net/wan/cycx* | 1513 | F: drivers/net/wan/cycx* |
@@ -1779,8 +1524,7 @@ S: Orphan | |||
1779 | F: drivers/net/wan/pc300* | 1524 | F: drivers/net/wan/pc300* |
1780 | 1525 | ||
1781 | DAMA SLAVE for AX.25 | 1526 | DAMA SLAVE for AX.25 |
1782 | P: Joerg Reuter | 1527 | M: Joerg Reuter <jreuter@yaina.de> |
1783 | M: jreuter@yaina.de | ||
1784 | W: http://yaina.de/jreuter/ | 1528 | W: http://yaina.de/jreuter/ |
1785 | W: http://www.qsl.net/dl1bke/ | 1529 | W: http://www.qsl.net/dl1bke/ |
1786 | L: linux-hams@vger.kernel.org | 1530 | L: linux-hams@vger.kernel.org |
@@ -1794,29 +1538,23 @@ F: net/ax25/ax25_timer.c | |||
1794 | F: net/ax25/sysctl_net_ax25.c | 1538 | F: net/ax25/sysctl_net_ax25.c |
1795 | 1539 | ||
1796 | DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER | 1540 | DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER |
1797 | P: Tobias Ringstrom | 1541 | M: Tobias Ringstrom <tori@unhappy.mine.nu> |
1798 | M: tori@unhappy.mine.nu | ||
1799 | L: netdev@vger.kernel.org | 1542 | L: netdev@vger.kernel.org |
1800 | S: Maintained | 1543 | S: Maintained |
1801 | F: Documentation/networking/dmfe.txt | 1544 | F: Documentation/networking/dmfe.txt |
1802 | F: drivers/net/tulip/dmfe.c | 1545 | F: drivers/net/tulip/dmfe.c |
1803 | 1546 | ||
1804 | DC390/AM53C974 SCSI driver | 1547 | DC390/AM53C974 SCSI driver |
1805 | P: Kurt Garloff | 1548 | M: Kurt Garloff <garloff@suse.de> |
1806 | M: garloff@suse.de | ||
1807 | W: http://www.garloff.de/kurt/linux/dc390/ | 1549 | W: http://www.garloff.de/kurt/linux/dc390/ |
1808 | P: Guennadi Liakhovetski | 1550 | M: Guennadi Liakhovetski <g.liakhovetski@gmx.de> |
1809 | M: g.liakhovetski@gmx.de | ||
1810 | S: Maintained | 1551 | S: Maintained |
1811 | F: drivers/scsi/tmscsim.* | 1552 | F: drivers/scsi/tmscsim.* |
1812 | 1553 | ||
1813 | DC395x SCSI driver | 1554 | DC395x SCSI driver |
1814 | P: Oliver Neukum | 1555 | M: Oliver Neukum <oliver@neukum.name> |
1815 | M: oliver@neukum.name | 1556 | M: Ali Akcaagac <aliakc@web.de> |
1816 | P: Ali Akcaagac | 1557 | M: Jamie Lenehan <lenehan@twibble.org> |
1817 | M: aliakc@web.de | ||
1818 | P: Jamie Lenehan | ||
1819 | M: lenehan@twibble.org | ||
1820 | W: http://twibble.org/dist/dc395x/ | 1558 | W: http://twibble.org/dist/dc395x/ |
1821 | L: dc395x@twibble.org | 1559 | L: dc395x@twibble.org |
1822 | L: http://lists.twibble.org/mailman/listinfo/dc395x/ | 1560 | L: http://lists.twibble.org/mailman/listinfo/dc395x/ |
@@ -1825,8 +1563,7 @@ F: Documentation/scsi/dc395x.txt | |||
1825 | F: drivers/scsi/dc395x.* | 1563 | F: drivers/scsi/dc395x.* |
1826 | 1564 | ||
1827 | DCCP PROTOCOL | 1565 | DCCP PROTOCOL |
1828 | P: Arnaldo Carvalho de Melo | 1566 | M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> |
1829 | M: acme@ghostprotocols.net | ||
1830 | L: dccp@vger.kernel.org | 1567 | L: dccp@vger.kernel.org |
1831 | W: http://linux-net.osdl.org/index.php/DCCP | 1568 | W: http://linux-net.osdl.org/index.php/DCCP |
1832 | S: Maintained | 1569 | S: Maintained |
@@ -1835,8 +1572,7 @@ F: include/linux/tfrc.h | |||
1835 | F: net/dccp/ | 1572 | F: net/dccp/ |
1836 | 1573 | ||
1837 | DECnet NETWORK LAYER | 1574 | DECnet NETWORK LAYER |
1838 | P: Christine Caulfield | 1575 | M: Christine Caulfield <christine.caulfield@googlemail.com> |
1839 | M: christine.caulfield@googlemail.com | ||
1840 | W: http://linux-decnet.sourceforge.net | 1576 | W: http://linux-decnet.sourceforge.net |
1841 | L: linux-decnet-user@lists.sourceforge.net | 1577 | L: linux-decnet-user@lists.sourceforge.net |
1842 | S: Maintained | 1578 | S: Maintained |
@@ -1844,40 +1580,34 @@ F: Documentation/networking/decnet.txt | |||
1844 | F: net/decnet/ | 1580 | F: net/decnet/ |
1845 | 1581 | ||
1846 | DEFXX FDDI NETWORK DRIVER | 1582 | DEFXX FDDI NETWORK DRIVER |
1847 | P: Maciej W. Rozycki | 1583 | M: "Maciej W. Rozycki" <macro@linux-mips.org> |
1848 | M: macro@linux-mips.org | ||
1849 | S: Maintained | 1584 | S: Maintained |
1850 | F: drivers/net/defxx.* | 1585 | F: drivers/net/defxx.* |
1851 | 1586 | ||
1852 | DELL LAPTOP DRIVER | 1587 | DELL LAPTOP DRIVER |
1853 | P: Matthew Garrett | 1588 | M: Matthew Garrett <mjg59@srcf.ucam.org> |
1854 | M: mjg59@srcf.ucam.org | ||
1855 | S: Maintained | 1589 | S: Maintained |
1856 | F: drivers/platform/x86/dell-laptop.c | 1590 | F: drivers/platform/x86/dell-laptop.c |
1857 | 1591 | ||
1858 | DELL LAPTOP SMM DRIVER | 1592 | DELL LAPTOP SMM DRIVER |
1859 | P: Massimo Dal Zotto | 1593 | M: Massimo Dal Zotto <dz@debian.org> |
1860 | M: dz@debian.org | ||
1861 | W: http://www.debian.org/~dz/i8k/ | 1594 | W: http://www.debian.org/~dz/i8k/ |
1862 | S: Maintained | 1595 | S: Maintained |
1863 | F: drivers/char/i8k.c | 1596 | F: drivers/char/i8k.c |
1864 | F: include/linux/i8k.h | 1597 | F: include/linux/i8k.h |
1865 | 1598 | ||
1866 | DELL SYSTEMS MANAGEMENT BASE DRIVER (dcdbas) | 1599 | DELL SYSTEMS MANAGEMENT BASE DRIVER (dcdbas) |
1867 | P: Doug Warzecha | 1600 | M: Doug Warzecha <Douglas_Warzecha@dell.com> |
1868 | M: Douglas_Warzecha@dell.com | ||
1869 | S: Maintained | 1601 | S: Maintained |
1870 | F: Documentation/dcdbas.txt | 1602 | F: Documentation/dcdbas.txt |
1871 | F: drivers/firmware/dcdbas.* | 1603 | F: drivers/firmware/dcdbas.* |
1872 | 1604 | ||
1873 | DELL WMI EXTRAS DRIVER | 1605 | DELL WMI EXTRAS DRIVER |
1874 | P: Matthew Garrett | 1606 | M: Matthew Garrett <mjg59@srcf.ucam.org> |
1875 | M: mjg59@srcf.ucam.org | ||
1876 | S: Maintained | 1607 | S: Maintained |
1877 | 1608 | ||
1878 | DEVICE NUMBER REGISTRY | 1609 | DEVICE NUMBER REGISTRY |
1879 | P: Torben Mathiasen | 1610 | M: Torben Mathiasen <device@lanana.org> |
1880 | M: device@lanana.org | ||
1881 | W: http://lanana.org/docs/device-list/index.html | 1611 | W: http://lanana.org/docs/device-list/index.html |
1882 | S: Maintained | 1612 | S: Maintained |
1883 | 1613 | ||
@@ -1892,8 +1622,7 @@ F: include/linux/device-mapper.h | |||
1892 | F: include/linux/dm-*.h | 1622 | F: include/linux/dm-*.h |
1893 | 1623 | ||
1894 | DIGI INTL. EPCA DRIVER | 1624 | DIGI INTL. EPCA DRIVER |
1895 | P: Digi International, Inc | 1625 | M: "Digi International, Inc" <Eng.Linux@digi.com> |
1896 | M: Eng.Linux@digi.com | ||
1897 | L: Eng.Linux@digi.com | 1626 | L: Eng.Linux@digi.com |
1898 | W: http://www.digi.com | 1627 | W: http://www.digi.com |
1899 | S: Orphan | 1628 | S: Orphan |
@@ -1902,34 +1631,29 @@ F: drivers/char/epca* | |||
1902 | F: drivers/char/digi* | 1631 | F: drivers/char/digi* |
1903 | 1632 | ||
1904 | DIRECTORY NOTIFICATION (DNOTIFY) | 1633 | DIRECTORY NOTIFICATION (DNOTIFY) |
1905 | P: Eric Paris | 1634 | M: Eric Paris <eparis@parisplace.org> |
1906 | M: eparis@parisplace.org | ||
1907 | S: Maintained | 1635 | S: Maintained |
1908 | F: Documentation/filesystems/dnotify.txt | 1636 | F: Documentation/filesystems/dnotify.txt |
1909 | F: fs/notify/dnotify/ | 1637 | F: fs/notify/dnotify/ |
1910 | F: include/linux/dnotify.h | 1638 | F: include/linux/dnotify.h |
1911 | 1639 | ||
1912 | DISK GEOMETRY AND PARTITION HANDLING | 1640 | DISK GEOMETRY AND PARTITION HANDLING |
1913 | P: Andries Brouwer | 1641 | M: Andries Brouwer <aeb@cwi.nl> |
1914 | M: aeb@cwi.nl | ||
1915 | W: http://www.win.tue.nl/~aeb/linux/Large-Disk.html | 1642 | W: http://www.win.tue.nl/~aeb/linux/Large-Disk.html |
1916 | W: http://www.win.tue.nl/~aeb/linux/zip/zip-1.html | 1643 | W: http://www.win.tue.nl/~aeb/linux/zip/zip-1.html |
1917 | W: http://www.win.tue.nl/~aeb/partitions/partition_types-1.html | 1644 | W: http://www.win.tue.nl/~aeb/partitions/partition_types-1.html |
1918 | S: Maintained | 1645 | S: Maintained |
1919 | 1646 | ||
1920 | DISKQUOTA | 1647 | DISKQUOTA |
1921 | P: Jan Kara | 1648 | M: Jan Kara <jack@suse.cz> |
1922 | M: jack@suse.cz | ||
1923 | S: Maintained | 1649 | S: Maintained |
1924 | F: Documentation/filesystems/quota.txt | 1650 | F: Documentation/filesystems/quota.txt |
1925 | F: fs/quota/ | 1651 | F: fs/quota/ |
1926 | F: include/linux/quota*.h | 1652 | F: include/linux/quota*.h |
1927 | 1653 | ||
1928 | DISTRIBUTED LOCK MANAGER (DLM) | 1654 | DISTRIBUTED LOCK MANAGER (DLM) |
1929 | P: Christine Caulfield | 1655 | M: Christine Caulfield <ccaulfie@redhat.com> |
1930 | M: ccaulfie@redhat.com | 1656 | M: David Teigland <teigland@redhat.com> |
1931 | P: David Teigland | ||
1932 | M: teigland@redhat.com | ||
1933 | L: cluster-devel@redhat.com | 1657 | L: cluster-devel@redhat.com |
1934 | W: http://sources.redhat.com/cluster/ | 1658 | W: http://sources.redhat.com/cluster/ |
1935 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/teigland/dlm.git | 1659 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/teigland/dlm.git |
@@ -1937,52 +1661,44 @@ S: Supported | |||
1937 | F: fs/dlm/ | 1661 | F: fs/dlm/ |
1938 | 1662 | ||
1939 | DMA GENERIC OFFLOAD ENGINE SUBSYSTEM | 1663 | DMA GENERIC OFFLOAD ENGINE SUBSYSTEM |
1940 | P: Maciej Sosnowski | 1664 | M: Maciej Sosnowski <maciej.sosnowski@intel.com> |
1941 | M: maciej.sosnowski@intel.com | 1665 | M: Dan Williams <dan.j.williams@intel.com> |
1942 | P: Dan Williams | ||
1943 | M: dan.j.williams@intel.com | ||
1944 | S: Supported | 1666 | S: Supported |
1945 | F: drivers/dma/ | 1667 | F: drivers/dma/ |
1946 | F: include/linux/dma* | 1668 | F: include/linux/dma* |
1947 | 1669 | ||
1948 | DME1737 HARDWARE MONITOR DRIVER | 1670 | DME1737 HARDWARE MONITOR DRIVER |
1949 | P: Juerg Haefliger | 1671 | M: Juerg Haefliger <juergh@gmail.com> |
1950 | M: juergh@gmail.com | ||
1951 | L: lm-sensors@lm-sensors.org | 1672 | L: lm-sensors@lm-sensors.org |
1952 | S: Maintained | 1673 | S: Maintained |
1953 | F: Documentation/hwmon/dme1737 | 1674 | F: Documentation/hwmon/dme1737 |
1954 | F: drivers/hwmon/dme1737.c | 1675 | F: drivers/hwmon/dme1737.c |
1955 | 1676 | ||
1956 | DOCBOOK FOR DOCUMENTATION | 1677 | DOCBOOK FOR DOCUMENTATION |
1957 | P: Randy Dunlap | 1678 | M: Randy Dunlap <rdunlap@xenotime.net> |
1958 | M: rdunlap@xenotime.net | ||
1959 | S: Maintained | 1679 | S: Maintained |
1960 | 1680 | ||
1961 | DOCKING STATION DRIVER | 1681 | DOCKING STATION DRIVER |
1962 | P: Shaohua Li | 1682 | M: Shaohua Li <shaohua.li@intel.com> |
1963 | M: shaohua.li@intel.com | ||
1964 | L: linux-acpi@vger.kernel.org | 1683 | L: linux-acpi@vger.kernel.org |
1965 | S: Supported | 1684 | S: Supported |
1966 | F: drivers/acpi/dock.c | 1685 | F: drivers/acpi/dock.c |
1967 | 1686 | ||
1968 | DOCUMENTATION | 1687 | DOCUMENTATION |
1969 | P: Randy Dunlap | 1688 | M: Randy Dunlap <rdunlap@xenotime.net> |
1970 | M: rdunlap@xenotime.net | ||
1971 | L: linux-doc@vger.kernel.org | 1689 | L: linux-doc@vger.kernel.org |
1972 | S: Maintained | 1690 | S: Maintained |
1973 | F: Documentation/ | 1691 | F: Documentation/ |
1974 | 1692 | ||
1975 | DOUBLETALK DRIVER | 1693 | DOUBLETALK DRIVER |
1976 | P: James R. Van Zandt | 1694 | M: "James R. Van Zandt" <jrv@vanzandt.mv.com> |
1977 | M: jrv@vanzandt.mv.com | ||
1978 | L: blinux-list@redhat.com | 1695 | L: blinux-list@redhat.com |
1979 | S: Maintained | 1696 | S: Maintained |
1980 | F: drivers/char/dtlk.c | 1697 | F: drivers/char/dtlk.c |
1981 | F: include/linux/dtlk.h | 1698 | F: include/linux/dtlk.h |
1982 | 1699 | ||
1983 | DPT_I2O SCSI RAID DRIVER | 1700 | DPT_I2O SCSI RAID DRIVER |
1984 | P: Adaptec OEM Raid Solutions | 1701 | M: Adaptec OEM Raid Solutions <aacraid@adaptec.com> |
1985 | M: aacraid@adaptec.com | ||
1986 | L: linux-scsi@vger.kernel.org | 1702 | L: linux-scsi@vger.kernel.org |
1987 | W: http://www.adaptec.com/ | 1703 | W: http://www.adaptec.com/ |
1988 | S: Maintained | 1704 | S: Maintained |
@@ -1990,8 +1706,7 @@ F: drivers/scsi/dpt* | |||
1990 | F: drivers/scsi/dpt/ | 1706 | F: drivers/scsi/dpt/ |
1991 | 1707 | ||
1992 | DRIVER CORE, KOBJECTS, AND SYSFS | 1708 | DRIVER CORE, KOBJECTS, AND SYSFS |
1993 | P: Greg Kroah-Hartman | 1709 | M: Greg Kroah-Hartman <gregkh@suse.de> |
1994 | M: gregkh@suse.de | ||
1995 | T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/ | 1710 | T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/ |
1996 | S: Supported | 1711 | S: Supported |
1997 | F: Documentation/kobject.txt | 1712 | F: Documentation/kobject.txt |
@@ -2001,52 +1716,45 @@ F: include/linux/kobj* | |||
2001 | F: lib/kobj* | 1716 | F: lib/kobj* |
2002 | 1717 | ||
2003 | DRM DRIVERS | 1718 | DRM DRIVERS |
2004 | P: David Airlie | 1719 | M: David Airlie <airlied@linux.ie> |
2005 | M: airlied@linux.ie | ||
2006 | L: dri-devel@lists.sourceforge.net | 1720 | L: dri-devel@lists.sourceforge.net |
2007 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git | 1721 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git |
2008 | S: Maintained | 1722 | S: Maintained |
2009 | F: drivers/gpu/drm/ | 1723 | F: drivers/gpu/drm/ |
2010 | 1724 | ||
2011 | DSCC4 DRIVER | 1725 | DSCC4 DRIVER |
2012 | P: Francois Romieu | 1726 | M: Francois Romieu <romieu@fr.zoreil.com> |
2013 | M: romieu@fr.zoreil.com | ||
2014 | L: netdev@vger.kernel.org | 1727 | L: netdev@vger.kernel.org |
2015 | S: Maintained | 1728 | S: Maintained |
2016 | F: drivers/net/wan/dscc4.c | 1729 | F: drivers/net/wan/dscc4.c |
2017 | 1730 | ||
2018 | DZ DECSTATION DZ11 SERIAL DRIVER | 1731 | DZ DECSTATION DZ11 SERIAL DRIVER |
2019 | P: Maciej W. Rozycki | 1732 | M: "Maciej W. Rozycki" <macro@linux-mips.org> |
2020 | M: macro@linux-mips.org | ||
2021 | S: Maintained | 1733 | S: Maintained |
2022 | F: drivers/serial/dz.* | 1734 | F: drivers/serial/dz.* |
2023 | 1735 | ||
2024 | EATA-DMA SCSI DRIVER | 1736 | EATA-DMA SCSI DRIVER |
2025 | P: Michael Neuffer | 1737 | M: Michael Neuffer <mike@i-Connect.Net> |
2026 | M: mike@i-Connect.Net | ||
2027 | L: linux-eata@i-connect.net | 1738 | L: linux-eata@i-connect.net |
2028 | L: linux-scsi@vger.kernel.org | 1739 | L: linux-scsi@vger.kernel.org |
2029 | S: Maintained | 1740 | S: Maintained |
2030 | F: drivers/scsi/eata* | 1741 | F: drivers/scsi/eata* |
2031 | 1742 | ||
2032 | EATA ISA/EISA/PCI SCSI DRIVER | 1743 | EATA ISA/EISA/PCI SCSI DRIVER |
2033 | P: Dario Ballabio | 1744 | M: Dario Ballabio <ballabio_dario@emc.com> |
2034 | M: ballabio_dario@emc.com | ||
2035 | L: linux-scsi@vger.kernel.org | 1745 | L: linux-scsi@vger.kernel.org |
2036 | S: Maintained | 1746 | S: Maintained |
2037 | F: drivers/scsi/eata.c | 1747 | F: drivers/scsi/eata.c |
2038 | 1748 | ||
2039 | EATA-PIO SCSI DRIVER | 1749 | EATA-PIO SCSI DRIVER |
2040 | P: Michael Neuffer | 1750 | M: Michael Neuffer <mike@i-Connect.Net> |
2041 | M: mike@i-Connect.Net | ||
2042 | L: linux-eata@i-connect.net | 1751 | L: linux-eata@i-connect.net |
2043 | L: linux-scsi@vger.kernel.org | 1752 | L: linux-scsi@vger.kernel.org |
2044 | S: Maintained | 1753 | S: Maintained |
2045 | F: drivers/scsi/eata_pio.* | 1754 | F: drivers/scsi/eata_pio.* |
2046 | 1755 | ||
2047 | EBTABLES | 1756 | EBTABLES |
2048 | P: Bart De Schuymer | 1757 | M: Bart De Schuymer <bart.de.schuymer@pandora.be> |
2049 | M: bart.de.schuymer@pandora.be | ||
2050 | L: ebtables-user@lists.sourceforge.net | 1758 | L: ebtables-user@lists.sourceforge.net |
2051 | L: ebtables-devel@lists.sourceforge.net | 1759 | L: ebtables-devel@lists.sourceforge.net |
2052 | W: http://ebtables.sourceforge.net/ | 1760 | W: http://ebtables.sourceforge.net/ |
@@ -2055,10 +1763,8 @@ F: include/linux/netfilter_bridge/ebt_*.h | |||
2055 | F: net/bridge/netfilter/ebt*.c | 1763 | F: net/bridge/netfilter/ebt*.c |
2056 | 1764 | ||
2057 | ECRYPT FILE SYSTEM | 1765 | ECRYPT FILE SYSTEM |
2058 | P: Tyler Hicks | 1766 | M: Tyler Hicks <tyhicks@linux.vnet.ibm.com> |
2059 | M: tyhicks@linux.vnet.ibm.com | 1767 | M: Dustin Kirkland <kirkland@canonical.com> |
2060 | P: Dustin Kirkland | ||
2061 | M: kirkland@canonical.com | ||
2062 | L: ecryptfs-devel@lists.launchpad.net | 1768 | L: ecryptfs-devel@lists.launchpad.net |
2063 | W: https://launchpad.net/ecryptfs | 1769 | W: https://launchpad.net/ecryptfs |
2064 | S: Supported | 1770 | S: Supported |
@@ -2066,8 +1772,7 @@ F: Documentation/filesystems/ecryptfs.txt | |||
2066 | F: fs/ecryptfs/ | 1772 | F: fs/ecryptfs/ |
2067 | 1773 | ||
2068 | EDAC-CORE | 1774 | EDAC-CORE |
2069 | P: Doug Thompson | 1775 | M: Doug Thompson <dougthompson@xmission.com> |
2070 | M: dougthompson@xmission.com | ||
2071 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) | 1776 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) |
2072 | W: bluesmoke.sourceforge.net | 1777 | W: bluesmoke.sourceforge.net |
2073 | S: Supported | 1778 | S: Supported |
@@ -2076,94 +1781,80 @@ F: drivers/edac/edac_* | |||
2076 | F: include/linux/edac.h | 1781 | F: include/linux/edac.h |
2077 | 1782 | ||
2078 | EDAC-AMD64 | 1783 | EDAC-AMD64 |
2079 | P: Doug Thompson | 1784 | M: Doug Thompson <dougthompson@xmission.com> |
2080 | M: dougthompson@xmission.com | 1785 | M: Borislav Petkov <borislav.petkov@amd.com> |
2081 | P: Borislav Petkov | ||
2082 | M: borislav.petkov@amd.com | ||
2083 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) | 1786 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) |
2084 | W: bluesmoke.sourceforge.net | 1787 | W: bluesmoke.sourceforge.net |
2085 | S: Supported | 1788 | S: Supported |
2086 | F: drivers/edac/amd64_edac* | 1789 | F: drivers/edac/amd64_edac* |
2087 | 1790 | ||
2088 | EDAC-E752X | 1791 | EDAC-E752X |
2089 | P: Mark Gross | 1792 | M: Mark Gross <mark.gross@intel.com> |
2090 | M: mark.gross@intel.com | 1793 | M: Doug Thompson <dougthompson@xmission.com> |
2091 | P: Doug Thompson | ||
2092 | M: dougthompson@xmission.com | ||
2093 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) | 1794 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) |
2094 | W: bluesmoke.sourceforge.net | 1795 | W: bluesmoke.sourceforge.net |
2095 | S: Maintained | 1796 | S: Maintained |
2096 | F: drivers/edac/e752x_edac.c | 1797 | F: drivers/edac/e752x_edac.c |
2097 | 1798 | ||
2098 | EDAC-E7XXX | 1799 | EDAC-E7XXX |
2099 | P: Doug Thompson | 1800 | M: Doug Thompson <dougthompson@xmission.com> |
2100 | M: dougthompson@xmission.com | ||
2101 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) | 1801 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) |
2102 | W: bluesmoke.sourceforge.net | 1802 | W: bluesmoke.sourceforge.net |
2103 | S: Maintained | 1803 | S: Maintained |
2104 | F: drivers/edac/e7xxx_edac.c | 1804 | F: drivers/edac/e7xxx_edac.c |
2105 | 1805 | ||
2106 | EDAC-I82443BXGX | 1806 | EDAC-I82443BXGX |
2107 | P: Tim Small | 1807 | M: Tim Small <tim@buttersideup.com> |
2108 | M: tim@buttersideup.com | ||
2109 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) | 1808 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) |
2110 | W: bluesmoke.sourceforge.net | 1809 | W: bluesmoke.sourceforge.net |
2111 | S: Maintained | 1810 | S: Maintained |
2112 | F: drivers/edac/i82443bxgx_edac.c | 1811 | F: drivers/edac/i82443bxgx_edac.c |
2113 | 1812 | ||
2114 | EDAC-I3000 | 1813 | EDAC-I3000 |
2115 | P: Jason Uhlenkott | 1814 | M: Jason Uhlenkott <juhlenko@akamai.com> |
2116 | M: juhlenko@akamai.com | ||
2117 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) | 1815 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) |
2118 | W: bluesmoke.sourceforge.net | 1816 | W: bluesmoke.sourceforge.net |
2119 | S: Maintained | 1817 | S: Maintained |
2120 | F: drivers/edac/i3000_edac.c | 1818 | F: drivers/edac/i3000_edac.c |
2121 | 1819 | ||
2122 | EDAC-I5000 | 1820 | EDAC-I5000 |
2123 | P: Doug Thompson | 1821 | M: Doug Thompson <dougthompson@xmission.com> |
2124 | M: dougthompson@xmission.com | ||
2125 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) | 1822 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) |
2126 | W: bluesmoke.sourceforge.net | 1823 | W: bluesmoke.sourceforge.net |
2127 | S: Maintained | 1824 | S: Maintained |
2128 | F: drivers/edac/i5000_edac.c | 1825 | F: drivers/edac/i5000_edac.c |
2129 | 1826 | ||
2130 | EDAC-I5400 | 1827 | EDAC-I5400 |
2131 | P: Mauro Carvalho Chehab | 1828 | M: Mauro Carvalho Chehab <mchehab@redhat.com> |
2132 | M: mchehab@redhat.com | ||
2133 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) | 1829 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) |
2134 | W: bluesmoke.sourceforge.net | 1830 | W: bluesmoke.sourceforge.net |
2135 | S: Maintained | 1831 | S: Maintained |
2136 | F: drivers/edac/i5400_edac.c | 1832 | F: drivers/edac/i5400_edac.c |
2137 | 1833 | ||
2138 | EDAC-I82975X | 1834 | EDAC-I82975X |
2139 | P: Ranganathan Desikan | 1835 | M: Ranganathan Desikan <ravi@jetztechnologies.com> |
2140 | M: ravi@jetztechnologies.com | 1836 | M: "Arvind R." <arvind@jetztechnologies.com> |
2141 | P: Arvind R. | ||
2142 | M: arvind@jetztechnologies.com | ||
2143 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) | 1837 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) |
2144 | W: bluesmoke.sourceforge.net | 1838 | W: bluesmoke.sourceforge.net |
2145 | S: Maintained | 1839 | S: Maintained |
2146 | F: drivers/edac/i82975x_edac.c | 1840 | F: drivers/edac/i82975x_edac.c |
2147 | 1841 | ||
2148 | EDAC-PASEMI | 1842 | EDAC-PASEMI |
2149 | P: Egor Martovetsky | 1843 | M: Egor Martovetsky <egor@pasemi.com> |
2150 | M: egor@pasemi.com | ||
2151 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) | 1844 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) |
2152 | W: bluesmoke.sourceforge.net | 1845 | W: bluesmoke.sourceforge.net |
2153 | S: Maintained | 1846 | S: Maintained |
2154 | F: drivers/edac/pasemi_edac.c | 1847 | F: drivers/edac/pasemi_edac.c |
2155 | 1848 | ||
2156 | EDAC-R82600 | 1849 | EDAC-R82600 |
2157 | P: Tim Small | 1850 | M: Tim Small <tim@buttersideup.com> |
2158 | M: tim@buttersideup.com | ||
2159 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) | 1851 | L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) |
2160 | W: bluesmoke.sourceforge.net | 1852 | W: bluesmoke.sourceforge.net |
2161 | S: Maintained | 1853 | S: Maintained |
2162 | F: drivers/edac/r82600_edac.c | 1854 | F: drivers/edac/r82600_edac.c |
2163 | 1855 | ||
2164 | EEEPC LAPTOP EXTRAS DRIVER | 1856 | EEEPC LAPTOP EXTRAS DRIVER |
2165 | P: Corentin Chary | 1857 | M: Corentin Chary <corentincj@iksaif.net> |
2166 | M: corentincj@iksaif.net | ||
2167 | L: acpi4asus-user@lists.sourceforge.net | 1858 | L: acpi4asus-user@lists.sourceforge.net |
2168 | W: http://acpi4asus.sf.net | 1859 | W: http://acpi4asus.sf.net |
2169 | S: Maintained | 1860 | S: Maintained |
@@ -2175,66 +1866,54 @@ S: Orphan | |||
2175 | F: fs/efs/ | 1866 | F: fs/efs/ |
2176 | 1867 | ||
2177 | EHCA (IBM GX bus InfiniBand adapter) DRIVER | 1868 | EHCA (IBM GX bus InfiniBand adapter) DRIVER |
2178 | P: Hoang-Nam Nguyen | 1869 | M: Hoang-Nam Nguyen <hnguyen@de.ibm.com> |
2179 | M: hnguyen@de.ibm.com | 1870 | M: Christoph Raisch <raisch@de.ibm.com> |
2180 | P: Christoph Raisch | ||
2181 | M: raisch@de.ibm.com | ||
2182 | L: general@lists.openfabrics.org | 1871 | L: general@lists.openfabrics.org |
2183 | S: Supported | 1872 | S: Supported |
2184 | F: drivers/infiniband/hw/ehca/ | 1873 | F: drivers/infiniband/hw/ehca/ |
2185 | 1874 | ||
2186 | EMBEDDED LINUX | 1875 | EMBEDDED LINUX |
2187 | P: Paul Gortmaker | 1876 | M: Paul Gortmaker <paul.gortmaker@windriver.com> |
2188 | M: paul.gortmaker@windriver.com | 1877 | M: Matt Mackall <mpm@selenic.com> |
2189 | P: Matt Mackall | 1878 | M: David Woodhouse <dwmw2@infradead.org> |
2190 | M: mpm@selenic.com | ||
2191 | P: David Woodhouse | ||
2192 | M: dwmw2@infradead.org | ||
2193 | L: linux-embedded@vger.kernel.org | 1879 | L: linux-embedded@vger.kernel.org |
2194 | S: Maintained | 1880 | S: Maintained |
2195 | 1881 | ||
2196 | EMULEX LPFC FC SCSI DRIVER | 1882 | EMULEX LPFC FC SCSI DRIVER |
2197 | P: James Smart | 1883 | M: James Smart <james.smart@emulex.com> |
2198 | M: james.smart@emulex.com | ||
2199 | L: linux-scsi@vger.kernel.org | 1884 | L: linux-scsi@vger.kernel.org |
2200 | W: http://sourceforge.net/projects/lpfcxxxx | 1885 | W: http://sourceforge.net/projects/lpfcxxxx |
2201 | S: Supported | 1886 | S: Supported |
2202 | F: drivers/scsi/lpfc/ | 1887 | F: drivers/scsi/lpfc/ |
2203 | 1888 | ||
2204 | ENE CB710 FLASH CARD READER DRIVER | 1889 | ENE CB710 FLASH CARD READER DRIVER |
2205 | P: Michał Mirosław | 1890 | M: Michał Mirosław <mirq-linux@rere.qmqm.pl> |
2206 | M: mirq-linux@rere.qmqm.pl | ||
2207 | L: linux-kernel@vger.kernel.org | ||
2208 | S: Maintained | 1891 | S: Maintained |
2209 | F: drivers/misc/cb710/ | 1892 | F: drivers/misc/cb710/ |
2210 | F: drivers/mmc/host/cb710-mmc.* | 1893 | F: drivers/mmc/host/cb710-mmc.* |
2211 | F: include/linux/cb710.h | 1894 | F: include/linux/cb710.h |
2212 | 1895 | ||
2213 | EPSON 1355 FRAMEBUFFER DRIVER | 1896 | EPSON 1355 FRAMEBUFFER DRIVER |
2214 | P: Christopher Hoover | 1897 | M: Christopher Hoover <ch@murgatroid.com> |
2215 | M: ch@murgatroid.com | 1898 | M: Christopher Hoover <ch@hpl.hp.com> |
2216 | P: Christopher Hoover | ||
2217 | M: ch@hpl.hp.com | ||
2218 | S: Maintained | 1899 | S: Maintained |
2219 | F: drivers/video/epson1355fb.c | 1900 | F: drivers/video/epson1355fb.c |
2220 | 1901 | ||
2221 | EPSON S1D13XXX FRAMEBUFFER DRIVER | 1902 | EPSON S1D13XXX FRAMEBUFFER DRIVER |
2222 | P: Kristoffer Ericson | 1903 | M: Kristoffer Ericson <kristoffer.ericson@gmail.com> |
2223 | M: kristoffer.ericson@gmail.com | ||
2224 | S: Maintained | 1904 | S: Maintained |
1905 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git | ||
2225 | F: drivers/video/s1d13xxxfb.c | 1906 | F: drivers/video/s1d13xxxfb.c |
2226 | F: include/video/s1d13xxxfb.h | 1907 | F: include/video/s1d13xxxfb.h |
2227 | 1908 | ||
2228 | ETHEREXPRESS-16 NETWORK DRIVER | 1909 | ETHEREXPRESS-16 NETWORK DRIVER |
2229 | P: Philip Blundell | 1910 | M: Philip Blundell <philb@gnu.org> |
2230 | M: philb@gnu.org | ||
2231 | L: netdev@vger.kernel.org | 1911 | L: netdev@vger.kernel.org |
2232 | S: Maintained | 1912 | S: Maintained |
2233 | F: drivers/net/eexpress.* | 1913 | F: drivers/net/eexpress.* |
2234 | 1914 | ||
2235 | ETHERNET BRIDGE | 1915 | ETHERNET BRIDGE |
2236 | P: Stephen Hemminger | 1916 | M: Stephen Hemminger <shemminger@linux-foundation.org> |
2237 | M: shemminger@linux-foundation.org | ||
2238 | L: bridge@lists.linux-foundation.org | 1917 | L: bridge@lists.linux-foundation.org |
2239 | W: http://www.linux-foundation.org/en/Net:Bridge | 1918 | W: http://www.linux-foundation.org/en/Net:Bridge |
2240 | S: Maintained | 1919 | S: Maintained |
@@ -2242,8 +1921,7 @@ F: include/linux/netfilter_bridge/ | |||
2242 | F: net/bridge/ | 1921 | F: net/bridge/ |
2243 | 1922 | ||
2244 | ETHERTEAM 16I DRIVER | 1923 | ETHERTEAM 16I DRIVER |
2245 | P: Mika Kuoppala | 1924 | M: Mika Kuoppala <miku@iki.fi> |
2246 | M: miku@iki.fi | ||
2247 | S: Maintained | 1925 | S: Maintained |
2248 | F: drivers/net/eth16i.c | 1926 | F: drivers/net/eth16i.c |
2249 | 1927 | ||
@@ -2255,12 +1933,9 @@ F: fs/ext2/ | |||
2255 | F: include/linux/ext2* | 1933 | F: include/linux/ext2* |
2256 | 1934 | ||
2257 | EXT3 FILE SYSTEM | 1935 | EXT3 FILE SYSTEM |
2258 | P: Stephen Tweedie | 1936 | M: Stephen Tweedie <sct@redhat.com> |
2259 | M: sct@redhat.com | 1937 | M: Andrew Morton <akpm@linux-foundation.org> |
2260 | P: Andrew Morton | 1938 | M: Andreas Dilger <adilger@sun.com> |
2261 | M: akpm@linux-foundation.org | ||
2262 | P: Andreas Dilger | ||
2263 | M: adilger@sun.com | ||
2264 | L: linux-ext4@vger.kernel.org | 1939 | L: linux-ext4@vger.kernel.org |
2265 | S: Maintained | 1940 | S: Maintained |
2266 | F: Documentation/filesystems/ext3.txt | 1941 | F: Documentation/filesystems/ext3.txt |
@@ -2268,10 +1943,8 @@ F: fs/ext3/ | |||
2268 | F: include/linux/ext3* | 1943 | F: include/linux/ext3* |
2269 | 1944 | ||
2270 | EXT4 FILE SYSTEM | 1945 | EXT4 FILE SYSTEM |
2271 | P: Theodore Ts'o | 1946 | M: "Theodore Ts'o" <tytso@mit.edu> |
2272 | M: tytso@mit.edu | 1947 | M: Andreas Dilger <adilger@sun.com> |
2273 | P: Andreas Dilger | ||
2274 | M: adilger@sun.com | ||
2275 | L: linux-ext4@vger.kernel.org | 1948 | L: linux-ext4@vger.kernel.org |
2276 | W: http://ext4.wiki.kernel.org | 1949 | W: http://ext4.wiki.kernel.org |
2277 | S: Maintained | 1950 | S: Maintained |
@@ -2279,30 +1952,26 @@ F: Documentation/filesystems/ext4.txt | |||
2279 | F: fs/ext4/ | 1952 | F: fs/ext4/ |
2280 | 1953 | ||
2281 | F71805F HARDWARE MONITORING DRIVER | 1954 | F71805F HARDWARE MONITORING DRIVER |
2282 | P: Jean Delvare | 1955 | M: Jean Delvare <khali@linux-fr.org> |
2283 | M: khali@linux-fr.org | ||
2284 | L: lm-sensors@lm-sensors.org | 1956 | L: lm-sensors@lm-sensors.org |
2285 | S: Maintained | 1957 | S: Maintained |
2286 | F: Documentation/hwmon/f71805f | 1958 | F: Documentation/hwmon/f71805f |
2287 | F: drivers/hwmon/f71805f.c | 1959 | F: drivers/hwmon/f71805f.c |
2288 | 1960 | ||
2289 | FARSYNC SYNCHRONOUS DRIVER | 1961 | FARSYNC SYNCHRONOUS DRIVER |
2290 | P: Kevin Curtis | 1962 | M: Kevin Curtis <kevin.curtis@farsite.co.uk> |
2291 | M: kevin.curtis@farsite.co.uk | ||
2292 | W: http://www.farsite.co.uk/ | 1963 | W: http://www.farsite.co.uk/ |
2293 | S: Supported | 1964 | S: Supported |
2294 | F: drivers/net/wan/farsync.* | 1965 | F: drivers/net/wan/farsync.* |
2295 | 1966 | ||
2296 | FAULT INJECTION SUPPORT | 1967 | FAULT INJECTION SUPPORT |
2297 | P: Akinobu Mita | 1968 | M: Akinobu Mita <akinobu.mita@gmail.com> |
2298 | M: akinobu.mita@gmail.com | ||
2299 | S: Supported | 1969 | S: Supported |
2300 | F: Documentation/fault-injection/ | 1970 | F: Documentation/fault-injection/ |
2301 | F: lib/fault-inject.c | 1971 | F: lib/fault-inject.c |
2302 | 1972 | ||
2303 | FILE LOCKING (flock() and fcntl()/lockf()) | 1973 | FILE LOCKING (flock() and fcntl()/lockf()) |
2304 | P: Matthew Wilcox | 1974 | M: Matthew Wilcox <matthew@wil.cx> |
2305 | M: matthew@wil.cx | ||
2306 | L: linux-fsdevel@vger.kernel.org | 1975 | L: linux-fsdevel@vger.kernel.org |
2307 | S: Maintained | 1976 | S: Maintained |
2308 | F: include/linux/fcntl.h | 1977 | F: include/linux/fcntl.h |
@@ -2311,25 +1980,21 @@ F: fs/fcntl.c | |||
2311 | F: fs/locks.c | 1980 | F: fs/locks.c |
2312 | 1981 | ||
2313 | FILESYSTEMS (VFS and infrastructure) | 1982 | FILESYSTEMS (VFS and infrastructure) |
2314 | P: Alexander Viro | 1983 | M: Alexander Viro <viro@zeniv.linux.org.uk> |
2315 | M: viro@zeniv.linux.org.uk | ||
2316 | L: linux-fsdevel@vger.kernel.org | 1984 | L: linux-fsdevel@vger.kernel.org |
2317 | S: Maintained | 1985 | S: Maintained |
2318 | F: fs/* | 1986 | F: fs/* |
2319 | 1987 | ||
2320 | FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER | 1988 | FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER |
2321 | P: Riku Voipio | 1989 | M: Riku Voipio <riku.vipio@iki.fi> |
2322 | M: riku.vipio@iki.fi | ||
2323 | L: lm-sensors@lm-sensors.org | 1990 | L: lm-sensors@lm-sensors.org |
2324 | S: Maintained | 1991 | S: Maintained |
2325 | F: drivers/hwmon/f75375s.c | 1992 | F: drivers/hwmon/f75375s.c |
2326 | F: include/linux/f75375s.h | 1993 | F: include/linux/f75375s.h |
2327 | 1994 | ||
2328 | FIREWIRE SUBSYSTEM | 1995 | FIREWIRE SUBSYSTEM |
2329 | P: Kristian Hoegsberg | 1996 | M: Kristian Hoegsberg <krh@redhat.com> |
2330 | M: krh@redhat.com | 1997 | M: Stefan Richter <stefanr@s5r6.in-berlin.de> |
2331 | P: Stefan Richter | ||
2332 | M: stefanr@s5r6.in-berlin.de | ||
2333 | L: linux1394-devel@lists.sourceforge.net | 1998 | L: linux1394-devel@lists.sourceforge.net |
2334 | W: http://www.linux1394.org/ | 1999 | W: http://www.linux1394.org/ |
2335 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git | 2000 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git |
@@ -2344,15 +2009,13 @@ F: drivers/base/firmware*.c | |||
2344 | F: include/linux/firmware.h | 2009 | F: include/linux/firmware.h |
2345 | 2010 | ||
2346 | FPU EMULATOR | 2011 | FPU EMULATOR |
2347 | P: Bill Metzenthen | 2012 | M: Bill Metzenthen <billm@melbpc.org.au> |
2348 | M: billm@melbpc.org.au | ||
2349 | W: http://floatingpoint.sourceforge.net/emulator/index.html | 2013 | W: http://floatingpoint.sourceforge.net/emulator/index.html |
2350 | S: Maintained | 2014 | S: Maintained |
2351 | F: arch/x86/math-emu/ | 2015 | F: arch/x86/math-emu/ |
2352 | 2016 | ||
2353 | FRAME RELAY DLCI/FRAD (Sangoma drivers too) | 2017 | FRAME RELAY DLCI/FRAD (Sangoma drivers too) |
2354 | P: Mike McLagan | 2018 | M: Mike McLagan <mike.mclagan@linux.org> |
2355 | M: mike.mclagan@linux.org | ||
2356 | L: netdev@vger.kernel.org | 2019 | L: netdev@vger.kernel.org |
2357 | S: Maintained | 2020 | S: Maintained |
2358 | F: drivers/net/wan/dlci.c | 2021 | F: drivers/net/wan/dlci.c |
@@ -2367,25 +2030,21 @@ F: drivers/video/fb* | |||
2367 | F: include/linux/fb.h | 2030 | F: include/linux/fb.h |
2368 | 2031 | ||
2369 | FREESCALE DMA DRIVER | 2032 | FREESCALE DMA DRIVER |
2370 | P: Li Yang | 2033 | M: Li Yang <leoli@freescale.com> |
2371 | M: leoli@freescale.com | 2034 | M: Zhang Wei <zw@zh-kernel.org> |
2372 | P: Zhang Wei | ||
2373 | M: zw@zh-kernel.org | ||
2374 | L: linuxppc-dev@ozlabs.org | 2035 | L: linuxppc-dev@ozlabs.org |
2375 | S: Maintained | 2036 | S: Maintained |
2376 | F: drivers/dma/fsldma.* | 2037 | F: drivers/dma/fsldma.* |
2377 | 2038 | ||
2378 | FREESCALE I2C CPM DRIVER | 2039 | FREESCALE I2C CPM DRIVER |
2379 | P: Jochen Friedrich | 2040 | M: Jochen Friedrich <jochen@scram.de> |
2380 | M: jochen@scram.de | ||
2381 | L: linuxppc-dev@ozlabs.org | 2041 | L: linuxppc-dev@ozlabs.org |
2382 | L: linux-i2c@vger.kernel.org | 2042 | L: linux-i2c@vger.kernel.org |
2383 | S: Maintained | 2043 | S: Maintained |
2384 | F: drivers/i2c/busses/i2c-cpm.c | 2044 | F: drivers/i2c/busses/i2c-cpm.c |
2385 | 2045 | ||
2386 | FREESCALE IMX / MXC FRAMEBUFFER DRIVER | 2046 | FREESCALE IMX / MXC FRAMEBUFFER DRIVER |
2387 | P: Sascha Hauer | 2047 | M: Sascha Hauer <kernel@pengutronix.de> |
2388 | M: kernel@pengutronix.de | ||
2389 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) | 2048 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) |
2390 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 2049 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
2391 | S: Maintained | 2050 | S: Maintained |
@@ -2393,10 +2052,8 @@ F: arch/arm/plat-mxc/include/mach/imxfb.h | |||
2393 | F: drivers/video/imxfb.c | 2052 | F: drivers/video/imxfb.c |
2394 | 2053 | ||
2395 | FREESCALE SOC FS_ENET DRIVER | 2054 | FREESCALE SOC FS_ENET DRIVER |
2396 | P: Pantelis Antoniou | 2055 | M: Pantelis Antoniou <pantelis.antoniou@gmail.com> |
2397 | M: pantelis.antoniou@gmail.com | 2056 | M: Vitaly Bordug <vbordug@ru.mvista.com> |
2398 | P: Vitaly Bordug | ||
2399 | M: vbordug@ru.mvista.com | ||
2400 | L: linuxppc-dev@ozlabs.org | 2057 | L: linuxppc-dev@ozlabs.org |
2401 | L: netdev@vger.kernel.org | 2058 | L: netdev@vger.kernel.org |
2402 | S: Maintained | 2059 | S: Maintained |
@@ -2404,39 +2061,34 @@ F: drivers/net/fs_enet/ | |||
2404 | F: include/linux/fs_enet_pd.h | 2061 | F: include/linux/fs_enet_pd.h |
2405 | 2062 | ||
2406 | FREESCALE QUICC ENGINE LIBRARY | 2063 | FREESCALE QUICC ENGINE LIBRARY |
2407 | P: Timur Tabi | 2064 | M: Timur Tabi <timur@freescale.com> |
2408 | M: timur@freescale.com | ||
2409 | L: linuxppc-dev@ozlabs.org | 2065 | L: linuxppc-dev@ozlabs.org |
2410 | S: Supported | 2066 | S: Supported |
2411 | F: arch/powerpc/sysdev/qe_lib/ | 2067 | F: arch/powerpc/sysdev/qe_lib/ |
2412 | F: arch/powerpc/include/asm/*qe.h | 2068 | F: arch/powerpc/include/asm/*qe.h |
2413 | 2069 | ||
2414 | FREESCALE HIGHSPEED USB DEVICE DRIVER | 2070 | FREESCALE HIGHSPEED USB DEVICE DRIVER |
2415 | P: Li Yang | 2071 | M: Li Yang <leoli@freescale.com> |
2416 | M: leoli@freescale.com | ||
2417 | L: linux-usb@vger.kernel.org | 2072 | L: linux-usb@vger.kernel.org |
2418 | L: linuxppc-dev@ozlabs.org | 2073 | L: linuxppc-dev@ozlabs.org |
2419 | S: Maintained | 2074 | S: Maintained |
2420 | F: drivers/usb/gadget/fsl_usb2_udc.c | 2075 | F: drivers/usb/gadget/fsl_usb2_udc.c |
2421 | 2076 | ||
2422 | FREESCALE QUICC ENGINE UCC ETHERNET DRIVER | 2077 | FREESCALE QUICC ENGINE UCC ETHERNET DRIVER |
2423 | P: Li Yang | 2078 | M: Li Yang <leoli@freescale.com> |
2424 | M: leoli@freescale.com | ||
2425 | L: netdev@vger.kernel.org | 2079 | L: netdev@vger.kernel.org |
2426 | L: linuxppc-dev@ozlabs.org | 2080 | L: linuxppc-dev@ozlabs.org |
2427 | S: Maintained | 2081 | S: Maintained |
2428 | F: drivers/net/ucc_geth* | 2082 | F: drivers/net/ucc_geth* |
2429 | 2083 | ||
2430 | FREESCALE QUICC ENGINE UCC UART DRIVER | 2084 | FREESCALE QUICC ENGINE UCC UART DRIVER |
2431 | P: Timur Tabi | 2085 | M: Timur Tabi <timur@freescale.com> |
2432 | M: timur@freescale.com | ||
2433 | L: linuxppc-dev@ozlabs.org | 2086 | L: linuxppc-dev@ozlabs.org |
2434 | S: Supported | 2087 | S: Supported |
2435 | F: drivers/serial/ucc_uart.c | 2088 | F: drivers/serial/ucc_uart.c |
2436 | 2089 | ||
2437 | FREESCALE SOC SOUND DRIVERS | 2090 | FREESCALE SOC SOUND DRIVERS |
2438 | P: Timur Tabi | 2091 | M: Timur Tabi <timur@freescale.com> |
2439 | M: timur@freescale.com | ||
2440 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | 2092 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) |
2441 | L: linuxppc-dev@ozlabs.org | 2093 | L: linuxppc-dev@ozlabs.org |
2442 | S: Supported | 2094 | S: Supported |
@@ -2444,17 +2096,14 @@ F: sound/soc/fsl/fsl* | |||
2444 | F: sound/soc/fsl/mpc8610_hpcd.c | 2096 | F: sound/soc/fsl/mpc8610_hpcd.c |
2445 | 2097 | ||
2446 | FREEVXFS FILESYSTEM | 2098 | FREEVXFS FILESYSTEM |
2447 | P: Christoph Hellwig | 2099 | M: Christoph Hellwig <hch@infradead.org> |
2448 | M: hch@infradead.org | ||
2449 | W: ftp://ftp.openlinux.org/pub/people/hch/vxfs | 2100 | W: ftp://ftp.openlinux.org/pub/people/hch/vxfs |
2450 | S: Maintained | 2101 | S: Maintained |
2451 | F: fs/freevxfs/ | 2102 | F: fs/freevxfs/ |
2452 | 2103 | ||
2453 | FREEZER | 2104 | FREEZER |
2454 | P: Pavel Machek | 2105 | M: Pavel Machek <pavel@ucw.cz> |
2455 | M: pavel@ucw.cz | 2106 | M: "Rafael J. Wysocki" <rjw@sisk.pl> |
2456 | P: Rafael J. Wysocki | ||
2457 | M: rjw@sisk.pl | ||
2458 | L: linux-pm@lists.linux-foundation.org | 2107 | L: linux-pm@lists.linux-foundation.org |
2459 | S: Supported | 2108 | S: Supported |
2460 | F: Documentation/power/freezing-of-tasks.txt | 2109 | F: Documentation/power/freezing-of-tasks.txt |
@@ -2462,8 +2111,7 @@ F: include/linux/freezer.h | |||
2462 | F: kernel/freezer.c | 2111 | F: kernel/freezer.c |
2463 | 2112 | ||
2464 | FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS | 2113 | FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS |
2465 | P: David Howells | 2114 | M: David Howells <dhowells@redhat.com> |
2466 | M: dhowells@redhat.com | ||
2467 | L: linux-cachefs@redhat.com | 2115 | L: linux-cachefs@redhat.com |
2468 | S: Supported | 2116 | S: Supported |
2469 | F: Documentation/filesystems/caching/ | 2117 | F: Documentation/filesystems/caching/ |
@@ -2471,8 +2119,7 @@ F: fs/fscache/ | |||
2471 | F: include/linux/fscache*.h | 2119 | F: include/linux/fscache*.h |
2472 | 2120 | ||
2473 | FTRACE | 2121 | FTRACE |
2474 | P: Steven Rostedt | 2122 | M: Steven Rostedt <rostedt@goodmis.org> |
2475 | M: rostedt@goodmis.org | ||
2476 | S: Maintained | 2123 | S: Maintained |
2477 | F: Documentation/trace/ftrace.txt | 2124 | F: Documentation/trace/ftrace.txt |
2478 | F: arch/*/*/*/ftrace.h | 2125 | F: arch/*/*/*/ftrace.h |
@@ -2481,21 +2128,18 @@ F: include/*/ftrace.h | |||
2481 | F: kernel/trace/ | 2128 | F: kernel/trace/ |
2482 | 2129 | ||
2483 | FUJITSU FR-V (FRV) PORT | 2130 | FUJITSU FR-V (FRV) PORT |
2484 | P: David Howells | 2131 | M: David Howells <dhowells@redhat.com> |
2485 | M: dhowells@redhat.com | ||
2486 | S: Maintained | 2132 | S: Maintained |
2487 | F: arch/frv/ | 2133 | F: arch/frv/ |
2488 | 2134 | ||
2489 | FUJITSU LAPTOP EXTRAS | 2135 | FUJITSU LAPTOP EXTRAS |
2490 | P: Jonathan Woithe | 2136 | M: Jonathan Woithe <jwoithe@physics.adelaide.edu.au> |
2491 | M: jwoithe@physics.adelaide.edu.au | ||
2492 | L: linux-acpi@vger.kernel.org | 2137 | L: linux-acpi@vger.kernel.org |
2493 | S: Maintained | 2138 | S: Maintained |
2494 | F: drivers/platform/x86/fujitsu-laptop.c | 2139 | F: drivers/platform/x86/fujitsu-laptop.c |
2495 | 2140 | ||
2496 | FUSE: FILESYSTEM IN USERSPACE | 2141 | FUSE: FILESYSTEM IN USERSPACE |
2497 | P: Miklos Szeredi | 2142 | M: Miklos Szeredi <miklos@szeredi.hu> |
2498 | M: miklos@szeredi.hu | ||
2499 | L: fuse-devel@lists.sourceforge.net | 2143 | L: fuse-devel@lists.sourceforge.net |
2500 | W: http://fuse.sourceforge.net/ | 2144 | W: http://fuse.sourceforge.net/ |
2501 | S: Maintained | 2145 | S: Maintained |
@@ -2503,30 +2147,26 @@ F: fs/fuse/ | |||
2503 | F: include/linux/fuse.h | 2147 | F: include/linux/fuse.h |
2504 | 2148 | ||
2505 | FUTURE DOMAIN TMC-16x0 SCSI DRIVER (16-bit) | 2149 | FUTURE DOMAIN TMC-16x0 SCSI DRIVER (16-bit) |
2506 | P: Rik Faith | 2150 | M: Rik Faith <faith@cs.unc.edu> |
2507 | M: faith@cs.unc.edu | ||
2508 | L: linux-scsi@vger.kernel.org | 2151 | L: linux-scsi@vger.kernel.org |
2509 | S: Odd Fixes (e.g., new signatures) | 2152 | S: Odd Fixes (e.g., new signatures) |
2510 | F: drivers/scsi/fdomain.* | 2153 | F: drivers/scsi/fdomain.* |
2511 | 2154 | ||
2512 | GDT SCSI DISK ARRAY CONTROLLER DRIVER | 2155 | GDT SCSI DISK ARRAY CONTROLLER DRIVER |
2513 | P: Achim Leubner | 2156 | M: Achim Leubner <achim_leubner@adaptec.com> |
2514 | M: achim_leubner@adaptec.com | ||
2515 | L: linux-scsi@vger.kernel.org | 2157 | L: linux-scsi@vger.kernel.org |
2516 | W: http://www.icp-vortex.com/ | 2158 | W: http://www.icp-vortex.com/ |
2517 | S: Supported | 2159 | S: Supported |
2518 | F: drivers/scsi/gdt* | 2160 | F: drivers/scsi/gdt* |
2519 | 2161 | ||
2520 | GENERIC GPIO I2C DRIVER | 2162 | GENERIC GPIO I2C DRIVER |
2521 | P: Haavard Skinnemoen | 2163 | M: Haavard Skinnemoen <hskinnemoen@atmel.com> |
2522 | M: hskinnemoen@atmel.com | ||
2523 | S: Supported | 2164 | S: Supported |
2524 | F: drivers/i2c/busses/i2c-gpio.c | 2165 | F: drivers/i2c/busses/i2c-gpio.c |
2525 | F: include/linux/i2c-gpio.h | 2166 | F: include/linux/i2c-gpio.h |
2526 | 2167 | ||
2527 | GENERIC HDLC (WAN) DRIVERS | 2168 | GENERIC HDLC (WAN) DRIVERS |
2528 | P: Krzysztof Halasa | 2169 | M: Krzysztof Halasa <khc@pm.waw.pl> |
2529 | M: khc@pm.waw.pl | ||
2530 | W: http://www.kernel.org/pub/linux/utils/net/hdlc/ | 2170 | W: http://www.kernel.org/pub/linux/utils/net/hdlc/ |
2531 | S: Maintained | 2171 | S: Maintained |
2532 | F: drivers/net/wan/c101.c | 2172 | F: drivers/net/wan/c101.c |
@@ -2538,16 +2178,14 @@ F: drivers/net/wan/pci200syn.c | |||
2538 | F: drivers/net/wan/wanxl* | 2178 | F: drivers/net/wan/wanxl* |
2539 | 2179 | ||
2540 | GENERIC INCLUDE/ASM HEADER FILES | 2180 | GENERIC INCLUDE/ASM HEADER FILES |
2541 | P: Arnd Bergmann | 2181 | M: Arnd Bergmann <arnd@arndb.de> |
2542 | M: arnd@arndb.de | ||
2543 | L: linux-arch@vger.kernel.org | 2182 | L: linux-arch@vger.kernel.org |
2544 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic.git | 2183 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic.git |
2545 | S: Maintained | 2184 | S: Maintained |
2546 | F: include/asm-generic | 2185 | F: include/asm-generic |
2547 | 2186 | ||
2548 | GFS2 FILE SYSTEM | 2187 | GFS2 FILE SYSTEM |
2549 | P: Steven Whitehouse | 2188 | M: Steven Whitehouse <swhiteho@redhat.com> |
2550 | M: swhiteho@redhat.com | ||
2551 | L: cluster-devel@redhat.com | 2189 | L: cluster-devel@redhat.com |
2552 | W: http://sources.redhat.com/cluster/ | 2190 | W: http://sources.redhat.com/cluster/ |
2553 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-fixes.git | 2191 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-fixes.git |
@@ -2558,10 +2196,8 @@ F: fs/gfs2/ | |||
2558 | F: include/linux/gfs2_ondisk.h | 2196 | F: include/linux/gfs2_ondisk.h |
2559 | 2197 | ||
2560 | GIGASET ISDN DRIVERS | 2198 | GIGASET ISDN DRIVERS |
2561 | P: Hansjoerg Lipp | 2199 | M: Hansjoerg Lipp <hjlipp@web.de> |
2562 | M: hjlipp@web.de | 2200 | M: Tilman Schmidt <tilman@imap.cc> |
2563 | P: Tilman Schmidt | ||
2564 | M: tilman@imap.cc | ||
2565 | L: gigaset307x-common@lists.sourceforge.net | 2201 | L: gigaset307x-common@lists.sourceforge.net |
2566 | W: http://gigaset307x.sourceforge.net/ | 2202 | W: http://gigaset307x.sourceforge.net/ |
2567 | S: Maintained | 2203 | S: Maintained |
@@ -2570,8 +2206,7 @@ F: drivers/isdn/gigaset/ | |||
2570 | F: include/linux/gigaset_dev.h | 2206 | F: include/linux/gigaset_dev.h |
2571 | 2207 | ||
2572 | HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER | 2208 | HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER |
2573 | P: Frank Seidel | 2209 | M: Frank Seidel <frank@f-seidel.de> |
2574 | M: frank@f-seidel.de | ||
2575 | L: lm-sensors@lm-sensors.org | 2210 | L: lm-sensors@lm-sensors.org |
2576 | W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/ | 2211 | W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/ |
2577 | S: Maintained | 2212 | S: Maintained |
@@ -2583,40 +2218,35 @@ S: Odd Fixes | |||
2583 | F: drivers/char/hvc_* | 2218 | F: drivers/char/hvc_* |
2584 | 2219 | ||
2585 | GSPCA FINEPIX SUBDRIVER | 2220 | GSPCA FINEPIX SUBDRIVER |
2586 | P: Frank Zago | 2221 | M: Frank Zago <frank@zago.net> |
2587 | M: frank@zago.net | ||
2588 | L: linux-media@vger.kernel.org | 2222 | L: linux-media@vger.kernel.org |
2589 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git | 2223 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git |
2590 | S: Maintained | 2224 | S: Maintained |
2591 | F: drivers/media/video/gspca/finepix.c | 2225 | F: drivers/media/video/gspca/finepix.c |
2592 | 2226 | ||
2593 | GSPCA M5602 SUBDRIVER | 2227 | GSPCA M5602 SUBDRIVER |
2594 | P: Erik Andren | 2228 | M: Erik Andren <erik.andren@gmail.com> |
2595 | M: erik.andren@gmail.com | ||
2596 | L: linux-media@vger.kernel.org | 2229 | L: linux-media@vger.kernel.org |
2597 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git | 2230 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git |
2598 | S: Maintained | 2231 | S: Maintained |
2599 | F: drivers/media/video/gspca/m5602/ | 2232 | F: drivers/media/video/gspca/m5602/ |
2600 | 2233 | ||
2601 | GSPCA PAC207 SONIXB SUBDRIVER | 2234 | GSPCA PAC207 SONIXB SUBDRIVER |
2602 | P: Hans de Goede | 2235 | M: Hans de Goede <hdegoede@redhat.com> |
2603 | M: hdegoede@redhat.com | ||
2604 | L: linux-media@vger.kernel.org | 2236 | L: linux-media@vger.kernel.org |
2605 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git | 2237 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git |
2606 | S: Maintained | 2238 | S: Maintained |
2607 | F: drivers/media/video/gspca/pac207.c | 2239 | F: drivers/media/video/gspca/pac207.c |
2608 | 2240 | ||
2609 | GSPCA T613 SUBDRIVER | 2241 | GSPCA T613 SUBDRIVER |
2610 | P: Leandro Costantino | 2242 | M: Leandro Costantino <lcostantino@gmail.com> |
2611 | M: lcostantino@gmail.com | ||
2612 | L: linux-media@vger.kernel.org | 2243 | L: linux-media@vger.kernel.org |
2613 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git | 2244 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git |
2614 | S: Maintained | 2245 | S: Maintained |
2615 | F: drivers/media/video/gspca/t613.c | 2246 | F: drivers/media/video/gspca/t613.c |
2616 | 2247 | ||
2617 | GSPCA USB WEBCAM DRIVER | 2248 | GSPCA USB WEBCAM DRIVER |
2618 | P: Jean-Francois Moine | 2249 | M: Jean-Francois Moine <moinejf@free.fr> |
2619 | M: moinejf@free.fr | ||
2620 | W: http://moinejf.free.fr | 2250 | W: http://moinejf.free.fr |
2621 | L: linux-media@vger.kernel.org | 2251 | L: linux-media@vger.kernel.org |
2622 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git | 2252 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git |
@@ -2636,31 +2266,27 @@ F: drivers/char/hw_random/ | |||
2636 | F: include/linux/hw_random.h | 2266 | F: include/linux/hw_random.h |
2637 | 2267 | ||
2638 | HARMONY SOUND DRIVER | 2268 | HARMONY SOUND DRIVER |
2639 | P: Kyle McMartin | 2269 | M: Kyle McMartin <kyle@mcmartin.ca> |
2640 | M: kyle@mcmartin.ca | ||
2641 | L: linux-parisc@vger.kernel.org | 2270 | L: linux-parisc@vger.kernel.org |
2642 | S: Maintained | 2271 | S: Maintained |
2643 | F: sound/parisc/harmony.* | 2272 | F: sound/parisc/harmony.* |
2644 | 2273 | ||
2645 | HAYES ESP SERIAL DRIVER | 2274 | HAYES ESP SERIAL DRIVER |
2646 | P: Andrew J. Robinson | 2275 | M: "Andrew J. Robinson" <arobinso@nyx.net> |
2647 | M: arobinso@nyx.net | ||
2648 | W: http://www.nyx.net/~arobinso | 2276 | W: http://www.nyx.net/~arobinso |
2649 | S: Maintained | 2277 | S: Maintained |
2650 | F: Documentation/serial/hayes-esp.txt | 2278 | F: Documentation/serial/hayes-esp.txt |
2651 | F: drivers/char/esp.c | 2279 | F: drivers/char/esp.c |
2652 | 2280 | ||
2653 | HEWLETT-PACKARD SMART2 RAID DRIVER | 2281 | HEWLETT-PACKARD SMART2 RAID DRIVER |
2654 | P: Chirag Kantharia | 2282 | M: Chirag Kantharia <chirag.kantharia@hp.com> |
2655 | M: chirag.kantharia@hp.com | ||
2656 | L: iss_storagedev@hp.com | 2283 | L: iss_storagedev@hp.com |
2657 | S: Maintained | 2284 | S: Maintained |
2658 | F: Documentation/blockdev/cpqarray.txt | 2285 | F: Documentation/blockdev/cpqarray.txt |
2659 | F: drivers/block/cpqarray.* | 2286 | F: drivers/block/cpqarray.* |
2660 | 2287 | ||
2661 | HEWLETT-PACKARD SMART CISS RAID DRIVER (cciss) | 2288 | HEWLETT-PACKARD SMART CISS RAID DRIVER (cciss) |
2662 | P: Mike Miller | 2289 | M: Mike Miller <mike.miller@hp.com> |
2663 | M: mike.miller@hp.com | ||
2664 | L: iss_storagedev@hp.com | 2290 | L: iss_storagedev@hp.com |
2665 | S: Supported | 2291 | S: Supported |
2666 | F: Documentation/blockdev/cciss.txt | 2292 | F: Documentation/blockdev/cciss.txt |
@@ -2668,25 +2294,21 @@ F: drivers/block/cciss* | |||
2668 | F: include/linux/cciss_ioctl.h | 2294 | F: include/linux/cciss_ioctl.h |
2669 | 2295 | ||
2670 | HFS FILESYSTEM | 2296 | HFS FILESYSTEM |
2671 | P: Roman Zippel | 2297 | M: Roman Zippel <zippel@linux-m68k.org> |
2672 | M: zippel@linux-m68k.org | ||
2673 | S: Maintained | 2298 | S: Maintained |
2674 | F: Documentation/filesystems/hfs.txt | 2299 | F: Documentation/filesystems/hfs.txt |
2675 | F: fs/hfs/ | 2300 | F: fs/hfs/ |
2676 | 2301 | ||
2677 | HGA FRAMEBUFFER DRIVER | 2302 | HGA FRAMEBUFFER DRIVER |
2678 | P: Ferenc Bakonyi | 2303 | M: Ferenc Bakonyi <fero@drama.obuda.kando.hu> |
2679 | M: fero@drama.obuda.kando.hu | ||
2680 | L: linux-nvidia@lists.surfsouth.com | 2304 | L: linux-nvidia@lists.surfsouth.com |
2681 | W: http://drama.obuda.kando.hu/~fero/cgi-bin/hgafb.shtml | 2305 | W: http://drama.obuda.kando.hu/~fero/cgi-bin/hgafb.shtml |
2682 | S: Maintained | 2306 | S: Maintained |
2683 | F: drivers/video/hgafb.c | 2307 | F: drivers/video/hgafb.c |
2684 | 2308 | ||
2685 | HIBERNATION (aka Software Suspend, aka swsusp) | 2309 | HIBERNATION (aka Software Suspend, aka swsusp) |
2686 | P: Pavel Machek | 2310 | M: Pavel Machek <pavel@ucw.cz> |
2687 | M: pavel@ucw.cz | 2311 | M: "Rafael J. Wysocki" <rjw@sisk.pl> |
2688 | P: Rafael J. Wysocki | ||
2689 | M: rjw@sisk.pl | ||
2690 | L: linux-pm@lists.linux-foundation.org | 2312 | L: linux-pm@lists.linux-foundation.org |
2691 | S: Supported | 2313 | S: Supported |
2692 | F: arch/x86/power/ | 2314 | F: arch/x86/power/ |
@@ -2698,8 +2320,7 @@ F: include/linux/pm.h | |||
2698 | F: arch/*/include/asm/suspend*.h | 2320 | F: arch/*/include/asm/suspend*.h |
2699 | 2321 | ||
2700 | HID CORE LAYER | 2322 | HID CORE LAYER |
2701 | P: Jiri Kosina | 2323 | M: Jiri Kosina <jkosina@suse.cz> |
2702 | M: jkosina@suse.cz | ||
2703 | L: linux-input@vger.kernel.org | 2324 | L: linux-input@vger.kernel.org |
2704 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git | 2325 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git |
2705 | S: Maintained | 2326 | S: Maintained |
@@ -2707,16 +2328,14 @@ F: drivers/hid/ | |||
2707 | F: include/linux/hid* | 2328 | F: include/linux/hid* |
2708 | 2329 | ||
2709 | HIGH-RESOLUTION TIMERS, CLOCKEVENTS, DYNTICKS | 2330 | HIGH-RESOLUTION TIMERS, CLOCKEVENTS, DYNTICKS |
2710 | P: Thomas Gleixner | 2331 | M: Thomas Gleixner <tglx@linutronix.de> |
2711 | M: tglx@linutronix.de | ||
2712 | S: Maintained | 2332 | S: Maintained |
2713 | F: Documentation/timers/ | 2333 | F: Documentation/timers/ |
2714 | F: kernel/hrtimer.c | 2334 | F: kernel/hrtimer.c |
2715 | F: include/linux/hrtimer.h | 2335 | F: include/linux/hrtimer.h |
2716 | 2336 | ||
2717 | HIGH-SPEED SCC DRIVER FOR AX.25 | 2337 | HIGH-SPEED SCC DRIVER FOR AX.25 |
2718 | P: Klaus Kudielka | 2338 | M: Klaus Kudielka <klaus.kudielka@ieee.org> |
2719 | M: klaus.kudielka@ieee.org | ||
2720 | L: linux-hams@vger.kernel.org | 2339 | L: linux-hams@vger.kernel.org |
2721 | W: http://www.nt.tuwien.ac.at/~kkudielk/Linux/ | 2340 | W: http://www.nt.tuwien.ac.at/~kkudielk/Linux/ |
2722 | S: Maintained | 2341 | S: Maintained |
@@ -2724,16 +2343,14 @@ F: drivers/net/hamradio/dmascc.c | |||
2724 | F: drivers/net/hamradio/scc.c | 2343 | F: drivers/net/hamradio/scc.c |
2725 | 2344 | ||
2726 | HIGHPOINT ROCKETRAID 3xxx RAID DRIVER | 2345 | HIGHPOINT ROCKETRAID 3xxx RAID DRIVER |
2727 | P: HighPoint Linux Team | 2346 | M: HighPoint Linux Team <linux@highpoint-tech.com> |
2728 | M: linux@highpoint-tech.com | ||
2729 | W: http://www.highpoint-tech.com | 2347 | W: http://www.highpoint-tech.com |
2730 | S: Supported | 2348 | S: Supported |
2731 | F: Documentation/scsi/hptiop.txt | 2349 | F: Documentation/scsi/hptiop.txt |
2732 | F: drivers/scsi/hptiop.c | 2350 | F: drivers/scsi/hptiop.c |
2733 | 2351 | ||
2734 | HIPPI | 2352 | HIPPI |
2735 | P: Jes Sorensen | 2353 | M: Jes Sorensen <jes@trained-monkey.org> |
2736 | M: jes@trained-monkey.org | ||
2737 | L: linux-hippi@sunsite.dk | 2354 | L: linux-hippi@sunsite.dk |
2738 | S: Maintained | 2355 | S: Maintained |
2739 | F: include/linux/hippidevice.h | 2356 | F: include/linux/hippidevice.h |
@@ -2741,8 +2358,7 @@ F: include/linux/if_hippi.h | |||
2741 | F: net/802/hippi.c | 2358 | F: net/802/hippi.c |
2742 | 2359 | ||
2743 | HOST AP DRIVER | 2360 | HOST AP DRIVER |
2744 | P: Jouni Malinen | 2361 | M: Jouni Malinen <j@w1.fi> |
2745 | M: j@w1.fi | ||
2746 | L: hostap@shmoo.com (subscribers-only) | 2362 | L: hostap@shmoo.com (subscribers-only) |
2747 | L: linux-wireless@vger.kernel.org | 2363 | L: linux-wireless@vger.kernel.org |
2748 | W: http://hostap.epitest.fi/ | 2364 | W: http://hostap.epitest.fi/ |
@@ -2750,82 +2366,69 @@ S: Maintained | |||
2750 | F: drivers/net/wireless/hostap/ | 2366 | F: drivers/net/wireless/hostap/ |
2751 | 2367 | ||
2752 | HP COMPAQ TC1100 TABLET WMI EXTRAS DRIVER | 2368 | HP COMPAQ TC1100 TABLET WMI EXTRAS DRIVER |
2753 | P: Carlos Corbacho | 2369 | M: Carlos Corbacho <carlos@strangeworlds.co.uk> |
2754 | M: carlos@strangeworlds.co.uk | ||
2755 | S: Odd Fixes | 2370 | S: Odd Fixes |
2756 | F: drivers/platform/x86/tc1100-wmi.c | 2371 | F: drivers/platform/x86/tc1100-wmi.c |
2757 | 2372 | ||
2758 | HP100: Driver for HP 10/100 Mbit/s Voice Grade Network Adapter Series | 2373 | HP100: Driver for HP 10/100 Mbit/s Voice Grade Network Adapter Series |
2759 | P: Jaroslav Kysela | 2374 | M: Jaroslav Kysela <perex@perex.cz> |
2760 | M: perex@perex.cz | ||
2761 | S: Maintained | 2375 | S: Maintained |
2762 | F: drivers/net/hp100.* | 2376 | F: drivers/net/hp100.* |
2763 | 2377 | ||
2764 | HPET: High Precision Event Timers driver | 2378 | HPET: High Precision Event Timers driver |
2765 | P: Clemens Ladisch | 2379 | M: Clemens Ladisch <clemens@ladisch.de> |
2766 | M: clemens@ladisch.de | ||
2767 | S: Maintained | 2380 | S: Maintained |
2768 | F: Documentation/timers/hpet.txt | 2381 | F: Documentation/timers/hpet.txt |
2769 | F: drivers/char/hpet.c | 2382 | F: drivers/char/hpet.c |
2770 | F: include/linux/hpet.h | 2383 | F: include/linux/hpet.h |
2771 | 2384 | ||
2772 | HPET: i386 | 2385 | HPET: i386 |
2773 | P: Venkatesh Pallipadi (Venki) | 2386 | M: "Venkatesh Pallipadi (Venki)" <venkatesh.pallipadi@intel.com> |
2774 | M: venkatesh.pallipadi@intel.com | ||
2775 | S: Maintained | 2387 | S: Maintained |
2776 | F: arch/x86/kernel/hpet.c | 2388 | F: arch/x86/kernel/hpet.c |
2777 | F: arch/x86/include/asm/hpet.h | 2389 | F: arch/x86/include/asm/hpet.h |
2778 | 2390 | ||
2779 | HPET: x86_64 | 2391 | HPET: x86_64 |
2780 | P: Vojtech Pavlik | 2392 | M: Vojtech Pavlik <vojtech@suse.cz> |
2781 | M: vojtech@suse.cz | ||
2782 | S: Maintained | 2393 | S: Maintained |
2783 | 2394 | ||
2784 | HPET: ACPI | 2395 | HPET: ACPI |
2785 | P: Bob Picco | 2396 | M: Bob Picco <bob.picco@hp.com> |
2786 | M: bob.picco@hp.com | ||
2787 | S: Maintained | 2397 | S: Maintained |
2788 | F: drivers/char/hpet.c | 2398 | F: drivers/char/hpet.c |
2789 | 2399 | ||
2790 | HPFS FILESYSTEM | 2400 | HPFS FILESYSTEM |
2791 | P: Mikulas Patocka | 2401 | M: Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz> |
2792 | M: mikulas@artax.karlin.mff.cuni.cz | ||
2793 | W: http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi | 2402 | W: http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi |
2794 | S: Maintained | 2403 | S: Maintained |
2795 | F: fs/hpfs/ | 2404 | F: fs/hpfs/ |
2796 | 2405 | ||
2797 | HSO 3G MODEM DRIVER | 2406 | HSO 3G MODEM DRIVER |
2798 | P: Jan Dumon | 2407 | M: Jan Dumon <j.dumon@option.com> |
2799 | M: j.dumon@option.com | ||
2800 | W: http://www.pharscape.org | 2408 | W: http://www.pharscape.org |
2801 | S: Maintained | 2409 | S: Maintained |
2802 | F: drivers/net/usb/hso.c | 2410 | F: drivers/net/usb/hso.c |
2803 | 2411 | ||
2804 | HTCPEN TOUCHSCREEN DRIVER | 2412 | HTCPEN TOUCHSCREEN DRIVER |
2805 | P: Pau Oliva Fora | 2413 | M: Pau Oliva Fora <pof@eslack.org> |
2806 | M: pof@eslack.org | ||
2807 | L: linux-input@vger.kernel.org | 2414 | L: linux-input@vger.kernel.org |
2808 | S: Maintained | 2415 | S: Maintained |
2809 | F: drivers/input/touchscreen/htcpen.c | 2416 | F: drivers/input/touchscreen/htcpen.c |
2810 | 2417 | ||
2811 | HUGETLB FILESYSTEM | 2418 | HUGETLB FILESYSTEM |
2812 | P: William Irwin | 2419 | M: William Irwin <wli@holomorphy.com> |
2813 | M: wli@holomorphy.com | ||
2814 | S: Maintained | 2420 | S: Maintained |
2815 | F: fs/hugetlbfs/ | 2421 | F: fs/hugetlbfs/ |
2816 | 2422 | ||
2817 | I2C/SMBUS STUB DRIVER | 2423 | I2C/SMBUS STUB DRIVER |
2818 | P: Mark M. Hoffman | 2424 | M: "Mark M. Hoffman" <mhoffman@lightlink.com> |
2819 | M: mhoffman@lightlink.com | ||
2820 | L: linux-i2c@vger.kernel.org | 2425 | L: linux-i2c@vger.kernel.org |
2821 | S: Maintained | 2426 | S: Maintained |
2822 | F: drivers/i2c/busses/i2c-stub.c | 2427 | F: drivers/i2c/busses/i2c-stub.c |
2823 | 2428 | ||
2824 | I2C SUBSYSTEM | 2429 | I2C SUBSYSTEM |
2825 | P: Jean Delvare (PC drivers, core) | 2430 | M: "Jean Delvare (PC drivers, core)" <khali@linux-fr.org> |
2826 | M: khali@linux-fr.org | 2431 | M: "Ben Dooks (embedded platforms)" <ben-linux@fluff.org> |
2827 | P: Ben Dooks (embedded platforms) | ||
2828 | M: ben-linux@fluff.org | ||
2829 | L: linux-i2c@vger.kernel.org | 2432 | L: linux-i2c@vger.kernel.org |
2830 | W: http://i2c.wiki.kernel.org/ | 2433 | W: http://i2c.wiki.kernel.org/ |
2831 | T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-i2c/ | 2434 | T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-i2c/ |
@@ -2837,30 +2440,25 @@ F: include/linux/i2c-dev.h | |||
2837 | F: include/linux/i2c-id.h | 2440 | F: include/linux/i2c-id.h |
2838 | 2441 | ||
2839 | I2C-TINY-USB DRIVER | 2442 | I2C-TINY-USB DRIVER |
2840 | P: Till Harbaum | 2443 | M: Till Harbaum <till@harbaum.org> |
2841 | M: till@harbaum.org | ||
2842 | L: linux-i2c@vger.kernel.org | 2444 | L: linux-i2c@vger.kernel.org |
2843 | W: http://www.harbaum.org/till/i2c_tiny_usb | 2445 | W: http://www.harbaum.org/till/i2c_tiny_usb |
2844 | S: Maintained | 2446 | S: Maintained |
2845 | F: drivers/i2c/busses/i2c-tiny-usb.c | 2447 | F: drivers/i2c/busses/i2c-tiny-usb.c |
2846 | 2448 | ||
2847 | i386 BOOT CODE | 2449 | i386 BOOT CODE |
2848 | P: H. Peter Anvin | 2450 | M: "H. Peter Anvin" <hpa@zytor.com> |
2849 | M: hpa@zytor.com | ||
2850 | S: Maintained | 2451 | S: Maintained |
2851 | F: arch/x86/boot/ | 2452 | F: arch/x86/boot/ |
2852 | 2453 | ||
2853 | i386 SETUP CODE / CPU ERRATA WORKAROUNDS | 2454 | i386 SETUP CODE / CPU ERRATA WORKAROUNDS |
2854 | P: H. Peter Anvin | 2455 | M: "H. Peter Anvin" <hpa@zytor.com> |
2855 | M: hpa@zytor.com | ||
2856 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/hpa/linux-2.6-x86setup.git | 2456 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/hpa/linux-2.6-x86setup.git |
2857 | S: Maintained | 2457 | S: Maintained |
2858 | 2458 | ||
2859 | IA64 (Itanium) PLATFORM | 2459 | IA64 (Itanium) PLATFORM |
2860 | P: Tony Luck | 2460 | M: Tony Luck <tony.luck@intel.com> |
2861 | P: Fenghua Yu | 2461 | M: Fenghua Yu <fenghua.yu@intel.com> |
2862 | M: tony.luck@intel.com | ||
2863 | M: fenghua.yu@intel.com | ||
2864 | L: linux-ia64@vger.kernel.org | 2462 | L: linux-ia64@vger.kernel.org |
2865 | W: http://www.ia64-linux.org/ | 2463 | W: http://www.ia64-linux.org/ |
2866 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6.git | 2464 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6.git |
@@ -2868,29 +2466,25 @@ S: Maintained | |||
2868 | F: arch/ia64/ | 2466 | F: arch/ia64/ |
2869 | 2467 | ||
2870 | IBM MCA SCSI SUBSYSTEM DRIVER | 2468 | IBM MCA SCSI SUBSYSTEM DRIVER |
2871 | P: Michael Lang | 2469 | M: Michael Lang <langa2@kph.uni-mainz.de> |
2872 | M: langa2@kph.uni-mainz.de | ||
2873 | W: http://www.uni-mainz.de/~langm000/linux.html | 2470 | W: http://www.uni-mainz.de/~langm000/linux.html |
2874 | S: Maintained | 2471 | S: Maintained |
2875 | F: drivers/scsi/ibmmca.c | 2472 | F: drivers/scsi/ibmmca.c |
2876 | 2473 | ||
2877 | IBM Power Linux RAID adapter | 2474 | IBM Power Linux RAID adapter |
2878 | P: Brian King | 2475 | M: Brian King <brking@us.ibm.com> |
2879 | M: brking@us.ibm.com | ||
2880 | S: Supported | 2476 | S: Supported |
2881 | F: drivers/scsi/ipr.* | 2477 | F: drivers/scsi/ipr.* |
2882 | 2478 | ||
2883 | IBM ServeRAID RAID DRIVER | 2479 | IBM ServeRAID RAID DRIVER |
2884 | P: Jack Hammer | 2480 | P: Jack Hammer |
2885 | P: Dave Jeffery | 2481 | M: Dave Jeffery <ipslinux@adaptec.com> |
2886 | M: ipslinux@adaptec.com | ||
2887 | W: http://www.developer.ibm.com/welcome/netfinity/serveraid.html | 2482 | W: http://www.developer.ibm.com/welcome/netfinity/serveraid.html |
2888 | S: Supported | 2483 | S: Supported |
2889 | F: drivers/scsi/ips.* | 2484 | F: drivers/scsi/ips.* |
2890 | 2485 | ||
2891 | IDE SUBSYSTEM | 2486 | IDE SUBSYSTEM |
2892 | P: David S. Miller | 2487 | M: "David S. Miller" <davem@davemloft.net> |
2893 | M: davem@davemloft.net | ||
2894 | L: linux-ide@vger.kernel.org | 2488 | L: linux-ide@vger.kernel.org |
2895 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/ide-2.6.git | 2489 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/ide-2.6.git |
2896 | S: Maintained | 2490 | S: Maintained |
@@ -2899,25 +2493,21 @@ F: drivers/ide/ | |||
2899 | F: include/linux/ide.h | 2493 | F: include/linux/ide.h |
2900 | 2494 | ||
2901 | IDE/ATAPI DRIVERS | 2495 | IDE/ATAPI DRIVERS |
2902 | P: Borislav Petkov | 2496 | M: Borislav Petkov <petkovbb@gmail.com> |
2903 | M: petkovbb@gmail.com | ||
2904 | L: linux-ide@vger.kernel.org | 2497 | L: linux-ide@vger.kernel.org |
2905 | S: Maintained | 2498 | S: Maintained |
2906 | F: Documentation/cdrom/ide-cd | 2499 | F: Documentation/cdrom/ide-cd |
2907 | F: drivers/ide/ide-cd* | 2500 | F: drivers/ide/ide-cd* |
2908 | 2501 | ||
2909 | IDLE-I7300 | 2502 | IDLE-I7300 |
2910 | P: Andy Henroid | 2503 | M: Andy Henroid <andrew.d.henroid@intel.com> |
2911 | M: andrew.d.henroid@intel.com | ||
2912 | L: linux-pm@lists.linux-foundation.org | 2504 | L: linux-pm@lists.linux-foundation.org |
2913 | S: Supported | 2505 | S: Supported |
2914 | F: drivers/idle/i7300_idle.c | 2506 | F: drivers/idle/i7300_idle.c |
2915 | 2507 | ||
2916 | IEEE 1394 SUBSYSTEM | 2508 | IEEE 1394 SUBSYSTEM |
2917 | P: Ben Collins | 2509 | M: Ben Collins <ben.collins@ubuntu.com> |
2918 | M: ben.collins@ubuntu.com | 2510 | M: Stefan Richter <stefanr@s5r6.in-berlin.de> |
2919 | P: Stefan Richter | ||
2920 | M: stefanr@s5r6.in-berlin.de | ||
2921 | L: linux1394-devel@lists.sourceforge.net | 2511 | L: linux1394-devel@lists.sourceforge.net |
2922 | W: http://www.linux1394.org/ | 2512 | W: http://www.linux1394.org/ |
2923 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git | 2513 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git |
@@ -2925,19 +2515,15 @@ S: Maintained | |||
2925 | F: drivers/ieee1394/ | 2515 | F: drivers/ieee1394/ |
2926 | 2516 | ||
2927 | IEEE 1394 RAW I/O DRIVER | 2517 | IEEE 1394 RAW I/O DRIVER |
2928 | P: Dan Dennedy | 2518 | M: Dan Dennedy <dan@dennedy.org> |
2929 | M: dan@dennedy.org | 2519 | M: Stefan Richter <stefanr@s5r6.in-berlin.de> |
2930 | P: Stefan Richter | ||
2931 | M: stefanr@s5r6.in-berlin.de | ||
2932 | L: linux1394-devel@lists.sourceforge.net | 2520 | L: linux1394-devel@lists.sourceforge.net |
2933 | S: Maintained | 2521 | S: Maintained |
2934 | F: drivers/ieee1394/raw1394* | 2522 | F: drivers/ieee1394/raw1394* |
2935 | 2523 | ||
2936 | IEEE 802.15.4 SUBSYSTEM | 2524 | IEEE 802.15.4 SUBSYSTEM |
2937 | P: Dmitry Eremin-Solenikov | 2525 | M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> |
2938 | M: dbaryshkov@gmail.com | 2526 | M: Sergey Lapin <slapin@ossfans.org> |
2939 | P: Sergey Lapin | ||
2940 | M: slapin@ossfans.org | ||
2941 | L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers) | 2527 | L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers) |
2942 | W: http://apps.sourceforge.net/trac/linux-zigbee | 2528 | W: http://apps.sourceforge.net/trac/linux-zigbee |
2943 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git | 2529 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git |
@@ -2946,8 +2532,7 @@ F: net/ieee802154/ | |||
2946 | F: drivers/ieee802154/ | 2532 | F: drivers/ieee802154/ |
2947 | 2533 | ||
2948 | INTEGRITY MEASUREMENT ARCHITECTURE (IMA) | 2534 | INTEGRITY MEASUREMENT ARCHITECTURE (IMA) |
2949 | P: Mimi Zohar | 2535 | M: Mimi Zohar <zohar@us.ibm.com> |
2950 | M: zohar@us.ibm.com | ||
2951 | S: Supported | 2536 | S: Supported |
2952 | F: security/integrity/ima/ | 2537 | F: security/integrity/ima/ |
2953 | 2538 | ||
@@ -2957,12 +2542,9 @@ S: Orphan | |||
2957 | F: drivers/video/imsttfb.c | 2542 | F: drivers/video/imsttfb.c |
2958 | 2543 | ||
2959 | INFINIBAND SUBSYSTEM | 2544 | INFINIBAND SUBSYSTEM |
2960 | P: Roland Dreier | 2545 | M: Roland Dreier <rolandd@cisco.com> |
2961 | M: rolandd@cisco.com | 2546 | M: Sean Hefty <sean.hefty@intel.com> |
2962 | P: Sean Hefty | 2547 | M: Hal Rosenstock <hal.rosenstock@gmail.com> |
2963 | M: sean.hefty@intel.com | ||
2964 | P: Hal Rosenstock | ||
2965 | M: hal.rosenstock@gmail.com | ||
2966 | L: general@lists.openfabrics.org (moderated for non-subscribers) | 2548 | L: general@lists.openfabrics.org (moderated for non-subscribers) |
2967 | W: http://www.openib.org/ | 2549 | W: http://www.openib.org/ |
2968 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git | 2550 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git |
@@ -2972,65 +2554,55 @@ F: drivers/infiniband/ | |||
2972 | F: include/linux/if_infiniband.h | 2554 | F: include/linux/if_infiniband.h |
2973 | 2555 | ||
2974 | INOTIFY | 2556 | INOTIFY |
2975 | P: John McCutchan | 2557 | M: John McCutchan <john@johnmccutchan.com> |
2976 | M: john@johnmccutchan.com | 2558 | M: Robert Love <rlove@rlove.org> |
2977 | P: Robert Love | 2559 | M: Eric Paris <eparis@parisplace.org> |
2978 | M: rlove@rlove.org | ||
2979 | P: Eric Paris | ||
2980 | M: eparis@parisplace.org | ||
2981 | S: Maintained | 2560 | S: Maintained |
2982 | F: Documentation/filesystems/inotify.txt | 2561 | F: Documentation/filesystems/inotify.txt |
2983 | F: fs/notify/inotify/ | 2562 | F: fs/notify/inotify/ |
2984 | F: include/linux/inotify.h | 2563 | F: include/linux/inotify.h |
2985 | 2564 | ||
2986 | INPUT (KEYBOARD, MOUSE, JOYSTICK, TOUCHSCREEN) DRIVERS | 2565 | INPUT (KEYBOARD, MOUSE, JOYSTICK, TOUCHSCREEN) DRIVERS |
2987 | P: Dmitry Torokhov | 2566 | M: Dmitry Torokhov <dmitry.torokhov@gmail.com> |
2988 | M: dmitry.torokhov@gmail.com | 2567 | M: Dmitry Torokhov <dtor@mail.ru> |
2989 | M: dtor@mail.ru | ||
2990 | L: linux-input@vger.kernel.org | 2568 | L: linux-input@vger.kernel.org |
2991 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input.git | 2569 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input.git |
2992 | S: Maintained | 2570 | S: Maintained |
2993 | F: drivers/input/ | 2571 | F: drivers/input/ |
2994 | 2572 | ||
2995 | INTEL FRAMEBUFFER DRIVER (excluding 810 and 815) | 2573 | INTEL FRAMEBUFFER DRIVER (excluding 810 and 815) |
2996 | P: Sylvain Meyer | 2574 | M: Sylvain Meyer <sylvain.meyer@worldonline.fr> |
2997 | M: sylvain.meyer@worldonline.fr | ||
2998 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) | 2575 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) |
2999 | S: Maintained | 2576 | S: Maintained |
3000 | F: Documentation/fb/intelfb.txt | 2577 | F: Documentation/fb/intelfb.txt |
3001 | F: drivers/video/intelfb/ | 2578 | F: drivers/video/intelfb/ |
3002 | 2579 | ||
3003 | INTEL 810/815 FRAMEBUFFER DRIVER | 2580 | INTEL 810/815 FRAMEBUFFER DRIVER |
3004 | P: Antonino Daplas | 2581 | M: Antonino Daplas <adaplas@gmail.com> |
3005 | M: adaplas@gmail.com | ||
3006 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) | 2582 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) |
3007 | S: Maintained | 2583 | S: Maintained |
3008 | F: drivers/video/i810/ | 2584 | F: drivers/video/i810/ |
3009 | 2585 | ||
3010 | INTEL MENLOW THERMAL DRIVER | 2586 | INTEL MENLOW THERMAL DRIVER |
3011 | P: Sujith Thomas | 2587 | M: Sujith Thomas <sujith.thomas@intel.com> |
3012 | M: sujith.thomas@intel.com | ||
3013 | L: linux-acpi@vger.kernel.org | 2588 | L: linux-acpi@vger.kernel.org |
3014 | W: http://www.lesswatts.org/projects/acpi/ | 2589 | W: http://www.lesswatts.org/projects/acpi/ |
3015 | S: Supported | 2590 | S: Supported |
3016 | F: drivers/platform/x86/intel_menlow.c | 2591 | F: drivers/platform/x86/intel_menlow.c |
3017 | 2592 | ||
3018 | INTEL IA32 MICROCODE UPDATE SUPPORT | 2593 | INTEL IA32 MICROCODE UPDATE SUPPORT |
3019 | P: Tigran Aivazian | 2594 | M: Tigran Aivazian <tigran@aivazian.fsnet.co.uk> |
3020 | M: tigran@aivazian.fsnet.co.uk | ||
3021 | S: Maintained | 2595 | S: Maintained |
3022 | F: arch/x86/kernel/microcode_core.c | 2596 | F: arch/x86/kernel/microcode_core.c |
3023 | F: arch/x86/kernel/microcode_intel.c | 2597 | F: arch/x86/kernel/microcode_intel.c |
3024 | 2598 | ||
3025 | INTEL I/OAT DMA DRIVER | 2599 | INTEL I/OAT DMA DRIVER |
3026 | P: Maciej Sosnowski | 2600 | M: Maciej Sosnowski <maciej.sosnowski@intel.com> |
3027 | M: maciej.sosnowski@intel.com | ||
3028 | S: Supported | 2601 | S: Supported |
3029 | F: drivers/dma/ioat* | 2602 | F: drivers/dma/ioat* |
3030 | 2603 | ||
3031 | INTEL IOMMU (VT-d) | 2604 | INTEL IOMMU (VT-d) |
3032 | P: David Woodhouse | 2605 | M: David Woodhouse <dwmw2@infradead.org> |
3033 | M: dwmw2@infradead.org | ||
3034 | L: iommu@lists.linux-foundation.org | 2606 | L: iommu@lists.linux-foundation.org |
3035 | T: git git://git.infradead.org/iommu-2.6.git | 2607 | T: git git://git.infradead.org/iommu-2.6.git |
3036 | S: Supported | 2608 | S: Supported |
@@ -3038,14 +2610,12 @@ F: drivers/pci/intel-iommu.c | |||
3038 | F: include/linux/intel-iommu.h | 2610 | F: include/linux/intel-iommu.h |
3039 | 2611 | ||
3040 | INTEL IOP-ADMA DMA DRIVER | 2612 | INTEL IOP-ADMA DMA DRIVER |
3041 | P: Dan Williams | 2613 | M: Dan Williams <dan.j.williams@intel.com> |
3042 | M: dan.j.williams@intel.com | ||
3043 | S: Supported | 2614 | S: Supported |
3044 | F: drivers/dma/iop-adma.c | 2615 | F: drivers/dma/iop-adma.c |
3045 | 2616 | ||
3046 | INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT | 2617 | INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT |
3047 | P: Krzysztof Halasa | 2618 | M: Krzysztof Halasa <khc@pm.waw.pl> |
3048 | M: khc@pm.waw.pl | ||
3049 | S: Maintained | 2619 | S: Maintained |
3050 | F: arch/arm/mach-ixp4xx/include/mach/qmgr.h | 2620 | F: arch/arm/mach-ixp4xx/include/mach/qmgr.h |
3051 | F: arch/arm/mach-ixp4xx/include/mach/npe.h | 2621 | F: arch/arm/mach-ixp4xx/include/mach/npe.h |
@@ -3055,29 +2625,22 @@ F: drivers/net/arm/ixp4xx_eth.c | |||
3055 | F: drivers/net/wan/ixp4xx_hss.c | 2625 | F: drivers/net/wan/ixp4xx_hss.c |
3056 | 2626 | ||
3057 | INTEL IXP4XX RANDOM NUMBER GENERATOR SUPPORT | 2627 | INTEL IXP4XX RANDOM NUMBER GENERATOR SUPPORT |
3058 | P: Deepak Saxena | 2628 | M: Deepak Saxena <dsaxena@plexity.net> |
3059 | M: dsaxena@plexity.net | ||
3060 | S: Maintained | 2629 | S: Maintained |
3061 | F: drivers/char/hw_random/ixp4xx-rng.c | 2630 | F: drivers/char/hw_random/ixp4xx-rng.c |
3062 | 2631 | ||
3063 | INTEL IXP2000 ETHERNET DRIVER | 2632 | INTEL IXP2000 ETHERNET DRIVER |
3064 | P: Lennert Buytenhek | 2633 | M: Lennert Buytenhek <kernel@wantstofly.org> |
3065 | M: kernel@wantstofly.org | ||
3066 | L: netdev@vger.kernel.org | 2634 | L: netdev@vger.kernel.org |
3067 | S: Maintained | 2635 | S: Maintained |
3068 | F: drivers/net/ixp2000/ | 2636 | F: drivers/net/ixp2000/ |
3069 | 2637 | ||
3070 | INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/ixgb/ixgbe) | 2638 | INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/ixgb/ixgbe) |
3071 | P: Jeff Kirsher | 2639 | M: Jeff Kirsher <jeffrey.t.kirsher@intel.com> |
3072 | M: jeffrey.t.kirsher@intel.com | 2640 | M: Jesse Brandeburg <jesse.brandeburg@intel.com> |
3073 | P: Jesse Brandeburg | 2641 | M: Bruce Allan <bruce.w.allan@intel.com> |
3074 | M: jesse.brandeburg@intel.com | 2642 | M: PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com> |
3075 | P: Bruce Allan | 2643 | M: John Ronciak <john.ronciak@intel.com> |
3076 | M: bruce.w.allan@intel.com | ||
3077 | P: PJ Waskiewicz | ||
3078 | M: peter.p.waskiewicz.jr@intel.com | ||
3079 | P: John Ronciak | ||
3080 | M: john.ronciak@intel.com | ||
3081 | L: e1000-devel@lists.sourceforge.net | 2644 | L: e1000-devel@lists.sourceforge.net |
3082 | W: http://e1000.sourceforge.net/ | 2645 | W: http://e1000.sourceforge.net/ |
3083 | S: Supported | 2646 | S: Supported |
@@ -3089,12 +2652,9 @@ F: drivers/net/ixgb/ | |||
3089 | F: drivers/net/ixgbe/ | 2652 | F: drivers/net/ixgbe/ |
3090 | 2653 | ||
3091 | INTEL PRO/WIRELESS 2100 NETWORK CONNECTION SUPPORT | 2654 | INTEL PRO/WIRELESS 2100 NETWORK CONNECTION SUPPORT |
3092 | P: Zhu Yi | 2655 | M: Zhu Yi <yi.zhu@intel.com> |
3093 | M: yi.zhu@intel.com | 2656 | M: James Ketrenos <jketreno@linux.intel.com> |
3094 | P: James Ketrenos | 2657 | M: Reinette Chatre <reinette.chatre@intel.com> |
3095 | M: jketreno@linux.intel.com | ||
3096 | P: Reinette Chatre | ||
3097 | M: reinette.chatre@intel.com | ||
3098 | L: linux-wireless@vger.kernel.org | 2658 | L: linux-wireless@vger.kernel.org |
3099 | L: ipw2100-devel@lists.sourceforge.net | 2659 | L: ipw2100-devel@lists.sourceforge.net |
3100 | W: http://lists.sourceforge.net/mailman/listinfo/ipw2100-devel | 2660 | W: http://lists.sourceforge.net/mailman/listinfo/ipw2100-devel |
@@ -3104,12 +2664,9 @@ F: Documentation/networking/README.ipw2100 | |||
3104 | F: drivers/net/wireless/ipw2x00/ipw2100.* | 2664 | F: drivers/net/wireless/ipw2x00/ipw2100.* |
3105 | 2665 | ||
3106 | INTEL PRO/WIRELESS 2915ABG NETWORK CONNECTION SUPPORT | 2666 | INTEL PRO/WIRELESS 2915ABG NETWORK CONNECTION SUPPORT |
3107 | P: Zhu Yi | 2667 | M: Zhu Yi <yi.zhu@intel.com> |
3108 | M: yi.zhu@intel.com | 2668 | M: James Ketrenos <jketreno@linux.intel.com> |
3109 | P: James Ketrenos | 2669 | M: Reinette Chatre <reinette.chatre@intel.com> |
3110 | M: jketreno@linux.intel.com | ||
3111 | P: Reinette Chatre | ||
3112 | M: reinette.chatre@intel.com | ||
3113 | L: linux-wireless@vger.kernel.org | 2670 | L: linux-wireless@vger.kernel.org |
3114 | L: ipw2100-devel@lists.sourceforge.net | 2671 | L: ipw2100-devel@lists.sourceforge.net |
3115 | W: http://lists.sourceforge.net/mailman/listinfo/ipw2100-devel | 2672 | W: http://lists.sourceforge.net/mailman/listinfo/ipw2100-devel |
@@ -3119,8 +2676,7 @@ F: Documentation/networking/README.ipw2200 | |||
3119 | F: drivers/net/wireless/ipw2x00/ipw2200.* | 2676 | F: drivers/net/wireless/ipw2x00/ipw2200.* |
3120 | 2677 | ||
3121 | INTEL WIRELESS WIMAX CONNECTION 2400 | 2678 | INTEL WIRELESS WIMAX CONNECTION 2400 |
3122 | P: Inaky Perez-Gonzalez | 2679 | M: Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> |
3123 | M: inaky.perez-gonzalez@intel.com | ||
3124 | M: linux-wimax@intel.com | 2680 | M: linux-wimax@intel.com |
3125 | L: wimax@linuxwimax.org | 2681 | L: wimax@linuxwimax.org |
3126 | S: Supported | 2682 | S: Supported |
@@ -3130,10 +2686,8 @@ F: drivers/net/wimax/i2400m/ | |||
3130 | F: include/linux/wimax/i2400m.h | 2686 | F: include/linux/wimax/i2400m.h |
3131 | 2687 | ||
3132 | INTEL WIRELESS WIFI LINK (iwlwifi) | 2688 | INTEL WIRELESS WIFI LINK (iwlwifi) |
3133 | P: Zhu Yi | 2689 | M: Zhu Yi <yi.zhu@intel.com> |
3134 | M: yi.zhu@intel.com | 2690 | M: Reinette Chatre <reinette.chatre@intel.com> |
3135 | P: Reinette Chatre | ||
3136 | M: reinette.chatre@intel.com | ||
3137 | L: linux-wireless@vger.kernel.org | 2691 | L: linux-wireless@vger.kernel.org |
3138 | L: ipw3945-devel@lists.sourceforge.net | 2692 | L: ipw3945-devel@lists.sourceforge.net |
3139 | W: http://intellinuxwireless.org | 2693 | W: http://intellinuxwireless.org |
@@ -3142,47 +2696,39 @@ S: Supported | |||
3142 | F: drivers/net/wireless/iwlwifi/ | 2696 | F: drivers/net/wireless/iwlwifi/ |
3143 | 2697 | ||
3144 | IOC3 ETHERNET DRIVER | 2698 | IOC3 ETHERNET DRIVER |
3145 | P: Ralf Baechle | 2699 | M: Ralf Baechle <ralf@linux-mips.org> |
3146 | M: ralf@linux-mips.org | ||
3147 | L: linux-mips@linux-mips.org | 2700 | L: linux-mips@linux-mips.org |
3148 | S: Maintained | 2701 | S: Maintained |
3149 | F: drivers/net/ioc3-eth.c | 2702 | F: drivers/net/ioc3-eth.c |
3150 | 2703 | ||
3151 | IOC3 SERIAL DRIVER | 2704 | IOC3 SERIAL DRIVER |
3152 | P: Pat Gefre | 2705 | M: Pat Gefre <pfg@sgi.com> |
3153 | M: pfg@sgi.com | ||
3154 | L: linux-mips@linux-mips.org | 2706 | L: linux-mips@linux-mips.org |
3155 | S: Maintained | 2707 | S: Maintained |
3156 | F: drivers/serial/ioc3_serial.c | 2708 | F: drivers/serial/ioc3_serial.c |
3157 | 2709 | ||
3158 | IP MASQUERADING | 2710 | IP MASQUERADING |
3159 | P: Juanjo Ciarlante | 2711 | M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar> |
3160 | M: jjciarla@raiz.uncu.edu.ar | ||
3161 | S: Maintained | 2712 | S: Maintained |
3162 | F: net/ipv4/netfilter/ipt_MASQUERADE.c | 2713 | F: net/ipv4/netfilter/ipt_MASQUERADE.c |
3163 | 2714 | ||
3164 | IP1000A 10/100/1000 GIGABIT ETHERNET DRIVER | 2715 | IP1000A 10/100/1000 GIGABIT ETHERNET DRIVER |
3165 | P: Francois Romieu | 2716 | M: Francois Romieu <romieu@fr.zoreil.com> |
3166 | M: romieu@fr.zoreil.com | 2717 | M: Sorbica Shieh <sorbica@icplus.com.tw> |
3167 | P: Sorbica Shieh | 2718 | M: Jesse Huang <jesse@icplus.com.tw> |
3168 | M: sorbica@icplus.com.tw | ||
3169 | P: Jesse Huang | ||
3170 | M: jesse@icplus.com.tw | ||
3171 | L: netdev@vger.kernel.org | 2719 | L: netdev@vger.kernel.org |
3172 | S: Maintained | 2720 | S: Maintained |
3173 | F: drivers/net/ipg.c | 2721 | F: drivers/net/ipg.c |
3174 | 2722 | ||
3175 | IPATH DRIVER | 2723 | IPATH DRIVER |
3176 | P: Ralph Campbell | 2724 | M: Ralph Campbell <infinipath@qlogic.com> |
3177 | M: infinipath@qlogic.com | ||
3178 | L: general@lists.openfabrics.org | 2725 | L: general@lists.openfabrics.org |
3179 | T: git git://git.qlogic.com/ipath-linux-2.6 | 2726 | T: git git://git.qlogic.com/ipath-linux-2.6 |
3180 | S: Supported | 2727 | S: Supported |
3181 | F: drivers/infiniband/hw/ipath/ | 2728 | F: drivers/infiniband/hw/ipath/ |
3182 | 2729 | ||
3183 | IPMI SUBSYSTEM | 2730 | IPMI SUBSYSTEM |
3184 | P: Corey Minyard | 2731 | M: Corey Minyard <minyard@acm.org> |
3185 | M: minyard@acm.org | ||
3186 | L: openipmi-developer@lists.sourceforge.net | 2732 | L: openipmi-developer@lists.sourceforge.net |
3187 | W: http://openipmi.sourceforge.net/ | 2733 | W: http://openipmi.sourceforge.net/ |
3188 | S: Supported | 2734 | S: Supported |
@@ -3191,20 +2737,16 @@ F: drivers/char/ipmi/ | |||
3191 | F: include/linux/ipmi* | 2737 | F: include/linux/ipmi* |
3192 | 2738 | ||
3193 | IPS SCSI RAID DRIVER | 2739 | IPS SCSI RAID DRIVER |
3194 | P: Adaptec OEM Raid Solutions | 2740 | M: Adaptec OEM Raid Solutions <aacraid@adaptec.com> |
3195 | M: aacraid@adaptec.com | ||
3196 | L: linux-scsi@vger.kernel.org | 2741 | L: linux-scsi@vger.kernel.org |
3197 | W: http://www.adaptec.com/ | 2742 | W: http://www.adaptec.com/ |
3198 | S: Maintained | 2743 | S: Maintained |
3199 | F: drivers/scsi/ips* | 2744 | F: drivers/scsi/ips* |
3200 | 2745 | ||
3201 | IPVS | 2746 | IPVS |
3202 | P: Wensong Zhang | 2747 | M: Wensong Zhang <wensong@linux-vs.org> |
3203 | M: wensong@linux-vs.org | 2748 | M: Simon Horman <horms@verge.net.au> |
3204 | P: Simon Horman | 2749 | M: Julian Anastasov <ja@ssi.bg> |
3205 | M: horms@verge.net.au | ||
3206 | P: Julian Anastasov | ||
3207 | M: ja@ssi.bg | ||
3208 | L: netdev@vger.kernel.org | 2750 | L: netdev@vger.kernel.org |
3209 | L: lvs-devel@vger.kernel.org | 2751 | L: lvs-devel@vger.kernel.org |
3210 | S: Maintained | 2752 | S: Maintained |
@@ -3212,17 +2754,14 @@ F: Documentation/networking/ipvs-sysctl.txt | |||
3212 | F: net/netfilter/ipvs/ | 2754 | F: net/netfilter/ipvs/ |
3213 | 2755 | ||
3214 | IPWIRELESS DRIVER | 2756 | IPWIRELESS DRIVER |
3215 | P: Jiri Kosina | 2757 | M: Jiri Kosina <jkosina@suse.cz> |
3216 | M: jkosina@suse.cz | 2758 | M: David Sterba <dsterba@suse.cz> |
3217 | P: David Sterba | ||
3218 | M: dsterba@suse.cz | ||
3219 | S: Maintained | 2759 | S: Maintained |
3220 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/ipwireless_cs.git | 2760 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/ipwireless_cs.git |
3221 | F: drivers/char/pcmcia/ipwireless/ | 2761 | F: drivers/char/pcmcia/ipwireless/ |
3222 | 2762 | ||
3223 | IPX NETWORK LAYER | 2763 | IPX NETWORK LAYER |
3224 | P: Arnaldo Carvalho de Melo | 2764 | M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> |
3225 | M: acme@ghostprotocols.net | ||
3226 | L: netdev@vger.kernel.org | 2765 | L: netdev@vger.kernel.org |
3227 | S: Maintained | 2766 | S: Maintained |
3228 | F: include/linux/ipx.h | 2767 | F: include/linux/ipx.h |
@@ -3230,8 +2769,7 @@ F: include/net/ipx.h | |||
3230 | F: net/ipx/ | 2769 | F: net/ipx/ |
3231 | 2770 | ||
3232 | IRDA SUBSYSTEM | 2771 | IRDA SUBSYSTEM |
3233 | P: Samuel Ortiz | 2772 | M: Samuel Ortiz <samuel@sortiz.org> |
3234 | M: samuel@sortiz.org | ||
3235 | L: irda-users@lists.sourceforge.net (subscribers-only) | 2773 | L: irda-users@lists.sourceforge.net (subscribers-only) |
3236 | W: http://irda.sourceforge.net/ | 2774 | W: http://irda.sourceforge.net/ |
3237 | S: Maintained | 2775 | S: Maintained |
@@ -3242,16 +2780,14 @@ F: include/net/irda/ | |||
3242 | F: net/irda/ | 2780 | F: net/irda/ |
3243 | 2781 | ||
3244 | ISAPNP | 2782 | ISAPNP |
3245 | P: Jaroslav Kysela | 2783 | M: Jaroslav Kysela <perex@perex.cz> |
3246 | M: perex@perex.cz | ||
3247 | S: Maintained | 2784 | S: Maintained |
3248 | F: Documentation/isapnp.txt | 2785 | F: Documentation/isapnp.txt |
3249 | F: drivers/pnp/isapnp/ | 2786 | F: drivers/pnp/isapnp/ |
3250 | F: include/linux/isapnp.h | 2787 | F: include/linux/isapnp.h |
3251 | 2788 | ||
3252 | ISCSI | 2789 | ISCSI |
3253 | P: Mike Christie | 2790 | M: Mike Christie <michaelc@cs.wisc.edu> |
3254 | M: michaelc@cs.wisc.edu | ||
3255 | L: open-iscsi@googlegroups.com | 2791 | L: open-iscsi@googlegroups.com |
3256 | W: www.open-iscsi.org | 2792 | W: www.open-iscsi.org |
3257 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mnc/linux-2.6-iscsi.git | 2793 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mnc/linux-2.6-iscsi.git |
@@ -3260,8 +2796,7 @@ F: drivers/scsi/*iscsi* | |||
3260 | F: include/scsi/*iscsi* | 2796 | F: include/scsi/*iscsi* |
3261 | 2797 | ||
3262 | ISDN SUBSYSTEM | 2798 | ISDN SUBSYSTEM |
3263 | P: Karsten Keil | 2799 | M: Karsten Keil <isdn@linux-pingi.de> |
3264 | M: isdn@linux-pingi.de | ||
3265 | L: isdn4linux@listserv.isdn4linux.de (subscribers-only) | 2800 | L: isdn4linux@listserv.isdn4linux.de (subscribers-only) |
3266 | W: http://www.isdn4linux.de | 2801 | W: http://www.isdn4linux.de |
3267 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kkeil/isdn-2.6.git | 2802 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kkeil/isdn-2.6.git |
@@ -3272,18 +2807,15 @@ F: include/linux/isdn.h | |||
3272 | F: include/linux/isdn/ | 2807 | F: include/linux/isdn/ |
3273 | 2808 | ||
3274 | ISDN SUBSYSTEM (Eicon active card driver) | 2809 | ISDN SUBSYSTEM (Eicon active card driver) |
3275 | P: Armin Schindler | 2810 | M: Armin Schindler <mac@melware.de> |
3276 | M: mac@melware.de | ||
3277 | L: isdn4linux@listserv.isdn4linux.de (subscribers-only) | 2811 | L: isdn4linux@listserv.isdn4linux.de (subscribers-only) |
3278 | W: http://www.melware.de | 2812 | W: http://www.melware.de |
3279 | S: Maintained | 2813 | S: Maintained |
3280 | F: drivers/isdn/hardware/eicon/ | 2814 | F: drivers/isdn/hardware/eicon/ |
3281 | 2815 | ||
3282 | IVTV VIDEO4LINUX DRIVER | 2816 | IVTV VIDEO4LINUX DRIVER |
3283 | P: Hans Verkuil | 2817 | M: Hans Verkuil <hverkuil@xs4all.nl> |
3284 | M: hverkuil@xs4all.nl | ||
3285 | L: ivtv-devel@ivtvdriver.org | 2818 | L: ivtv-devel@ivtvdriver.org |
3286 | L: ivtv-users@ivtvdriver.org | ||
3287 | L: linux-media@vger.kernel.org | 2819 | L: linux-media@vger.kernel.org |
3288 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git | 2820 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git |
3289 | W: http://www.ivtvdriver.org | 2821 | W: http://www.ivtvdriver.org |
@@ -3293,8 +2825,7 @@ F: drivers/media/video/ivtv/ | |||
3293 | F: include/linux/ivtv* | 2825 | F: include/linux/ivtv* |
3294 | 2826 | ||
3295 | JFS FILESYSTEM | 2827 | JFS FILESYSTEM |
3296 | P: Dave Kleikamp | 2828 | M: Dave Kleikamp <shaggy@linux.vnet.ibm.com> |
3297 | M: shaggy@linux.vnet.ibm.com | ||
3298 | L: jfs-discussion@lists.sourceforge.net | 2829 | L: jfs-discussion@lists.sourceforge.net |
3299 | W: http://jfs.sourceforge.net/ | 2830 | W: http://jfs.sourceforge.net/ |
3300 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git | 2831 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git |
@@ -3303,15 +2834,13 @@ F: Documentation/filesystems/jfs.txt | |||
3303 | F: fs/jfs/ | 2834 | F: fs/jfs/ |
3304 | 2835 | ||
3305 | JME NETWORK DRIVER | 2836 | JME NETWORK DRIVER |
3306 | P: Guo-Fu Tseng | 2837 | M: Guo-Fu Tseng <cooldavid@cooldavid.org> |
3307 | M: cooldavid@cooldavid.org | ||
3308 | L: netdev@vger.kernel.org | 2838 | L: netdev@vger.kernel.org |
3309 | S: Maintained | 2839 | S: Maintained |
3310 | F: drivers/net/jme.* | 2840 | F: drivers/net/jme.* |
3311 | 2841 | ||
3312 | JOURNALLING FLASH FILE SYSTEM V2 (JFFS2) | 2842 | JOURNALLING FLASH FILE SYSTEM V2 (JFFS2) |
3313 | P: David Woodhouse | 2843 | M: David Woodhouse <dwmw2@infradead.org> |
3314 | M: dwmw2@infradead.org | ||
3315 | L: linux-mtd@lists.infradead.org | 2844 | L: linux-mtd@lists.infradead.org |
3316 | W: http://www.linux-mtd.infradead.org/doc/jffs2.html | 2845 | W: http://www.linux-mtd.infradead.org/doc/jffs2.html |
3317 | S: Maintained | 2846 | S: Maintained |
@@ -3319,10 +2848,8 @@ F: fs/jffs2/ | |||
3319 | F: include/linux/jffs2.h | 2848 | F: include/linux/jffs2.h |
3320 | 2849 | ||
3321 | JOURNALLING LAYER FOR BLOCK DEVICES (JBD) | 2850 | JOURNALLING LAYER FOR BLOCK DEVICES (JBD) |
3322 | P: Stephen Tweedie | 2851 | M: Stephen Tweedie <sct@redhat.com> |
3323 | M: sct@redhat.com | 2852 | M: Andrew Morton <akpm@linux-foundation.org> |
3324 | P: Andrew Morton | ||
3325 | M: akpm@linux-foundation.org | ||
3326 | L: linux-ext4@vger.kernel.org | 2853 | L: linux-ext4@vger.kernel.org |
3327 | S: Maintained | 2854 | S: Maintained |
3328 | F: fs/jbd*/ | 2855 | F: fs/jbd*/ |
@@ -3330,48 +2857,41 @@ F: include/linux/ext*jbd*.h | |||
3330 | F: include/linux/jbd*.h | 2857 | F: include/linux/jbd*.h |
3331 | 2858 | ||
3332 | K8TEMP HARDWARE MONITORING DRIVER | 2859 | K8TEMP HARDWARE MONITORING DRIVER |
3333 | P: Rudolf Marek | 2860 | M: Rudolf Marek <r.marek@assembler.cz> |
3334 | M: r.marek@assembler.cz | ||
3335 | L: lm-sensors@lm-sensors.org | 2861 | L: lm-sensors@lm-sensors.org |
3336 | S: Maintained | 2862 | S: Maintained |
3337 | F: Documentation/hwmon/k8temp | 2863 | F: Documentation/hwmon/k8temp |
3338 | F: drivers/hwmon/k8temp.c | 2864 | F: drivers/hwmon/k8temp.c |
3339 | 2865 | ||
3340 | KCONFIG | 2866 | KCONFIG |
3341 | P: Roman Zippel | 2867 | M: Roman Zippel <zippel@linux-m68k.org> |
3342 | M: zippel@linux-m68k.org | ||
3343 | L: linux-kbuild@vger.kernel.org | 2868 | L: linux-kbuild@vger.kernel.org |
3344 | S: Maintained | 2869 | S: Maintained |
3345 | F: Documentation/kbuild/kconfig-language.txt | 2870 | F: Documentation/kbuild/kconfig-language.txt |
3346 | F: scripts/kconfig/ | 2871 | F: scripts/kconfig/ |
3347 | 2872 | ||
3348 | KDUMP | 2873 | KDUMP |
3349 | P: Vivek Goyal | 2874 | M: Vivek Goyal <vgoyal@redhat.com> |
3350 | M: vgoyal@redhat.com | 2875 | M: Haren Myneni <hbabu@us.ibm.com> |
3351 | P: Haren Myneni | ||
3352 | M: hbabu@us.ibm.com | ||
3353 | L: kexec@lists.infradead.org | 2876 | L: kexec@lists.infradead.org |
3354 | W: http://lse.sourceforge.net/kdump/ | 2877 | W: http://lse.sourceforge.net/kdump/ |
3355 | S: Maintained | 2878 | S: Maintained |
3356 | F: Documentation/kdump/ | 2879 | F: Documentation/kdump/ |
3357 | 2880 | ||
3358 | KERNEL AUTOMOUNTER (AUTOFS) | 2881 | KERNEL AUTOMOUNTER (AUTOFS) |
3359 | P: H. Peter Anvin | 2882 | M: "H. Peter Anvin" <hpa@zytor.com> |
3360 | M: hpa@zytor.com | ||
3361 | L: autofs@linux.kernel.org | 2883 | L: autofs@linux.kernel.org |
3362 | S: Odd Fixes | 2884 | S: Odd Fixes |
3363 | F: fs/autofs/ | 2885 | F: fs/autofs/ |
3364 | 2886 | ||
3365 | KERNEL AUTOMOUNTER v4 (AUTOFS4) | 2887 | KERNEL AUTOMOUNTER v4 (AUTOFS4) |
3366 | P: Ian Kent | 2888 | M: Ian Kent <raven@themaw.net> |
3367 | M: raven@themaw.net | ||
3368 | L: autofs@linux.kernel.org | 2889 | L: autofs@linux.kernel.org |
3369 | S: Maintained | 2890 | S: Maintained |
3370 | F: fs/autofs4/ | 2891 | F: fs/autofs4/ |
3371 | 2892 | ||
3372 | KERNEL BUILD | 2893 | KERNEL BUILD |
3373 | P: Sam Ravnborg | 2894 | M: Sam Ravnborg <sam@ravnborg.org> |
3374 | M: sam@ravnborg.org | ||
3375 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/sam/kbuild-next.git | 2895 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/sam/kbuild-next.git |
3376 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/sam/kbuild-fixes.git | 2896 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/sam/kbuild-fixes.git |
3377 | L: linux-kbuild@vger.kernel.org | 2897 | L: linux-kbuild@vger.kernel.org |
@@ -3381,16 +2901,13 @@ F: Makefile | |||
3381 | F: scripts/Makefile.* | 2901 | F: scripts/Makefile.* |
3382 | 2902 | ||
3383 | KERNEL JANITORS | 2903 | KERNEL JANITORS |
3384 | P: Several | ||
3385 | L: kernel-janitors@vger.kernel.org | 2904 | L: kernel-janitors@vger.kernel.org |
3386 | W: http://www.kerneljanitors.org/ | 2905 | W: http://www.kerneljanitors.org/ |
3387 | S: Maintained | 2906 | S: Odd Fixes |
3388 | 2907 | ||
3389 | KERNEL NFSD, SUNRPC, AND LOCKD SERVERS | 2908 | KERNEL NFSD, SUNRPC, AND LOCKD SERVERS |
3390 | P: J. Bruce Fields | 2909 | M: "J. Bruce Fields" <bfields@fieldses.org> |
3391 | M: bfields@fieldses.org | 2910 | M: Neil Brown <neilb@suse.de> |
3392 | P: Neil Brown | ||
3393 | M: neilb@suse.de | ||
3394 | L: linux-nfs@vger.kernel.org | 2911 | L: linux-nfs@vger.kernel.org |
3395 | W: http://nfs.sourceforge.net/ | 2912 | W: http://nfs.sourceforge.net/ |
3396 | S: Supported | 2913 | S: Supported |
@@ -3403,8 +2920,7 @@ F: include/linux/lockd/ | |||
3403 | F: include/linux/sunrpc/ | 2920 | F: include/linux/sunrpc/ |
3404 | 2921 | ||
3405 | KERNEL VIRTUAL MACHINE (KVM) | 2922 | KERNEL VIRTUAL MACHINE (KVM) |
3406 | P: Avi Kivity | 2923 | M: Avi Kivity <avi@redhat.com> |
3407 | M: avi@redhat.com | ||
3408 | L: kvm@vger.kernel.org | 2924 | L: kvm@vger.kernel.org |
3409 | W: http://kvm.qumranet.com | 2925 | W: http://kvm.qumranet.com |
3410 | S: Supported | 2926 | S: Supported |
@@ -3415,8 +2931,7 @@ F: include/linux/kvm* | |||
3415 | F: virt/kvm/ | 2931 | F: virt/kvm/ |
3416 | 2932 | ||
3417 | KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V | 2933 | KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V |
3418 | P: Joerg Roedel | 2934 | M: Joerg Roedel <joerg.roedel@amd.com> |
3419 | M: joerg.roedel@amd.com | ||
3420 | L: kvm@vger.kernel.org | 2935 | L: kvm@vger.kernel.org |
3421 | W: http://kvm.qumranet.com | 2936 | W: http://kvm.qumranet.com |
3422 | S: Supported | 2937 | S: Supported |
@@ -3425,8 +2940,7 @@ F: arch/x86/kvm/kvm_svm.h | |||
3425 | F: arch/x86/kvm/svm.c | 2940 | F: arch/x86/kvm/svm.c |
3426 | 2941 | ||
3427 | KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC | 2942 | KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC |
3428 | P: Hollis Blanchard | 2943 | M: Hollis Blanchard <hollisb@us.ibm.com> |
3429 | M: hollisb@us.ibm.com | ||
3430 | L: kvm-ppc@vger.kernel.org | 2944 | L: kvm-ppc@vger.kernel.org |
3431 | W: http://kvm.qumranet.com | 2945 | W: http://kvm.qumranet.com |
3432 | S: Supported | 2946 | S: Supported |
@@ -3434,8 +2948,7 @@ F: arch/powerpc/include/asm/kvm* | |||
3434 | F: arch/powerpc/kvm/ | 2948 | F: arch/powerpc/kvm/ |
3435 | 2949 | ||
3436 | KERNEL VIRTUAL MACHINE For Itanium (KVM/IA64) | 2950 | KERNEL VIRTUAL MACHINE For Itanium (KVM/IA64) |
3437 | P: Xiantao Zhang | 2951 | M: Xiantao Zhang <xiantao.zhang@intel.com> |
3438 | M: xiantao.zhang@intel.com | ||
3439 | L: kvm-ia64@vger.kernel.org | 2952 | L: kvm-ia64@vger.kernel.org |
3440 | W: http://kvm.qumranet.com | 2953 | W: http://kvm.qumranet.com |
3441 | S: Supported | 2954 | S: Supported |
@@ -3444,10 +2957,8 @@ F: arch/ia64/include/asm/kvm* | |||
3444 | F: arch/ia64/kvm/ | 2957 | F: arch/ia64/kvm/ |
3445 | 2958 | ||
3446 | KERNEL VIRTUAL MACHINE for s390 (KVM/s390) | 2959 | KERNEL VIRTUAL MACHINE for s390 (KVM/s390) |
3447 | P: Carsten Otte | 2960 | M: Carsten Otte <cotte@de.ibm.com> |
3448 | M: cotte@de.ibm.com | 2961 | M: Christian Borntraeger <borntraeger@de.ibm.com> |
3449 | P: Christian Borntraeger | ||
3450 | M: borntraeger@de.ibm.com | ||
3451 | M: linux390@de.ibm.com | 2962 | M: linux390@de.ibm.com |
3452 | L: linux-s390@vger.kernel.org | 2963 | L: linux-s390@vger.kernel.org |
3453 | W: http://www.ibm.com/developerworks/linux/linux390/ | 2964 | W: http://www.ibm.com/developerworks/linux/linux390/ |
@@ -3457,8 +2968,7 @@ F: arch/s390/include/asm/kvm* | |||
3457 | F: arch/s390/kvm/ | 2968 | F: arch/s390/kvm/ |
3458 | 2969 | ||
3459 | KEXEC | 2970 | KEXEC |
3460 | P: Eric Biederman | 2971 | M: Eric Biederman <ebiederm@xmission.com> |
3461 | M: ebiederm@xmission.com | ||
3462 | W: http://ftp.kernel.org/pub/linux/kernel/people/horms/kexec-tools/ | 2972 | W: http://ftp.kernel.org/pub/linux/kernel/people/horms/kexec-tools/ |
3463 | L: kexec@lists.infradead.org | 2973 | L: kexec@lists.infradead.org |
3464 | S: Maintained | 2974 | S: Maintained |
@@ -3466,8 +2976,7 @@ F: include/linux/kexec.h | |||
3466 | F: kernel/kexec.c | 2976 | F: kernel/kexec.c |
3467 | 2977 | ||
3468 | KGDB | 2978 | KGDB |
3469 | P: Jason Wessel | 2979 | M: Jason Wessel <jason.wessel@windriver.com> |
3470 | M: jason.wessel@windriver.com | ||
3471 | L: kgdb-bugreport@lists.sourceforge.net | 2980 | L: kgdb-bugreport@lists.sourceforge.net |
3472 | S: Maintained | 2981 | S: Maintained |
3473 | F: Documentation/DocBook/kgdb.tmpl | 2982 | F: Documentation/DocBook/kgdb.tmpl |
@@ -3477,17 +2986,13 @@ F: include/linux/kgdb.h | |||
3477 | F: kernel/kgdb.c | 2986 | F: kernel/kgdb.c |
3478 | 2987 | ||
3479 | KMEMCHECK | 2988 | KMEMCHECK |
3480 | P: Vegard Nossum | 2989 | M: Vegard Nossum <vegardno@ifi.uio.no> |
3481 | M: vegardno@ifi.uio.no | ||
3482 | P: Pekka Enberg | 2990 | P: Pekka Enberg |
3483 | M: penberg@cs.helsinki.fi | 2991 | M: penberg@cs.helsinki.fi |
3484 | L: linux-kernel@vger.kernel.org | ||
3485 | S: Maintained | 2992 | S: Maintained |
3486 | 2993 | ||
3487 | KMEMLEAK | 2994 | KMEMLEAK |
3488 | P: Catalin Marinas | 2995 | M: Catalin Marinas <catalin.marinas@arm.com> |
3489 | M: catalin.marinas@arm.com | ||
3490 | L: linux-kernel@vger.kernel.org | ||
3491 | S: Maintained | 2996 | S: Maintained |
3492 | F: Documentation/kmemleak.txt | 2997 | F: Documentation/kmemleak.txt |
3493 | F: include/linux/kmemleak.h | 2998 | F: include/linux/kmemleak.h |
@@ -3495,30 +3000,24 @@ F: mm/kmemleak.c | |||
3495 | F: mm/kmemleak-test.c | 3000 | F: mm/kmemleak-test.c |
3496 | 3001 | ||
3497 | KMEMTRACE | 3002 | KMEMTRACE |
3498 | P: Eduard - Gabriel Munteanu | 3003 | M: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro> |
3499 | M: eduard.munteanu@linux360.ro | ||
3500 | S: Maintained | 3004 | S: Maintained |
3501 | F: Documentation/trace/kmemtrace.txt | 3005 | F: Documentation/trace/kmemtrace.txt |
3502 | F: include/linux/kmemtrace.h | 3006 | F: include/linux/kmemtrace.h |
3503 | F: kernel/trace/kmemtrace.c | 3007 | F: kernel/trace/kmemtrace.c |
3504 | 3008 | ||
3505 | KPROBES | 3009 | KPROBES |
3506 | P: Ananth N Mavinakayanahalli | 3010 | M: Ananth N Mavinakayanahalli <ananth@in.ibm.com> |
3507 | M: ananth@in.ibm.com | 3011 | M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> |
3508 | P: Anil S Keshavamurthy | 3012 | M: "David S. Miller" <davem@davemloft.net> |
3509 | M: anil.s.keshavamurthy@intel.com | 3013 | M: Masami Hiramatsu <mhiramat@redhat.com> |
3510 | P: David S. Miller | ||
3511 | M: davem@davemloft.net | ||
3512 | P: Masami Hiramatsu | ||
3513 | M: mhiramat@redhat.com | ||
3514 | S: Maintained | 3014 | S: Maintained |
3515 | F: Documentation/kprobes.txt | 3015 | F: Documentation/kprobes.txt |
3516 | F: include/linux/kprobes.h | 3016 | F: include/linux/kprobes.h |
3517 | F: kernel/kprobes.c | 3017 | F: kernel/kprobes.c |
3518 | 3018 | ||
3519 | KS0108 LCD CONTROLLER DRIVER | 3019 | KS0108 LCD CONTROLLER DRIVER |
3520 | P: Miguel Ojeda Sandonis | 3020 | M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com> |
3521 | M: miguel.ojeda.sandonis@gmail.com | ||
3522 | W: http://miguelojeda.es/auxdisplay.htm | 3021 | W: http://miguelojeda.es/auxdisplay.htm |
3523 | W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm | 3022 | W: http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm |
3524 | S: Maintained | 3023 | S: Maintained |
@@ -3534,31 +3033,27 @@ F: include/*/lapb.h | |||
3534 | F: net/lapb/ | 3033 | F: net/lapb/ |
3535 | 3034 | ||
3536 | LASI 53c700 driver for PARISC | 3035 | LASI 53c700 driver for PARISC |
3537 | P: James E.J. Bottomley | 3036 | M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com> |
3538 | M: James.Bottomley@HansenPartnership.com | ||
3539 | L: linux-scsi@vger.kernel.org | 3037 | L: linux-scsi@vger.kernel.org |
3540 | S: Maintained | 3038 | S: Maintained |
3541 | F: Documentation/scsi/53c700.txt | 3039 | F: Documentation/scsi/53c700.txt |
3542 | F: drivers/scsi/53c700* | 3040 | F: drivers/scsi/53c700* |
3543 | 3041 | ||
3544 | LED SUBSYSTEM | 3042 | LED SUBSYSTEM |
3545 | P: Richard Purdie | 3043 | M: Richard Purdie <rpurdie@rpsys.net> |
3546 | M: rpurdie@rpsys.net | ||
3547 | S: Maintained | 3044 | S: Maintained |
3548 | F: drivers/leds/ | 3045 | F: drivers/leds/ |
3549 | F: include/linux/leds.h | 3046 | F: include/linux/leds.h |
3550 | 3047 | ||
3551 | LEGO USB Tower driver | 3048 | LEGO USB Tower driver |
3552 | P: Juergen Stuber | 3049 | M: Juergen Stuber <starblue@users.sourceforge.net> |
3553 | M: starblue@users.sourceforge.net | ||
3554 | L: legousb-devel@lists.sourceforge.net | 3050 | L: legousb-devel@lists.sourceforge.net |
3555 | W: http://legousb.sourceforge.net/ | 3051 | W: http://legousb.sourceforge.net/ |
3556 | S: Maintained | 3052 | S: Maintained |
3557 | F: drivers/usb/misc/legousbtower.c | 3053 | F: drivers/usb/misc/legousbtower.c |
3558 | 3054 | ||
3559 | LGUEST | 3055 | LGUEST |
3560 | P: Rusty Russell | 3056 | M: Rusty Russell <rusty@rustcorp.com.au> |
3561 | M: rusty@rustcorp.com.au | ||
3562 | L: lguest@ozlabs.org | 3057 | L: lguest@ozlabs.org |
3563 | W: http://lguest.ozlabs.org/ | 3058 | W: http://lguest.ozlabs.org/ |
3564 | S: Maintained | 3059 | S: Maintained |
@@ -3569,119 +3064,100 @@ F: include/linux/lguest*.h | |||
3569 | F: arch/x86/include/asm/lguest*.h | 3064 | F: arch/x86/include/asm/lguest*.h |
3570 | 3065 | ||
3571 | LINUX FOR IBM pSERIES (RS/6000) | 3066 | LINUX FOR IBM pSERIES (RS/6000) |
3572 | P: Paul Mackerras | 3067 | M: Paul Mackerras <paulus@au.ibm.com> |
3573 | M: paulus@au.ibm.com | ||
3574 | W: http://www.ibm.com/linux/ltc/projects/ppc | 3068 | W: http://www.ibm.com/linux/ltc/projects/ppc |
3575 | S: Supported | 3069 | S: Supported |
3576 | 3070 | ||
3577 | LINUX FOR POWERPC (32-BIT AND 64-BIT) | 3071 | LINUX FOR POWERPC (32-BIT AND 64-BIT) |
3578 | P: Benjamin Herrenschmidt | 3072 | M: Benjamin Herrenschmidt <benh@kernel.crashing.org> |
3579 | M: benh@kernel.crashing.org | 3073 | M: Paul Mackerras <paulus@samba.org> |
3580 | P: Paul Mackerras | ||
3581 | M: paulus@samba.org | ||
3582 | W: http://www.penguinppc.org/ | 3074 | W: http://www.penguinppc.org/ |
3583 | L: linuxppc-dev@ozlabs.org | 3075 | L: linuxppc-dev@ozlabs.org |
3584 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc.git | 3076 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc.git |
3585 | S: Supported | 3077 | S: Supported |
3586 | 3078 | ||
3587 | LINUX FOR POWER MACINTOSH | 3079 | LINUX FOR POWER MACINTOSH |
3588 | P: Benjamin Herrenschmidt | 3080 | M: Benjamin Herrenschmidt <benh@kernel.crashing.org> |
3589 | M: benh@kernel.crashing.org | ||
3590 | W: http://www.penguinppc.org/ | 3081 | W: http://www.penguinppc.org/ |
3591 | L: linuxppc-dev@ozlabs.org | 3082 | L: linuxppc-dev@ozlabs.org |
3592 | S: Maintained | 3083 | S: Maintained |
3593 | 3084 | ||
3594 | LINUX FOR POWERPC EMBEDDED MPC5XXX | 3085 | LINUX FOR POWERPC EMBEDDED MPC5XXX |
3595 | P: Grant Likely | 3086 | M: Grant Likely <grant.likely@secretlab.ca> |
3596 | M: grant.likely@secretlab.ca | ||
3597 | L: linuxppc-dev@ozlabs.org | 3087 | L: linuxppc-dev@ozlabs.org |
3598 | T: git git://git.secretlab.ca/git/linux-2.6.git | 3088 | T: git git://git.secretlab.ca/git/linux-2.6.git |
3599 | S: Maintained | 3089 | S: Maintained |
3600 | 3090 | ||
3601 | LINUX FOR POWERPC EMBEDDED PPC4XX | 3091 | LINUX FOR POWERPC EMBEDDED PPC4XX |
3602 | P: Josh Boyer | 3092 | M: Josh Boyer <jwboyer@linux.vnet.ibm.com> |
3603 | M: jwboyer@linux.vnet.ibm.com | 3093 | M: Matt Porter <mporter@kernel.crashing.org> |
3604 | P: Matt Porter | ||
3605 | M: mporter@kernel.crashing.org | ||
3606 | W: http://www.penguinppc.org/ | 3094 | W: http://www.penguinppc.org/ |
3607 | L: linuxppc-dev@ozlabs.org | 3095 | L: linuxppc-dev@ozlabs.org |
3608 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jwboyer/powerpc-4xx.git | 3096 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jwboyer/powerpc-4xx.git |
3609 | S: Maintained | 3097 | S: Maintained |
3610 | 3098 | ||
3611 | LINUX FOR POWERPC EMBEDDED XILINX VIRTEX | 3099 | LINUX FOR POWERPC EMBEDDED XILINX VIRTEX |
3612 | P: Grant Likely | 3100 | M: Grant Likely <grant.likely@secretlab.ca> |
3613 | M: grant.likely@secretlab.ca | ||
3614 | W: http://wiki.secretlab.ca/index.php/Linux_on_Xilinx_Virtex | 3101 | W: http://wiki.secretlab.ca/index.php/Linux_on_Xilinx_Virtex |
3615 | L: linuxppc-dev@ozlabs.org | 3102 | L: linuxppc-dev@ozlabs.org |
3616 | T: git git://git.secretlab.ca/git/linux-2.6.git | 3103 | T: git git://git.secretlab.ca/git/linux-2.6.git |
3617 | S: Maintained | 3104 | S: Maintained |
3618 | 3105 | ||
3619 | LINUX FOR POWERPC EMBEDDED PPC8XX | 3106 | LINUX FOR POWERPC EMBEDDED PPC8XX |
3620 | P: Vitaly Bordug | 3107 | M: Vitaly Bordug <vitb@kernel.crashing.org> |
3621 | M: vitb@kernel.crashing.org | 3108 | M: Marcelo Tosatti <marcelo@kvack.org> |
3622 | P: Marcelo Tosatti | ||
3623 | M: marcelo@kvack.org | ||
3624 | W: http://www.penguinppc.org/ | 3109 | W: http://www.penguinppc.org/ |
3625 | L: linuxppc-dev@ozlabs.org | 3110 | L: linuxppc-dev@ozlabs.org |
3626 | S: Maintained | 3111 | S: Maintained |
3627 | 3112 | ||
3628 | LINUX FOR POWERPC EMBEDDED PPC83XX AND PPC85XX | 3113 | LINUX FOR POWERPC EMBEDDED PPC83XX AND PPC85XX |
3629 | P: Kumar Gala | 3114 | M: Kumar Gala <galak@kernel.crashing.org> |
3630 | M: galak@kernel.crashing.org | ||
3631 | W: http://www.penguinppc.org/ | 3115 | W: http://www.penguinppc.org/ |
3632 | L: linuxppc-dev@ozlabs.org | 3116 | L: linuxppc-dev@ozlabs.org |
3633 | S: Maintained | 3117 | S: Maintained |
3634 | 3118 | ||
3635 | LINUX FOR POWERPC PA SEMI PWRFICIENT | 3119 | LINUX FOR POWERPC PA SEMI PWRFICIENT |
3636 | P: Olof Johansson | 3120 | M: Olof Johansson <olof@lixom.net> |
3637 | M: olof@lixom.net | ||
3638 | W: http://www.pasemi.com/ | 3121 | W: http://www.pasemi.com/ |
3639 | L: linuxppc-dev@ozlabs.org | 3122 | L: linuxppc-dev@ozlabs.org |
3640 | S: Supported | 3123 | S: Supported |
3641 | 3124 | ||
3642 | LINUX SECURITY MODULE (LSM) FRAMEWORK | 3125 | LINUX SECURITY MODULE (LSM) FRAMEWORK |
3643 | P: Chris Wright | 3126 | M: Chris Wright <chrisw@sous-sol.org> |
3644 | M: chrisw@sous-sol.org | ||
3645 | L: linux-security-module@vger.kernel.org | 3127 | L: linux-security-module@vger.kernel.org |
3646 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/chrisw/lsm-2.6.git | 3128 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/chrisw/lsm-2.6.git |
3647 | S: Supported | 3129 | S: Supported |
3648 | 3130 | ||
3649 | LLC (802.2) | 3131 | LLC (802.2) |
3650 | P: Arnaldo Carvalho de Melo | 3132 | M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> |
3651 | M: acme@ghostprotocols.net | ||
3652 | S: Maintained | 3133 | S: Maintained |
3653 | F: include/linux/llc.h | 3134 | F: include/linux/llc.h |
3654 | F: include/net/llc* | 3135 | F: include/net/llc* |
3655 | F: net/llc/ | 3136 | F: net/llc/ |
3656 | 3137 | ||
3657 | LIS3LV02D ACCELEROMETER DRIVER | 3138 | LIS3LV02D ACCELEROMETER DRIVER |
3658 | P: Eric Piel | 3139 | M: Eric Piel <eric.piel@tremplin-utc.net> |
3659 | M: eric.piel@tremplin-utc.net | ||
3660 | S: Maintained | 3140 | S: Maintained |
3661 | F: Documentation/hwmon/lis3lv02d | 3141 | F: Documentation/hwmon/lis3lv02d |
3662 | F: drivers/hwmon/lis3lv02d.* | 3142 | F: drivers/hwmon/lis3lv02d.* |
3663 | 3143 | ||
3664 | LM83 HARDWARE MONITOR DRIVER | 3144 | LM83 HARDWARE MONITOR DRIVER |
3665 | P: Jean Delvare | 3145 | M: Jean Delvare <khali@linux-fr.org> |
3666 | M: khali@linux-fr.org | ||
3667 | L: lm-sensors@lm-sensors.org | 3146 | L: lm-sensors@lm-sensors.org |
3668 | S: Maintained | 3147 | S: Maintained |
3669 | F: Documentation/hwmon/lm83 | 3148 | F: Documentation/hwmon/lm83 |
3670 | F: drivers/hwmon/lm83.c | 3149 | F: drivers/hwmon/lm83.c |
3671 | 3150 | ||
3672 | LM90 HARDWARE MONITOR DRIVER | 3151 | LM90 HARDWARE MONITOR DRIVER |
3673 | P: Jean Delvare | 3152 | M: Jean Delvare <khali@linux-fr.org> |
3674 | M: khali@linux-fr.org | ||
3675 | L: lm-sensors@lm-sensors.org | 3153 | L: lm-sensors@lm-sensors.org |
3676 | S: Maintained | 3154 | S: Maintained |
3677 | F: Documentation/hwmon/lm90 | 3155 | F: Documentation/hwmon/lm90 |
3678 | F: drivers/hwmon/lm90.c | 3156 | F: drivers/hwmon/lm90.c |
3679 | 3157 | ||
3680 | LOCKDEP AND LOCKSTAT | 3158 | LOCKDEP AND LOCKSTAT |
3681 | P: Peter Zijlstra | 3159 | M: Peter Zijlstra <peterz@infradead.org> |
3682 | M: peterz@infradead.org | 3160 | M: Ingo Molnar <mingo@redhat.com> |
3683 | P: Ingo Molnar | ||
3684 | M: mingo@redhat.com | ||
3685 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep.git | 3161 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep.git |
3686 | S: Maintained | 3162 | S: Maintained |
3687 | F: Documentation/lockdep*.txt | 3163 | F: Documentation/lockdep*.txt |
@@ -3690,8 +3166,7 @@ F: include/linux/lockdep.h | |||
3690 | F: kernel/lockdep* | 3166 | F: kernel/lockdep* |
3691 | 3167 | ||
3692 | LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks) | 3168 | LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks) |
3693 | P: Richard Russon (FlatCap) | 3169 | M: "Richard Russon (FlatCap)" <ldm@flatcap.org> |
3694 | M: ldm@flatcap.org | ||
3695 | L: linux-ntfs-dev@lists.sourceforge.net | 3170 | L: linux-ntfs-dev@lists.sourceforge.net |
3696 | W: http://www.linux-ntfs.org/content/view/19/37/ | 3171 | W: http://www.linux-ntfs.org/content/view/19/37/ |
3697 | S: Maintained | 3172 | S: Maintained |
@@ -3699,8 +3174,7 @@ F: Documentation/ldm.txt | |||
3699 | F: fs/partitions/ldm.* | 3174 | F: fs/partitions/ldm.* |
3700 | 3175 | ||
3701 | LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI) | 3176 | LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI) |
3702 | P: Eric Moore | 3177 | M: Eric Moore <Eric.Moore@lsi.com> |
3703 | M: Eric.Moore@lsi.com | ||
3704 | M: support@lsi.com | 3178 | M: support@lsi.com |
3705 | L: DL-MPTFusionLinux@lsi.com | 3179 | L: DL-MPTFusionLinux@lsi.com |
3706 | L: linux-scsi@vger.kernel.org | 3180 | L: linux-scsi@vger.kernel.org |
@@ -3709,25 +3183,21 @@ S: Supported | |||
3709 | F: drivers/message/fusion/ | 3183 | F: drivers/message/fusion/ |
3710 | 3184 | ||
3711 | LSILOGIC/SYMBIOS/NCR 53C8XX and 53C1010 PCI-SCSI drivers | 3185 | LSILOGIC/SYMBIOS/NCR 53C8XX and 53C1010 PCI-SCSI drivers |
3712 | P: Matthew Wilcox | 3186 | M: Matthew Wilcox <matthew@wil.cx> |
3713 | M: matthew@wil.cx | ||
3714 | L: linux-scsi@vger.kernel.org | 3187 | L: linux-scsi@vger.kernel.org |
3715 | S: Maintained | 3188 | S: Maintained |
3716 | F: drivers/scsi/sym53c8xx_2/ | 3189 | F: drivers/scsi/sym53c8xx_2/ |
3717 | 3190 | ||
3718 | LTP (Linux Test Project) | 3191 | LTP (Linux Test Project) |
3719 | P: Subrata Modak | 3192 | M: Subrata Modak <subrata@linux.vnet.ibm.com> |
3720 | M: subrata@linux.vnet.ibm.com | 3193 | M: Mike Frysinger <vapier@gentoo.org> |
3721 | P: Mike Frysinger | ||
3722 | M: vapier@gentoo.org | ||
3723 | L: ltp-list@lists.sourceforge.net (subscribers-only) | 3194 | L: ltp-list@lists.sourceforge.net (subscribers-only) |
3724 | W: http://ltp.sourceforge.net/ | 3195 | W: http://ltp.sourceforge.net/ |
3725 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/galak/ltp.git | 3196 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/galak/ltp.git |
3726 | S: Maintained | 3197 | S: Maintained |
3727 | 3198 | ||
3728 | M32R ARCHITECTURE | 3199 | M32R ARCHITECTURE |
3729 | P: Hirokazu Takata | 3200 | M: Hirokazu Takata <takata@linux-m32r.org> |
3730 | M: takata@linux-m32r.org | ||
3731 | L: linux-m32r@ml.linux-m32r.org | 3201 | L: linux-m32r@ml.linux-m32r.org |
3732 | L: linux-m32r-ja@ml.linux-m32r.org (in Japanese) | 3202 | L: linux-m32r-ja@ml.linux-m32r.org (in Japanese) |
3733 | W: http://www.linux-m32r.org/ | 3203 | W: http://www.linux-m32r.org/ |
@@ -3735,10 +3205,8 @@ S: Maintained | |||
3735 | F: arch/m32r/ | 3205 | F: arch/m32r/ |
3736 | 3206 | ||
3737 | M68K ARCHITECTURE | 3207 | M68K ARCHITECTURE |
3738 | P: Geert Uytterhoeven | 3208 | M: Geert Uytterhoeven <geert@linux-m68k.org> |
3739 | M: geert@linux-m68k.org | 3209 | M: Roman Zippel <zippel@linux-m68k.org> |
3740 | P: Roman Zippel | ||
3741 | M: zippel@linux-m68k.org | ||
3742 | L: linux-m68k@lists.linux-m68k.org | 3210 | L: linux-m68k@lists.linux-m68k.org |
3743 | W: http://www.linux-m68k.org/ | 3211 | W: http://www.linux-m68k.org/ |
3744 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/linux-m68k.git | 3212 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/linux-m68k.git |
@@ -3747,23 +3215,20 @@ F: arch/m68k/ | |||
3747 | F: drivers/zorro/ | 3215 | F: drivers/zorro/ |
3748 | 3216 | ||
3749 | M68K ON APPLE MACINTOSH | 3217 | M68K ON APPLE MACINTOSH |
3750 | P: Joshua Thompson | 3218 | M: Joshua Thompson <funaho@jurai.org> |
3751 | M: funaho@jurai.org | ||
3752 | W: http://www.mac.linux-m68k.org/ | 3219 | W: http://www.mac.linux-m68k.org/ |
3753 | L: linux-m68k@lists.linux-m68k.org | 3220 | L: linux-m68k@lists.linux-m68k.org |
3754 | S: Maintained | 3221 | S: Maintained |
3755 | F: arch/m68k/mac/ | 3222 | F: arch/m68k/mac/ |
3756 | 3223 | ||
3757 | M68K ON HP9000/300 | 3224 | M68K ON HP9000/300 |
3758 | P: Philip Blundell | 3225 | M: Philip Blundell <philb@gnu.org> |
3759 | M: philb@gnu.org | ||
3760 | W: http://www.tazenda.demon.co.uk/phil/linux-hp | 3226 | W: http://www.tazenda.demon.co.uk/phil/linux-hp |
3761 | S: Maintained | 3227 | S: Maintained |
3762 | F: arch/m68k/hp300/ | 3228 | F: arch/m68k/hp300/ |
3763 | 3229 | ||
3764 | MAC80211 | 3230 | MAC80211 |
3765 | P: Johannes Berg | 3231 | M: Johannes Berg <johannes@sipsolutions.net> |
3766 | M: johannes@sipsolutions.net | ||
3767 | L: linux-wireless@vger.kernel.org | 3232 | L: linux-wireless@vger.kernel.org |
3768 | W: http://linuxwireless.org/ | 3233 | W: http://linuxwireless.org/ |
3769 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-2.6.git | 3234 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-2.6.git |
@@ -3773,10 +3238,8 @@ F: include/net/mac80211.h | |||
3773 | F: net/mac80211/ | 3238 | F: net/mac80211/ |
3774 | 3239 | ||
3775 | MAC80211 PID RATE CONTROL | 3240 | MAC80211 PID RATE CONTROL |
3776 | P: Stefano Brivio | 3241 | M: Stefano Brivio <stefano.brivio@polimi.it> |
3777 | M: stefano.brivio@polimi.it | 3242 | M: Mattias Nissler <mattias.nissler@gmx.de> |
3778 | P: Mattias Nissler | ||
3779 | M: mattias.nissler@gmx.de | ||
3780 | L: linux-wireless@vger.kernel.org | 3243 | L: linux-wireless@vger.kernel.org |
3781 | W: http://linuxwireless.org/en/developers/Documentation/mac80211/RateControl/PID | 3244 | W: http://linuxwireless.org/en/developers/Documentation/mac80211/RateControl/PID |
3782 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-2.6.git | 3245 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-2.6.git |
@@ -3784,67 +3247,57 @@ S: Maintained | |||
3784 | F: net/mac80211/rc80211_pid* | 3247 | F: net/mac80211/rc80211_pid* |
3785 | 3248 | ||
3786 | MACVLAN DRIVER | 3249 | MACVLAN DRIVER |
3787 | P: Patrick McHardy | 3250 | M: Patrick McHardy <kaber@trash.net> |
3788 | M: kaber@trash.net | ||
3789 | L: netdev@vger.kernel.org | 3251 | L: netdev@vger.kernel.org |
3790 | S: Maintained | 3252 | S: Maintained |
3791 | F: drivers/net/macvlan.c | 3253 | F: drivers/net/macvlan.c |
3792 | F: include/linux/if_macvlan.h | 3254 | F: include/linux/if_macvlan.h |
3793 | 3255 | ||
3794 | MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7 | 3256 | MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7 |
3795 | P: Michael Kerrisk | 3257 | M: Michael Kerrisk <mtk.manpages@gmail.com> |
3796 | M: mtk.manpages@gmail.com | ||
3797 | W: http://www.kernel.org/doc/man-pages | 3258 | W: http://www.kernel.org/doc/man-pages |
3798 | L: linux-man@vger.kernel.org | 3259 | L: linux-man@vger.kernel.org |
3799 | S: Maintained | 3260 | S: Maintained |
3800 | 3261 | ||
3801 | MARVELL LIBERTAS WIRELESS DRIVER | 3262 | MARVELL LIBERTAS WIRELESS DRIVER |
3802 | P: Dan Williams | 3263 | M: Dan Williams <dcbw@redhat.com> |
3803 | M: dcbw@redhat.com | ||
3804 | L: libertas-dev@lists.infradead.org | 3264 | L: libertas-dev@lists.infradead.org |
3805 | S: Maintained | 3265 | S: Maintained |
3806 | F: drivers/net/wireless/libertas/ | 3266 | F: drivers/net/wireless/libertas/ |
3807 | 3267 | ||
3808 | MARVELL MV643XX ETHERNET DRIVER | 3268 | MARVELL MV643XX ETHERNET DRIVER |
3809 | P: Lennert Buytenhek | 3269 | M: Lennert Buytenhek <buytenh@marvell.com> |
3810 | M: buytenh@marvell.com | ||
3811 | L: netdev@vger.kernel.org | 3270 | L: netdev@vger.kernel.org |
3812 | S: Supported | 3271 | S: Supported |
3813 | F: drivers/net/mv643xx_eth.* | 3272 | F: drivers/net/mv643xx_eth.* |
3814 | F: include/linux/mv643xx.h | 3273 | F: include/linux/mv643xx.h |
3815 | 3274 | ||
3816 | MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER | 3275 | MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER |
3817 | P: Nicolas Pitre | 3276 | M: Nicolas Pitre <nico@cam.org> |
3818 | M: nico@cam.org | ||
3819 | S: Maintained | 3277 | S: Maintained |
3820 | 3278 | ||
3821 | MARVELL YUKON / SYSKONNECT DRIVER | 3279 | MARVELL YUKON / SYSKONNECT DRIVER |
3822 | P: Mirko Lindner | 3280 | M: Mirko Lindner <mlindner@syskonnect.de> |
3823 | M: mlindner@syskonnect.de | 3281 | M: Ralph Roesler <rroesler@syskonnect.de> |
3824 | P: Ralph Roesler | ||
3825 | M: rroesler@syskonnect.de | ||
3826 | W: http://www.syskonnect.com | 3282 | W: http://www.syskonnect.com |
3827 | S: Supported | 3283 | S: Supported |
3828 | 3284 | ||
3829 | MATROX FRAMEBUFFER DRIVER | 3285 | MATROX FRAMEBUFFER DRIVER |
3830 | P: Petr Vandrovec | 3286 | M: Petr Vandrovec <vandrove@vc.cvut.cz> |
3831 | M: vandrove@vc.cvut.cz | ||
3832 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) | 3287 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) |
3833 | S: Maintained | 3288 | S: Maintained |
3834 | F: drivers/video/matrox/matroxfb_* | 3289 | F: drivers/video/matrox/matroxfb_* |
3835 | F: include/linux/matroxfb.h | 3290 | F: include/linux/matroxfb.h |
3836 | 3291 | ||
3837 | MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER | 3292 | MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER |
3838 | P: Hans J. Koch | 3293 | M: "Hans J. Koch" <hjk@linutronix.de> |
3839 | M: hjk@linutronix.de | ||
3840 | L: lm-sensors@lm-sensors.org | 3294 | L: lm-sensors@lm-sensors.org |
3841 | S: Maintained | 3295 | S: Maintained |
3842 | F: Documentation/hwmon/max6650 | 3296 | F: Documentation/hwmon/max6650 |
3843 | F: drivers/hwmon/max6650.c | 3297 | F: drivers/hwmon/max6650.c |
3844 | 3298 | ||
3845 | MEDIA INPUT INFRASTRUCTURE (V4L/DVB) | 3299 | MEDIA INPUT INFRASTRUCTURE (V4L/DVB) |
3846 | P: Mauro Carvalho Chehab | 3300 | M: Mauro Carvalho Chehab <mchehab@infradead.org> |
3847 | M: mchehab@infradead.org | ||
3848 | P: LinuxTV.org Project | 3301 | P: LinuxTV.org Project |
3849 | L: linux-media@vger.kernel.org | 3302 | L: linux-media@vger.kernel.org |
3850 | W: http://linuxtv.org | 3303 | W: http://linuxtv.org |
@@ -3858,8 +3311,7 @@ F: include/linux/dvb/ | |||
3858 | F: include/linux/videodev*.h | 3311 | F: include/linux/videodev*.h |
3859 | 3312 | ||
3860 | MEGARAID SCSI DRIVERS | 3313 | MEGARAID SCSI DRIVERS |
3861 | P: Neela Syam Kolli | 3314 | M: Neela Syam Kolli <megaraidlinux@lsi.com> |
3862 | M: megaraidlinux@lsi.com | ||
3863 | L: linux-scsi@vger.kernel.org | 3315 | L: linux-scsi@vger.kernel.org |
3864 | W: http://megaraid.lsilogic.com | 3316 | W: http://megaraid.lsilogic.com |
3865 | S: Maintained | 3317 | S: Maintained |
@@ -3875,19 +3327,15 @@ F: include/linux/mm.h | |||
3875 | F: mm/ | 3327 | F: mm/ |
3876 | 3328 | ||
3877 | MEMORY RESOURCE CONTROLLER | 3329 | MEMORY RESOURCE CONTROLLER |
3878 | P: Balbir Singh | 3330 | M: Balbir Singh <balbir@linux.vnet.ibm.com> |
3879 | M: balbir@linux.vnet.ibm.com | 3331 | M: Pavel Emelyanov <xemul@openvz.org> |
3880 | P: Pavel Emelyanov | 3332 | M: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> |
3881 | M: xemul@openvz.org | ||
3882 | P: KAMEZAWA Hiroyuki | ||
3883 | M: kamezawa.hiroyu@jp.fujitsu.com | ||
3884 | L: linux-mm@kvack.org | 3333 | L: linux-mm@kvack.org |
3885 | S: Maintained | 3334 | S: Maintained |
3886 | F: mm/memcontrol.c | 3335 | F: mm/memcontrol.c |
3887 | 3336 | ||
3888 | MEMORY TECHNOLOGY DEVICES (MTD) | 3337 | MEMORY TECHNOLOGY DEVICES (MTD) |
3889 | P: David Woodhouse | 3338 | M: David Woodhouse <dwmw2@infradead.org> |
3890 | M: dwmw2@infradead.org | ||
3891 | W: http://www.linux-mtd.infradead.org/ | 3339 | W: http://www.linux-mtd.infradead.org/ |
3892 | L: linux-mtd@lists.infradead.org | 3340 | L: linux-mtd@lists.infradead.org |
3893 | T: git git://git.infradead.org/mtd-2.6.git | 3341 | T: git git://git.infradead.org/mtd-2.6.git |
@@ -3897,8 +3345,7 @@ F: include/linux/mtd/ | |||
3897 | F: include/mtd/ | 3345 | F: include/mtd/ |
3898 | 3346 | ||
3899 | MICROBLAZE ARCHITECTURE | 3347 | MICROBLAZE ARCHITECTURE |
3900 | P: Michal Simek | 3348 | M: Michal Simek <monstr@monstr.eu> |
3901 | M: monstr@monstr.eu | ||
3902 | L: microblaze-uclinux@itee.uq.edu.au | 3349 | L: microblaze-uclinux@itee.uq.edu.au |
3903 | W: http://www.monstr.eu/fdt/ | 3350 | W: http://www.monstr.eu/fdt/ |
3904 | T: git git://git.monstr.eu/linux-2.6-microblaze.git | 3351 | T: git git://git.monstr.eu/linux-2.6-microblaze.git |
@@ -3906,14 +3353,12 @@ S: Supported | |||
3906 | F: arch/microblaze/ | 3353 | F: arch/microblaze/ |
3907 | 3354 | ||
3908 | MICROTEK X6 SCANNER | 3355 | MICROTEK X6 SCANNER |
3909 | P: Oliver Neukum | 3356 | M: Oliver Neukum <oliver@neukum.name> |
3910 | M: oliver@neukum.name | ||
3911 | S: Maintained | 3357 | S: Maintained |
3912 | F: drivers/usb/image/microtek.* | 3358 | F: drivers/usb/image/microtek.* |
3913 | 3359 | ||
3914 | MIPS | 3360 | MIPS |
3915 | P: Ralf Baechle | 3361 | M: Ralf Baechle <ralf@linux-mips.org> |
3916 | M: ralf@linux-mips.org | ||
3917 | W: http://www.linux-mips.org/ | 3362 | W: http://www.linux-mips.org/ |
3918 | L: linux-mips@linux-mips.org | 3363 | L: linux-mips@linux-mips.org |
3919 | T: git git://git.linux-mips.org/pub/scm/linux.git | 3364 | T: git git://git.linux-mips.org/pub/scm/linux.git |
@@ -3922,8 +3367,7 @@ F: Documentation/mips/ | |||
3922 | F: arch/mips/ | 3367 | F: arch/mips/ |
3923 | 3368 | ||
3924 | MISCELLANEOUS MCA-SUPPORT | 3369 | MISCELLANEOUS MCA-SUPPORT |
3925 | P: James Bottomley | 3370 | M: James Bottomley <James.Bottomley@HansenPartnership.com> |
3926 | M: James.Bottomley@HansenPartnership.com | ||
3927 | S: Maintained | 3371 | S: Maintained |
3928 | F: Documentation/ia64/mca.txt | 3372 | F: Documentation/ia64/mca.txt |
3929 | F: Documentation/mca.txt | 3373 | F: Documentation/mca.txt |
@@ -3931,15 +3375,13 @@ F: drivers/mca/ | |||
3931 | F: include/linux/mca* | 3375 | F: include/linux/mca* |
3932 | 3376 | ||
3933 | MODULE SUPPORT | 3377 | MODULE SUPPORT |
3934 | P: Rusty Russell | 3378 | M: Rusty Russell <rusty@rustcorp.com.au> |
3935 | M: rusty@rustcorp.com.au | ||
3936 | S: Maintained | 3379 | S: Maintained |
3937 | F: include/linux/module.h | 3380 | F: include/linux/module.h |
3938 | F: kernel/module.c | 3381 | F: kernel/module.c |
3939 | 3382 | ||
3940 | MOTION EYE VAIO PICTUREBOOK CAMERA DRIVER | 3383 | MOTION EYE VAIO PICTUREBOOK CAMERA DRIVER |
3941 | P: Stelian Pop | 3384 | M: Stelian Pop <stelian@popies.net> |
3942 | M: stelian@popies.net | ||
3943 | W: http://popies.net/meye/ | 3385 | W: http://popies.net/meye/ |
3944 | S: Maintained | 3386 | S: Maintained |
3945 | F: Documentation/video4linux/meye.txt | 3387 | F: Documentation/video4linux/meye.txt |
@@ -3947,135 +3389,112 @@ F: drivers/media/video/meye.* | |||
3947 | F: include/linux/meye.h | 3389 | F: include/linux/meye.h |
3948 | 3390 | ||
3949 | MOTOROLA IMX MMC/SD HOST CONTROLLER INTERFACE DRIVER | 3391 | MOTOROLA IMX MMC/SD HOST CONTROLLER INTERFACE DRIVER |
3950 | P: Pavel Pisa | 3392 | M: Pavel Pisa <ppisa@pikron.com> |
3951 | M: ppisa@pikron.com | ||
3952 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 3393 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
3953 | S: Maintained | 3394 | S: Maintained |
3954 | F: drivers/mmc/host/imxmmc.* | 3395 | F: drivers/mmc/host/imxmmc.* |
3955 | 3396 | ||
3956 | MOUSE AND MISC DEVICES [GENERAL] | 3397 | MOUSE AND MISC DEVICES [GENERAL] |
3957 | P: Alessandro Rubini | 3398 | M: Alessandro Rubini <rubini@ipvvis.unipv.it> |
3958 | M: rubini@ipvvis.unipv.it | ||
3959 | S: Maintained | 3399 | S: Maintained |
3960 | F: drivers/input/mouse/ | 3400 | F: drivers/input/mouse/ |
3961 | F: include/linux/gpio_mouse.h | 3401 | F: include/linux/gpio_mouse.h |
3962 | 3402 | ||
3963 | MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD | 3403 | MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD |
3964 | P: Jiri Slaby | 3404 | M: Jiri Slaby <jirislaby@gmail.com> |
3965 | M: jirislaby@gmail.com | ||
3966 | S: Maintained | 3405 | S: Maintained |
3967 | F: Documentation/serial/moxa-smartio | 3406 | F: Documentation/serial/moxa-smartio |
3968 | F: drivers/char/mxser.* | 3407 | F: drivers/char/mxser.* |
3969 | 3408 | ||
3970 | MSI LAPTOP SUPPORT | 3409 | MSI LAPTOP SUPPORT |
3971 | P: Lennart Poettering | 3410 | M: Lennart Poettering <mzxreary@0pointer.de> |
3972 | M: mzxreary@0pointer.de | ||
3973 | W: https://tango.0pointer.de/mailman/listinfo/s270-linux | 3411 | W: https://tango.0pointer.de/mailman/listinfo/s270-linux |
3974 | W: http://0pointer.de/lennart/tchibo.html | 3412 | W: http://0pointer.de/lennart/tchibo.html |
3975 | S: Maintained | 3413 | S: Maintained |
3976 | F: drivers/platform/x86/msi-laptop.c | 3414 | F: drivers/platform/x86/msi-laptop.c |
3977 | 3415 | ||
3978 | MULTIFUNCTION DEVICES (MFD) | 3416 | MULTIFUNCTION DEVICES (MFD) |
3979 | P: Samuel Ortiz | 3417 | M: Samuel Ortiz <sameo@linux.intel.com> |
3980 | M: sameo@linux.intel.com | ||
3981 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/sameo/mfd-2.6.git | 3418 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/sameo/mfd-2.6.git |
3982 | S: Supported | 3419 | S: Supported |
3983 | F: drivers/mfd/ | 3420 | F: drivers/mfd/ |
3984 | 3421 | ||
3985 | MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM | 3422 | MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM |
3986 | P: Pierre Ossman | 3423 | M: Pierre Ossman <pierre@ossman.eu> |
3987 | M: pierre@ossman.eu | ||
3988 | S: Maintained | 3424 | S: Maintained |
3989 | F: drivers/mmc/ | 3425 | F: drivers/mmc/ |
3990 | F: include/linux/mmc/ | 3426 | F: include/linux/mmc/ |
3991 | 3427 | ||
3992 | MULTIMEDIA CARD (MMC) ETC. OVER SPI | 3428 | MULTIMEDIA CARD (MMC) ETC. OVER SPI |
3993 | P: David Brownell | 3429 | M: David Brownell <dbrownell@users.sourceforge.net> |
3994 | M: dbrownell@users.sourceforge.net | ||
3995 | S: Odd Fixes | 3430 | S: Odd Fixes |
3996 | F: drivers/mmc/host/mmc_spi.c | 3431 | F: drivers/mmc/host/mmc_spi.c |
3997 | F: include/linux/spi/mmc_spi.h | 3432 | F: include/linux/spi/mmc_spi.h |
3998 | 3433 | ||
3999 | MULTISOUND SOUND DRIVER | 3434 | MULTISOUND SOUND DRIVER |
4000 | P: Andrew Veliath | 3435 | M: Andrew Veliath <andrewtv@usa.net> |
4001 | M: andrewtv@usa.net | ||
4002 | S: Maintained | 3436 | S: Maintained |
4003 | F: Documentation/sound/oss/MultiSound | 3437 | F: Documentation/sound/oss/MultiSound |
4004 | F: sound/oss/msnd* | 3438 | F: sound/oss/msnd* |
4005 | 3439 | ||
4006 | MULTITECH MULTIPORT CARD (ISICOM) | 3440 | MULTITECH MULTIPORT CARD (ISICOM) |
4007 | P: Jiri Slaby | 3441 | M: Jiri Slaby <jirislaby@gmail.com> |
4008 | M: jirislaby@gmail.com | ||
4009 | S: Maintained | 3442 | S: Maintained |
4010 | F: drivers/char/isicom.c | 3443 | F: drivers/char/isicom.c |
4011 | F: include/linux/isicom.h | 3444 | F: include/linux/isicom.h |
4012 | 3445 | ||
4013 | MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER | 3446 | MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER |
4014 | P: Felipe Balbi | 3447 | M: Felipe Balbi <felipe.balbi@nokia.com> |
4015 | M: felipe.balbi@nokia.com | ||
4016 | L: linux-usb@vger.kernel.org | 3448 | L: linux-usb@vger.kernel.org |
4017 | T: git git://gitorious.org/musb/mainline.git | 3449 | T: git git://gitorious.org/musb/mainline.git |
4018 | S: Maintained | 3450 | S: Maintained |
4019 | F: drivers/usb/musb/ | 3451 | F: drivers/usb/musb/ |
4020 | 3452 | ||
4021 | MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE) | 3453 | MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE) |
4022 | P: Andrew Gallatin | 3454 | M: Andrew Gallatin <gallatin@myri.com> |
4023 | M: gallatin@myri.com | 3455 | M: Brice Goglin <brice@myri.com> |
4024 | P: Brice Goglin | ||
4025 | M: brice@myri.com | ||
4026 | L: netdev@vger.kernel.org | 3456 | L: netdev@vger.kernel.org |
4027 | W: http://www.myri.com/scs/download-Myri10GE.html | 3457 | W: http://www.myri.com/scs/download-Myri10GE.html |
4028 | S: Supported | 3458 | S: Supported |
4029 | F: drivers/net/myri10ge/ | 3459 | F: drivers/net/myri10ge/ |
4030 | 3460 | ||
4031 | NATSEMI ETHERNET DRIVER (DP8381x) | 3461 | NATSEMI ETHERNET DRIVER (DP8381x) |
4032 | P: Tim Hockin | 3462 | M: Tim Hockin <thockin@hockin.org> |
4033 | M: thockin@hockin.org | ||
4034 | S: Maintained | 3463 | S: Maintained |
4035 | F: drivers/net/natsemi.c | 3464 | F: drivers/net/natsemi.c |
4036 | 3465 | ||
4037 | NCP FILESYSTEM | 3466 | NCP FILESYSTEM |
4038 | P: Petr Vandrovec | 3467 | M: Petr Vandrovec <vandrove@vc.cvut.cz> |
4039 | M: vandrove@vc.cvut.cz | ||
4040 | L: linware@sh.cvut.cz | 3468 | L: linware@sh.cvut.cz |
4041 | S: Maintained | 3469 | S: Maintained |
4042 | F: fs/ncpfs/ | 3470 | F: fs/ncpfs/ |
4043 | 3471 | ||
4044 | NCR DUAL 700 SCSI DRIVER (MICROCHANNEL) | 3472 | NCR DUAL 700 SCSI DRIVER (MICROCHANNEL) |
4045 | P: James E.J. Bottomley | 3473 | M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com> |
4046 | M: James.Bottomley@HansenPartnership.com | ||
4047 | L: linux-scsi@vger.kernel.org | 3474 | L: linux-scsi@vger.kernel.org |
4048 | S: Maintained | 3475 | S: Maintained |
4049 | F: drivers/scsi/NCR_D700.* | 3476 | F: drivers/scsi/NCR_D700.* |
4050 | 3477 | ||
4051 | NETEFFECT IWARP RNIC DRIVER (IW_NES) | 3478 | NETEFFECT IWARP RNIC DRIVER (IW_NES) |
4052 | P: Faisal Latif | 3479 | M: Faisal Latif <faisal.latif@intel.com> |
4053 | M: faisal.latif@intel.com | 3480 | M: Chien Tung <chien.tin.tung@intel.com> |
4054 | P: Chien Tung | ||
4055 | M: chien.tin.tung@intel.com | ||
4056 | L: general@lists.openfabrics.org | 3481 | L: general@lists.openfabrics.org |
4057 | W: http://www.neteffect.com | 3482 | W: http://www.neteffect.com |
4058 | S: Supported | 3483 | S: Supported |
4059 | F: drivers/infiniband/hw/nes/ | 3484 | F: drivers/infiniband/hw/nes/ |
4060 | 3485 | ||
4061 | NETEM NETWORK EMULATOR | 3486 | NETEM NETWORK EMULATOR |
4062 | P: Stephen Hemminger | 3487 | M: Stephen Hemminger <shemminger@linux-foundation.org> |
4063 | M: shemminger@linux-foundation.org | ||
4064 | L: netem@lists.linux-foundation.org | 3488 | L: netem@lists.linux-foundation.org |
4065 | S: Maintained | 3489 | S: Maintained |
4066 | F: net/sched/sch_netem.c | 3490 | F: net/sched/sch_netem.c |
4067 | 3491 | ||
4068 | NETERION (S2IO) 10GbE DRIVER (xframe/vxge) | 3492 | NETERION (S2IO) 10GbE DRIVER (xframe/vxge) |
4069 | P: Ramkrishna Vepa | 3493 | M: Ramkrishna Vepa <ram.vepa@neterion.com> |
4070 | M: ram.vepa@neterion.com | 3494 | M: Rastapur Santosh <santosh.rastapur@neterion.com> |
4071 | P: Rastapur Santosh | 3495 | M: Sivakumar Subramani <sivakumar.subramani@neterion.com> |
4072 | M: santosh.rastapur@neterion.com | 3496 | M: Sreenivasa Honnur <sreenivasa.honnur@neterion.com> |
4073 | P: Sivakumar Subramani | 3497 | M: Anil Murthy <anil.murthy@neterion.com> |
4074 | M: sivakumar.subramani@neterion.com | ||
4075 | P: Sreenivasa Honnur | ||
4076 | M: sreenivasa.honnur@neterion.com | ||
4077 | P: Anil Murthy | ||
4078 | M: anil.murthy@neterion.com | ||
4079 | L: netdev@vger.kernel.org | 3498 | L: netdev@vger.kernel.org |
4080 | W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous | 3499 | W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous |
4081 | W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous | 3500 | W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous |
@@ -4089,8 +3508,7 @@ P: Marc Boucher | |||
4089 | P: James Morris | 3508 | P: James Morris |
4090 | P: Harald Welte | 3509 | P: Harald Welte |
4091 | P: Jozsef Kadlecsik | 3510 | P: Jozsef Kadlecsik |
4092 | P: Patrick McHardy | 3511 | M: Patrick McHardy <kaber@trash.net> |
4093 | M: kaber@trash.net | ||
4094 | L: netfilter-devel@vger.kernel.org | 3512 | L: netfilter-devel@vger.kernel.org |
4095 | L: netfilter@vger.kernel.org | 3513 | L: netfilter@vger.kernel.org |
4096 | L: coreteam@netfilter.org | 3514 | L: coreteam@netfilter.org |
@@ -4106,8 +3524,7 @@ F: net/*/netfilter/ | |||
4106 | F: net/netfilter/ | 3524 | F: net/netfilter/ |
4107 | 3525 | ||
4108 | NETLABEL | 3526 | NETLABEL |
4109 | P: Paul Moore | 3527 | M: Paul Moore <paul.moore@hp.com> |
4110 | M: paul.moore@hp.com | ||
4111 | W: http://netlabel.sf.net | 3528 | W: http://netlabel.sf.net |
4112 | L: netdev@vger.kernel.org | 3529 | L: netdev@vger.kernel.org |
4113 | S: Supported | 3530 | S: Supported |
@@ -4116,8 +3533,7 @@ F: include/net/netlabel.h | |||
4116 | F: net/netlabel/ | 3533 | F: net/netlabel/ |
4117 | 3534 | ||
4118 | NETROM NETWORK LAYER | 3535 | NETROM NETWORK LAYER |
4119 | P: Ralf Baechle | 3536 | M: Ralf Baechle <ralf@linux-mips.org> |
4120 | M: ralf@linux-mips.org | ||
4121 | L: linux-hams@vger.kernel.org | 3537 | L: linux-hams@vger.kernel.org |
4122 | W: http://www.linux-ax25.org/ | 3538 | W: http://www.linux-ax25.org/ |
4123 | S: Maintained | 3539 | S: Maintained |
@@ -4126,16 +3542,14 @@ F: include/net/netrom.h | |||
4126 | F: net/netrom/ | 3542 | F: net/netrom/ |
4127 | 3543 | ||
4128 | NETWORK BLOCK DEVICE (NBD) | 3544 | NETWORK BLOCK DEVICE (NBD) |
4129 | P: Paul Clements | 3545 | M: Paul Clements <Paul.Clements@steeleye.com> |
4130 | M: Paul.Clements@steeleye.com | ||
4131 | S: Maintained | 3546 | S: Maintained |
4132 | F: Documentation/blockdev/nbd.txt | 3547 | F: Documentation/blockdev/nbd.txt |
4133 | F: drivers/block/nbd.c | 3548 | F: drivers/block/nbd.c |
4134 | F: include/linux/nbd.h | 3549 | F: include/linux/nbd.h |
4135 | 3550 | ||
4136 | NETWORKING [GENERAL] | 3551 | NETWORKING [GENERAL] |
4137 | P: David S. Miller | 3552 | M: "David S. Miller" <davem@davemloft.net> |
4138 | M: davem@davemloft.net | ||
4139 | L: netdev@vger.kernel.org | 3553 | L: netdev@vger.kernel.org |
4140 | W: http://www.linuxfoundation.org/en/Net | 3554 | W: http://www.linuxfoundation.org/en/Net |
4141 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git | 3555 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git |
@@ -4144,18 +3558,12 @@ F: net/ | |||
4144 | F: include/net/ | 3558 | F: include/net/ |
4145 | 3559 | ||
4146 | NETWORKING [IPv4/IPv6] | 3560 | NETWORKING [IPv4/IPv6] |
4147 | P: David S. Miller | 3561 | M: "David S. Miller" <davem@davemloft.net> |
4148 | M: davem@davemloft.net | 3562 | M: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> |
4149 | P: Alexey Kuznetsov | 3563 | M: "Pekka Savola (ipv6)" <pekkas@netcore.fi> |
4150 | M: kuznet@ms2.inr.ac.ru | 3564 | M: James Morris <jmorris@namei.org> |
4151 | P: Pekka Savola (ipv6) | 3565 | M: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org> |
4152 | M: pekkas@netcore.fi | 3566 | M: Patrick McHardy <kaber@trash.net> |
4153 | P: James Morris | ||
4154 | M: jmorris@namei.org | ||
4155 | P: Hideaki YOSHIFUJI | ||
4156 | M: yoshfuji@linux-ipv6.org | ||
4157 | P: Patrick McHardy | ||
4158 | M: kaber@trash.net | ||
4159 | L: netdev@vger.kernel.org | 3567 | L: netdev@vger.kernel.org |
4160 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git | 3568 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git |
4161 | S: Maintained | 3569 | S: Maintained |
@@ -4164,14 +3572,12 @@ F: net/ipv6/ | |||
4164 | F: include/net/ip* | 3572 | F: include/net/ip* |
4165 | 3573 | ||
4166 | NETWORKING [LABELED] (NetLabel, CIPSO, Labeled IPsec, SECMARK) | 3574 | NETWORKING [LABELED] (NetLabel, CIPSO, Labeled IPsec, SECMARK) |
4167 | P: Paul Moore | 3575 | M: Paul Moore <paul.moore@hp.com> |
4168 | M: paul.moore@hp.com | ||
4169 | L: netdev@vger.kernel.org | 3576 | L: netdev@vger.kernel.org |
4170 | S: Maintained | 3577 | S: Maintained |
4171 | 3578 | ||
4172 | NETWORKING [WIRELESS] | 3579 | NETWORKING [WIRELESS] |
4173 | P: John W. Linville | 3580 | M: "John W. Linville" <linville@tuxdriver.com> |
4174 | M: linville@tuxdriver.com | ||
4175 | L: linux-wireless@vger.kernel.org | 3581 | L: linux-wireless@vger.kernel.org |
4176 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-2.6.git | 3582 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-2.6.git |
4177 | S: Maintained | 3583 | S: Maintained |
@@ -4187,16 +3593,14 @@ S: Odd Fixes | |||
4187 | F: drivers/net/ | 3593 | F: drivers/net/ |
4188 | 3594 | ||
4189 | NETXEN (1/10) GbE SUPPORT | 3595 | NETXEN (1/10) GbE SUPPORT |
4190 | P: Dhananjay Phadke | 3596 | M: Dhananjay Phadke <dhananjay@netxen.com> |
4191 | M: dhananjay@netxen.com | ||
4192 | L: netdev@vger.kernel.org | 3597 | L: netdev@vger.kernel.org |
4193 | W: http://www.netxen.com | 3598 | W: http://www.netxen.com |
4194 | S: Supported | 3599 | S: Supported |
4195 | F: drivers/net/netxen/ | 3600 | F: drivers/net/netxen/ |
4196 | 3601 | ||
4197 | NFS, SUNRPC, AND LOCKD CLIENTS | 3602 | NFS, SUNRPC, AND LOCKD CLIENTS |
4198 | P: Trond Myklebust | 3603 | M: Trond Myklebust <Trond.Myklebust@netapp.com> |
4199 | M: Trond.Myklebust@netapp.com | ||
4200 | L: linux-nfs@vger.kernel.org | 3604 | L: linux-nfs@vger.kernel.org |
4201 | W: http://client.linux-nfs.org | 3605 | W: http://client.linux-nfs.org |
4202 | T: git git://git.linux-nfs.org/pub/linux/nfs-2.6.git | 3606 | T: git git://git.linux-nfs.org/pub/linux/nfs-2.6.git |
@@ -4210,17 +3614,14 @@ F: include/linux/nfs* | |||
4210 | F: include/linux/sunrpc/ | 3614 | F: include/linux/sunrpc/ |
4211 | 3615 | ||
4212 | NI5010 NETWORK DRIVER | 3616 | NI5010 NETWORK DRIVER |
4213 | P: Jan-Pascal van Best | 3617 | M: Jan-Pascal van Best <janpascal@vanbest.org> |
4214 | M: janpascal@vanbest.org | 3618 | M: Andreas Mohr <andi@lisas.de> |
4215 | P: Andreas Mohr | ||
4216 | M: andi@lisas.de | ||
4217 | L: netdev@vger.kernel.org | 3619 | L: netdev@vger.kernel.org |
4218 | S: Maintained | 3620 | S: Maintained |
4219 | F: drivers/net/ni5010.* | 3621 | F: drivers/net/ni5010.* |
4220 | 3622 | ||
4221 | NILFS2 FILESYSTEM | 3623 | NILFS2 FILESYSTEM |
4222 | P: KONISHI Ryusuke | 3624 | M: KONISHI Ryusuke <konishi.ryusuke@lab.ntt.co.jp> |
4223 | M: konishi.ryusuke@lab.ntt.co.jp | ||
4224 | L: users@nilfs.org | 3625 | L: users@nilfs.org |
4225 | W: http://www.nilfs.org/en/ | 3626 | W: http://www.nilfs.org/en/ |
4226 | S: Supported | 3627 | S: Supported |
@@ -4229,26 +3630,22 @@ F: fs/nilfs2/ | |||
4229 | F: include/linux/nilfs2_fs.h | 3630 | F: include/linux/nilfs2_fs.h |
4230 | 3631 | ||
4231 | NINJA SCSI-3 / NINJA SCSI-32Bi (16bit/CardBus) PCMCIA SCSI HOST ADAPTER DRIVER | 3632 | NINJA SCSI-3 / NINJA SCSI-32Bi (16bit/CardBus) PCMCIA SCSI HOST ADAPTER DRIVER |
4232 | P: YOKOTA Hiroshi | 3633 | M: YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp> |
4233 | M: yokota@netlab.is.tsukuba.ac.jp | ||
4234 | W: http://www.netlab.is.tsukuba.ac.jp/~yokota/izumi/ninja/ | 3634 | W: http://www.netlab.is.tsukuba.ac.jp/~yokota/izumi/ninja/ |
4235 | S: Maintained | 3635 | S: Maintained |
4236 | F: Documentation/scsi/NinjaSCSI.txt | 3636 | F: Documentation/scsi/NinjaSCSI.txt |
4237 | F: drivers/scsi/pcmcia/nsp_* | 3637 | F: drivers/scsi/pcmcia/nsp_* |
4238 | 3638 | ||
4239 | NINJA SCSI-32Bi/UDE PCI/CARDBUS SCSI HOST ADAPTER DRIVER | 3639 | NINJA SCSI-32Bi/UDE PCI/CARDBUS SCSI HOST ADAPTER DRIVER |
4240 | P: GOTO Masanori | 3640 | M: GOTO Masanori <gotom@debian.or.jp> |
4241 | M: gotom@debian.or.jp | 3641 | M: YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp> |
4242 | P: YOKOTA Hiroshi | ||
4243 | M: yokota@netlab.is.tsukuba.ac.jp | ||
4244 | W: http://www.netlab.is.tsukuba.ac.jp/~yokota/izumi/ninja/ | 3642 | W: http://www.netlab.is.tsukuba.ac.jp/~yokota/izumi/ninja/ |
4245 | S: Maintained | 3643 | S: Maintained |
4246 | F: Documentation/scsi/NinjaSCSI.txt | 3644 | F: Documentation/scsi/NinjaSCSI.txt |
4247 | F: drivers/scsi/nsp32* | 3645 | F: drivers/scsi/nsp32* |
4248 | 3646 | ||
4249 | NTFS FILESYSTEM | 3647 | NTFS FILESYSTEM |
4250 | P: Anton Altaparmakov | 3648 | M: Anton Altaparmakov <aia21@cantab.net> |
4251 | M: aia21@cantab.net | ||
4252 | L: linux-ntfs-dev@lists.sourceforge.net | 3649 | L: linux-ntfs-dev@lists.sourceforge.net |
4253 | W: http://www.linux-ntfs.org/ | 3650 | W: http://www.linux-ntfs.org/ |
4254 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/aia21/ntfs-2.6.git | 3651 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/aia21/ntfs-2.6.git |
@@ -4257,16 +3654,14 @@ F: Documentation/filesystems/ntfs.txt | |||
4257 | F: fs/ntfs/ | 3654 | F: fs/ntfs/ |
4258 | 3655 | ||
4259 | NVIDIA (rivafb and nvidiafb) FRAMEBUFFER DRIVER | 3656 | NVIDIA (rivafb and nvidiafb) FRAMEBUFFER DRIVER |
4260 | P: Antonino Daplas | 3657 | M: Antonino Daplas <adaplas@gmail.com> |
4261 | M: adaplas@gmail.com | ||
4262 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) | 3658 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) |
4263 | S: Maintained | 3659 | S: Maintained |
4264 | F: drivers/video/riva/ | 3660 | F: drivers/video/riva/ |
4265 | F: drivers/video/nvidia/ | 3661 | F: drivers/video/nvidia/ |
4266 | 3662 | ||
4267 | OMAP SUPPORT | 3663 | OMAP SUPPORT |
4268 | P: Tony Lindgren <tony@atomide.com> | 3664 | M: Tony Lindgren <tony@atomide.com> |
4269 | M: tony@atomide.com | ||
4270 | L: linux-omap@vger.kernel.org | 3665 | L: linux-omap@vger.kernel.org |
4271 | W: http://www.muru.com/linux/omap/ | 3666 | W: http://www.muru.com/linux/omap/ |
4272 | W: http://linux.omap.com/ | 3667 | W: http://linux.omap.com/ |
@@ -4275,98 +3670,83 @@ S: Maintained | |||
4275 | F: arch/arm/*omap* | 3670 | F: arch/arm/*omap* |
4276 | 3671 | ||
4277 | OMAP CLOCK FRAMEWORK SUPPORT | 3672 | OMAP CLOCK FRAMEWORK SUPPORT |
4278 | P: Paul Walmsley | 3673 | M: Paul Walmsley <paul@pwsan.com> |
4279 | M: paul@pwsan.com | ||
4280 | L: linux-omap@vger.kernel.org | 3674 | L: linux-omap@vger.kernel.org |
4281 | S: Maintained | 3675 | S: Maintained |
4282 | F: arch/arm/*omap*/*clock* | 3676 | F: arch/arm/*omap*/*clock* |
4283 | 3677 | ||
4284 | OMAP POWER MANAGEMENT SUPPORT | 3678 | OMAP POWER MANAGEMENT SUPPORT |
4285 | P: Kevin Hilman | 3679 | M: Kevin Hilman <khilman@deeprootsystems.com> |
4286 | M: khilman@deeprootsystems.com | ||
4287 | L: linux-omap@vger.kernel.org | 3680 | L: linux-omap@vger.kernel.org |
4288 | S: Maintained | 3681 | S: Maintained |
4289 | F: arch/arm/*omap*/*pm* | 3682 | F: arch/arm/*omap*/*pm* |
4290 | 3683 | ||
4291 | OMAP AUDIO SUPPORT | 3684 | OMAP AUDIO SUPPORT |
4292 | P: Jarkko Nikula | 3685 | M: Jarkko Nikula <jhnikula@gmail.com> |
4293 | M: jhnikula@gmail.com | ||
4294 | L: alsa-devel@alsa-project.org (subscribers-only) | 3686 | L: alsa-devel@alsa-project.org (subscribers-only) |
4295 | L: linux-omap@vger.kernel.org | 3687 | L: linux-omap@vger.kernel.org |
4296 | S: Maintained | 3688 | S: Maintained |
4297 | F: sound/soc/omap/ | 3689 | F: sound/soc/omap/ |
4298 | 3690 | ||
4299 | OMAP FRAMEBUFFER SUPPORT | 3691 | OMAP FRAMEBUFFER SUPPORT |
4300 | P: Imre Deak | 3692 | M: Imre Deak <imre.deak@nokia.com> |
4301 | M: imre.deak@nokia.com | ||
4302 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) | 3693 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) |
4303 | L: linux-omap@vger.kernel.org | 3694 | L: linux-omap@vger.kernel.org |
4304 | S: Maintained | 3695 | S: Maintained |
4305 | F: drivers/video/omap/ | 3696 | F: drivers/video/omap/ |
4306 | 3697 | ||
4307 | OMAP MMC SUPPORT | 3698 | OMAP MMC SUPPORT |
4308 | P: Jarkko Lavinen | 3699 | M: Jarkko Lavinen <jarkko.lavinen@nokia.com> |
4309 | M: jarkko.lavinen@nokia.com | ||
4310 | L: linux-kernel@vger.kernel.org | ||
4311 | L: linux-omap@vger.kernel.org | 3700 | L: linux-omap@vger.kernel.org |
4312 | S: Maintained | 3701 | S: Maintained |
4313 | F: drivers/mmc/host/*omap* | 3702 | F: drivers/mmc/host/*omap* |
4314 | 3703 | ||
4315 | OMAP RANDOM NUMBER GENERATOR SUPPORT | 3704 | OMAP RANDOM NUMBER GENERATOR SUPPORT |
4316 | P: Deepak Saxena | 3705 | M: Deepak Saxena <dsaxena@plexity.net> |
4317 | M: dsaxena@plexity.net | ||
4318 | S: Maintained | 3706 | S: Maintained |
4319 | F: drivers/char/hw_random/omap-rng.c | 3707 | F: drivers/char/hw_random/omap-rng.c |
4320 | 3708 | ||
4321 | OMAP USB SUPPORT | 3709 | OMAP USB SUPPORT |
4322 | P: Felipe Balbi | 3710 | M: Felipe Balbi <felipe.balbi@nokia.com> |
4323 | M: felipe.balbi@nokia.com | 3711 | M: David Brownell <dbrownell@users.sourceforge.net> |
4324 | P: David Brownell | ||
4325 | M: dbrownell@users.sourceforge.net | ||
4326 | L: linux-usb@vger.kernel.org | 3712 | L: linux-usb@vger.kernel.org |
4327 | L: linux-omap@vger.kernel.org | 3713 | L: linux-omap@vger.kernel.org |
4328 | S: Maintained | 3714 | S: Maintained |
4329 | 3715 | ||
4330 | OMFS FILESYSTEM | 3716 | OMFS FILESYSTEM |
4331 | P: Bob Copeland | 3717 | M: Bob Copeland <me@bobcopeland.com> |
4332 | M: me@bobcopeland.com | ||
4333 | L: linux-karma-devel@lists.sourceforge.net | 3718 | L: linux-karma-devel@lists.sourceforge.net |
4334 | S: Maintained | 3719 | S: Maintained |
4335 | F: Documentation/filesystems/omfs.txt | 3720 | F: Documentation/filesystems/omfs.txt |
4336 | F: fs/omfs/ | 3721 | F: fs/omfs/ |
4337 | 3722 | ||
4338 | OMNIKEY CARDMAN 4000 DRIVER | 3723 | OMNIKEY CARDMAN 4000 DRIVER |
4339 | P: Harald Welte | 3724 | M: Harald Welte <laforge@gnumonks.org> |
4340 | M: laforge@gnumonks.org | ||
4341 | S: Maintained | 3725 | S: Maintained |
4342 | F: drivers/char/pcmcia/cm4000_cs.c | 3726 | F: drivers/char/pcmcia/cm4000_cs.c |
4343 | F: include/linux/cm4000_cs.h | 3727 | F: include/linux/cm4000_cs.h |
4344 | 3728 | ||
4345 | OMNIKEY CARDMAN 4040 DRIVER | 3729 | OMNIKEY CARDMAN 4040 DRIVER |
4346 | P: Harald Welte | 3730 | M: Harald Welte <laforge@gnumonks.org> |
4347 | M: laforge@gnumonks.org | ||
4348 | S: Maintained | 3731 | S: Maintained |
4349 | F: drivers/char/pcmcia/cm4040_cs.* | 3732 | F: drivers/char/pcmcia/cm4040_cs.* |
4350 | 3733 | ||
4351 | OMNIVISION OV7670 SENSOR DRIVER | 3734 | OMNIVISION OV7670 SENSOR DRIVER |
4352 | P: Jonathan Corbet | 3735 | M: Jonathan Corbet <corbet@lwn.net> |
4353 | M: corbet@lwn.net | ||
4354 | L: linux-media@vger.kernel.org | 3736 | L: linux-media@vger.kernel.org |
4355 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git | 3737 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git |
4356 | S: Maintained | 3738 | S: Maintained |
4357 | F: drivers/media/video/ov7670.c | 3739 | F: drivers/media/video/ov7670.c |
4358 | 3740 | ||
4359 | ONENAND FLASH DRIVER | 3741 | ONENAND FLASH DRIVER |
4360 | P: Kyungmin Park | 3742 | M: Kyungmin Park <kyungmin.park@samsung.com> |
4361 | M: kyungmin.park@samsung.com | ||
4362 | L: linux-mtd@lists.infradead.org | 3743 | L: linux-mtd@lists.infradead.org |
4363 | S: Maintained | 3744 | S: Maintained |
4364 | F: drivers/mtd/onenand/ | 3745 | F: drivers/mtd/onenand/ |
4365 | F: include/linux/mtd/onenand*.h | 3746 | F: include/linux/mtd/onenand*.h |
4366 | 3747 | ||
4367 | ONSTREAM SCSI TAPE DRIVER | 3748 | ONSTREAM SCSI TAPE DRIVER |
4368 | P: Willem Riede | 3749 | M: Willem Riede <osst@riede.org> |
4369 | M: osst@riede.org | ||
4370 | L: osst-users@lists.sourceforge.net | 3750 | L: osst-users@lists.sourceforge.net |
4371 | L: linux-scsi@vger.kernel.org | 3751 | L: linux-scsi@vger.kernel.org |
4372 | S: Maintained | 3752 | S: Maintained |
@@ -4374,16 +3754,14 @@ F: drivers/scsi/osst* | |||
4374 | F: drivers/scsi/st* | 3754 | F: drivers/scsi/st* |
4375 | 3755 | ||
4376 | OPENCORES I2C BUS DRIVER | 3756 | OPENCORES I2C BUS DRIVER |
4377 | P: Peter Korsgaard | 3757 | M: Peter Korsgaard <jacmet@sunsite.dk> |
4378 | M: jacmet@sunsite.dk | ||
4379 | L: linux-i2c@vger.kernel.org | 3758 | L: linux-i2c@vger.kernel.org |
4380 | S: Maintained | 3759 | S: Maintained |
4381 | F: Documentation/i2c/busses/i2c-ocores | 3760 | F: Documentation/i2c/busses/i2c-ocores |
4382 | F: drivers/i2c/busses/i2c-ocores.c | 3761 | F: drivers/i2c/busses/i2c-ocores.c |
4383 | 3762 | ||
4384 | OPROFILE | 3763 | OPROFILE |
4385 | P: Robert Richter | 3764 | M: Robert Richter <robert.richter@amd.com> |
4386 | M: robert.richter@amd.com | ||
4387 | L: oprofile-list@lists.sf.net | 3765 | L: oprofile-list@lists.sf.net |
4388 | S: Maintained | 3766 | S: Maintained |
4389 | F: arch/*/oprofile/ | 3767 | F: arch/*/oprofile/ |
@@ -4391,10 +3769,8 @@ F: drivers/oprofile/ | |||
4391 | F: include/linux/oprofile.h | 3769 | F: include/linux/oprofile.h |
4392 | 3770 | ||
4393 | ORACLE CLUSTER FILESYSTEM 2 (OCFS2) | 3771 | ORACLE CLUSTER FILESYSTEM 2 (OCFS2) |
4394 | P: Mark Fasheh | 3772 | M: Mark Fasheh <mfasheh@suse.com> |
4395 | M: mfasheh@suse.com | 3773 | M: Joel Becker <joel.becker@oracle.com> |
4396 | P: Joel Becker | ||
4397 | M: joel.becker@oracle.com | ||
4398 | L: ocfs2-devel@oss.oracle.com (moderated for non-subscribers) | 3774 | L: ocfs2-devel@oss.oracle.com (moderated for non-subscribers) |
4399 | W: http://oss.oracle.com/projects/ocfs2/ | 3775 | W: http://oss.oracle.com/projects/ocfs2/ |
4400 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2.git | 3776 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2.git |
@@ -4404,10 +3780,8 @@ F: Documentation/filesystems/dlmfs.txt | |||
4404 | F: fs/ocfs2/ | 3780 | F: fs/ocfs2/ |
4405 | 3781 | ||
4406 | ORINOCO DRIVER | 3782 | ORINOCO DRIVER |
4407 | P: Pavel Roskin | 3783 | M: Pavel Roskin <proski@gnu.org> |
4408 | M: proski@gnu.org | 3784 | M: David Gibson <hermes@gibson.dropbear.id.au> |
4409 | P: David Gibson | ||
4410 | M: hermes@gibson.dropbear.id.au | ||
4411 | L: linux-wireless@vger.kernel.org | 3785 | L: linux-wireless@vger.kernel.org |
4412 | L: orinoco-users@lists.sourceforge.net | 3786 | L: orinoco-users@lists.sourceforge.net |
4413 | L: orinoco-devel@lists.sourceforge.net | 3787 | L: orinoco-devel@lists.sourceforge.net |
@@ -4416,10 +3790,8 @@ S: Maintained | |||
4416 | F: drivers/net/wireless/orinoco/ | 3790 | F: drivers/net/wireless/orinoco/ |
4417 | 3791 | ||
4418 | OSD LIBRARY and FILESYSTEM | 3792 | OSD LIBRARY and FILESYSTEM |
4419 | P: Boaz Harrosh | 3793 | M: Boaz Harrosh <bharrosh@panasas.com> |
4420 | M: bharrosh@panasas.com | 3794 | M: Benny Halevy <bhalevy@panasas.com> |
4421 | P: Benny Halevy | ||
4422 | M: bhalevy@panasas.com | ||
4423 | L: osd-dev@open-osd.org | 3795 | L: osd-dev@open-osd.org |
4424 | W: http://open-osd.org | 3796 | W: http://open-osd.org |
4425 | T: git git://git.open-osd.org/open-osd.git | 3797 | T: git git://git.open-osd.org/open-osd.git |
@@ -4429,8 +3801,7 @@ F: drivers/include/scsi/osd_* | |||
4429 | F: fs/exofs/ | 3801 | F: fs/exofs/ |
4430 | 3802 | ||
4431 | P54 WIRELESS DRIVER | 3803 | P54 WIRELESS DRIVER |
4432 | P: Michael Wu | 3804 | M: Michael Wu <flamingice@sourmilk.net> |
4433 | M: flamingice@sourmilk.net | ||
4434 | L: linux-wireless@vger.kernel.org | 3805 | L: linux-wireless@vger.kernel.org |
4435 | W: http://prism54.org | 3806 | W: http://prism54.org |
4436 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mwu/mac80211-drivers.git | 3807 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mwu/mac80211-drivers.git |
@@ -4438,30 +3809,25 @@ S: Maintained | |||
4438 | F: drivers/net/wireless/p54/ | 3809 | F: drivers/net/wireless/p54/ |
4439 | 3810 | ||
4440 | PA SEMI ETHERNET DRIVER | 3811 | PA SEMI ETHERNET DRIVER |
4441 | P: Olof Johansson | 3812 | M: Olof Johansson <olof@lixom.net> |
4442 | M: olof@lixom.net | ||
4443 | L: netdev@vger.kernel.org | 3813 | L: netdev@vger.kernel.org |
4444 | S: Maintained | 3814 | S: Maintained |
4445 | F: drivers/net/pasemi_mac.* | 3815 | F: drivers/net/pasemi_mac.* |
4446 | 3816 | ||
4447 | PA SEMI SMBUS DRIVER | 3817 | PA SEMI SMBUS DRIVER |
4448 | P: Olof Johansson | 3818 | M: Olof Johansson <olof@lixom.net> |
4449 | M: olof@lixom.net | ||
4450 | L: linux-i2c@vger.kernel.org | 3819 | L: linux-i2c@vger.kernel.org |
4451 | S: Maintained | 3820 | S: Maintained |
4452 | F: drivers/i2c/busses/i2c-pasemi.c | 3821 | F: drivers/i2c/busses/i2c-pasemi.c |
4453 | 3822 | ||
4454 | PANASONIC LAPTOP ACPI EXTRAS DRIVER | 3823 | PANASONIC LAPTOP ACPI EXTRAS DRIVER |
4455 | P: Harald Welte | 3824 | M: Harald Welte <laforge@gnumonks.org> |
4456 | M: laforge@gnumonks.org | ||
4457 | S: Maintained | 3825 | S: Maintained |
4458 | F: drivers/platform/x86/panasonic-laptop.c | 3826 | F: drivers/platform/x86/panasonic-laptop.c |
4459 | 3827 | ||
4460 | PANASONIC MN10300/AM33 PORT | 3828 | PANASONIC MN10300/AM33 PORT |
4461 | P: David Howells | 3829 | M: David Howells <dhowells@redhat.com> |
4462 | M: dhowells@redhat.com | 3830 | M: Koichi Yasutake <yasutake.koichi@jp.panasonic.com> |
4463 | P: Koichi Yasutake | ||
4464 | M: yasutake.koichi@jp.panasonic.com | ||
4465 | L: linux-am33-list@redhat.com (moderated for non-subscribers) | 3831 | L: linux-am33-list@redhat.com (moderated for non-subscribers) |
4466 | W: ftp://ftp.redhat.com/pub/redhat/gnupro/AM33/ | 3832 | W: ftp://ftp.redhat.com/pub/redhat/gnupro/AM33/ |
4467 | S: Maintained | 3833 | S: Maintained |
@@ -4477,14 +3843,10 @@ F: drivers/char/ppdev.c | |||
4477 | F: include/linux/ppdev.h | 3843 | F: include/linux/ppdev.h |
4478 | 3844 | ||
4479 | PARAVIRT_OPS INTERFACE | 3845 | PARAVIRT_OPS INTERFACE |
4480 | P: Jeremy Fitzhardinge | 3846 | M: Jeremy Fitzhardinge <jeremy@xensource.com> |
4481 | M: jeremy@xensource.com | 3847 | M: Chris Wright <chrisw@sous-sol.org> |
4482 | P: Chris Wright | 3848 | M: Alok Kataria <akataria@vmware.com> |
4483 | M: chrisw@sous-sol.org | 3849 | M: Rusty Russell <rusty@rustcorp.com.au> |
4484 | P: Alok Kataria | ||
4485 | M: akataria@vmware.com | ||
4486 | P: Rusty Russell | ||
4487 | M: rusty@rustcorp.com.au | ||
4488 | L: virtualization@lists.osdl.org | 3850 | L: virtualization@lists.osdl.org |
4489 | S: Supported | 3851 | S: Supported |
4490 | F: Documentation/ia64/paravirt_ops.txt | 3852 | F: Documentation/ia64/paravirt_ops.txt |
@@ -4492,8 +3854,7 @@ F: arch/*/kernel/paravirt* | |||
4492 | F: arch/*/include/asm/paravirt.h | 3854 | F: arch/*/include/asm/paravirt.h |
4493 | 3855 | ||
4494 | PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES | 3856 | PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES |
4495 | P: Tim Waugh | 3857 | M: Tim Waugh <tim@cyberelk.net> |
4496 | M: tim@cyberelk.net | ||
4497 | L: linux-parport@lists.infradead.org (subscribers-only) | 3858 | L: linux-parport@lists.infradead.org (subscribers-only) |
4498 | W: http://www.torque.net/linux-pp.html | 3859 | W: http://www.torque.net/linux-pp.html |
4499 | S: Maintained | 3860 | S: Maintained |
@@ -4501,10 +3862,8 @@ F: Documentation/blockdev/paride.txt | |||
4501 | F: drivers/block/paride/ | 3862 | F: drivers/block/paride/ |
4502 | 3863 | ||
4503 | PARISC ARCHITECTURE | 3864 | PARISC ARCHITECTURE |
4504 | P: Kyle McMartin | 3865 | M: Kyle McMartin <kyle@mcmartin.ca> |
4505 | M: kyle@mcmartin.ca | 3866 | M: Helge Deller <deller@gmx.de> |
4506 | P: Helge Deller | ||
4507 | M: deller@gmx.de | ||
4508 | L: linux-parisc@vger.kernel.org | 3867 | L: linux-parisc@vger.kernel.org |
4509 | W: http://www.parisc-linux.org/ | 3868 | W: http://www.parisc-linux.org/ |
4510 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6.git | 3869 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6.git |
@@ -4513,37 +3872,32 @@ F: arch/parisc/ | |||
4513 | F: drivers/parisc/ | 3872 | F: drivers/parisc/ |
4514 | 3873 | ||
4515 | PC87360 HARDWARE MONITORING DRIVER | 3874 | PC87360 HARDWARE MONITORING DRIVER |
4516 | P: Jim Cromie | 3875 | M: Jim Cromie <jim.cromie@gmail.com> |
4517 | M: jim.cromie@gmail.com | ||
4518 | L: lm-sensors@lm-sensors.org | 3876 | L: lm-sensors@lm-sensors.org |
4519 | S: Maintained | 3877 | S: Maintained |
4520 | F: Documentation/hwmon/pc87360 | 3878 | F: Documentation/hwmon/pc87360 |
4521 | F: drivers/hwmon/pc87360.c | 3879 | F: drivers/hwmon/pc87360.c |
4522 | 3880 | ||
4523 | PC8736x GPIO DRIVER | 3881 | PC8736x GPIO DRIVER |
4524 | P: Jim Cromie | 3882 | M: Jim Cromie <jim.cromie@gmail.com> |
4525 | M: jim.cromie@gmail.com | ||
4526 | S: Maintained | 3883 | S: Maintained |
4527 | F: drivers/char/pc8736x_gpio.c | 3884 | F: drivers/char/pc8736x_gpio.c |
4528 | 3885 | ||
4529 | PCA9532 LED DRIVER | 3886 | PCA9532 LED DRIVER |
4530 | P: Riku Voipio | 3887 | M: Riku Voipio <riku.voipio@iki.fi> |
4531 | M: riku.voipio@iki.fi | ||
4532 | S: Maintained | 3888 | S: Maintained |
4533 | F: drivers/leds/leds-pca9532.c | 3889 | F: drivers/leds/leds-pca9532.c |
4534 | F: include/linux/leds-pca9532.h | 3890 | F: include/linux/leds-pca9532.h |
4535 | 3891 | ||
4536 | PCI ERROR RECOVERY | 3892 | PCI ERROR RECOVERY |
4537 | P: Linas Vepstas | 3893 | M: Linas Vepstas <linas@austin.ibm.com> |
4538 | M: linas@austin.ibm.com | ||
4539 | L: linux-pci@vger.kernel.org | 3894 | L: linux-pci@vger.kernel.org |
4540 | S: Supported | 3895 | S: Supported |
4541 | F: Documentation/PCI/pci-error-recovery.txt | 3896 | F: Documentation/PCI/pci-error-recovery.txt |
4542 | F: Documentation/powerpc/eeh-pci-error-recovery.txt | 3897 | F: Documentation/powerpc/eeh-pci-error-recovery.txt |
4543 | 3898 | ||
4544 | PCI SUBSYSTEM | 3899 | PCI SUBSYSTEM |
4545 | P: Jesse Barnes | 3900 | M: Jesse Barnes <jbarnes@virtuousgeek.org> |
4546 | M: jbarnes@virtuousgeek.org | ||
4547 | L: linux-pci@vger.kernel.org | 3901 | L: linux-pci@vger.kernel.org |
4548 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6.git | 3902 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6.git |
4549 | S: Supported | 3903 | S: Supported |
@@ -4552,8 +3906,7 @@ F: drivers/pci/ | |||
4552 | F: include/linux/pci* | 3906 | F: include/linux/pci* |
4553 | 3907 | ||
4554 | PCIE HOTPLUG DRIVER | 3908 | PCIE HOTPLUG DRIVER |
4555 | P: Kristen Carlson Accardi | 3909 | M: Kristen Carlson Accardi <kristen.c.accardi@intel.com> |
4556 | M: kristen.c.accardi@intel.com | ||
4557 | L: linux-pci@vger.kernel.org | 3910 | L: linux-pci@vger.kernel.org |
4558 | S: Supported | 3911 | S: Supported |
4559 | F: drivers/pci/pcie/ | 3912 | F: drivers/pci/pcie/ |
@@ -4569,121 +3922,103 @@ F: drivers/pcmcia/ | |||
4569 | F: include/pcmcia/ | 3922 | F: include/pcmcia/ |
4570 | 3923 | ||
4571 | PCNET32 NETWORK DRIVER | 3924 | PCNET32 NETWORK DRIVER |
4572 | P: Don Fry | 3925 | M: Don Fry <pcnet32@verizon.net> |
4573 | M: pcnet32@verizon.net | ||
4574 | L: netdev@vger.kernel.org | 3926 | L: netdev@vger.kernel.org |
4575 | S: Maintained | 3927 | S: Maintained |
4576 | F: drivers/net/pcnet32.c | 3928 | F: drivers/net/pcnet32.c |
4577 | 3929 | ||
4578 | PER-TASK DELAY ACCOUNTING | 3930 | PER-TASK DELAY ACCOUNTING |
4579 | P: Balbir Singh | 3931 | M: Balbir Singh <balbir@linux.vnet.ibm.com> |
4580 | M: balbir@linux.vnet.ibm.com | ||
4581 | S: Maintained | 3932 | S: Maintained |
4582 | F: include/linux/delayacct.h | 3933 | F: include/linux/delayacct.h |
4583 | F: kernel/delayacct.c | 3934 | F: kernel/delayacct.c |
4584 | 3935 | ||
4585 | PERFORMANCE COUNTER SUBSYSTEM | 3936 | PERFORMANCE COUNTER SUBSYSTEM |
4586 | P: Peter Zijlstra | 3937 | M: Peter Zijlstra <a.p.zijlstra@chello.nl> |
4587 | M: a.p.zijlstra@chello.nl | 3938 | M: Paul Mackerras <paulus@samba.org> |
4588 | P: Paul Mackerras | 3939 | M: Ingo Molnar <mingo@elte.hu> |
4589 | M: paulus@samba.org | ||
4590 | P: Ingo Molnar | ||
4591 | M: mingo@elte.hu | ||
4592 | L: linux-kernel@vger.kernel.org | ||
4593 | S: Supported | 3940 | S: Supported |
4594 | 3941 | ||
4595 | PERSONALITY HANDLING | 3942 | PERSONALITY HANDLING |
4596 | P: Christoph Hellwig | 3943 | M: Christoph Hellwig <hch@infradead.org> |
4597 | M: hch@infradead.org | ||
4598 | L: linux-abi-devel@lists.sourceforge.net | 3944 | L: linux-abi-devel@lists.sourceforge.net |
4599 | S: Maintained | 3945 | S: Maintained |
4600 | F: include/linux/personality.h | 3946 | F: include/linux/personality.h |
4601 | 3947 | ||
4602 | PHRAM MTD DRIVER | 3948 | PHRAM MTD DRIVER |
4603 | P: Joern Engel | 3949 | M: Joern Engel <joern@lazybastard.org> |
4604 | M: joern@lazybastard.org | ||
4605 | L: linux-mtd@lists.infradead.org | 3950 | L: linux-mtd@lists.infradead.org |
4606 | S: Maintained | 3951 | S: Maintained |
4607 | F: drivers/mtd/devices/phram.c | 3952 | F: drivers/mtd/devices/phram.c |
4608 | 3953 | ||
4609 | PKTCDVD DRIVER | 3954 | PKTCDVD DRIVER |
4610 | P: Peter Osterlund | 3955 | M: Peter Osterlund <petero2@telia.com> |
4611 | M: petero2@telia.com | ||
4612 | S: Maintained | 3956 | S: Maintained |
4613 | F: drivers/block/pktcdvd.c | 3957 | F: drivers/block/pktcdvd.c |
4614 | F: include/linux/pktcdvd.h | 3958 | F: include/linux/pktcdvd.h |
4615 | 3959 | ||
4616 | POSIX CLOCKS and TIMERS | 3960 | POSIX CLOCKS and TIMERS |
4617 | P: Thomas Gleixner | 3961 | M: Thomas Gleixner <tglx@linutronix.de> |
4618 | M: tglx@linutronix.de | ||
4619 | S: Supported | 3962 | S: Supported |
4620 | F: fs/timerfd.c | 3963 | F: fs/timerfd.c |
4621 | F: include/linux/timer* | 3964 | F: include/linux/timer* |
4622 | F: kernel/*timer* | 3965 | F: kernel/*timer* |
4623 | 3966 | ||
4624 | POWER SUPPLY CLASS/SUBSYSTEM and DRIVERS | 3967 | POWER SUPPLY CLASS/SUBSYSTEM and DRIVERS |
4625 | P: Anton Vorontsov | 3968 | M: Anton Vorontsov <cbou@mail.ru> |
4626 | M: cbou@mail.ru | 3969 | M: David Woodhouse <dwmw2@infradead.org> |
4627 | P: David Woodhouse | ||
4628 | M: dwmw2@infradead.org | ||
4629 | T: git git://git.infradead.org/battery-2.6.git | 3970 | T: git git://git.infradead.org/battery-2.6.git |
4630 | S: Maintained | 3971 | S: Maintained |
4631 | F: include/linux/power_supply.h | 3972 | F: include/linux/power_supply.h |
4632 | F: drivers/power/power_supply* | 3973 | F: drivers/power/power_supply* |
4633 | 3974 | ||
4634 | PNP SUPPORT | 3975 | PNP SUPPORT |
4635 | P: Adam Belay | 3976 | M: Adam Belay <abelay@mit.edu> |
4636 | M: abelay@mit.edu | 3977 | M: Bjorn Helgaas <bjorn.helgaas@hp.com> |
4637 | P: Bjorn Helgaas | ||
4638 | M: bjorn.helgaas@hp.com | ||
4639 | S: Maintained | 3978 | S: Maintained |
4640 | F: drivers/pnp/ | 3979 | F: drivers/pnp/ |
4641 | 3980 | ||
4642 | PNXxxxx I2C DRIVER | 3981 | PNXxxxx I2C DRIVER |
4643 | P: Vitaly Wool | 3982 | M: Vitaly Wool <vitalywool@gmail.com> |
4644 | M: vitalywool@gmail.com | ||
4645 | L: linux-i2c@vger.kernel.org | 3983 | L: linux-i2c@vger.kernel.org |
4646 | S: Maintained | 3984 | S: Maintained |
4647 | F: drivers/i2c/busses/i2c-pnx.c | 3985 | F: drivers/i2c/busses/i2c-pnx.c |
4648 | 3986 | ||
4649 | PPP PROTOCOL DRIVERS AND COMPRESSORS | 3987 | PPP PROTOCOL DRIVERS AND COMPRESSORS |
4650 | P: Paul Mackerras | 3988 | M: Paul Mackerras <paulus@samba.org> |
4651 | M: paulus@samba.org | ||
4652 | L: linux-ppp@vger.kernel.org | 3989 | L: linux-ppp@vger.kernel.org |
4653 | S: Maintained | 3990 | S: Maintained |
4654 | F: drivers/net/ppp_* | 3991 | F: drivers/net/ppp_* |
4655 | 3992 | ||
4656 | PPP OVER ATM (RFC 2364) | 3993 | PPP OVER ATM (RFC 2364) |
4657 | P: Mitchell Blank Jr | 3994 | M: Mitchell Blank Jr <mitch@sfgoth.com> |
4658 | M: mitch@sfgoth.com | ||
4659 | S: Maintained | 3995 | S: Maintained |
4660 | F: net/atm/pppoatm.c | 3996 | F: net/atm/pppoatm.c |
4661 | F: include/linux/atmppp.h | 3997 | F: include/linux/atmppp.h |
4662 | 3998 | ||
4663 | PPP OVER ETHERNET | 3999 | PPP OVER ETHERNET |
4664 | P: Michal Ostrowski | 4000 | M: Michal Ostrowski <mostrows@earthlink.net> |
4665 | M: mostrows@earthlink.net | ||
4666 | S: Maintained | 4001 | S: Maintained |
4667 | F: drivers/net/pppoe.c | 4002 | F: drivers/net/pppoe.c |
4668 | F: drivers/net/pppox.c | 4003 | F: drivers/net/pppox.c |
4669 | 4004 | ||
4670 | PPP OVER L2TP | 4005 | PPP OVER L2TP |
4671 | P: James Chapman | 4006 | M: James Chapman <jchapman@katalix.com> |
4672 | M: jchapman@katalix.com | ||
4673 | S: Maintained | 4007 | S: Maintained |
4674 | F: drivers/net/pppol2tp.c | 4008 | F: drivers/net/pppol2tp.c |
4675 | F: include/linux/if_pppol2tp.h | 4009 | F: include/linux/if_pppol2tp.h |
4676 | 4010 | ||
4677 | PPS SUPPORT | 4011 | PPS SUPPORT |
4678 | P: Rodolfo Giometti | 4012 | M: Rodolfo Giometti <giometti@enneenne.com> |
4679 | M: giometti@enneenne.com | ||
4680 | W: http://wiki.enneenne.com/index.php/LinuxPPS_support | 4013 | W: http://wiki.enneenne.com/index.php/LinuxPPS_support |
4681 | L: linuxpps@ml.enneenne.com (subscribers-only) | 4014 | L: linuxpps@ml.enneenne.com (subscribers-only) |
4682 | S: Maintained | 4015 | S: Maintained |
4016 | F: Documentation/pps/ | ||
4017 | F: drivers/pps/ | ||
4018 | F: include/linux/pps*.h | ||
4683 | 4019 | ||
4684 | PREEMPTIBLE KERNEL | 4020 | PREEMPTIBLE KERNEL |
4685 | P: Robert Love | 4021 | M: Robert Love <rml@tech9.net> |
4686 | M: rml@tech9.net | ||
4687 | L: kpreempt-tech@lists.sourceforge.net | 4022 | L: kpreempt-tech@lists.sourceforge.net |
4688 | W: ftp://ftp.kernel.org/pub/linux/kernel/people/rml/preempt-kernel | 4023 | W: ftp://ftp.kernel.org/pub/linux/kernel/people/rml/preempt-kernel |
4689 | S: Supported | 4024 | S: Supported |
@@ -4691,37 +4026,32 @@ F: Documentation/preempt-locking.txt | |||
4691 | F: include/linux/preempt.h | 4026 | F: include/linux/preempt.h |
4692 | 4027 | ||
4693 | PRISM54 WIRELESS DRIVER | 4028 | PRISM54 WIRELESS DRIVER |
4694 | P: Luis R. Rodriguez | 4029 | M: "Luis R. Rodriguez" <mcgrof@gmail.com> |
4695 | M: mcgrof@gmail.com | ||
4696 | L: linux-wireless@vger.kernel.org | 4030 | L: linux-wireless@vger.kernel.org |
4697 | W: http://prism54.org | 4031 | W: http://prism54.org |
4698 | S: Maintained | 4032 | S: Maintained |
4699 | F: drivers/net/wireless/prism54/ | 4033 | F: drivers/net/wireless/prism54/ |
4700 | 4034 | ||
4701 | PROMISE DC4030 CACHING DISK CONTROLLER DRIVER | 4035 | PROMISE DC4030 CACHING DISK CONTROLLER DRIVER |
4702 | P: Peter Denison | 4036 | M: Peter Denison <promise@pnd-pc.demon.co.uk> |
4703 | M: promise@pnd-pc.demon.co.uk | ||
4704 | W: http://www.pnd-pc.demon.co.uk/promise/ | 4037 | W: http://www.pnd-pc.demon.co.uk/promise/ |
4705 | S: Maintained | 4038 | S: Maintained |
4706 | 4039 | ||
4707 | PROMISE SATA TX2/TX4 CONTROLLER LIBATA DRIVER | 4040 | PROMISE SATA TX2/TX4 CONTROLLER LIBATA DRIVER |
4708 | P: Mikael Pettersson | 4041 | M: Mikael Pettersson <mikpe@it.uu.se> |
4709 | M: mikpe@it.uu.se | ||
4710 | L: linux-ide@vger.kernel.org | 4042 | L: linux-ide@vger.kernel.org |
4711 | S: Maintained | 4043 | S: Maintained |
4712 | F: drivers/ata/sata_promise.* | 4044 | F: drivers/ata/sata_promise.* |
4713 | 4045 | ||
4714 | PS3 NETWORK SUPPORT | 4046 | PS3 NETWORK SUPPORT |
4715 | P: Geoff Levand | 4047 | M: Geoff Levand <geoffrey.levand@am.sony.com> |
4716 | M: geoffrey.levand@am.sony.com | ||
4717 | L: netdev@vger.kernel.org | 4048 | L: netdev@vger.kernel.org |
4718 | L: cbe-oss-dev@ozlabs.org | 4049 | L: cbe-oss-dev@ozlabs.org |
4719 | S: Supported | 4050 | S: Supported |
4720 | F: drivers/net/ps3_gelic_net.* | 4051 | F: drivers/net/ps3_gelic_net.* |
4721 | 4052 | ||
4722 | PS3 PLATFORM SUPPORT | 4053 | PS3 PLATFORM SUPPORT |
4723 | P: Geoff Levand | 4054 | M: Geoff Levand <geoffrey.levand@am.sony.com> |
4724 | M: geoffrey.levand@am.sony.com | ||
4725 | L: linuxppc-dev@ozlabs.org | 4055 | L: linuxppc-dev@ozlabs.org |
4726 | L: cbe-oss-dev@ozlabs.org | 4056 | L: cbe-oss-dev@ozlabs.org |
4727 | S: Supported | 4057 | S: Supported |
@@ -4736,16 +4066,13 @@ F: drivers/usb/host/*ps3.c | |||
4736 | F: sound/ppc/snd_ps3* | 4066 | F: sound/ppc/snd_ps3* |
4737 | 4067 | ||
4738 | PS3VRAM DRIVER | 4068 | PS3VRAM DRIVER |
4739 | P: Jim Paris | 4069 | M: Jim Paris <jim@jtan.com> |
4740 | M: jim@jtan.com | ||
4741 | L: cbe-oss-dev@ozlabs.org | 4070 | L: cbe-oss-dev@ozlabs.org |
4742 | S: Maintained | 4071 | S: Maintained |
4743 | 4072 | ||
4744 | PTRACE SUPPORT | 4073 | PTRACE SUPPORT |
4745 | P: Roland McGrath | 4074 | M: Roland McGrath <roland@redhat.com> |
4746 | M: roland@redhat.com | 4075 | M: Oleg Nesterov <oleg@redhat.com> |
4747 | P: Oleg Nesterov | ||
4748 | M: oleg@redhat.com | ||
4749 | S: Maintained | 4076 | S: Maintained |
4750 | F: include/asm-generic/syscall.h | 4077 | F: include/asm-generic/syscall.h |
4751 | F: include/linux/ptrace.h | 4078 | F: include/linux/ptrace.h |
@@ -4754,8 +4081,7 @@ F: include/linux/tracehook.h | |||
4754 | F: kernel/ptrace.c | 4081 | F: kernel/ptrace.c |
4755 | 4082 | ||
4756 | PVRUSB2 VIDEO4LINUX DRIVER | 4083 | PVRUSB2 VIDEO4LINUX DRIVER |
4757 | P: Mike Isely | 4084 | M: Mike Isely <isely@pobox.com> |
4758 | M: isely@pobox.com | ||
4759 | L: pvrusb2@isely.net (subscribers-only) | 4085 | L: pvrusb2@isely.net (subscribers-only) |
4760 | L: linux-media@vger.kernel.org | 4086 | L: linux-media@vger.kernel.org |
4761 | W: http://www.isely.net/pvrusb2/ | 4087 | W: http://www.isely.net/pvrusb2/ |
@@ -4765,10 +4091,8 @@ F: Documentation/video4linux/README.pvrusb2 | |||
4765 | F: drivers/media/video/pvrusb2/ | 4091 | F: drivers/media/video/pvrusb2/ |
4766 | 4092 | ||
4767 | PXA2xx/PXA3xx SUPPORT | 4093 | PXA2xx/PXA3xx SUPPORT |
4768 | P: Eric Miao | 4094 | M: Eric Miao <eric.y.miao@gmail.com> |
4769 | M: eric.y.miao@gmail.com | 4095 | M: Russell King <linux@arm.linux.org.uk> |
4770 | P: Russell King | ||
4771 | M: linux@arm.linux.org.uk | ||
4772 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 4096 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
4773 | S: Maintained | 4097 | S: Maintained |
4774 | F: arch/arm/mach-pxa/ | 4098 | F: arch/arm/mach-pxa/ |
@@ -4780,17 +4104,14 @@ F: sound/arm/pxa* | |||
4780 | F: sound/soc/pxa | 4104 | F: sound/soc/pxa |
4781 | 4105 | ||
4782 | PXA168 SUPPORT | 4106 | PXA168 SUPPORT |
4783 | P: Eric Miao | 4107 | M: Eric Miao <eric.y.miao@gmail.com> |
4784 | M: eric.y.miao@gmail.com | 4108 | M: Jason Chagas <jason.chagas@marvell.com> |
4785 | P: Jason Chagas | ||
4786 | M: jason.chagas@marvell.com | ||
4787 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 4109 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
4788 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ycmiao/pxa-linux-2.6.git | 4110 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ycmiao/pxa-linux-2.6.git |
4789 | S: Maintained | 4111 | S: Maintained |
4790 | 4112 | ||
4791 | PXA910 SUPPORT | 4113 | PXA910 SUPPORT |
4792 | P: Eric Miao | 4114 | M: Eric Miao <eric.y.miao@gmail.com> |
4793 | M: eric.y.miao@gmail.com | ||
4794 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 4115 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
4795 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ycmiao/pxa-linux-2.6.git | 4116 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/ycmiao/pxa-linux-2.6.git |
4796 | S: Maintained | 4117 | S: Maintained |
@@ -4799,13 +4120,12 @@ PXA MMCI DRIVER | |||
4799 | S: Orphan | 4120 | S: Orphan |
4800 | 4121 | ||
4801 | PXA RTC DRIVER | 4122 | PXA RTC DRIVER |
4802 | P: Robert Jarzmik | 4123 | M: Robert Jarzmik <robert.jarzmik@free.fr> |
4803 | M: robert.jarzmik@free.fr | ||
4804 | L: rtc-linux@googlegroups.com | 4124 | L: rtc-linux@googlegroups.com |
4805 | S: Maintained | 4125 | S: Maintained |
4806 | 4126 | ||
4807 | QLOGIC QLA2XXX FC-SCSI DRIVER | 4127 | QLOGIC QLA2XXX FC-SCSI DRIVER |
4808 | P: Andrew Vasquez | 4128 | M: Andrew Vasquez <andrew.vasquez@qlogic.com> |
4809 | M: linux-driver@qlogic.com | 4129 | M: linux-driver@qlogic.com |
4810 | L: linux-scsi@vger.kernel.org | 4130 | L: linux-scsi@vger.kernel.org |
4811 | S: Supported | 4131 | S: Supported |
@@ -4813,7 +4133,7 @@ F: Documentation/scsi/LICENSE.qla2xxx | |||
4813 | F: drivers/scsi/qla2xxx/ | 4133 | F: drivers/scsi/qla2xxx/ |
4814 | 4134 | ||
4815 | QLOGIC QLA3XXX NETWORK DRIVER | 4135 | QLOGIC QLA3XXX NETWORK DRIVER |
4816 | P: Ron Mercer | 4136 | M: Ron Mercer <ron.mercer@qlogic.com> |
4817 | M: linux-driver@qlogic.com | 4137 | M: linux-driver@qlogic.com |
4818 | L: netdev@vger.kernel.org | 4138 | L: netdev@vger.kernel.org |
4819 | S: Supported | 4139 | S: Supported |
@@ -4821,16 +4141,14 @@ F: Documentation/networking/LICENSE.qla3xxx | |||
4821 | F: drivers/net/qla3xxx.* | 4141 | F: drivers/net/qla3xxx.* |
4822 | 4142 | ||
4823 | QLOGIC QLGE 10Gb ETHERNET DRIVER | 4143 | QLOGIC QLGE 10Gb ETHERNET DRIVER |
4824 | P: Ron Mercer | 4144 | M: Ron Mercer <ron.mercer@qlogic.com> |
4825 | M: linux-driver@qlogic.com | 4145 | M: linux-driver@qlogic.com |
4826 | M: ron.mercer@qlogic.com | ||
4827 | L: netdev@vger.kernel.org | 4146 | L: netdev@vger.kernel.org |
4828 | S: Supported | 4147 | S: Supported |
4829 | F: drivers/net/qlge/ | 4148 | F: drivers/net/qlge/ |
4830 | 4149 | ||
4831 | QNX4 FILESYSTEM | 4150 | QNX4 FILESYSTEM |
4832 | P: Anders Larsen | 4151 | M: Anders Larsen <al@alarsen.net> |
4833 | M: al@alarsen.net | ||
4834 | W: http://www.alarsen.net/linux/qnx4fs/ | 4152 | W: http://www.alarsen.net/linux/qnx4fs/ |
4835 | S: Maintained | 4153 | S: Maintained |
4836 | F: fs/qnx4/ | 4154 | F: fs/qnx4/ |
@@ -4838,16 +4156,14 @@ F: include/linux/qnx4_fs.h | |||
4838 | F: include/linux/qnxtypes.h | 4156 | F: include/linux/qnxtypes.h |
4839 | 4157 | ||
4840 | RADEON FRAMEBUFFER DISPLAY DRIVER | 4158 | RADEON FRAMEBUFFER DISPLAY DRIVER |
4841 | P: Benjamin Herrenschmidt | 4159 | M: Benjamin Herrenschmidt <benh@kernel.crashing.org> |
4842 | M: benh@kernel.crashing.org | ||
4843 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) | 4160 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) |
4844 | S: Maintained | 4161 | S: Maintained |
4845 | F: drivers/video/aty/radeon* | 4162 | F: drivers/video/aty/radeon* |
4846 | F: include/linux/radeonfb.h | 4163 | F: include/linux/radeonfb.h |
4847 | 4164 | ||
4848 | RAGE128 FRAMEBUFFER DISPLAY DRIVER | 4165 | RAGE128 FRAMEBUFFER DISPLAY DRIVER |
4849 | P: Paul Mackerras | 4166 | M: Paul Mackerras <paulus@samba.org> |
4850 | M: paulus@samba.org | ||
4851 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) | 4167 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) |
4852 | S: Maintained | 4168 | S: Maintained |
4853 | F: drivers/video/aty/aty128fb.c | 4169 | F: drivers/video/aty/aty128fb.c |
@@ -4862,64 +4178,53 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/ivd/rt2x00.git | |||
4862 | F: drivers/net/wireless/rt2x00/ | 4178 | F: drivers/net/wireless/rt2x00/ |
4863 | 4179 | ||
4864 | RAMDISK RAM BLOCK DEVICE DRIVER | 4180 | RAMDISK RAM BLOCK DEVICE DRIVER |
4865 | P: Nick Piggin | 4181 | M: Nick Piggin <npiggin@suse.de> |
4866 | M: npiggin@suse.de | ||
4867 | S: Maintained | 4182 | S: Maintained |
4868 | F: Documentation/blockdev/ramdisk.txt | 4183 | F: Documentation/blockdev/ramdisk.txt |
4869 | F: drivers/block/brd.c | 4184 | F: drivers/block/brd.c |
4870 | 4185 | ||
4871 | RANDOM NUMBER DRIVER | 4186 | RANDOM NUMBER DRIVER |
4872 | P: Matt Mackall | 4187 | M: Matt Mackall <mpm@selenic.com> |
4873 | M: mpm@selenic.com | ||
4874 | S: Maintained | 4188 | S: Maintained |
4875 | F: drivers/char/random.c | 4189 | F: drivers/char/random.c |
4876 | 4190 | ||
4877 | RAPIDIO SUBSYSTEM | 4191 | RAPIDIO SUBSYSTEM |
4878 | P: Matt Porter | 4192 | M: Matt Porter <mporter@kernel.crashing.org> |
4879 | M: mporter@kernel.crashing.org | ||
4880 | S: Maintained | 4193 | S: Maintained |
4881 | F: drivers/rapidio/ | 4194 | F: drivers/rapidio/ |
4882 | 4195 | ||
4883 | RAYLINK/WEBGEAR 802.11 WIRELESS LAN DRIVER | 4196 | RAYLINK/WEBGEAR 802.11 WIRELESS LAN DRIVER |
4884 | P: Corey Thomas | 4197 | M: Corey Thomas <coreythomas@charter.net> |
4885 | M: coreythomas@charter.net | ||
4886 | L: linux-wireless@vger.kernel.org | 4198 | L: linux-wireless@vger.kernel.org |
4887 | S: Maintained | 4199 | S: Maintained |
4888 | F: drivers/net/wireless/ray* | 4200 | F: drivers/net/wireless/ray* |
4889 | 4201 | ||
4890 | RCUTORTURE MODULE | 4202 | RCUTORTURE MODULE |
4891 | P: Josh Triplett | 4203 | M: Josh Triplett <josh@freedesktop.org> |
4892 | M: josh@freedesktop.org | 4204 | M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> |
4893 | P: Paul E. McKenney | ||
4894 | M: paulmck@linux.vnet.ibm.com | ||
4895 | S: Maintained | 4205 | S: Maintained |
4896 | F: Documentation/RCU/torture.txt | 4206 | F: Documentation/RCU/torture.txt |
4897 | F: kernel/rcutorture.c | 4207 | F: kernel/rcutorture.c |
4898 | 4208 | ||
4899 | RDC R-321X SoC | 4209 | RDC R-321X SoC |
4900 | P: Florian Fainelli | 4210 | M: Florian Fainelli <florian@openwrt.org> |
4901 | M: florian@openwrt.org | ||
4902 | S: Maintained | 4211 | S: Maintained |
4903 | 4212 | ||
4904 | RDC R6040 FAST ETHERNET DRIVER | 4213 | RDC R6040 FAST ETHERNET DRIVER |
4905 | P: Florian Fainelli | 4214 | M: Florian Fainelli <florian@openwrt.org> |
4906 | M: florian@openwrt.org | ||
4907 | L: netdev@vger.kernel.org | 4215 | L: netdev@vger.kernel.org |
4908 | S: Maintained | 4216 | S: Maintained |
4909 | F: drivers/net/r6040.c | 4217 | F: drivers/net/r6040.c |
4910 | 4218 | ||
4911 | RDS - RELIABLE DATAGRAM SOCKETS | 4219 | RDS - RELIABLE DATAGRAM SOCKETS |
4912 | P: Andy Grover | 4220 | M: Andy Grover <andy.grover@oracle.com> |
4913 | M: andy.grover@oracle.com | ||
4914 | L: rds-devel@oss.oracle.com (moderated for non-subscribers) | 4221 | L: rds-devel@oss.oracle.com (moderated for non-subscribers) |
4915 | S: Supported | 4222 | S: Supported |
4916 | F: net/rds/ | 4223 | F: net/rds/ |
4917 | 4224 | ||
4918 | READ-COPY UPDATE (RCU) | 4225 | READ-COPY UPDATE (RCU) |
4919 | P: Dipankar Sarma | 4226 | M: Dipankar Sarma <dipankar@in.ibm.com> |
4920 | M: dipankar@in.ibm.com | 4227 | M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> |
4921 | P: Paul E. McKenney | ||
4922 | M: paulmck@linux.vnet.ibm.com | ||
4923 | W: http://www.rdrop.com/users/paulmck/rclock/ | 4228 | W: http://www.rdrop.com/users/paulmck/rclock/ |
4924 | S: Supported | 4229 | S: Supported |
4925 | F: Documentation/RCU/rcu.txt | 4230 | F: Documentation/RCU/rcu.txt |
@@ -4929,16 +4234,14 @@ F: include/linux/srcu.h | |||
4929 | F: kernel/rcupdate.c | 4234 | F: kernel/rcupdate.c |
4930 | 4235 | ||
4931 | REAL TIME CLOCK DRIVER | 4236 | REAL TIME CLOCK DRIVER |
4932 | P: Paul Gortmaker | 4237 | M: Paul Gortmaker <p_gortmaker@yahoo.com> |
4933 | M: p_gortmaker@yahoo.com | ||
4934 | S: Maintained | 4238 | S: Maintained |
4935 | F: Documentation/rtc.txt | 4239 | F: Documentation/rtc.txt |
4936 | F: drivers/rtc/ | 4240 | F: drivers/rtc/ |
4937 | F: include/linux/rtc.h | 4241 | F: include/linux/rtc.h |
4938 | 4242 | ||
4939 | REAL TIME CLOCK (RTC) SUBSYSTEM | 4243 | REAL TIME CLOCK (RTC) SUBSYSTEM |
4940 | P: Alessandro Zummo | 4244 | M: Alessandro Zummo <a.zummo@towertech.it> |
4941 | M: a.zummo@towertech.it | ||
4942 | L: rtc-linux@googlegroups.com | 4245 | L: rtc-linux@googlegroups.com |
4943 | S: Maintained | 4246 | S: Maintained |
4944 | F: Documentation/rtc.txt | 4247 | F: Documentation/rtc.txt |
@@ -4951,8 +4254,7 @@ S: Supported | |||
4951 | F: fs/reiserfs/ | 4254 | F: fs/reiserfs/ |
4952 | 4255 | ||
4953 | RFKILL | 4256 | RFKILL |
4954 | P: Johannes Berg | 4257 | M: Johannes Berg <johannes@sipsolutions.net> |
4955 | M: johannes@sipsolutions.net | ||
4956 | L: linux-wireless@vger.kernel.org | 4258 | L: linux-wireless@vger.kernel.org |
4957 | S: Maintained | 4259 | S: Maintained |
4958 | F: Documentation/rfkill.txt | 4260 | F: Documentation/rfkill.txt |
@@ -4971,8 +4273,7 @@ F: Documentation/serial/rocket.txt | |||
4971 | F: drivers/char/rocket* | 4273 | F: drivers/char/rocket* |
4972 | 4274 | ||
4973 | ROSE NETWORK LAYER | 4275 | ROSE NETWORK LAYER |
4974 | P: Ralf Baechle | 4276 | M: Ralf Baechle <ralf@linux-mips.org> |
4975 | M: ralf@linux-mips.org | ||
4976 | L: linux-hams@vger.kernel.org | 4277 | L: linux-hams@vger.kernel.org |
4977 | W: http://www.linux-ax25.org/ | 4278 | W: http://www.linux-ax25.org/ |
4978 | S: Maintained | 4279 | S: Maintained |
@@ -4981,8 +4282,7 @@ F: include/net/rose.h | |||
4981 | F: net/rose/ | 4282 | F: net/rose/ |
4982 | 4283 | ||
4983 | RTL8180 WIRELESS DRIVER | 4284 | RTL8180 WIRELESS DRIVER |
4984 | P: John W. Linville | 4285 | M: "John W. Linville" <linville@tuxdriver.com> |
4985 | M: linville@tuxdriver.com | ||
4986 | L: linux-wireless@vger.kernel.org | 4286 | L: linux-wireless@vger.kernel.org |
4987 | W: http://linuxwireless.org/ | 4287 | W: http://linuxwireless.org/ |
4988 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git | 4288 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git |
@@ -4990,12 +4290,9 @@ S: Maintained | |||
4990 | F: drivers/net/wireless/rtl818* | 4290 | F: drivers/net/wireless/rtl818* |
4991 | 4291 | ||
4992 | RTL8187 WIRELESS DRIVER | 4292 | RTL8187 WIRELESS DRIVER |
4993 | P: Herton Ronaldo Krzesinski | 4293 | M: Herton Ronaldo Krzesinski <herton@mandriva.com.br> |
4994 | M: herton@mandriva.com.br | 4294 | M: Hin-Tak Leung <htl10@users.sourceforge.net> |
4995 | P: Hin-Tak Leung | 4295 | M: Larry Finger <Larry.Finger@lwfinger.net> |
4996 | M: htl10@users.sourceforge.net | ||
4997 | P: Larry Finger | ||
4998 | M: Larry.Finger@lwfinger.net | ||
4999 | L: linux-wireless@vger.kernel.org | 4296 | L: linux-wireless@vger.kernel.org |
5000 | W: http://linuxwireless.org/ | 4297 | W: http://linuxwireless.org/ |
5001 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git | 4298 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git |
@@ -5003,17 +4300,14 @@ S: Maintained | |||
5003 | F: drivers/net/wireless/rtl818x/rtl8187* | 4300 | F: drivers/net/wireless/rtl818x/rtl8187* |
5004 | 4301 | ||
5005 | S3 SAVAGE FRAMEBUFFER DRIVER | 4302 | S3 SAVAGE FRAMEBUFFER DRIVER |
5006 | P: Antonino Daplas | 4303 | M: Antonino Daplas <adaplas@gmail.com> |
5007 | M: adaplas@gmail.com | ||
5008 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) | 4304 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) |
5009 | S: Maintained | 4305 | S: Maintained |
5010 | F: drivers/video/savage/ | 4306 | F: drivers/video/savage/ |
5011 | 4307 | ||
5012 | S390 | 4308 | S390 |
5013 | P: Martin Schwidefsky | 4309 | M: Martin Schwidefsky <schwidefsky@de.ibm.com> |
5014 | M: schwidefsky@de.ibm.com | 4310 | M: Heiko Carstens <heiko.carstens@de.ibm.com> |
5015 | P: Heiko Carstens | ||
5016 | M: heiko.carstens@de.ibm.com | ||
5017 | M: linux390@de.ibm.com | 4311 | M: linux390@de.ibm.com |
5018 | L: linux-s390@vger.kernel.org | 4312 | L: linux-s390@vger.kernel.org |
5019 | W: http://www.ibm.com/developerworks/linux/linux390/ | 4313 | W: http://www.ibm.com/developerworks/linux/linux390/ |
@@ -5021,10 +4315,8 @@ S: Supported | |||
5021 | F: arch/s390/ | 4315 | F: arch/s390/ |
5022 | 4316 | ||
5023 | S390 NETWORK DRIVERS | 4317 | S390 NETWORK DRIVERS |
5024 | P: Ursula Braun | 4318 | M: Ursula Braun <ursula.braun@de.ibm.com> |
5025 | M: ursula.braun@de.ibm.com | 4319 | M: Frank Blaschka <blaschka@linux.vnet.ibm.com> |
5026 | P: Frank Blaschka | ||
5027 | M: blaschka@linux.vnet.ibm.com | ||
5028 | M: linux390@de.ibm.com | 4320 | M: linux390@de.ibm.com |
5029 | L: linux-s390@vger.kernel.org | 4321 | L: linux-s390@vger.kernel.org |
5030 | W: http://www.ibm.com/developerworks/linux/linux390/ | 4322 | W: http://www.ibm.com/developerworks/linux/linux390/ |
@@ -5032,20 +4324,16 @@ S: Supported | |||
5032 | F: drivers/s390/net/ | 4324 | F: drivers/s390/net/ |
5033 | 4325 | ||
5034 | S390 ZCRYPT DRIVER | 4326 | S390 ZCRYPT DRIVER |
5035 | P: Felix Beck | 4327 | M: Felix Beck <felix.beck@de.ibm.com> |
5036 | M: felix.beck@de.ibm.com | 4328 | M: Ralph Wuerthner <ralph.wuerthner@de.ibm.com> |
5037 | P: Ralph Wuerthner | ||
5038 | M: ralph.wuerthner@de.ibm.com | ||
5039 | M: linux390@de.ibm.com | 4329 | M: linux390@de.ibm.com |
5040 | L: linux-s390@vger.kernel.org | 4330 | L: linux-s390@vger.kernel.org |
5041 | S: Supported | 4331 | S: Supported |
5042 | F: drivers/s390/crypto/ | 4332 | F: drivers/s390/crypto/ |
5043 | 4333 | ||
5044 | S390 ZFCP DRIVER | 4334 | S390 ZFCP DRIVER |
5045 | P: Christof Schmitt | 4335 | M: Christof Schmitt <christof.schmitt@de.ibm.com> |
5046 | M: christof.schmitt@de.ibm.com | 4336 | M: Martin Peschke <mp3@de.ibm.com> |
5047 | P: Martin Peschke | ||
5048 | M: mp3@de.ibm.com | ||
5049 | M: linux390@de.ibm.com | 4337 | M: linux390@de.ibm.com |
5050 | L: linux-s390@vger.kernel.org | 4338 | L: linux-s390@vger.kernel.org |
5051 | W: http://www.ibm.com/developerworks/linux/linux390/ | 4339 | W: http://www.ibm.com/developerworks/linux/linux390/ |
@@ -5054,8 +4342,7 @@ F: Documentation/s390/zfcpdump.txt | |||
5054 | F: drivers/s390/scsi/zfcp_* | 4342 | F: drivers/s390/scsi/zfcp_* |
5055 | 4343 | ||
5056 | S390 IUCV NETWORK LAYER | 4344 | S390 IUCV NETWORK LAYER |
5057 | P: Ursula Braun | 4345 | M: Ursula Braun <ursula.braun@de.ibm.com> |
5058 | M: ursula.braun@de.ibm.com | ||
5059 | M: linux390@de.ibm.com | 4346 | M: linux390@de.ibm.com |
5060 | L: linux-s390@vger.kernel.org | 4347 | L: linux-s390@vger.kernel.org |
5061 | W: http://www.ibm.com/developerworks/linux/linux390/ | 4348 | W: http://www.ibm.com/developerworks/linux/linux390/ |
@@ -5065,15 +4352,13 @@ F: include/net/iucv/ | |||
5065 | F: net/iucv/ | 4352 | F: net/iucv/ |
5066 | 4353 | ||
5067 | S3C24XX SD/MMC Driver | 4354 | S3C24XX SD/MMC Driver |
5068 | P: Ben Dooks | 4355 | M: Ben Dooks <ben-linux@fluff.org> |
5069 | M: ben-linux@fluff.org | ||
5070 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 4356 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
5071 | S: Supported | 4357 | S: Supported |
5072 | F: drivers/mmc/host/s3cmci.* | 4358 | F: drivers/mmc/host/s3cmci.* |
5073 | 4359 | ||
5074 | SAA7146 VIDEO4LINUX-2 DRIVER | 4360 | SAA7146 VIDEO4LINUX-2 DRIVER |
5075 | P: Michael Hunold | 4361 | M: Michael Hunold <michael@mihu.de> |
5076 | M: michael@mihu.de | ||
5077 | L: linux-media@vger.kernel.org | 4362 | L: linux-media@vger.kernel.org |
5078 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git | 4363 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git |
5079 | W: http://www.mihu.de/linux/saa7146 | 4364 | W: http://www.mihu.de/linux/saa7146 |
@@ -5083,31 +4368,26 @@ F: drivers/media/video/*7146* | |||
5083 | F: include/media/*7146* | 4368 | F: include/media/*7146* |
5084 | 4369 | ||
5085 | SC1200 WDT DRIVER | 4370 | SC1200 WDT DRIVER |
5086 | P: Zwane Mwaikambo | 4371 | M: Zwane Mwaikambo <zwane@arm.linux.org.uk> |
5087 | M: zwane@arm.linux.org.uk | ||
5088 | S: Maintained | 4372 | S: Maintained |
5089 | F: drivers/watchdog/sc1200wdt.c | 4373 | F: drivers/watchdog/sc1200wdt.c |
5090 | 4374 | ||
5091 | SCHEDULER | 4375 | SCHEDULER |
5092 | P: Ingo Molnar | 4376 | M: Ingo Molnar <mingo@elte.hu> |
5093 | M: mingo@elte.hu | 4377 | M: Peter Zijlstra <peterz@infradead.org> |
5094 | P: Peter Zijlstra | ||
5095 | M: peterz@infradead.org | ||
5096 | S: Maintained | 4378 | S: Maintained |
5097 | F: kernel/sched* | 4379 | F: kernel/sched* |
5098 | F: include/linux/sched.h | 4380 | F: include/linux/sched.h |
5099 | 4381 | ||
5100 | SCSI CDROM DRIVER | 4382 | SCSI CDROM DRIVER |
5101 | P: Jens Axboe | 4383 | M: Jens Axboe <axboe@kernel.dk> |
5102 | M: axboe@kernel.dk | ||
5103 | L: linux-scsi@vger.kernel.org | 4384 | L: linux-scsi@vger.kernel.org |
5104 | W: http://www.kernel.dk | 4385 | W: http://www.kernel.dk |
5105 | S: Maintained | 4386 | S: Maintained |
5106 | F: drivers/scsi/sr* | 4387 | F: drivers/scsi/sr* |
5107 | 4388 | ||
5108 | SCSI SG DRIVER | 4389 | SCSI SG DRIVER |
5109 | P: Doug Gilbert | 4390 | M: Doug Gilbert <dgilbert@interlog.com> |
5110 | M: dgilbert@interlog.com | ||
5111 | L: linux-scsi@vger.kernel.org | 4391 | L: linux-scsi@vger.kernel.org |
5112 | W: http://www.torque.net/sg | 4392 | W: http://www.torque.net/sg |
5113 | S: Maintained | 4393 | S: Maintained |
@@ -5115,8 +4395,7 @@ F: drivers/scsi/sg.c | |||
5115 | F: include/scsi/sg.h | 4395 | F: include/scsi/sg.h |
5116 | 4396 | ||
5117 | SCSI SUBSYSTEM | 4397 | SCSI SUBSYSTEM |
5118 | P: James E.J. Bottomley | 4398 | M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com> |
5119 | M: James.Bottomley@HansenPartnership.com | ||
5120 | L: linux-scsi@vger.kernel.org | 4399 | L: linux-scsi@vger.kernel.org |
5121 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git | 4400 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git |
5122 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6.git | 4401 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6.git |
@@ -5126,18 +4405,15 @@ F: drivers/scsi/ | |||
5126 | F: include/scsi/ | 4405 | F: include/scsi/ |
5127 | 4406 | ||
5128 | SCSI TAPE DRIVER | 4407 | SCSI TAPE DRIVER |
5129 | P: Kai Mäkisara | 4408 | M: Kai Mäkisara <Kai.Makisara@kolumbus.fi> |
5130 | M: Kai.Makisara@kolumbus.fi | ||
5131 | L: linux-scsi@vger.kernel.org | 4409 | L: linux-scsi@vger.kernel.org |
5132 | S: Maintained | 4410 | S: Maintained |
5133 | F: Documentation/scsi/st.txt | 4411 | F: Documentation/scsi/st.txt |
5134 | F: drivers/scsi/st* | 4412 | F: drivers/scsi/st* |
5135 | 4413 | ||
5136 | SCTP PROTOCOL | 4414 | SCTP PROTOCOL |
5137 | P: Vlad Yasevich | 4415 | M: Vlad Yasevich <vladislav.yasevich@hp.com> |
5138 | M: vladislav.yasevich@hp.com | 4416 | M: Sridhar Samudrala <sri@us.ibm.com> |
5139 | P: Sridhar Samudrala | ||
5140 | M: sri@us.ibm.com | ||
5141 | L: linux-sctp@vger.kernel.org | 4417 | L: linux-sctp@vger.kernel.org |
5142 | W: http://lksctp.sourceforge.net | 4418 | W: http://lksctp.sourceforge.net |
5143 | S: Supported | 4419 | S: Supported |
@@ -5147,8 +4423,7 @@ F: include/net/sctp/ | |||
5147 | F: net/sctp/ | 4423 | F: net/sctp/ |
5148 | 4424 | ||
5149 | SCx200 CPU SUPPORT | 4425 | SCx200 CPU SUPPORT |
5150 | P: Jim Cromie | 4426 | M: Jim Cromie <jim.cromie@gmail.com> |
5151 | M: jim.cromie@gmail.com | ||
5152 | S: Odd Fixes | 4427 | S: Odd Fixes |
5153 | F: Documentation/i2c/busses/scx200_acb | 4428 | F: Documentation/i2c/busses/scx200_acb |
5154 | F: arch/x86/kernel/scx200_32.c | 4429 | F: arch/x86/kernel/scx200_32.c |
@@ -5158,49 +4433,42 @@ F: drivers/mtd/maps/scx200_docflash.c | |||
5158 | F: include/linux/scx200.h | 4433 | F: include/linux/scx200.h |
5159 | 4434 | ||
5160 | SCx200 GPIO DRIVER | 4435 | SCx200 GPIO DRIVER |
5161 | P: Jim Cromie | 4436 | M: Jim Cromie <jim.cromie@gmail.com> |
5162 | M: jim.cromie@gmail.com | ||
5163 | S: Maintained | 4437 | S: Maintained |
5164 | F: drivers/char/scx200_gpio.c | 4438 | F: drivers/char/scx200_gpio.c |
5165 | F: include/linux/scx200_gpio.h | 4439 | F: include/linux/scx200_gpio.h |
5166 | 4440 | ||
5167 | SCx200 HRT CLOCKSOURCE DRIVER | 4441 | SCx200 HRT CLOCKSOURCE DRIVER |
5168 | P: Jim Cromie | 4442 | M: Jim Cromie <jim.cromie@gmail.com> |
5169 | M: jim.cromie@gmail.com | ||
5170 | S: Maintained | 4443 | S: Maintained |
5171 | F: drivers/clocksource/scx200_hrt.c | 4444 | F: drivers/clocksource/scx200_hrt.c |
5172 | 4445 | ||
5173 | SDRICOH_CS MMC/SD HOST CONTROLLER INTERFACE DRIVER | 4446 | SDRICOH_CS MMC/SD HOST CONTROLLER INTERFACE DRIVER |
5174 | P: Sascha Sommer | 4447 | M: Sascha Sommer <saschasommer@freenet.de> |
5175 | M: saschasommer@freenet.de | ||
5176 | L: sdricohcs-devel@lists.sourceforge.net (subscribers-only) | 4448 | L: sdricohcs-devel@lists.sourceforge.net (subscribers-only) |
5177 | S: Maintained | 4449 | S: Maintained |
5178 | F: drivers/mmc/host/sdricoh_cs.c | 4450 | F: drivers/mmc/host/sdricoh_cs.c |
5179 | 4451 | ||
5180 | SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER | 4452 | SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER |
5181 | P: Pierre Ossman | 4453 | M: Pierre Ossman <pierre@ossman.eu> |
5182 | M: pierre@ossman.eu | ||
5183 | L: sdhci-devel@lists.ossman.eu | 4454 | L: sdhci-devel@lists.ossman.eu |
5184 | S: Maintained | 4455 | S: Maintained |
5185 | 4456 | ||
5186 | SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF) | 4457 | SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF) |
5187 | P: Anton Vorontsov | 4458 | M: Anton Vorontsov <avorontsov@ru.mvista.com> |
5188 | M: avorontsov@ru.mvista.com | ||
5189 | L: linuxppc-dev@ozlabs.org | 4459 | L: linuxppc-dev@ozlabs.org |
5190 | L: sdhci-devel@lists.ossman.eu | 4460 | L: sdhci-devel@lists.ossman.eu |
5191 | S: Maintained | 4461 | S: Maintained |
5192 | F: drivers/mmc/host/sdhci.* | 4462 | F: drivers/mmc/host/sdhci.* |
5193 | 4463 | ||
5194 | SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) SAMSUNG DRIVER | 4464 | SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) SAMSUNG DRIVER |
5195 | P: Ben Dooks | 4465 | M: Ben Dooks <ben-linux@fluff.org> |
5196 | M: ben-linux@fluff.org | ||
5197 | L: sdhci-devel@lists.ossman.eu | 4466 | L: sdhci-devel@lists.ossman.eu |
5198 | S: Maintained | 4467 | S: Maintained |
5199 | F: drivers/mmc/host/sdhci-s3c.c | 4468 | F: drivers/mmc/host/sdhci-s3c.c |
5200 | 4469 | ||
5201 | SECURITY SUBSYSTEM | 4470 | SECURITY SUBSYSTEM |
5202 | P: James Morris | 4471 | M: James Morris <jmorris@namei.org> |
5203 | M: jmorris@namei.org | ||
5204 | L: linux-security-module@vger.kernel.org (suggested Cc:) | 4472 | L: linux-security-module@vger.kernel.org (suggested Cc:) |
5205 | T: git git://www.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6.git | 4473 | T: git git://www.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6.git |
5206 | W: http://security.wiki.kernel.org/ | 4474 | W: http://security.wiki.kernel.org/ |
@@ -5208,17 +4476,13 @@ S: Supported | |||
5208 | F: security/ | 4476 | F: security/ |
5209 | 4477 | ||
5210 | SECURITY CONTACT | 4478 | SECURITY CONTACT |
5211 | P: Security Officers | 4479 | M: Security Officers <security@kernel.org> |
5212 | M: security@kernel.org | ||
5213 | S: Supported | 4480 | S: Supported |
5214 | 4481 | ||
5215 | SELINUX SECURITY MODULE | 4482 | SELINUX SECURITY MODULE |
5216 | P: Stephen Smalley | 4483 | M: Stephen Smalley <sds@tycho.nsa.gov> |
5217 | M: sds@tycho.nsa.gov | 4484 | M: James Morris <jmorris@namei.org> |
5218 | P: James Morris | 4485 | M: Eric Paris <eparis@parisplace.org> |
5219 | M: jmorris@namei.org | ||
5220 | P: Eric Paris | ||
5221 | M: eparis@parisplace.org | ||
5222 | L: selinux@tycho.nsa.gov (subscribers-only, general discussion) | 4486 | L: selinux@tycho.nsa.gov (subscribers-only, general discussion) |
5223 | W: http://selinuxproject.org | 4487 | W: http://selinuxproject.org |
5224 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6.git | 4488 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6.git |
@@ -5227,15 +4491,13 @@ F: include/linux/selinux* | |||
5227 | F: security/selinux/ | 4491 | F: security/selinux/ |
5228 | 4492 | ||
5229 | SENSABLE PHANTOM | 4493 | SENSABLE PHANTOM |
5230 | P: Jiri Slaby | 4494 | M: Jiri Slaby <jirislaby@gmail.com> |
5231 | M: jirislaby@gmail.com | ||
5232 | S: Maintained | 4495 | S: Maintained |
5233 | F: drivers/misc/phantom.c | 4496 | F: drivers/misc/phantom.c |
5234 | F: include/linux/phantom.h | 4497 | F: include/linux/phantom.h |
5235 | 4498 | ||
5236 | SERIAL ATA (SATA) SUBSYSTEM | 4499 | SERIAL ATA (SATA) SUBSYSTEM |
5237 | P: Jeff Garzik | 4500 | M: Jeff Garzik <jgarzik@pobox.com> |
5238 | M: jgarzik@pobox.com | ||
5239 | L: linux-ide@vger.kernel.org | 4501 | L: linux-ide@vger.kernel.org |
5240 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev.git | 4502 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev.git |
5241 | S: Supported | 4503 | S: Supported |
@@ -5244,10 +4506,8 @@ F: include/linux/ata.h | |||
5244 | F: include/linux/libata.h | 4506 | F: include/linux/libata.h |
5245 | 4507 | ||
5246 | SERVER ENGINES 10Gbps NIC - BladeEngine 2 DRIVER | 4508 | SERVER ENGINES 10Gbps NIC - BladeEngine 2 DRIVER |
5247 | P: Sathya Perla | 4509 | M: Sathya Perla <sathyap@serverengines.com> |
5248 | M: sathyap@serverengines.com | 4510 | M: Subbu Seetharaman <subbus@serverengines.com> |
5249 | P: Subbu Seetharaman | ||
5250 | M: subbus@serverengines.com | ||
5251 | L: netdev@vger.kernel.org | 4511 | L: netdev@vger.kernel.org |
5252 | W: http://www.serverengines.com | 4512 | W: http://www.serverengines.com |
5253 | S: Supported | 4513 | S: Supported |
@@ -5256,20 +4516,17 @@ F: drivers/net/benet/ | |||
5256 | SFC NETWORK DRIVER | 4516 | SFC NETWORK DRIVER |
5257 | P: Steve Hodgson | 4517 | P: Steve Hodgson |
5258 | P: Ben Hutchings | 4518 | P: Ben Hutchings |
5259 | P: Robert Stonehouse | 4519 | M: Robert Stonehouse <linux-net-drivers@solarflare.com> |
5260 | M: linux-net-drivers@solarflare.com | ||
5261 | S: Supported | 4520 | S: Supported |
5262 | F: drivers/net/sfc/ | 4521 | F: drivers/net/sfc/ |
5263 | 4522 | ||
5264 | SGI GRU DRIVER | 4523 | SGI GRU DRIVER |
5265 | P: Jack Steiner | 4524 | M: Jack Steiner <steiner@sgi.com> |
5266 | M: steiner@sgi.com | ||
5267 | S: Maintained | 4525 | S: Maintained |
5268 | F: drivers/misc/sgi-gru/ | 4526 | F: drivers/misc/sgi-gru/ |
5269 | 4527 | ||
5270 | SGI SN-IA64 (Altix) SERIAL CONSOLE DRIVER | 4528 | SGI SN-IA64 (Altix) SERIAL CONSOLE DRIVER |
5271 | P: Pat Gefre | 4529 | M: Pat Gefre <pfg@sgi.com> |
5272 | M: pfg@sgi.com | ||
5273 | L: linux-ia64@vger.kernel.org | 4530 | L: linux-ia64@vger.kernel.org |
5274 | S: Supported | 4531 | S: Supported |
5275 | F: Documentation/ia64/serial.txt | 4532 | F: Documentation/ia64/serial.txt |
@@ -5277,22 +4534,19 @@ F: drivers/serial/ioc?_serial.c | |||
5277 | F: include/linux/ioc?.h | 4534 | F: include/linux/ioc?.h |
5278 | 4535 | ||
5279 | SGI VISUAL WORKSTATION 320 AND 540 | 4536 | SGI VISUAL WORKSTATION 320 AND 540 |
5280 | P: Andrey Panin | 4537 | M: Andrey Panin <pazke@donpac.ru> |
5281 | M: pazke@donpac.ru | ||
5282 | L: linux-visws-devel@lists.sf.net | 4538 | L: linux-visws-devel@lists.sf.net |
5283 | W: http://linux-visws.sf.net | 4539 | W: http://linux-visws.sf.net |
5284 | S: Maintained for 2.6. | 4540 | S: Maintained for 2.6. |
5285 | F: Documentation/sgi-visws.txt | 4541 | F: Documentation/sgi-visws.txt |
5286 | 4542 | ||
5287 | SGI XP/XPC/XPNET DRIVER | 4543 | SGI XP/XPC/XPNET DRIVER |
5288 | P: Robin Holt | 4544 | M: Robin Holt <holt@sgi.com> |
5289 | M: holt@sgi.com | ||
5290 | S: Maintained | 4545 | S: Maintained |
5291 | F: drivers/misc/sgi-xp/ | 4546 | F: drivers/misc/sgi-xp/ |
5292 | 4547 | ||
5293 | SHARP LH SUPPORT (LH7952X & LH7A40X) | 4548 | SHARP LH SUPPORT (LH7952X & LH7A40X) |
5294 | P: Marc Singer | 4549 | M: Marc Singer <elf@buici.com> |
5295 | M: elf@buici.com | ||
5296 | W: http://projects.buici.com/arm | 4550 | W: http://projects.buici.com/arm |
5297 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) | 4551 | L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) |
5298 | S: Maintained | 4552 | S: Maintained |
@@ -5303,23 +4557,20 @@ F: drivers/usb/gadget/lh7a40* | |||
5303 | F: drivers/usb/host/ohci-lh7a40* | 4557 | F: drivers/usb/host/ohci-lh7a40* |
5304 | 4558 | ||
5305 | SHPC HOTPLUG DRIVER | 4559 | SHPC HOTPLUG DRIVER |
5306 | P: Kristen Carlson Accardi | 4560 | M: Kristen Carlson Accardi <kristen.c.accardi@intel.com> |
5307 | M: kristen.c.accardi@intel.com | ||
5308 | L: linux-pci@vger.kernel.org | 4561 | L: linux-pci@vger.kernel.org |
5309 | S: Supported | 4562 | S: Supported |
5310 | F: drivers/pci/hotplug/shpchp* | 4563 | F: drivers/pci/hotplug/shpchp* |
5311 | 4564 | ||
5312 | SIMTEC EB110ATX (Chalice CATS) | 4565 | SIMTEC EB110ATX (Chalice CATS) |
5313 | P: Ben Dooks | 4566 | P: Ben Dooks |
5314 | P: Vincent Sanders | 4567 | M: Vincent Sanders <support@simtec.co.uk> |
5315 | M: support@simtec.co.uk | ||
5316 | W: http://www.simtec.co.uk/products/EB110ATX/ | 4568 | W: http://www.simtec.co.uk/products/EB110ATX/ |
5317 | S: Supported | 4569 | S: Supported |
5318 | 4570 | ||
5319 | SIMTEC EB2410ITX (BAST) | 4571 | SIMTEC EB2410ITX (BAST) |
5320 | P: Ben Dooks | 4572 | P: Ben Dooks |
5321 | P: Vincent Sanders | 4573 | M: Vincent Sanders <support@simtec.co.uk> |
5322 | M: support@simtec.co.uk | ||
5323 | W: http://www.simtec.co.uk/products/EB2410ITX/ | 4574 | W: http://www.simtec.co.uk/products/EB2410ITX/ |
5324 | S: Supported | 4575 | S: Supported |
5325 | F: arch/arm/mach-s3c2410/ | 4576 | F: arch/arm/mach-s3c2410/ |
@@ -5327,31 +4578,27 @@ F: drivers/*/*s3c2410* | |||
5327 | F: drivers/*/*/*s3c2410* | 4578 | F: drivers/*/*/*s3c2410* |
5328 | 4579 | ||
5329 | SIS 190 ETHERNET DRIVER | 4580 | SIS 190 ETHERNET DRIVER |
5330 | P: Francois Romieu | 4581 | M: Francois Romieu <romieu@fr.zoreil.com> |
5331 | M: romieu@fr.zoreil.com | ||
5332 | L: netdev@vger.kernel.org | 4582 | L: netdev@vger.kernel.org |
5333 | S: Maintained | 4583 | S: Maintained |
5334 | F: drivers/net/sis190.c | 4584 | F: drivers/net/sis190.c |
5335 | 4585 | ||
5336 | SIS 900/7016 FAST ETHERNET DRIVER | 4586 | SIS 900/7016 FAST ETHERNET DRIVER |
5337 | P: Daniele Venzano | 4587 | M: Daniele Venzano <venza@brownhat.org> |
5338 | M: venza@brownhat.org | ||
5339 | W: http://www.brownhat.org/sis900.html | 4588 | W: http://www.brownhat.org/sis900.html |
5340 | L: netdev@vger.kernel.org | 4589 | L: netdev@vger.kernel.org |
5341 | S: Maintained | 4590 | S: Maintained |
5342 | F: drivers/net/sis900.* | 4591 | F: drivers/net/sis900.* |
5343 | 4592 | ||
5344 | SIS 96X I2C/SMBUS DRIVER | 4593 | SIS 96X I2C/SMBUS DRIVER |
5345 | P: Mark M. Hoffman | 4594 | M: "Mark M. Hoffman" <mhoffman@lightlink.com> |
5346 | M: mhoffman@lightlink.com | ||
5347 | L: linux-i2c@vger.kernel.org | 4595 | L: linux-i2c@vger.kernel.org |
5348 | S: Maintained | 4596 | S: Maintained |
5349 | F: Documentation/i2c/busses/i2c-sis96x | 4597 | F: Documentation/i2c/busses/i2c-sis96x |
5350 | F: drivers/i2c/busses/i2c-sis96x.c | 4598 | F: drivers/i2c/busses/i2c-sis96x.c |
5351 | 4599 | ||
5352 | SIS FRAMEBUFFER DRIVER | 4600 | SIS FRAMEBUFFER DRIVER |
5353 | P: Thomas Winischhofer | 4601 | M: Thomas Winischhofer <thomas@winischhofer.net> |
5354 | M: thomas@winischhofer.net | ||
5355 | W: http://www.winischhofer.net/linuxsisvga.shtml | 4602 | W: http://www.winischhofer.net/linuxsisvga.shtml |
5356 | S: Maintained | 4603 | S: Maintained |
5357 | F: Documentation/fb/sisfb.txt | 4604 | F: Documentation/fb/sisfb.txt |
@@ -5359,70 +4606,59 @@ F: drivers/video/sis/ | |||
5359 | F: include/video/sisfb.h | 4606 | F: include/video/sisfb.h |
5360 | 4607 | ||
5361 | SIS USB2VGA DRIVER | 4608 | SIS USB2VGA DRIVER |
5362 | P: Thomas Winischhofer | 4609 | M: Thomas Winischhofer <thomas@winischhofer.net> |
5363 | M: thomas@winischhofer.net | ||
5364 | W: http://www.winischhofer.at/linuxsisusbvga.shtml | 4610 | W: http://www.winischhofer.at/linuxsisusbvga.shtml |
5365 | S: Maintained | 4611 | S: Maintained |
5366 | F: drivers/usb/misc/sisusbvga/ | 4612 | F: drivers/usb/misc/sisusbvga/ |
5367 | 4613 | ||
5368 | SKGE, SKY2 10/100/1000 GIGABIT ETHERNET DRIVERS | 4614 | SKGE, SKY2 10/100/1000 GIGABIT ETHERNET DRIVERS |
5369 | P: Stephen Hemminger | 4615 | M: Stephen Hemminger <shemminger@linux-foundation.org> |
5370 | M: shemminger@linux-foundation.org | ||
5371 | L: netdev@vger.kernel.org | 4616 | L: netdev@vger.kernel.org |
5372 | S: Maintained | 4617 | S: Maintained |
5373 | F: drivers/net/skge.* | 4618 | F: drivers/net/skge.* |
5374 | F: drivers/net/sky2.* | 4619 | F: drivers/net/sky2.* |
5375 | 4620 | ||
5376 | SLAB ALLOCATOR | 4621 | SLAB ALLOCATOR |
5377 | P: Christoph Lameter | 4622 | M: Christoph Lameter <cl@linux-foundation.org> |
5378 | M: cl@linux-foundation.org | 4623 | M: Pekka Enberg <penberg@cs.helsinki.fi> |
5379 | P: Pekka Enberg | 4624 | M: Matt Mackall <mpm@selenic.com> |
5380 | M: penberg@cs.helsinki.fi | ||
5381 | P: Matt Mackall | ||
5382 | M: mpm@selenic.com | ||
5383 | L: linux-mm@kvack.org | 4625 | L: linux-mm@kvack.org |
5384 | S: Maintained | 4626 | S: Maintained |
5385 | F: include/linux/sl?b*.h | 4627 | F: include/linux/sl?b*.h |
5386 | F: mm/sl?b.c | 4628 | F: mm/sl?b.c |
5387 | 4629 | ||
5388 | SMC91x ETHERNET DRIVER | 4630 | SMC91x ETHERNET DRIVER |
5389 | P: Nicolas Pitre | 4631 | M: Nicolas Pitre <nico@cam.org> |
5390 | M: nico@cam.org | ||
5391 | S: Maintained | 4632 | S: Maintained |
5392 | F: drivers/net/smc91x.* | 4633 | F: drivers/net/smc91x.* |
5393 | 4634 | ||
5394 | SMSC47B397 HARDWARE MONITOR DRIVER | 4635 | SMSC47B397 HARDWARE MONITOR DRIVER |
5395 | P: Mark M. Hoffman | 4636 | M: "Mark M. Hoffman" <mhoffman@lightlink.com> |
5396 | M: mhoffman@lightlink.com | ||
5397 | L: lm-sensors@lm-sensors.org | 4637 | L: lm-sensors@lm-sensors.org |
5398 | S: Maintained | 4638 | S: Maintained |
5399 | F: Documentation/hwmon/smsc47b397 | 4639 | F: Documentation/hwmon/smsc47b397 |
5400 | F: drivers/hwmon/smsc47b397.c | 4640 | F: drivers/hwmon/smsc47b397.c |
5401 | 4641 | ||
5402 | SMSC911x ETHERNET DRIVER | 4642 | SMSC911x ETHERNET DRIVER |
5403 | P: Steve Glendinning | 4643 | M: Steve Glendinning <steve.glendinning@smsc.com> |
5404 | M: steve.glendinning@smsc.com | ||
5405 | L: netdev@vger.kernel.org | 4644 | L: netdev@vger.kernel.org |
5406 | S: Supported | 4645 | S: Supported |
5407 | F: include/linux/smsc911x.h | 4646 | F: include/linux/smsc911x.h |
5408 | F: drivers/net/smsc911x.* | 4647 | F: drivers/net/smsc911x.* |
5409 | 4648 | ||
5410 | SMSC9420 PCI ETHERNET DRIVER | 4649 | SMSC9420 PCI ETHERNET DRIVER |
5411 | P: Steve Glendinning | 4650 | M: Steve Glendinning <steve.glendinning@smsc.com> |
5412 | M: steve.glendinning@smsc.com | ||
5413 | L: netdev@vger.kernel.org | 4651 | L: netdev@vger.kernel.org |
5414 | S: Supported | 4652 | S: Supported |
5415 | F: drivers/net/smsc9420.* | 4653 | F: drivers/net/smsc9420.* |
5416 | 4654 | ||
5417 | SMX UIO Interface | 4655 | SMX UIO Interface |
5418 | P: Ben Nizette | 4656 | M: Ben Nizette <bn@niasdigital.com> |
5419 | M: bn@niasdigital.com | ||
5420 | S: Maintained | 4657 | S: Maintained |
5421 | F: drivers/uio/uio_smx.c | 4658 | F: drivers/uio/uio_smx.c |
5422 | 4659 | ||
5423 | SN-IA64 (Itanium) SUB-PLATFORM | 4660 | SN-IA64 (Itanium) SUB-PLATFORM |
5424 | P: Jes Sorensen | 4661 | M: Jes Sorensen <jes@sgi.com> |
5425 | M: jes@sgi.com | ||
5426 | L: linux-altix@sgi.com | 4662 | L: linux-altix@sgi.com |
5427 | L: linux-ia64@vger.kernel.org | 4663 | L: linux-ia64@vger.kernel.org |
5428 | W: http://www.sgi.com/altix | 4664 | W: http://www.sgi.com/altix |
@@ -5430,8 +4666,7 @@ S: Maintained | |||
5430 | F: arch/ia64/sn/ | 4666 | F: arch/ia64/sn/ |
5431 | 4667 | ||
5432 | SOC-CAMERA V4L2 SUBSYSTEM | 4668 | SOC-CAMERA V4L2 SUBSYSTEM |
5433 | P: Guennadi Liakhovetski | 4669 | M: Guennadi Liakhovetski <g.liakhovetski@gmx.de> |
5434 | M: g.liakhovetski@gmx.de | ||
5435 | L: linux-media@vger.kernel.org | 4670 | L: linux-media@vger.kernel.org |
5436 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git | 4671 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git |
5437 | S: Maintained | 4672 | S: Maintained |
@@ -5439,37 +4674,32 @@ F: include/media/v4l2* | |||
5439 | F: drivers/media/video/v4l2* | 4674 | F: drivers/media/video/v4l2* |
5440 | 4675 | ||
5441 | SOEKRIS NET48XX LED SUPPORT | 4676 | SOEKRIS NET48XX LED SUPPORT |
5442 | P: Chris Boot | 4677 | M: Chris Boot <bootc@bootc.net> |
5443 | M: bootc@bootc.net | ||
5444 | S: Maintained | 4678 | S: Maintained |
5445 | F: drivers/leds/leds-net48xx.c | 4679 | F: drivers/leds/leds-net48xx.c |
5446 | 4680 | ||
5447 | SOFTWARE RAID (Multiple Disks) SUPPORT | 4681 | SOFTWARE RAID (Multiple Disks) SUPPORT |
5448 | P: Neil Brown | 4682 | M: Neil Brown <neilb@suse.de> |
5449 | M: neilb@suse.de | ||
5450 | L: linux-raid@vger.kernel.org | 4683 | L: linux-raid@vger.kernel.org |
5451 | S: Supported | 4684 | S: Supported |
5452 | F: drivers/md/ | 4685 | F: drivers/md/ |
5453 | F: include/linux/raid/ | 4686 | F: include/linux/raid/ |
5454 | 4687 | ||
5455 | SONIC NETWORK DRIVER | 4688 | SONIC NETWORK DRIVER |
5456 | P: Thomas Bogendoerfer | 4689 | M: Thomas Bogendoerfer <tsbogend@alpha.franken.de> |
5457 | M: tsbogend@alpha.franken.de | ||
5458 | L: netdev@vger.kernel.org | 4690 | L: netdev@vger.kernel.org |
5459 | S: Maintained | 4691 | S: Maintained |
5460 | F: drivers/net/sonic.* | 4692 | F: drivers/net/sonic.* |
5461 | 4693 | ||
5462 | SONICS SILICON BACKPLANE DRIVER (SSB) | 4694 | SONICS SILICON BACKPLANE DRIVER (SSB) |
5463 | P: Michael Buesch | 4695 | M: Michael Buesch <mb@bu3sch.de> |
5464 | M: mb@bu3sch.de | ||
5465 | L: netdev@vger.kernel.org | 4696 | L: netdev@vger.kernel.org |
5466 | S: Maintained | 4697 | S: Maintained |
5467 | F: drivers/ssb/ | 4698 | F: drivers/ssb/ |
5468 | F: include/linux/ssb/ | 4699 | F: include/linux/ssb/ |
5469 | 4700 | ||
5470 | SONY VAIO CONTROL DEVICE DRIVER | 4701 | SONY VAIO CONTROL DEVICE DRIVER |
5471 | P: Mattia Dongili | 4702 | M: Mattia Dongili <malattia@linux.it> |
5472 | M: malattia@linux.it | ||
5473 | L: linux-acpi@vger.kernel.org | 4703 | L: linux-acpi@vger.kernel.org |
5474 | W: http://www.linux.it/~malattia/wiki/index.php/Sony_drivers | 4704 | W: http://www.linux.it/~malattia/wiki/index.php/Sony_drivers |
5475 | S: Maintained | 4705 | S: Maintained |
@@ -5479,17 +4709,14 @@ F: drivers/platform/x86/sony-laptop.c | |||
5479 | F: include/linux/sony-laptop.h | 4709 | F: include/linux/sony-laptop.h |
5480 | 4710 | ||
5481 | SONY MEMORYSTICK CARD SUPPORT | 4711 | SONY MEMORYSTICK CARD SUPPORT |
5482 | P: Alex Dubov | 4712 | M: Alex Dubov <oakad@yahoo.com> |
5483 | M: oakad@yahoo.com | ||
5484 | W: http://tifmxx.berlios.de/ | 4713 | W: http://tifmxx.berlios.de/ |
5485 | S: Maintained | 4714 | S: Maintained |
5486 | F: drivers/memstick/host/tifm_ms.c | 4715 | F: drivers/memstick/host/tifm_ms.c |
5487 | 4716 | ||
5488 | SOUND | 4717 | SOUND |
5489 | P: Jaroslav Kysela | 4718 | M: Jaroslav Kysela <perex@perex.cz> |
5490 | M: perex@perex.cz | 4719 | M: Takashi Iwai <tiwai@suse.de> |
5491 | P: Takashi Iwai | ||
5492 | M: tiwai@suse.de | ||
5493 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | 4720 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) |
5494 | W: http://www.alsa-project.org/ | 4721 | W: http://www.alsa-project.org/ |
5495 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound-2.6.git | 4722 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound-2.6.git |
@@ -5500,10 +4727,8 @@ F: include/sound/ | |||
5500 | F: sound/ | 4727 | F: sound/ |
5501 | 4728 | ||
5502 | SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC) | 4729 | SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC) |
5503 | P: Liam Girdwood | 4730 | M: Liam Girdwood <lrg@slimlogic.co.uk> |
5504 | M: lrg@slimlogic.co.uk | 4731 | M: Mark Brown <broonie@opensource.wolfsonmicro.com> |
5505 | P: Mark Brown | ||
5506 | M: broonie@opensource.wolfsonmicro.com | ||
5507 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound-2.6.git | 4732 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound-2.6.git |
5508 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | 4733 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) |
5509 | W: http://alsa-project.org/main/index.php/ASoC | 4734 | W: http://alsa-project.org/main/index.php/ASoC |
@@ -5512,8 +4737,7 @@ F: sound/soc/ | |||
5512 | F: include/sound/soc* | 4737 | F: include/sound/soc* |
5513 | 4738 | ||
5514 | SPARC + UltraSPARC (sparc/sparc64) | 4739 | SPARC + UltraSPARC (sparc/sparc64) |
5515 | P: David S. Miller | 4740 | M: "David S. Miller" <davem@davemloft.net> |
5516 | M: davem@davemloft.net | ||
5517 | L: sparclinux@vger.kernel.org | 4741 | L: sparclinux@vger.kernel.org |
5518 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6.git | 4742 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6.git |
5519 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next-2.6.git | 4743 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next-2.6.git |
@@ -5521,15 +4745,13 @@ S: Maintained | |||
5521 | F: arch/sparc/ | 4745 | F: arch/sparc/ |
5522 | 4746 | ||
5523 | SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER | 4747 | SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER |
5524 | P: Roger Wolff | 4748 | M: Roger Wolff <R.E.Wolff@BitWizard.nl> |
5525 | M: R.E.Wolff@BitWizard.nl | ||
5526 | S: Supported | 4749 | S: Supported |
5527 | F: Documentation/serial/specialix.txt | 4750 | F: Documentation/serial/specialix.txt |
5528 | F: drivers/char/specialix* | 4751 | F: drivers/char/specialix* |
5529 | 4752 | ||
5530 | SPI SUBSYSTEM | 4753 | SPI SUBSYSTEM |
5531 | P: David Brownell | 4754 | M: David Brownell <dbrownell@users.sourceforge.net> |
5532 | M: dbrownell@users.sourceforge.net | ||
5533 | L: spi-devel-general@lists.sourceforge.net | 4755 | L: spi-devel-general@lists.sourceforge.net |
5534 | S: Maintained | 4756 | S: Maintained |
5535 | F: Documentation/spi/ | 4757 | F: Documentation/spi/ |
@@ -5537,18 +4759,15 @@ F: drivers/spi/ | |||
5537 | F: include/linux/spi/ | 4759 | F: include/linux/spi/ |
5538 | 4760 | ||
5539 | SPIDERNET NETWORK DRIVER for CELL | 4761 | SPIDERNET NETWORK DRIVER for CELL |
5540 | P: Ishizaki Kou | 4762 | M: Ishizaki Kou <kou.ishizaki@toshiba.co.jp> |
5541 | M: kou.ishizaki@toshiba.co.jp | 4763 | M: Jens Osterkamp <jens@de.ibm.com> |
5542 | P: Jens Osterkamp | ||
5543 | M: jens@de.ibm.com | ||
5544 | L: netdev@vger.kernel.org | 4764 | L: netdev@vger.kernel.org |
5545 | S: Supported | 4765 | S: Supported |
5546 | F: Documentation/networking/spider_net.txt | 4766 | F: Documentation/networking/spider_net.txt |
5547 | F: drivers/net/spider_net* | 4767 | F: drivers/net/spider_net* |
5548 | 4768 | ||
5549 | SPU FILE SYSTEM | 4769 | SPU FILE SYSTEM |
5550 | P: Jeremy Kerr | 4770 | M: Jeremy Kerr <jk@ozlabs.org> |
5551 | M: jk@ozlabs.org | ||
5552 | L: linuxppc-dev@ozlabs.org | 4771 | L: linuxppc-dev@ozlabs.org |
5553 | L: cbe-oss-dev@ozlabs.org | 4772 | L: cbe-oss-dev@ozlabs.org |
5554 | W: http://www.ibm.com/developerworks/power/cell/ | 4773 | W: http://www.ibm.com/developerworks/power/cell/ |
@@ -5557,8 +4776,7 @@ F: Documentation/filesystems/spufs.txt | |||
5557 | F: arch/powerpc/platforms/cell/spufs/ | 4776 | F: arch/powerpc/platforms/cell/spufs/ |
5558 | 4777 | ||
5559 | SQUASHFS FILE SYSTEM | 4778 | SQUASHFS FILE SYSTEM |
5560 | P: Phillip Lougher | 4779 | M: Phillip Lougher <phillip@lougher.demon.co.uk> |
5561 | M: phillip@lougher.demon.co.uk | ||
5562 | L: squashfs-devel@lists.sourceforge.net (subscribers-only) | 4780 | L: squashfs-devel@lists.sourceforge.net (subscribers-only) |
5563 | W: http://squashfs.org.uk | 4781 | W: http://squashfs.org.uk |
5564 | S: Maintained | 4782 | S: Maintained |
@@ -5566,30 +4784,25 @@ F: Documentation/filesystems/squashfs.txt | |||
5566 | F: fs/squashfs/ | 4784 | F: fs/squashfs/ |
5567 | 4785 | ||
5568 | SRM (Alpha) environment access | 4786 | SRM (Alpha) environment access |
5569 | P: Jan-Benedict Glaw | 4787 | M: Jan-Benedict Glaw <jbglaw@lug-owl.de> |
5570 | M: jbglaw@lug-owl.de | ||
5571 | S: Maintained | 4788 | S: Maintained |
5572 | F: arch/alpha/kernel/srm_env.c | 4789 | F: arch/alpha/kernel/srm_env.c |
5573 | 4790 | ||
5574 | STABLE BRANCH | 4791 | STABLE BRANCH |
5575 | P: Greg Kroah-Hartman | 4792 | M: Greg Kroah-Hartman <greg@kroah.com> |
5576 | M: greg@kroah.com | 4793 | M: Chris Wright <chrisw@sous-sol.org> |
5577 | P: Chris Wright | ||
5578 | M: chrisw@sous-sol.org | ||
5579 | L: stable@kernel.org | 4794 | L: stable@kernel.org |
5580 | S: Maintained | 4795 | S: Maintained |
5581 | 4796 | ||
5582 | STAGING SUBSYSTEM | 4797 | STAGING SUBSYSTEM |
5583 | P: Greg Kroah-Hartman | 4798 | M: Greg Kroah-Hartman <gregkh@suse.de> |
5584 | M: gregkh@suse.de | ||
5585 | T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/ | 4799 | T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/ |
5586 | L: devel@driverdev.osuosl.org | 4800 | L: devel@driverdev.osuosl.org |
5587 | S: Maintained | 4801 | S: Maintained |
5588 | F: drivers/staging/ | 4802 | F: drivers/staging/ |
5589 | 4803 | ||
5590 | STARFIRE/DURALAN NETWORK DRIVER | 4804 | STARFIRE/DURALAN NETWORK DRIVER |
5591 | P: Ion Badulescu | 4805 | M: Ion Badulescu <ionut@badula.org> |
5592 | M: ionut@badula.org | ||
5593 | S: Odd Fixes | 4806 | S: Odd Fixes |
5594 | F: drivers/net/starfire* | 4807 | F: drivers/net/starfire* |
5595 | 4808 | ||
@@ -5599,15 +4812,13 @@ F: drivers/net/wireless/strip.c | |||
5599 | F: include/linux/if_strip.h | 4812 | F: include/linux/if_strip.h |
5600 | 4813 | ||
5601 | STRADIS MPEG-2 DECODER DRIVER | 4814 | STRADIS MPEG-2 DECODER DRIVER |
5602 | P: Nathan Laredo | 4815 | M: Nathan Laredo <laredo@gnu.org> |
5603 | M: laredo@gnu.org | ||
5604 | W: http://www.stradis.com/ | 4816 | W: http://www.stradis.com/ |
5605 | S: Maintained | 4817 | S: Maintained |
5606 | F: drivers/media/video/stradis.c | 4818 | F: drivers/media/video/stradis.c |
5607 | 4819 | ||
5608 | SUN3/3X | 4820 | SUN3/3X |
5609 | P: Sam Creasey | 4821 | M: Sam Creasey <sammy@sammy.net> |
5610 | M: sammy@sammy.net | ||
5611 | W: http://sammy.net/sun3/ | 4822 | W: http://sammy.net/sun3/ |
5612 | S: Maintained | 4823 | S: Maintained |
5613 | F: arch/m68k/kernel/*sun3* | 4824 | F: arch/m68k/kernel/*sun3* |
@@ -5615,8 +4826,7 @@ F: arch/m68k/sun3*/ | |||
5615 | F: arch/m68k/include/asm/sun3* | 4826 | F: arch/m68k/include/asm/sun3* |
5616 | 4827 | ||
5617 | SUPERH | 4828 | SUPERH |
5618 | P: Paul Mundt | 4829 | M: Paul Mundt <lethal@linux-sh.org> |
5619 | M: lethal@linux-sh.org | ||
5620 | L: linux-sh@vger.kernel.org | 4830 | L: linux-sh@vger.kernel.org |
5621 | W: http://www.linux-sh.org | 4831 | W: http://www.linux-sh.org |
5622 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6.git | 4832 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6.git |
@@ -5626,12 +4836,9 @@ F: arch/sh/ | |||
5626 | F: drivers/sh/ | 4836 | F: drivers/sh/ |
5627 | 4837 | ||
5628 | SUSPEND TO RAM | 4838 | SUSPEND TO RAM |
5629 | P: Len Brown | 4839 | M: Len Brown <len.brown@intel.com> |
5630 | M: len.brown@intel.com | 4840 | M: Pavel Machek <pavel@ucw.cz> |
5631 | P: Pavel Machek | 4841 | M: "Rafael J. Wysocki" <rjw@sisk.pl> |
5632 | M: pavel@ucw.cz | ||
5633 | P: Rafael J. Wysocki | ||
5634 | M: rjw@sisk.pl | ||
5635 | L: linux-pm@lists.linux-foundation.org | 4842 | L: linux-pm@lists.linux-foundation.org |
5636 | S: Supported | 4843 | S: Supported |
5637 | F: Documentation/power/ | 4844 | F: Documentation/power/ |
@@ -5643,32 +4850,28 @@ F: include/linux/freezer.h | |||
5643 | F: include/linux/pm.h | 4850 | F: include/linux/pm.h |
5644 | 4851 | ||
5645 | SVGA HANDLING | 4852 | SVGA HANDLING |
5646 | P: Martin Mares | 4853 | M: Martin Mares <mj@ucw.cz> |
5647 | M: mj@ucw.cz | ||
5648 | L: linux-video@atrey.karlin.mff.cuni.cz | 4854 | L: linux-video@atrey.karlin.mff.cuni.cz |
5649 | S: Maintained | 4855 | S: Maintained |
5650 | F: Documentation/svga.txt | 4856 | F: Documentation/svga.txt |
5651 | F: arch/x86/boot/video* | 4857 | F: arch/x86/boot/video* |
5652 | 4858 | ||
5653 | SYSV FILESYSTEM | 4859 | SYSV FILESYSTEM |
5654 | P: Christoph Hellwig | 4860 | M: Christoph Hellwig <hch@infradead.org> |
5655 | M: hch@infradead.org | ||
5656 | S: Maintained | 4861 | S: Maintained |
5657 | F: Documentation/filesystems/sysv-fs.txt | 4862 | F: Documentation/filesystems/sysv-fs.txt |
5658 | F: fs/sysv/ | 4863 | F: fs/sysv/ |
5659 | F: include/linux/sysv_fs.h | 4864 | F: include/linux/sysv_fs.h |
5660 | 4865 | ||
5661 | TASKSTATS STATISTICS INTERFACE | 4866 | TASKSTATS STATISTICS INTERFACE |
5662 | P: Balbir Singh | 4867 | M: Balbir Singh <balbir@linux.vnet.ibm.com> |
5663 | M: balbir@linux.vnet.ibm.com | ||
5664 | S: Maintained | 4868 | S: Maintained |
5665 | F: Documentation/accounting/taskstats* | 4869 | F: Documentation/accounting/taskstats* |
5666 | F: include/linux/taskstats* | 4870 | F: include/linux/taskstats* |
5667 | F: kernel/taskstats.c | 4871 | F: kernel/taskstats.c |
5668 | 4872 | ||
5669 | TC CLASSIFIER | 4873 | TC CLASSIFIER |
5670 | P: Jamal Hadi Salim | 4874 | M: Jamal Hadi Salim <hadi@cyberus.ca> |
5671 | M: hadi@cyberus.ca | ||
5672 | L: netdev@vger.kernel.org | 4875 | L: netdev@vger.kernel.org |
5673 | S: Maintained | 4876 | S: Maintained |
5674 | F: include/linux/pkt_cls.h | 4877 | F: include/linux/pkt_cls.h |
@@ -5676,38 +4879,31 @@ F: include/net/pkt_cls.h | |||
5676 | F: net/sched/ | 4879 | F: net/sched/ |
5677 | 4880 | ||
5678 | TCP LOW PRIORITY MODULE | 4881 | TCP LOW PRIORITY MODULE |
5679 | P: Wong Hoi Sing, Edison | 4882 | M: "Wong Hoi Sing, Edison" <hswong3i@gmail.com> |
5680 | M: hswong3i@gmail.com | 4883 | M: "Hung Hing Lun, Mike" <hlhung3i@gmail.com> |
5681 | P: Hung Hing Lun, Mike | ||
5682 | M: hlhung3i@gmail.com | ||
5683 | W: http://tcp-lp-mod.sourceforge.net/ | 4884 | W: http://tcp-lp-mod.sourceforge.net/ |
5684 | S: Maintained | 4885 | S: Maintained |
5685 | F: net/ipv4/tcp_lp.c | 4886 | F: net/ipv4/tcp_lp.c |
5686 | 4887 | ||
5687 | TEHUTI ETHERNET DRIVER | 4888 | TEHUTI ETHERNET DRIVER |
5688 | P: Alexander Indenbaum | 4889 | M: Alexander Indenbaum <baum@tehutinetworks.net> |
5689 | M: baum@tehutinetworks.net | 4890 | M: Andy Gospodarek <andy@greyhouse.net> |
5690 | P: Andy Gospodarek | ||
5691 | M: andy@greyhouse.net | ||
5692 | L: netdev@vger.kernel.org | 4891 | L: netdev@vger.kernel.org |
5693 | S: Supported | 4892 | S: Supported |
5694 | F: drivers/net/tehuti* | 4893 | F: drivers/net/tehuti* |
5695 | 4894 | ||
5696 | Telecom Clock Driver for MCPL0010 | 4895 | Telecom Clock Driver for MCPL0010 |
5697 | P: Mark Gross | 4896 | M: Mark Gross <mark.gross@intel.com> |
5698 | M: mark.gross@intel.com | ||
5699 | S: Supported | 4897 | S: Supported |
5700 | F: drivers/char/tlclk.c | 4898 | F: drivers/char/tlclk.c |
5701 | 4899 | ||
5702 | TENSILICA XTENSA PORT (xtensa) | 4900 | TENSILICA XTENSA PORT (xtensa) |
5703 | P: Chris Zankel | 4901 | M: Chris Zankel <chris@zankel.net> |
5704 | M: chris@zankel.net | ||
5705 | S: Maintained | 4902 | S: Maintained |
5706 | F: arch/xtensa/ | 4903 | F: arch/xtensa/ |
5707 | 4904 | ||
5708 | THINKPAD ACPI EXTRAS DRIVER | 4905 | THINKPAD ACPI EXTRAS DRIVER |
5709 | P: Henrique de Moraes Holschuh | 4906 | M: Henrique de Moraes Holschuh <ibm-acpi@hmh.eng.br> |
5710 | M: ibm-acpi@hmh.eng.br | ||
5711 | L: ibm-acpi-devel@lists.sourceforge.net | 4907 | L: ibm-acpi-devel@lists.sourceforge.net |
5712 | W: http://ibm-acpi.sourceforge.net | 4908 | W: http://ibm-acpi.sourceforge.net |
5713 | W: http://thinkwiki.org/wiki/Ibm-acpi | 4909 | W: http://thinkwiki.org/wiki/Ibm-acpi |
@@ -5716,27 +4912,22 @@ S: Maintained | |||
5716 | F: drivers/platform/x86/thinkpad_acpi.c | 4912 | F: drivers/platform/x86/thinkpad_acpi.c |
5717 | 4913 | ||
5718 | TI FLASH MEDIA INTERFACE DRIVER | 4914 | TI FLASH MEDIA INTERFACE DRIVER |
5719 | P: Alex Dubov | 4915 | M: Alex Dubov <oakad@yahoo.com> |
5720 | M: oakad@yahoo.com | ||
5721 | S: Maintained | 4916 | S: Maintained |
5722 | F: drivers/misc/tifm* | 4917 | F: drivers/misc/tifm* |
5723 | F: drivers/mmc/host/tifm_sd.c | 4918 | F: drivers/mmc/host/tifm_sd.c |
5724 | F: include/linux/tifm.h | 4919 | F: include/linux/tifm.h |
5725 | 4920 | ||
5726 | TI TWL4030 SERIES SOC CODEC DRIVER | 4921 | TI TWL4030 SERIES SOC CODEC DRIVER |
5727 | P: Peter Ujfalusi | 4922 | M: Peter Ujfalusi <peter.ujfalusi@nokia.com> |
5728 | M: peter.ujfalusi@nokia.com | ||
5729 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | 4923 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) |
5730 | S: Maintained | 4924 | S: Maintained |
5731 | F: sound/soc/codecs/twl4030* | 4925 | F: sound/soc/codecs/twl4030* |
5732 | 4926 | ||
5733 | TIPC NETWORK LAYER | 4927 | TIPC NETWORK LAYER |
5734 | P: Per Liden | 4928 | M: Per Liden <per.liden@ericsson.com> |
5735 | M: per.liden@ericsson.com | 4929 | M: Jon Maloy <jon.maloy@ericsson.com> |
5736 | P: Jon Maloy | 4930 | M: Allan Stephens <allan.stephens@windriver.com> |
5737 | M: jon.maloy@ericsson.com | ||
5738 | P: Allan Stephens | ||
5739 | M: allan.stephens@windriver.com | ||
5740 | L: tipc-discussion@lists.sourceforge.net | 4931 | L: tipc-discussion@lists.sourceforge.net |
5741 | W: http://tipc.sourceforge.net/ | 4932 | W: http://tipc.sourceforge.net/ |
5742 | W: http://tipc.cslab.ericsson.net/ | 4933 | W: http://tipc.cslab.ericsson.net/ |
@@ -5747,8 +4938,7 @@ F: include/net/tipc/ | |||
5747 | F: net/tipc/ | 4938 | F: net/tipc/ |
5748 | 4939 | ||
5749 | TLAN NETWORK DRIVER | 4940 | TLAN NETWORK DRIVER |
5750 | P: Samuel Chessman | 4941 | M: Samuel Chessman <chessman@tux.org> |
5751 | M: chessman@tux.org | ||
5752 | L: tlan-devel@lists.sourceforge.net (subscribers-only) | 4942 | L: tlan-devel@lists.sourceforge.net (subscribers-only) |
5753 | W: http://sourceforge.net/projects/tlan/ | 4943 | W: http://sourceforge.net/projects/tlan/ |
5754 | S: Maintained | 4944 | S: Maintained |
@@ -5756,10 +4946,8 @@ F: Documentation/networking/tlan.txt | |||
5756 | F: drivers/net/tlan.* | 4946 | F: drivers/net/tlan.* |
5757 | 4947 | ||
5758 | TOMOYO SECURITY MODULE | 4948 | TOMOYO SECURITY MODULE |
5759 | P: Kentaro Takeda | 4949 | M: Kentaro Takeda <takedakn@nttdata.co.jp> |
5760 | M: takedakn@nttdata.co.jp | 4950 | M: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp> |
5761 | P: Tetsuo Handa | ||
5762 | M: penguin-kernel@I-love.SAKURA.ne.jp | ||
5763 | L: tomoyo-users-en@lists.sourceforge.jp (subscribers-only, for developers and users in English) | 4951 | L: tomoyo-users-en@lists.sourceforge.jp (subscribers-only, for developers and users in English) |
5764 | L: tomoyo-dev@lists.sourceforge.jp (subscribers-only, for developers in Japanese) | 4952 | L: tomoyo-dev@lists.sourceforge.jp (subscribers-only, for developers in Japanese) |
5765 | L: tomoyo-users@lists.sourceforge.jp (subscribers-only, for users in Japanese) | 4953 | L: tomoyo-users@lists.sourceforge.jp (subscribers-only, for users in Japanese) |
@@ -5773,8 +4961,7 @@ S: Orphan | |||
5773 | F: drivers/platform/x86/toshiba_acpi.c | 4961 | F: drivers/platform/x86/toshiba_acpi.c |
5774 | 4962 | ||
5775 | TOSHIBA SMM DRIVER | 4963 | TOSHIBA SMM DRIVER |
5776 | P: Jonathan Buzzard | 4964 | M: Jonathan Buzzard <jonathan@buzzard.org.uk> |
5777 | M: jonathan@buzzard.org.uk | ||
5778 | L: tlinux-users@tce.toshiba-dme.co.jp | 4965 | L: tlinux-users@tce.toshiba-dme.co.jp |
5779 | W: http://www.buzzard.org.uk/toshiba/ | 4966 | W: http://www.buzzard.org.uk/toshiba/ |
5780 | S: Maintained | 4967 | S: Maintained |
@@ -5782,43 +4969,34 @@ F: drivers/char/toshiba.c | |||
5782 | F: include/linux/toshiba.h | 4969 | F: include/linux/toshiba.h |
5783 | 4970 | ||
5784 | TMIO MMC DRIVER | 4971 | TMIO MMC DRIVER |
5785 | P: Ian Molton | 4972 | M: Ian Molton <ian@mnementh.co.uk> |
5786 | M: ian@mnementh.co.uk | ||
5787 | S: Maintained | 4973 | S: Maintained |
5788 | F: drivers/mmc/host/tmio_mmc.* | 4974 | F: drivers/mmc/host/tmio_mmc.* |
5789 | 4975 | ||
5790 | TMPFS (SHMEM FILESYSTEM) | 4976 | TMPFS (SHMEM FILESYSTEM) |
5791 | P: Hugh Dickins | 4977 | M: Hugh Dickins <hugh.dickins@tiscali.co.uk> |
5792 | M: hugh.dickins@tiscali.co.uk | ||
5793 | L: linux-mm@kvack.org | 4978 | L: linux-mm@kvack.org |
5794 | S: Maintained | 4979 | S: Maintained |
5795 | F: include/linux/shmem_fs.h | 4980 | F: include/linux/shmem_fs.h |
5796 | F: mm/shmem.c | 4981 | F: mm/shmem.c |
5797 | 4982 | ||
5798 | TPM DEVICE DRIVER | 4983 | TPM DEVICE DRIVER |
5799 | P: Debora Velarde | 4984 | M: Debora Velarde <debora@linux.vnet.ibm.com> |
5800 | M: debora@linux.vnet.ibm.com | 4985 | M: Rajiv Andrade <srajiv@linux.vnet.ibm.com> |
5801 | P: Rajiv Andrade | ||
5802 | M: srajiv@linux.vnet.ibm.com | ||
5803 | W: http://tpmdd.sourceforge.net | 4986 | W: http://tpmdd.sourceforge.net |
5804 | P: Marcel Selhorst | 4987 | M: Marcel Selhorst <m.selhorst@sirrix.com> |
5805 | M: m.selhorst@sirrix.com | ||
5806 | W: http://www.sirrix.com | 4988 | W: http://www.sirrix.com |
5807 | L: tpmdd-devel@lists.sourceforge.net (moderated for non-subscribers) | 4989 | L: tpmdd-devel@lists.sourceforge.net (moderated for non-subscribers) |
5808 | S: Maintained | 4990 | S: Maintained |
5809 | F: drivers/char/tpm/ | 4991 | F: drivers/char/tpm/ |
5810 | 4992 | ||
5811 | TRIVIAL PATCHES | 4993 | TRIVIAL PATCHES |
5812 | P: Jiri Kosina | 4994 | M: Jiri Kosina <trivial@kernel.org> |
5813 | M: trivial@kernel.org | ||
5814 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial.git | 4995 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial.git |
5815 | S: Maintained | 4996 | S: Maintained |
5816 | 4997 | ||
5817 | TTY LAYER | 4998 | TTY LAYER |
5818 | P: Alan Cox | 4999 | S: Orphan |
5819 | M: alan@lxorguk.ukuu.org.uk | ||
5820 | S: Maintained | ||
5821 | T: stgit http://zeniv.linux.org.uk/~alan/ttydev/ | ||
5822 | F: drivers/char/tty_* | 5000 | F: drivers/char/tty_* |
5823 | F: drivers/serial/serial_core.c | 5001 | F: drivers/serial/serial_core.c |
5824 | F: include/linux/serial_core.h | 5002 | F: include/linux/serial_core.h |
@@ -5826,17 +5004,14 @@ F: include/linux/serial.h | |||
5826 | F: include/linux/tty.h | 5004 | F: include/linux/tty.h |
5827 | 5005 | ||
5828 | TULIP NETWORK DRIVERS | 5006 | TULIP NETWORK DRIVERS |
5829 | P: Grant Grundler | 5007 | M: Grant Grundler <grundler@parisc-linux.org> |
5830 | M: grundler@parisc-linux.org | 5008 | M: Kyle McMartin <kyle@mcmartin.ca> |
5831 | P: Kyle McMartin | ||
5832 | M: kyle@mcmartin.ca | ||
5833 | L: netdev@vger.kernel.org | 5009 | L: netdev@vger.kernel.org |
5834 | S: Maintained | 5010 | S: Maintained |
5835 | F: drivers/net/tulip/ | 5011 | F: drivers/net/tulip/ |
5836 | 5012 | ||
5837 | TUN/TAP driver | 5013 | TUN/TAP driver |
5838 | P: Maxim Krasnyansky | 5014 | M: Maxim Krasnyansky <maxk@qualcomm.com> |
5839 | M: maxk@qualcomm.com | ||
5840 | L: vtun@office.satix.net | 5015 | L: vtun@office.satix.net |
5841 | W: http://vtun.sourceforge.net/tun | 5016 | W: http://vtun.sourceforge.net/tun |
5842 | S: Maintained | 5017 | S: Maintained |
@@ -5844,24 +5019,20 @@ F: Documentation/networking/tuntap.txt | |||
5844 | F: arch/um/os-Linux/drivers/ | 5019 | F: arch/um/os-Linux/drivers/ |
5845 | 5020 | ||
5846 | TURBOCHANNEL SUBSYSTEM | 5021 | TURBOCHANNEL SUBSYSTEM |
5847 | P: Maciej W. Rozycki | 5022 | M: "Maciej W. Rozycki" <macro@linux-mips.org> |
5848 | M: macro@linux-mips.org | ||
5849 | S: Maintained | 5023 | S: Maintained |
5850 | F: drivers/tc/ | 5024 | F: drivers/tc/ |
5851 | F: include/linux/tc.h | 5025 | F: include/linux/tc.h |
5852 | 5026 | ||
5853 | U14-34F SCSI DRIVER | 5027 | U14-34F SCSI DRIVER |
5854 | P: Dario Ballabio | 5028 | M: Dario Ballabio <ballabio_dario@emc.com> |
5855 | M: ballabio_dario@emc.com | ||
5856 | L: linux-scsi@vger.kernel.org | 5029 | L: linux-scsi@vger.kernel.org |
5857 | S: Maintained | 5030 | S: Maintained |
5858 | F: drivers/scsi/u14-34f.c | 5031 | F: drivers/scsi/u14-34f.c |
5859 | 5032 | ||
5860 | UBI FILE SYSTEM (UBIFS) | 5033 | UBI FILE SYSTEM (UBIFS) |
5861 | P: Artem Bityutskiy | 5034 | M: Artem Bityutskiy <dedekind@infradead.org> |
5862 | M: dedekind@infradead.org | 5035 | M: Adrian Hunter <adrian.hunter@nokia.com> |
5863 | P: Adrian Hunter | ||
5864 | M: adrian.hunter@nokia.com | ||
5865 | L: linux-mtd@lists.infradead.org | 5036 | L: linux-mtd@lists.infradead.org |
5866 | T: git git://git.infradead.org/ubifs-2.6.git | 5037 | T: git git://git.infradead.org/ubifs-2.6.git |
5867 | W: http://www.linux-mtd.infradead.org/doc/ubifs.html | 5038 | W: http://www.linux-mtd.infradead.org/doc/ubifs.html |
@@ -5870,37 +5041,32 @@ F: Documentation/filesystems/ubifs.txt | |||
5870 | F: fs/ubifs/ | 5041 | F: fs/ubifs/ |
5871 | 5042 | ||
5872 | UCLINUX (AND M68KNOMMU) | 5043 | UCLINUX (AND M68KNOMMU) |
5873 | P: Greg Ungerer | 5044 | M: Greg Ungerer <gerg@uclinux.org> |
5874 | M: gerg@uclinux.org | ||
5875 | W: http://www.uclinux.org/ | 5045 | W: http://www.uclinux.org/ |
5876 | L: uclinux-dev@uclinux.org (subscribers-only) | 5046 | L: uclinux-dev@uclinux.org (subscribers-only) |
5877 | S: Maintained | 5047 | S: Maintained |
5878 | F: arch/m68knommu/ | 5048 | F: arch/m68knommu/ |
5879 | 5049 | ||
5880 | UCLINUX FOR RENESAS H8/300 (H8300) | 5050 | UCLINUX FOR RENESAS H8/300 (H8300) |
5881 | P: Yoshinori Sato | 5051 | M: Yoshinori Sato <ysato@users.sourceforge.jp> |
5882 | M: ysato@users.sourceforge.jp | ||
5883 | W: http://uclinux-h8.sourceforge.jp/ | 5052 | W: http://uclinux-h8.sourceforge.jp/ |
5884 | S: Supported | 5053 | S: Supported |
5885 | 5054 | ||
5886 | UDF FILESYSTEM | 5055 | UDF FILESYSTEM |
5887 | P: Jan Kara | 5056 | M: Jan Kara <jack@suse.cz> |
5888 | M: jack@suse.cz | ||
5889 | W: http://linux-udf.sourceforge.net | 5057 | W: http://linux-udf.sourceforge.net |
5890 | S: Maintained | 5058 | S: Maintained |
5891 | F: Documentation/filesystems/udf.txt | 5059 | F: Documentation/filesystems/udf.txt |
5892 | F: fs/udf/ | 5060 | F: fs/udf/ |
5893 | 5061 | ||
5894 | UFS FILESYSTEM | 5062 | UFS FILESYSTEM |
5895 | P: Evgeniy Dushistov | 5063 | M: Evgeniy Dushistov <dushistov@mail.ru> |
5896 | M: dushistov@mail.ru | ||
5897 | S: Maintained | 5064 | S: Maintained |
5898 | F: Documentation/filesystems/ufs.txt | 5065 | F: Documentation/filesystems/ufs.txt |
5899 | F: fs/ufs/ | 5066 | F: fs/ufs/ |
5900 | 5067 | ||
5901 | ULTRA-WIDEBAND (UWB) SUBSYSTEM: | 5068 | ULTRA-WIDEBAND (UWB) SUBSYSTEM: |
5902 | P: David Vrabel | 5069 | M: David Vrabel <david.vrabel@csr.com> |
5903 | M: david.vrabel@csr.com | ||
5904 | L: linux-usb@vger.kernel.org | 5070 | L: linux-usb@vger.kernel.org |
5905 | S: Supported | 5071 | S: Supported |
5906 | F: drivers/uwb/* | 5072 | F: drivers/uwb/* |
@@ -5908,8 +5074,7 @@ F: include/linux/uwb.h | |||
5908 | F: include/linux/uwb/ | 5074 | F: include/linux/uwb/ |
5909 | 5075 | ||
5910 | UNIFORM CDROM DRIVER | 5076 | UNIFORM CDROM DRIVER |
5911 | P: Jens Axboe | 5077 | M: Jens Axboe <axboe@kernel.dk> |
5912 | M: axboe@kernel.dk | ||
5913 | W: http://www.kernel.dk | 5078 | W: http://www.kernel.dk |
5914 | S: Maintained | 5079 | S: Maintained |
5915 | F: Documentation/cdrom/ | 5080 | F: Documentation/cdrom/ |
@@ -5917,8 +5082,7 @@ F: drivers/cdrom/cdrom.c | |||
5917 | F: include/linux/cdrom.h | 5082 | F: include/linux/cdrom.h |
5918 | 5083 | ||
5919 | UNSORTED BLOCK IMAGES (UBI) | 5084 | UNSORTED BLOCK IMAGES (UBI) |
5920 | P: Artem Bityutskiy | 5085 | M: Artem Bityutskiy <dedekind@infradead.org> |
5921 | M: dedekind@infradead.org | ||
5922 | W: http://www.linux-mtd.infradead.org/ | 5086 | W: http://www.linux-mtd.infradead.org/ |
5923 | L: linux-mtd@lists.infradead.org | 5087 | L: linux-mtd@lists.infradead.org |
5924 | T: git git://git.infradead.org/ubi-2.6.git | 5088 | T: git git://git.infradead.org/ubi-2.6.git |
@@ -5928,23 +5092,20 @@ F: include/linux/mtd/ubi.h | |||
5928 | F: include/mtd/ubi-user.h | 5092 | F: include/mtd/ubi-user.h |
5929 | 5093 | ||
5930 | USB ACM DRIVER | 5094 | USB ACM DRIVER |
5931 | P: Oliver Neukum | 5095 | M: Oliver Neukum <oliver@neukum.name> |
5932 | M: oliver@neukum.name | ||
5933 | L: linux-usb@vger.kernel.org | 5096 | L: linux-usb@vger.kernel.org |
5934 | S: Maintained | 5097 | S: Maintained |
5935 | F: Documentation/usb/acm.txt | 5098 | F: Documentation/usb/acm.txt |
5936 | F: drivers/usb/class/cdc-acm.* | 5099 | F: drivers/usb/class/cdc-acm.* |
5937 | 5100 | ||
5938 | USB BLOCK DRIVER (UB ub) | 5101 | USB BLOCK DRIVER (UB ub) |
5939 | P: Pete Zaitcev | 5102 | M: Pete Zaitcev <zaitcev@redhat.com> |
5940 | M: zaitcev@redhat.com | ||
5941 | L: linux-usb@vger.kernel.org | 5103 | L: linux-usb@vger.kernel.org |
5942 | S: Supported | 5104 | S: Supported |
5943 | F: drivers/block/ub.c | 5105 | F: drivers/block/ub.c |
5944 | 5106 | ||
5945 | USB CDC ETHERNET DRIVER | 5107 | USB CDC ETHERNET DRIVER |
5946 | P: Greg Kroah-Hartman | 5108 | M: Greg Kroah-Hartman <greg@kroah.com> |
5947 | M: greg@kroah.com | ||
5948 | L: linux-usb@vger.kernel.org | 5109 | L: linux-usb@vger.kernel.org |
5949 | S: Maintained | 5110 | S: Maintained |
5950 | W: http://www.kroah.com/linux-usb/ | 5111 | W: http://www.kroah.com/linux-usb/ |
@@ -5952,39 +5113,34 @@ F: drivers/net/usb/cdc_*.c | |||
5952 | F: include/linux/usb/cdc.h | 5113 | F: include/linux/usb/cdc.h |
5953 | 5114 | ||
5954 | USB CYPRESS C67X00 DRIVER | 5115 | USB CYPRESS C67X00 DRIVER |
5955 | P: Peter Korsgaard | 5116 | M: Peter Korsgaard <jacmet@sunsite.dk> |
5956 | M: jacmet@sunsite.dk | ||
5957 | L: linux-usb@vger.kernel.org | 5117 | L: linux-usb@vger.kernel.org |
5958 | S: Maintained | 5118 | S: Maintained |
5959 | F: drivers/usb/c67x00/ | 5119 | F: drivers/usb/c67x00/ |
5960 | 5120 | ||
5961 | USB DAVICOM DM9601 DRIVER | 5121 | USB DAVICOM DM9601 DRIVER |
5962 | P: Peter Korsgaard | 5122 | M: Peter Korsgaard <jacmet@sunsite.dk> |
5963 | M: jacmet@sunsite.dk | ||
5964 | L: netdev@vger.kernel.org | 5123 | L: netdev@vger.kernel.org |
5965 | W: http://www.linux-usb.org/usbnet | 5124 | W: http://www.linux-usb.org/usbnet |
5966 | S: Maintained | 5125 | S: Maintained |
5967 | F: drivers/net/usb/dm9601.c | 5126 | F: drivers/net/usb/dm9601.c |
5968 | 5127 | ||
5969 | USB DIAMOND RIO500 DRIVER | 5128 | USB DIAMOND RIO500 DRIVER |
5970 | P: Cesar Miquel | 5129 | M: Cesar Miquel <miquel@df.uba.ar> |
5971 | M: miquel@df.uba.ar | ||
5972 | L: rio500-users@lists.sourceforge.net | 5130 | L: rio500-users@lists.sourceforge.net |
5973 | W: http://rio500.sourceforge.net | 5131 | W: http://rio500.sourceforge.net |
5974 | S: Maintained | 5132 | S: Maintained |
5975 | F: drivers/usb/misc/rio500* | 5133 | F: drivers/usb/misc/rio500* |
5976 | 5134 | ||
5977 | USB EHCI DRIVER | 5135 | USB EHCI DRIVER |
5978 | P: David Brownell | 5136 | M: David Brownell <dbrownell@users.sourceforge.net> |
5979 | M: dbrownell@users.sourceforge.net | ||
5980 | L: linux-usb@vger.kernel.org | 5137 | L: linux-usb@vger.kernel.org |
5981 | S: Odd Fixes | 5138 | S: Odd Fixes |
5982 | F: Documentation/usb/ehci.txt | 5139 | F: Documentation/usb/ehci.txt |
5983 | F: drivers/usb/host/ehci* | 5140 | F: drivers/usb/host/ehci* |
5984 | 5141 | ||
5985 | USB ET61X[12]51 DRIVER | 5142 | USB ET61X[12]51 DRIVER |
5986 | P: Luca Risolia | 5143 | M: Luca Risolia <luca.risolia@studio.unibo.it> |
5987 | M: luca.risolia@studio.unibo.it | ||
5988 | L: linux-usb@vger.kernel.org | 5144 | L: linux-usb@vger.kernel.org |
5989 | L: linux-media@vger.kernel.org | 5145 | L: linux-media@vger.kernel.org |
5990 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git | 5146 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git |
@@ -5993,8 +5149,7 @@ S: Maintained | |||
5993 | F: drivers/media/video/et61x251/ | 5149 | F: drivers/media/video/et61x251/ |
5994 | 5150 | ||
5995 | USB GADGET/PERIPHERAL SUBSYSTEM | 5151 | USB GADGET/PERIPHERAL SUBSYSTEM |
5996 | P: David Brownell | 5152 | M: David Brownell <dbrownell@users.sourceforge.net> |
5997 | M: dbrownell@users.sourceforge.net | ||
5998 | L: linux-usb@vger.kernel.org | 5153 | L: linux-usb@vger.kernel.org |
5999 | W: http://www.linux-usb.org/gadget | 5154 | W: http://www.linux-usb.org/gadget |
6000 | S: Maintained | 5155 | S: Maintained |
@@ -6002,8 +5157,7 @@ F: drivers/usb/gadget/ | |||
6002 | F: include/linux/usb/gadget* | 5157 | F: include/linux/usb/gadget* |
6003 | 5158 | ||
6004 | USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...) | 5159 | USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...) |
6005 | P: Jiri Kosina | 5160 | M: Jiri Kosina <jkosina@suse.cz> |
6006 | M: jkosina@suse.cz | ||
6007 | L: linux-usb@vger.kernel.org | 5161 | L: linux-usb@vger.kernel.org |
6008 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git | 5162 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git |
6009 | S: Maintained | 5163 | S: Maintained |
@@ -6011,23 +5165,20 @@ F: Documentation/usb/hiddev.txt | |||
6011 | F: drivers/hid/usbhid/ | 5165 | F: drivers/hid/usbhid/ |
6012 | 5166 | ||
6013 | USB ISP116X DRIVER | 5167 | USB ISP116X DRIVER |
6014 | P: Olav Kongas | 5168 | M: Olav Kongas <ok@artecdesign.ee> |
6015 | M: ok@artecdesign.ee | ||
6016 | L: linux-usb@vger.kernel.org | 5169 | L: linux-usb@vger.kernel.org |
6017 | S: Maintained | 5170 | S: Maintained |
6018 | F: drivers/usb/host/isp116x* | 5171 | F: drivers/usb/host/isp116x* |
6019 | F: include/linux/usb/isp116x.h | 5172 | F: include/linux/usb/isp116x.h |
6020 | 5173 | ||
6021 | USB KAWASAKI LSI DRIVER | 5174 | USB KAWASAKI LSI DRIVER |
6022 | P: Oliver Neukum | 5175 | M: Oliver Neukum <oliver@neukum.name> |
6023 | M: oliver@neukum.name | ||
6024 | L: linux-usb@vger.kernel.org | 5176 | L: linux-usb@vger.kernel.org |
6025 | S: Maintained | 5177 | S: Maintained |
6026 | F: drivers/usb/serial/kl5kusb105.* | 5178 | F: drivers/usb/serial/kl5kusb105.* |
6027 | 5179 | ||
6028 | USB MASS STORAGE DRIVER | 5180 | USB MASS STORAGE DRIVER |
6029 | P: Matthew Dharm | 5181 | M: Matthew Dharm <mdharm-usb@one-eyed-alien.net> |
6030 | M: mdharm-usb@one-eyed-alien.net | ||
6031 | L: linux-usb@vger.kernel.org | 5182 | L: linux-usb@vger.kernel.org |
6032 | L: usb-storage@lists.one-eyed-alien.net | 5183 | L: usb-storage@lists.one-eyed-alien.net |
6033 | S: Maintained | 5184 | S: Maintained |
@@ -6035,31 +5186,27 @@ W: http://www.one-eyed-alien.net/~mdharm/linux-usb/ | |||
6035 | F: drivers/usb/storage/ | 5186 | F: drivers/usb/storage/ |
6036 | 5187 | ||
6037 | USB OHCI DRIVER | 5188 | USB OHCI DRIVER |
6038 | P: David Brownell | 5189 | M: David Brownell <dbrownell@users.sourceforge.net> |
6039 | M: dbrownell@users.sourceforge.net | ||
6040 | L: linux-usb@vger.kernel.org | 5190 | L: linux-usb@vger.kernel.org |
6041 | S: Odd Fixes | 5191 | S: Odd Fixes |
6042 | F: Documentation/usb/ohci.txt | 5192 | F: Documentation/usb/ohci.txt |
6043 | F: drivers/usb/host/ohci* | 5193 | F: drivers/usb/host/ohci* |
6044 | 5194 | ||
6045 | USB OPTION-CARD DRIVER | 5195 | USB OPTION-CARD DRIVER |
6046 | P: Matthias Urlichs | 5196 | M: Matthias Urlichs <smurf@smurf.noris.de> |
6047 | M: smurf@smurf.noris.de | ||
6048 | L: linux-usb@vger.kernel.org | 5197 | L: linux-usb@vger.kernel.org |
6049 | S: Maintained | 5198 | S: Maintained |
6050 | F: drivers/usb/serial/option.c | 5199 | F: drivers/usb/serial/option.c |
6051 | 5200 | ||
6052 | USB OV511 DRIVER | 5201 | USB OV511 DRIVER |
6053 | P: Mark McClelland | 5202 | M: Mark McClelland <mmcclell@bigfoot.com> |
6054 | M: mmcclell@bigfoot.com | ||
6055 | L: linux-usb@vger.kernel.org | 5203 | L: linux-usb@vger.kernel.org |
6056 | W: http://alpha.dyndns.org/ov511/ | 5204 | W: http://alpha.dyndns.org/ov511/ |
6057 | S: Maintained | 5205 | S: Maintained |
6058 | F: drivers/media/video/ov511.* | 5206 | F: drivers/media/video/ov511.* |
6059 | 5207 | ||
6060 | USB PEGASUS DRIVER | 5208 | USB PEGASUS DRIVER |
6061 | P: Petko Manolov | 5209 | M: Petko Manolov <petkan@users.sourceforge.net> |
6062 | M: petkan@users.sourceforge.net | ||
6063 | L: linux-usb@vger.kernel.org | 5210 | L: linux-usb@vger.kernel.org |
6064 | L: netdev@vger.kernel.org | 5211 | L: netdev@vger.kernel.org |
6065 | W: http://pegasus2.sourceforge.net/ | 5212 | W: http://pegasus2.sourceforge.net/ |
@@ -6067,15 +5214,13 @@ S: Maintained | |||
6067 | F: drivers/net/usb/pegasus.* | 5214 | F: drivers/net/usb/pegasus.* |
6068 | 5215 | ||
6069 | USB PRINTER DRIVER (usblp) | 5216 | USB PRINTER DRIVER (usblp) |
6070 | P: Pete Zaitcev | 5217 | M: Pete Zaitcev <zaitcev@redhat.com> |
6071 | M: zaitcev@redhat.com | ||
6072 | L: linux-usb@vger.kernel.org | 5218 | L: linux-usb@vger.kernel.org |
6073 | S: Supported | 5219 | S: Supported |
6074 | F: drivers/usb/class/usblp.c | 5220 | F: drivers/usb/class/usblp.c |
6075 | 5221 | ||
6076 | USB RTL8150 DRIVER | 5222 | USB RTL8150 DRIVER |
6077 | P: Petko Manolov | 5223 | M: Petko Manolov <petkan@users.sourceforge.net> |
6078 | M: petkan@users.sourceforge.net | ||
6079 | L: linux-usb@vger.kernel.org | 5224 | L: linux-usb@vger.kernel.org |
6080 | L: netdev@vger.kernel.org | 5225 | L: netdev@vger.kernel.org |
6081 | W: http://pegasus2.sourceforge.net/ | 5226 | W: http://pegasus2.sourceforge.net/ |
@@ -6083,8 +5228,7 @@ S: Maintained | |||
6083 | F: drivers/net/usb/rtl8150.c | 5228 | F: drivers/net/usb/rtl8150.c |
6084 | 5229 | ||
6085 | USB SE401 DRIVER | 5230 | USB SE401 DRIVER |
6086 | P: Jeroen Vreeken | 5231 | M: Jeroen Vreeken <pe1rxq@amsat.org> |
6087 | M: pe1rxq@amsat.org | ||
6088 | L: linux-usb@vger.kernel.org | 5232 | L: linux-usb@vger.kernel.org |
6089 | W: http://www.chello.nl/~j.vreeken/se401/ | 5233 | W: http://www.chello.nl/~j.vreeken/se401/ |
6090 | S: Maintained | 5234 | S: Maintained |
@@ -6092,15 +5236,13 @@ F: Documentation/video4linux/se401.txt | |||
6092 | F: drivers/media/video/se401.* | 5236 | F: drivers/media/video/se401.* |
6093 | 5237 | ||
6094 | USB SERIAL BELKIN F5U103 DRIVER | 5238 | USB SERIAL BELKIN F5U103 DRIVER |
6095 | P: William Greathouse | 5239 | M: William Greathouse <wgreathouse@smva.com> |
6096 | M: wgreathouse@smva.com | ||
6097 | L: linux-usb@vger.kernel.org | 5240 | L: linux-usb@vger.kernel.org |
6098 | S: Maintained | 5241 | S: Maintained |
6099 | F: drivers/usb/serial/belkin_sa.* | 5242 | F: drivers/usb/serial/belkin_sa.* |
6100 | 5243 | ||
6101 | USB SERIAL CYPRESS M8 DRIVER | 5244 | USB SERIAL CYPRESS M8 DRIVER |
6102 | P: Lonnie Mendez | 5245 | M: Lonnie Mendez <dignome@gmail.com> |
6103 | M: dignome@gmail.com | ||
6104 | L: linux-usb@vger.kernel.org | 5246 | L: linux-usb@vger.kernel.org |
6105 | S: Maintained | 5247 | S: Maintained |
6106 | W: http://geocities.com/i0xox0i | 5248 | W: http://geocities.com/i0xox0i |
@@ -6108,23 +5250,20 @@ W: http://firstlight.net/cvs | |||
6108 | F: drivers/usb/serial/cypress_m8.* | 5250 | F: drivers/usb/serial/cypress_m8.* |
6109 | 5251 | ||
6110 | USB SERIAL CYBERJACK DRIVER | 5252 | USB SERIAL CYBERJACK DRIVER |
6111 | P: Matthias Bruestle and Harald Welte | 5253 | M: Matthias Bruestle and Harald Welte <support@reiner-sct.com> |
6112 | M: support@reiner-sct.com | ||
6113 | W: http://www.reiner-sct.de/support/treiber_cyberjack.php | 5254 | W: http://www.reiner-sct.de/support/treiber_cyberjack.php |
6114 | S: Maintained | 5255 | S: Maintained |
6115 | F: drivers/usb/serial/cyberjack.c | 5256 | F: drivers/usb/serial/cyberjack.c |
6116 | 5257 | ||
6117 | USB SERIAL DIGI ACCELEPORT DRIVER | 5258 | USB SERIAL DIGI ACCELEPORT DRIVER |
6118 | P: Peter Berger and Al Borchers | 5259 | M: Peter Berger <pberger@brimson.com> |
6119 | M: pberger@brimson.com | 5260 | M: Al Borchers <alborchers@steinerpoint.com> |
6120 | M: alborchers@steinerpoint.com | ||
6121 | L: linux-usb@vger.kernel.org | 5261 | L: linux-usb@vger.kernel.org |
6122 | S: Maintained | 5262 | S: Maintained |
6123 | F: drivers/usb/serial/digi_acceleport.c | 5263 | F: drivers/usb/serial/digi_acceleport.c |
6124 | 5264 | ||
6125 | USB SERIAL DRIVER | 5265 | USB SERIAL DRIVER |
6126 | P: Greg Kroah-Hartman | 5266 | M: Greg Kroah-Hartman <gregkh@suse.de> |
6127 | M: gregkh@suse.de | ||
6128 | L: linux-usb@vger.kernel.org | 5267 | L: linux-usb@vger.kernel.org |
6129 | S: Supported | 5268 | S: Supported |
6130 | F: Documentation/usb/usb-serial.txt | 5269 | F: Documentation/usb/usb-serial.txt |
@@ -6133,38 +5272,33 @@ F: drivers/usb/serial/usb-serial.c | |||
6133 | F: include/linux/usb/serial.h | 5272 | F: include/linux/usb/serial.h |
6134 | 5273 | ||
6135 | USB SERIAL EMPEG EMPEG-CAR MARK I/II DRIVER | 5274 | USB SERIAL EMPEG EMPEG-CAR MARK I/II DRIVER |
6136 | P: Gary Brubaker | 5275 | M: Gary Brubaker <xavyer@ix.netcom.com> |
6137 | M: xavyer@ix.netcom.com | ||
6138 | L: linux-usb@vger.kernel.org | 5276 | L: linux-usb@vger.kernel.org |
6139 | S: Maintained | 5277 | S: Maintained |
6140 | F: drivers/usb/serial/empeg.c | 5278 | F: drivers/usb/serial/empeg.c |
6141 | 5279 | ||
6142 | USB SERIAL KEYSPAN DRIVER | 5280 | USB SERIAL KEYSPAN DRIVER |
6143 | P: Greg Kroah-Hartman | 5281 | M: Greg Kroah-Hartman <greg@kroah.com> |
6144 | M: greg@kroah.com | ||
6145 | L: linux-usb@vger.kernel.org | 5282 | L: linux-usb@vger.kernel.org |
6146 | W: http://www.kroah.com/linux/ | 5283 | W: http://www.kroah.com/linux/ |
6147 | S: Maintained | 5284 | S: Maintained |
6148 | F: drivers/usb/serial/*keyspan* | 5285 | F: drivers/usb/serial/*keyspan* |
6149 | 5286 | ||
6150 | USB SERIAL WHITEHEAT DRIVER | 5287 | USB SERIAL WHITEHEAT DRIVER |
6151 | P: Support Department | 5288 | M: Support Department <support@connecttech.com> |
6152 | M: support@connecttech.com | ||
6153 | L: linux-usb@vger.kernel.org | 5289 | L: linux-usb@vger.kernel.org |
6154 | W: http://www.connecttech.com | 5290 | W: http://www.connecttech.com |
6155 | S: Supported | 5291 | S: Supported |
6156 | F: drivers/usb/serial/whiteheat* | 5292 | F: drivers/usb/serial/whiteheat* |
6157 | 5293 | ||
6158 | USB SMSC95XX ETHERNET DRIVER | 5294 | USB SMSC95XX ETHERNET DRIVER |
6159 | P: Steve Glendinning | 5295 | M: Steve Glendinning <steve.glendinning@smsc.com> |
6160 | M: steve.glendinning@smsc.com | ||
6161 | L: netdev@vger.kernel.org | 5296 | L: netdev@vger.kernel.org |
6162 | S: Supported | 5297 | S: Supported |
6163 | F: drivers/net/usb/smsc95xx.* | 5298 | F: drivers/net/usb/smsc95xx.* |
6164 | 5299 | ||
6165 | USB SN9C1xx DRIVER | 5300 | USB SN9C1xx DRIVER |
6166 | P: Luca Risolia | 5301 | M: Luca Risolia <luca.risolia@studio.unibo.it> |
6167 | M: luca.risolia@studio.unibo.it | ||
6168 | L: linux-usb@vger.kernel.org | 5302 | L: linux-usb@vger.kernel.org |
6169 | L: linux-media@vger.kernel.org | 5303 | L: linux-media@vger.kernel.org |
6170 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git | 5304 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git |
@@ -6174,8 +5308,7 @@ F: Documentation/video4linux/sn9c102.txt | |||
6174 | F: drivers/media/video/sn9c102/ | 5308 | F: drivers/media/video/sn9c102/ |
6175 | 5309 | ||
6176 | USB SUBSYSTEM | 5310 | USB SUBSYSTEM |
6177 | P: Greg Kroah-Hartman | 5311 | M: Greg Kroah-Hartman <gregkh@suse.de> |
6178 | M: gregkh@suse.de | ||
6179 | L: linux-usb@vger.kernel.org | 5312 | L: linux-usb@vger.kernel.org |
6180 | W: http://www.linux-usb.org | 5313 | W: http://www.linux-usb.org |
6181 | T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/ | 5314 | T: quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/ |
@@ -6187,15 +5320,13 @@ F: include/linux/usb.h | |||
6187 | F: include/linux/usb/ | 5320 | F: include/linux/usb/ |
6188 | 5321 | ||
6189 | USB UHCI DRIVER | 5322 | USB UHCI DRIVER |
6190 | P: Alan Stern | 5323 | M: Alan Stern <stern@rowland.harvard.edu> |
6191 | M: stern@rowland.harvard.edu | ||
6192 | L: linux-usb@vger.kernel.org | 5324 | L: linux-usb@vger.kernel.org |
6193 | S: Maintained | 5325 | S: Maintained |
6194 | F: drivers/usb/host/uhci* | 5326 | F: drivers/usb/host/uhci* |
6195 | 5327 | ||
6196 | USB "USBNET" DRIVER FRAMEWORK | 5328 | USB "USBNET" DRIVER FRAMEWORK |
6197 | P: David Brownell | 5329 | M: David Brownell <dbrownell@users.sourceforge.net> |
6198 | M: dbrownell@users.sourceforge.net | ||
6199 | L: netdev@vger.kernel.org | 5330 | L: netdev@vger.kernel.org |
6200 | W: http://www.linux-usb.org/usbnet | 5331 | W: http://www.linux-usb.org/usbnet |
6201 | S: Maintained | 5332 | S: Maintained |
@@ -6203,8 +5334,7 @@ F: drivers/net/usb/usbnet.c | |||
6203 | F: include/linux/usb/usbnet.h | 5334 | F: include/linux/usb/usbnet.h |
6204 | 5335 | ||
6205 | USB VIDEO CLASS | 5336 | USB VIDEO CLASS |
6206 | P: Laurent Pinchart | 5337 | M: Laurent Pinchart <laurent.pinchart@skynet.be> |
6207 | M: laurent.pinchart@skynet.be | ||
6208 | L: linux-uvc-devel@lists.berlios.de (subscribers-only) | 5338 | L: linux-uvc-devel@lists.berlios.de (subscribers-only) |
6209 | L: linux-media@vger.kernel.org | 5339 | L: linux-media@vger.kernel.org |
6210 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git | 5340 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git |
@@ -6213,8 +5343,7 @@ S: Maintained | |||
6213 | F: drivers/media/video/uvc/ | 5343 | F: drivers/media/video/uvc/ |
6214 | 5344 | ||
6215 | USB W996[87]CF DRIVER | 5345 | USB W996[87]CF DRIVER |
6216 | P: Luca Risolia | 5346 | M: Luca Risolia <luca.risolia@studio.unibo.it> |
6217 | M: luca.risolia@studio.unibo.it | ||
6218 | L: linux-usb@vger.kernel.org | 5347 | L: linux-usb@vger.kernel.org |
6219 | L: linux-media@vger.kernel.org | 5348 | L: linux-media@vger.kernel.org |
6220 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git | 5349 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git |
@@ -6224,21 +5353,18 @@ F: Documentation/video4linux/w9968cf.txt | |||
6224 | F: drivers/media/video/w996* | 5353 | F: drivers/media/video/w996* |
6225 | 5354 | ||
6226 | USB WIRELESS RNDIS DRIVER (rndis_wlan) | 5355 | USB WIRELESS RNDIS DRIVER (rndis_wlan) |
6227 | P: Jussi Kivilinna | 5356 | M: Jussi Kivilinna <jussi.kivilinna@mbnet.fi> |
6228 | M: jussi.kivilinna@mbnet.fi | ||
6229 | L: linux-wireless@vger.kernel.org | 5357 | L: linux-wireless@vger.kernel.org |
6230 | S: Maintained | 5358 | S: Maintained |
6231 | F: drivers/net/wireless/rndis_wlan.c | 5359 | F: drivers/net/wireless/rndis_wlan.c |
6232 | 5360 | ||
6233 | USB XHCI DRIVER | 5361 | USB XHCI DRIVER |
6234 | P: Sarah Sharp | 5362 | M: Sarah Sharp <sarah.a.sharp@intel.com> |
6235 | M: sarah.a.sharp@intel.com | ||
6236 | L: linux-usb@vger.kernel.org | 5363 | L: linux-usb@vger.kernel.org |
6237 | S: Supported | 5364 | S: Supported |
6238 | 5365 | ||
6239 | USB ZC0301 DRIVER | 5366 | USB ZC0301 DRIVER |
6240 | P: Luca Risolia | 5367 | M: Luca Risolia <luca.risolia@studio.unibo.it> |
6241 | M: luca.risolia@studio.unibo.it | ||
6242 | L: linux-usb@vger.kernel.org | 5368 | L: linux-usb@vger.kernel.org |
6243 | L: linux-media@vger.kernel.org | 5369 | L: linux-media@vger.kernel.org |
6244 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git | 5370 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git |
@@ -6248,16 +5374,14 @@ F: Documentation/video4linux/zc0301.txt | |||
6248 | F: drivers/media/video/zc0301/ | 5374 | F: drivers/media/video/zc0301/ |
6249 | 5375 | ||
6250 | USB ZD1201 DRIVER | 5376 | USB ZD1201 DRIVER |
6251 | P: Jeroen Vreeken | 5377 | M: Jeroen Vreeken <pe1rxq@amsat.org> |
6252 | M: pe1rxq@amsat.org | ||
6253 | L: linux-usb@vger.kernel.org | 5378 | L: linux-usb@vger.kernel.org |
6254 | W: http://linux-lc100020.sourceforge.net | 5379 | W: http://linux-lc100020.sourceforge.net |
6255 | S: Maintained | 5380 | S: Maintained |
6256 | F: drivers/net/wireless/zd1201.* | 5381 | F: drivers/net/wireless/zd1201.* |
6257 | 5382 | ||
6258 | USB ZR364XX DRIVER | 5383 | USB ZR364XX DRIVER |
6259 | P: Antoine Jacquet | 5384 | M: Antoine Jacquet <royale@zerezo.com> |
6260 | M: royale@zerezo.com | ||
6261 | L: linux-usb@vger.kernel.org | 5385 | L: linux-usb@vger.kernel.org |
6262 | L: linux-media@vger.kernel.org | 5386 | L: linux-media@vger.kernel.org |
6263 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git | 5387 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git |
@@ -6267,8 +5391,7 @@ F: Documentation/video4linux/zr364xx.txt | |||
6267 | F: drivers/media/video/zr364xx.c | 5391 | F: drivers/media/video/zr364xx.c |
6268 | 5392 | ||
6269 | USER-MODE LINUX (UML) | 5393 | USER-MODE LINUX (UML) |
6270 | P: Jeff Dike | 5394 | M: Jeff Dike <jdike@addtoit.com> |
6271 | M: jdike@addtoit.com | ||
6272 | L: user-mode-linux-devel@lists.sourceforge.net | 5395 | L: user-mode-linux-devel@lists.sourceforge.net |
6273 | L: user-mode-linux-user@lists.sourceforge.net | 5396 | L: user-mode-linux-user@lists.sourceforge.net |
6274 | W: http://user-mode-linux.sourceforge.net | 5397 | W: http://user-mode-linux.sourceforge.net |
@@ -6279,26 +5402,22 @@ F: fs/hostfs/ | |||
6279 | F: fs/hppfs/ | 5402 | F: fs/hppfs/ |
6280 | 5403 | ||
6281 | USERSPACE I/O (UIO) | 5404 | USERSPACE I/O (UIO) |
6282 | P: Hans J. Koch | 5405 | M: "Hans J. Koch" <hjk@linutronix.de> |
6283 | M: hjk@linutronix.de | 5406 | M: Greg Kroah-Hartman <gregkh@suse.de> |
6284 | P: Greg Kroah-Hartman | ||
6285 | M: gregkh@suse.de | ||
6286 | S: Maintained | 5407 | S: Maintained |
6287 | F: Documentation/DocBook/uio-howto.tmpl | 5408 | F: Documentation/DocBook/uio-howto.tmpl |
6288 | F: drivers/uio/ | 5409 | F: drivers/uio/ |
6289 | F: include/linux/uio*.h | 5410 | F: include/linux/uio*.h |
6290 | 5411 | ||
6291 | UTIL-LINUX-NG PACKAGE | 5412 | UTIL-LINUX-NG PACKAGE |
6292 | P: Karel Zak | 5413 | M: Karel Zak <kzak@redhat.com> |
6293 | M: kzak@redhat.com | ||
6294 | L: util-linux-ng@vger.kernel.org | 5414 | L: util-linux-ng@vger.kernel.org |
6295 | W: http://kernel.org/~kzak/util-linux-ng/ | 5415 | W: http://kernel.org/~kzak/util-linux-ng/ |
6296 | T: git git://git.kernel.org/pub/scm/utils/util-linux-ng/util-linux-ng.git | 5416 | T: git git://git.kernel.org/pub/scm/utils/util-linux-ng/util-linux-ng.git |
6297 | S: Maintained | 5417 | S: Maintained |
6298 | 5418 | ||
6299 | UVESAFB DRIVER | 5419 | UVESAFB DRIVER |
6300 | P: Michal Januszewski | 5420 | M: Michal Januszewski <spock@gentoo.org> |
6301 | M: spock@gentoo.org | ||
6302 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) | 5421 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) |
6303 | W: http://dev.gentoo.org/~spock/projects/uvesafb/ | 5422 | W: http://dev.gentoo.org/~spock/projects/uvesafb/ |
6304 | S: Maintained | 5423 | S: Maintained |
@@ -6306,53 +5425,44 @@ F: Documentation/fb/uvesafb.txt | |||
6306 | F: drivers/video/uvesafb.* | 5425 | F: drivers/video/uvesafb.* |
6307 | 5426 | ||
6308 | VFAT/FAT/MSDOS FILESYSTEM | 5427 | VFAT/FAT/MSDOS FILESYSTEM |
6309 | P: OGAWA Hirofumi | 5428 | M: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp> |
6310 | M: hirofumi@mail.parknet.co.jp | ||
6311 | S: Maintained | 5429 | S: Maintained |
6312 | F: Documentation/filesystems/vfat.txt | 5430 | F: Documentation/filesystems/vfat.txt |
6313 | F: fs/fat/ | 5431 | F: fs/fat/ |
6314 | 5432 | ||
6315 | VIA RHINE NETWORK DRIVER | 5433 | VIA RHINE NETWORK DRIVER |
6316 | P: Roger Luethi | 5434 | M: Roger Luethi <rl@hellgate.ch> |
6317 | M: rl@hellgate.ch | ||
6318 | S: Maintained | 5435 | S: Maintained |
6319 | F: drivers/net/via-rhine.c | 5436 | F: drivers/net/via-rhine.c |
6320 | 5437 | ||
6321 | VIAPRO SMBUS DRIVER | 5438 | VIAPRO SMBUS DRIVER |
6322 | P: Jean Delvare | 5439 | M: Jean Delvare <khali@linux-fr.org> |
6323 | M: khali@linux-fr.org | ||
6324 | L: linux-i2c@vger.kernel.org | 5440 | L: linux-i2c@vger.kernel.org |
6325 | S: Maintained | 5441 | S: Maintained |
6326 | F: Documentation/i2c/busses/i2c-viapro | 5442 | F: Documentation/i2c/busses/i2c-viapro |
6327 | F: drivers/i2c/busses/i2c-viapro.c | 5443 | F: drivers/i2c/busses/i2c-viapro.c |
6328 | 5444 | ||
6329 | VIA SD/MMC CARD CONTROLLER DRIVER | 5445 | VIA SD/MMC CARD CONTROLLER DRIVER |
6330 | P: Joseph Chan | 5446 | M: Joseph Chan <JosephChan@via.com.tw> |
6331 | M: JosephChan@via.com.tw | 5447 | M: Harald Welte <HaraldWelte@viatech.com> |
6332 | P: Harald Welte | ||
6333 | M: HaraldWelte@viatech.com | ||
6334 | S: Maintained | 5448 | S: Maintained |
6335 | F: drivers/mmc/host/via-sdmmc.c | 5449 | F: drivers/mmc/host/via-sdmmc.c |
6336 | 5450 | ||
6337 | VIA UNICHROME(PRO)/CHROME9 FRAMEBUFFER DRIVER | 5451 | VIA UNICHROME(PRO)/CHROME9 FRAMEBUFFER DRIVER |
6338 | P: Joseph Chan | 5452 | M: Joseph Chan <JosephChan@via.com.tw> |
6339 | M: JosephChan@via.com.tw | 5453 | M: Scott Fang <ScottFang@viatech.com.cn> |
6340 | P: Scott Fang | ||
6341 | M: ScottFang@viatech.com.cn | ||
6342 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) | 5454 | L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) |
6343 | S: Maintained | 5455 | S: Maintained |
6344 | F: drivers/video/via/ | 5456 | F: drivers/video/via/ |
6345 | 5457 | ||
6346 | VIA VELOCITY NETWORK DRIVER | 5458 | VIA VELOCITY NETWORK DRIVER |
6347 | P: Francois Romieu | 5459 | M: Francois Romieu <romieu@fr.zoreil.com> |
6348 | M: romieu@fr.zoreil.com | ||
6349 | L: netdev@vger.kernel.org | 5460 | L: netdev@vger.kernel.org |
6350 | S: Maintained | 5461 | S: Maintained |
6351 | F: drivers/net/via-velocity.* | 5462 | F: drivers/net/via-velocity.* |
6352 | 5463 | ||
6353 | VLAN (802.1Q) | 5464 | VLAN (802.1Q) |
6354 | P: Patrick McHardy | 5465 | M: Patrick McHardy <kaber@trash.net> |
6355 | M: kaber@trash.net | ||
6356 | L: netdev@vger.kernel.org | 5466 | L: netdev@vger.kernel.org |
6357 | S: Maintained | 5467 | S: Maintained |
6358 | F: drivers/net/macvlan.c | 5468 | F: drivers/net/macvlan.c |
@@ -6360,18 +5470,15 @@ F: include/linux/if_*vlan.h | |||
6360 | F: net/8021q/ | 5470 | F: net/8021q/ |
6361 | 5471 | ||
6362 | VLYNQ BUS | 5472 | VLYNQ BUS |
6363 | P: Florian Fainelli | 5473 | M: Florian Fainelli <florian@openwrt.org> |
6364 | M: florian@openwrt.org | ||
6365 | L: openwrt-devel@lists.openwrt.org | 5474 | L: openwrt-devel@lists.openwrt.org |
6366 | S: Maintained | 5475 | S: Maintained |
6367 | F: drivers/vlynq/vlynq.c | 5476 | F: drivers/vlynq/vlynq.c |
6368 | F: include/linux/vlynq.h | 5477 | F: include/linux/vlynq.h |
6369 | 5478 | ||
6370 | VOLTAGE AND CURRENT REGULATOR FRAMEWORK | 5479 | VOLTAGE AND CURRENT REGULATOR FRAMEWORK |
6371 | P: Liam Girdwood | 5480 | M: Liam Girdwood <lrg@slimlogic.co.uk> |
6372 | M: lrg@slimlogic.co.uk | 5481 | M: Mark Brown <broonie@opensource.wolfsonmicro.com> |
6373 | P: Mark Brown | ||
6374 | M: broonie@opensource.wolfsonmicro.com | ||
6375 | W: http://opensource.wolfsonmicro.com/node/15 | 5482 | W: http://opensource.wolfsonmicro.com/node/15 |
6376 | W: http://www.slimlogic.co.uk/?p=48 | 5483 | W: http://www.slimlogic.co.uk/?p=48 |
6377 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lrg/voltage-2.6.git | 5484 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lrg/voltage-2.6.git |
@@ -6380,52 +5487,45 @@ F: drivers/regulator/ | |||
6380 | F: include/linux/regulator/ | 5487 | F: include/linux/regulator/ |
6381 | 5488 | ||
6382 | VT1211 HARDWARE MONITOR DRIVER | 5489 | VT1211 HARDWARE MONITOR DRIVER |
6383 | P: Juerg Haefliger | 5490 | M: Juerg Haefliger <juergh@gmail.com> |
6384 | M: juergh@gmail.com | ||
6385 | L: lm-sensors@lm-sensors.org | 5491 | L: lm-sensors@lm-sensors.org |
6386 | S: Maintained | 5492 | S: Maintained |
6387 | F: Documentation/hwmon/vt1211 | 5493 | F: Documentation/hwmon/vt1211 |
6388 | F: drivers/hwmon/vt1211.c | 5494 | F: drivers/hwmon/vt1211.c |
6389 | 5495 | ||
6390 | VT8231 HARDWARE MONITOR DRIVER | 5496 | VT8231 HARDWARE MONITOR DRIVER |
6391 | P: Roger Lucas | 5497 | M: Roger Lucas <vt8231@hiddenengine.co.uk> |
6392 | M: vt8231@hiddenengine.co.uk | ||
6393 | L: lm-sensors@lm-sensors.org | 5498 | L: lm-sensors@lm-sensors.org |
6394 | S: Maintained | 5499 | S: Maintained |
6395 | F: drivers/hwmon/vt8231.c | 5500 | F: drivers/hwmon/vt8231.c |
6396 | 5501 | ||
6397 | W1 DALLAS'S 1-WIRE BUS | 5502 | W1 DALLAS'S 1-WIRE BUS |
6398 | P: Evgeniy Polyakov | 5503 | M: Evgeniy Polyakov <johnpol@2ka.mipt.ru> |
6399 | M: johnpol@2ka.mipt.ru | ||
6400 | S: Maintained | 5504 | S: Maintained |
6401 | F: Documentation/w1/ | 5505 | F: Documentation/w1/ |
6402 | F: drivers/w1/ | 5506 | F: drivers/w1/ |
6403 | 5507 | ||
6404 | W83791D HARDWARE MONITORING DRIVER | 5508 | W83791D HARDWARE MONITORING DRIVER |
6405 | P: Marc Hulsman | 5509 | M: Marc Hulsman <m.hulsman@tudelft.nl> |
6406 | M: m.hulsman@tudelft.nl | ||
6407 | L: lm-sensors@lm-sensors.org | 5510 | L: lm-sensors@lm-sensors.org |
6408 | S: Maintained | 5511 | S: Maintained |
6409 | F: Documentation/hwmon/w83791d | 5512 | F: Documentation/hwmon/w83791d |
6410 | F: drivers/hwmon/w83791d.c | 5513 | F: drivers/hwmon/w83791d.c |
6411 | 5514 | ||
6412 | W83793 HARDWARE MONITORING DRIVER | 5515 | W83793 HARDWARE MONITORING DRIVER |
6413 | P: Rudolf Marek | 5516 | M: Rudolf Marek <r.marek@assembler.cz> |
6414 | M: r.marek@assembler.cz | ||
6415 | L: lm-sensors@lm-sensors.org | 5517 | L: lm-sensors@lm-sensors.org |
6416 | S: Maintained | 5518 | S: Maintained |
6417 | F: Documentation/hwmon/w83793 | 5519 | F: Documentation/hwmon/w83793 |
6418 | F: drivers/hwmon/w83793.c | 5520 | F: drivers/hwmon/w83793.c |
6419 | 5521 | ||
6420 | W83L51xD SD/MMC CARD INTERFACE DRIVER | 5522 | W83L51xD SD/MMC CARD INTERFACE DRIVER |
6421 | P: Pierre Ossman | 5523 | M: Pierre Ossman <pierre@ossman.eu> |
6422 | M: pierre@ossman.eu | ||
6423 | S: Maintained | 5524 | S: Maintained |
6424 | F: drivers/mmc/host/wbsd.* | 5525 | F: drivers/mmc/host/wbsd.* |
6425 | 5526 | ||
6426 | WATCHDOG DEVICE DRIVERS | 5527 | WATCHDOG DEVICE DRIVERS |
6427 | P: Wim Van Sebroeck | 5528 | M: Wim Van Sebroeck <wim@iguana.be> |
6428 | M: wim@iguana.be | ||
6429 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/wim/linux-2.6-watchdog.git | 5529 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/wim/linux-2.6-watchdog.git |
6430 | S: Maintained | 5530 | S: Maintained |
6431 | F: Documentation/watchdog/ | 5531 | F: Documentation/watchdog/ |
@@ -6433,8 +5533,7 @@ F: drivers/watchdog/ | |||
6433 | F: include/linux/watchdog.h | 5533 | F: include/linux/watchdog.h |
6434 | 5534 | ||
6435 | WAVELAN NETWORK DRIVER & WIRELESS EXTENSIONS | 5535 | WAVELAN NETWORK DRIVER & WIRELESS EXTENSIONS |
6436 | P: Jean Tourrilhes | 5536 | M: Jean Tourrilhes <jt@hpl.hp.com> |
6437 | M: jt@hpl.hp.com | ||
6438 | L: linux-wireless@vger.kernel.org | 5537 | L: linux-wireless@vger.kernel.org |
6439 | W: http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/ | 5538 | W: http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/ |
6440 | S: Maintained | 5539 | S: Maintained |
@@ -6442,46 +5541,39 @@ F: Documentation/networking/wavelan.txt | |||
6442 | F: drivers/net/wireless/wavelan* | 5541 | F: drivers/net/wireless/wavelan* |
6443 | 5542 | ||
6444 | WD7000 SCSI DRIVER | 5543 | WD7000 SCSI DRIVER |
6445 | P: Miroslav Zagorac | 5544 | M: Miroslav Zagorac <zaga@fly.cc.fer.hr> |
6446 | M: zaga@fly.cc.fer.hr | ||
6447 | L: linux-scsi@vger.kernel.org | 5545 | L: linux-scsi@vger.kernel.org |
6448 | S: Maintained | 5546 | S: Maintained |
6449 | F: drivers/scsi/wd7000.c | 5547 | F: drivers/scsi/wd7000.c |
6450 | 5548 | ||
6451 | WIMAX STACK | 5549 | WIMAX STACK |
6452 | P: Inaky Perez-Gonzalez | 5550 | M: Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> |
6453 | M: inaky.perez-gonzalez@intel.com | ||
6454 | M: linux-wimax@intel.com | 5551 | M: linux-wimax@intel.com |
6455 | L: wimax@linuxwimax.org | 5552 | L: wimax@linuxwimax.org |
6456 | S: Supported | 5553 | S: Supported |
6457 | W: http://linuxwimax.org | 5554 | W: http://linuxwimax.org |
6458 | 5555 | ||
6459 | WIMEDIA LLC PROTOCOL (WLP) SUBSYSTEM | 5556 | WIMEDIA LLC PROTOCOL (WLP) SUBSYSTEM |
6460 | P: David Vrabel | 5557 | M: David Vrabel <david.vrabel@csr.com> |
6461 | M: david.vrabel@csr.com | ||
6462 | S: Maintained | 5558 | S: Maintained |
6463 | F: include/linux/wlp.h | 5559 | F: include/linux/wlp.h |
6464 | F: drivers/uwb/wlp/ | 5560 | F: drivers/uwb/wlp/ |
6465 | 5561 | ||
6466 | WISTRON LAPTOP BUTTON DRIVER | 5562 | WISTRON LAPTOP BUTTON DRIVER |
6467 | P: Miloslav Trmac | 5563 | M: Miloslav Trmac <mitr@volny.cz> |
6468 | M: mitr@volny.cz | ||
6469 | S: Maintained | 5564 | S: Maintained |
6470 | F: drivers/input/misc/wistron_btns.c | 5565 | F: drivers/input/misc/wistron_btns.c |
6471 | 5566 | ||
6472 | WL3501 WIRELESS PCMCIA CARD DRIVER | 5567 | WL3501 WIRELESS PCMCIA CARD DRIVER |
6473 | P: Arnaldo Carvalho de Melo | 5568 | M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> |
6474 | M: acme@ghostprotocols.net | ||
6475 | L: linux-wireless@vger.kernel.org | 5569 | L: linux-wireless@vger.kernel.org |
6476 | W: http://oops.ghostprotocols.net:81/blog | 5570 | W: http://oops.ghostprotocols.net:81/blog |
6477 | S: Maintained | 5571 | S: Maintained |
6478 | F: drivers/net/wireless/wl3501* | 5572 | F: drivers/net/wireless/wl3501* |
6479 | 5573 | ||
6480 | WM97XX TOUCHSCREEN DRIVERS | 5574 | WM97XX TOUCHSCREEN DRIVERS |
6481 | P: Mark Brown | 5575 | M: Mark Brown <broonie@opensource.wolfsonmicro.com> |
6482 | M: broonie@opensource.wolfsonmicro.com | 5576 | M: Liam Girdwood <lrg@slimlogic.co.uk> |
6483 | P: Liam Girdwood | ||
6484 | M: lrg@slimlogic.co.uk | ||
6485 | L: linux-input@vger.kernel.org | 5577 | L: linux-input@vger.kernel.org |
6486 | T: git git://opensource.wolfsonmicro.com/linux-2.6-touch | 5578 | T: git git://opensource.wolfsonmicro.com/linux-2.6-touch |
6487 | W: http://opensource.wolfsonmicro.com/node/7 | 5579 | W: http://opensource.wolfsonmicro.com/node/7 |
@@ -6490,8 +5582,7 @@ F: drivers/input/touchscreen/*wm97* | |||
6490 | F: include/linux/wm97xx.h | 5582 | F: include/linux/wm97xx.h |
6491 | 5583 | ||
6492 | X.25 NETWORK LAYER | 5584 | X.25 NETWORK LAYER |
6493 | P: Henner Eisen | 5585 | M: Henner Eisen <eis@baty.hanse.de> |
6494 | M: eis@baty.hanse.de | ||
6495 | L: linux-x25@vger.kernel.org | 5586 | L: linux-x25@vger.kernel.org |
6496 | S: Maintained | 5587 | S: Maintained |
6497 | F: Documentation/networking/x25* | 5588 | F: Documentation/networking/x25* |
@@ -6499,12 +5590,9 @@ F: include/net/x25* | |||
6499 | F: net/x25/ | 5590 | F: net/x25/ |
6500 | 5591 | ||
6501 | X86 ARCHITECTURE (32-BIT AND 64-BIT) | 5592 | X86 ARCHITECTURE (32-BIT AND 64-BIT) |
6502 | P: Thomas Gleixner | 5593 | M: Thomas Gleixner <tglx@linutronix.de> |
6503 | M: tglx@linutronix.de | 5594 | M: Ingo Molnar <mingo@redhat.com> |
6504 | P: Ingo Molnar | 5595 | M: "H. Peter Anvin" <hpa@zytor.com> |
6505 | M: mingo@redhat.com | ||
6506 | P: H. Peter Anvin | ||
6507 | M: hpa@zytor.com | ||
6508 | M: x86@kernel.org | 5596 | M: x86@kernel.org |
6509 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git | 5597 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git |
6510 | S: Maintained | 5598 | S: Maintained |
@@ -6512,10 +5600,8 @@ F: Documentation/x86/ | |||
6512 | F: arch/x86/ | 5600 | F: arch/x86/ |
6513 | 5601 | ||
6514 | XEN HYPERVISOR INTERFACE | 5602 | XEN HYPERVISOR INTERFACE |
6515 | P: Jeremy Fitzhardinge | 5603 | M: Jeremy Fitzhardinge <jeremy@xensource.com> |
6516 | M: jeremy@xensource.com | 5604 | M: Chris Wright <chrisw@sous-sol.org> |
6517 | P: Chris Wright | ||
6518 | M: chrisw@sous-sol.org | ||
6519 | L: virtualization@lists.osdl.org | 5605 | L: virtualization@lists.osdl.org |
6520 | L: xen-devel@lists.xensource.com | 5606 | L: xen-devel@lists.xensource.com |
6521 | S: Supported | 5607 | S: Supported |
@@ -6527,8 +5613,7 @@ F: include/xen/ | |||
6527 | 5613 | ||
6528 | XFS FILESYSTEM | 5614 | XFS FILESYSTEM |
6529 | P: Silicon Graphics Inc | 5615 | P: Silicon Graphics Inc |
6530 | P: Felix Blyakher | 5616 | M: Felix Blyakher <felixb@sgi.com> |
6531 | M: felixb@sgi.com | ||
6532 | M: xfs-masters@oss.sgi.com | 5617 | M: xfs-masters@oss.sgi.com |
6533 | L: xfs@oss.sgi.com | 5618 | L: xfs@oss.sgi.com |
6534 | W: http://oss.sgi.com/projects/xfs | 5619 | W: http://oss.sgi.com/projects/xfs |
@@ -6538,38 +5623,33 @@ F: Documentation/filesystems/xfs.txt | |||
6538 | F: fs/xfs/ | 5623 | F: fs/xfs/ |
6539 | 5624 | ||
6540 | XILINX SYSTEMACE DRIVER | 5625 | XILINX SYSTEMACE DRIVER |
6541 | P: Grant Likely | 5626 | M: Grant Likely <grant.likely@secretlab.ca> |
6542 | M: grant.likely@secretlab.ca | ||
6543 | W: http://www.secretlab.ca/ | 5627 | W: http://www.secretlab.ca/ |
6544 | S: Maintained | 5628 | S: Maintained |
6545 | F: drivers/block/xsysace.c | 5629 | F: drivers/block/xsysace.c |
6546 | 5630 | ||
6547 | XILINX UARTLITE SERIAL DRIVER | 5631 | XILINX UARTLITE SERIAL DRIVER |
6548 | P: Peter Korsgaard | 5632 | M: Peter Korsgaard <jacmet@sunsite.dk> |
6549 | M: jacmet@sunsite.dk | ||
6550 | L: linux-serial@vger.kernel.org | 5633 | L: linux-serial@vger.kernel.org |
6551 | S: Maintained | 5634 | S: Maintained |
6552 | F: drivers/serial/uartlite.c | 5635 | F: drivers/serial/uartlite.c |
6553 | 5636 | ||
6554 | YAM DRIVER FOR AX.25 | 5637 | YAM DRIVER FOR AX.25 |
6555 | P: Jean-Paul Roubelat | 5638 | M: Jean-Paul Roubelat <jpr@f6fbb.org> |
6556 | M: jpr@f6fbb.org | ||
6557 | L: linux-hams@vger.kernel.org | 5639 | L: linux-hams@vger.kernel.org |
6558 | S: Maintained | 5640 | S: Maintained |
6559 | F: drivers/net/hamradio/yam* | 5641 | F: drivers/net/hamradio/yam* |
6560 | F: include/linux/yam.h | 5642 | F: include/linux/yam.h |
6561 | 5643 | ||
6562 | YEALINK PHONE DRIVER | 5644 | YEALINK PHONE DRIVER |
6563 | P: Henk Vergonet | 5645 | M: Henk Vergonet <Henk.Vergonet@gmail.com> |
6564 | M: Henk.Vergonet@gmail.com | ||
6565 | L: usbb2k-api-dev@nongnu.org | 5646 | L: usbb2k-api-dev@nongnu.org |
6566 | S: Maintained | 5647 | S: Maintained |
6567 | F: Documentation/input/yealink.txt | 5648 | F: Documentation/input/yealink.txt |
6568 | F: drivers/input/misc/yealink.* | 5649 | F: drivers/input/misc/yealink.* |
6569 | 5650 | ||
6570 | Z8530 DRIVER FOR AX.25 | 5651 | Z8530 DRIVER FOR AX.25 |
6571 | P: Joerg Reuter | 5652 | M: Joerg Reuter <jreuter@yaina.de> |
6572 | M: jreuter@yaina.de | ||
6573 | W: http://yaina.de/jreuter/ | 5653 | W: http://yaina.de/jreuter/ |
6574 | W: http://www.qsl.net/dl1bke/ | 5654 | W: http://www.qsl.net/dl1bke/ |
6575 | L: linux-hams@vger.kernel.org | 5655 | L: linux-hams@vger.kernel.org |
@@ -6579,10 +5659,8 @@ F: drivers/net/hamradio/*scc.c | |||
6579 | F: drivers/net/hamradio/z8530.h | 5659 | F: drivers/net/hamradio/z8530.h |
6580 | 5660 | ||
6581 | ZD1211RW WIRELESS DRIVER | 5661 | ZD1211RW WIRELESS DRIVER |
6582 | P: Daniel Drake | 5662 | M: Daniel Drake <dsd@gentoo.org> |
6583 | M: dsd@gentoo.org | 5663 | M: Ulrich Kunitz <kune@deine-taler.de> |
6584 | P: Ulrich Kunitz | ||
6585 | M: kune@deine-taler.de | ||
6586 | W: http://zd1211.ath.cx/wiki/DriverRewrite | 5664 | W: http://zd1211.ath.cx/wiki/DriverRewrite |
6587 | L: linux-wireless@vger.kernel.org | 5665 | L: linux-wireless@vger.kernel.org |
6588 | L: zd1211-devs@lists.sourceforge.net (subscribers-only) | 5666 | L: zd1211-devs@lists.sourceforge.net (subscribers-only) |
@@ -6598,14 +5676,12 @@ S: Odd Fixes | |||
6598 | F: drivers/media/video/zoran/ | 5676 | F: drivers/media/video/zoran/ |
6599 | 5677 | ||
6600 | ZS DECSTATION Z85C30 SERIAL DRIVER | 5678 | ZS DECSTATION Z85C30 SERIAL DRIVER |
6601 | P: Maciej W. Rozycki | 5679 | M: "Maciej W. Rozycki" <macro@linux-mips.org> |
6602 | M: macro@linux-mips.org | ||
6603 | S: Maintained | 5680 | S: Maintained |
6604 | F: drivers/serial/zs.* | 5681 | F: drivers/serial/zs.* |
6605 | 5682 | ||
6606 | THE REST | 5683 | THE REST |
6607 | P: Linus Torvalds | 5684 | M: Linus Torvalds <torvalds@linux-foundation.org> |
6608 | M: torvalds@linux-foundation.org | ||
6609 | L: linux-kernel@vger.kernel.org | 5685 | L: linux-kernel@vger.kernel.org |
6610 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git | 5686 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git |
6611 | S: Buried alive in reporters | 5687 | S: Buried alive in reporters |
diff --git a/arch/x86/include/asm/lguest.h b/arch/x86/include/asm/lguest.h index 313389cd50d2..5136dad57cbb 100644 --- a/arch/x86/include/asm/lguest.h +++ b/arch/x86/include/asm/lguest.h | |||
@@ -17,8 +17,7 @@ | |||
17 | /* Pages for switcher itself, then two pages per cpu */ | 17 | /* Pages for switcher itself, then two pages per cpu */ |
18 | #define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids) | 18 | #define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids) |
19 | 19 | ||
20 | /* We map at -4M (-2M when PAE is activated) for ease of mapping | 20 | /* We map at -4M (-2M for PAE) for ease of mapping (one PTE page). */ |
21 | * into the guest (one PTE page). */ | ||
22 | #ifdef CONFIG_X86_PAE | 21 | #ifdef CONFIG_X86_PAE |
23 | #define SWITCHER_ADDR 0xFFE00000 | 22 | #define SWITCHER_ADDR 0xFFE00000 |
24 | #else | 23 | #else |
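As a quick sanity check of the "-4M (-2M for PAE)" remark in the hunk above, a minimal standalone C snippet (not part of the patch) shows that the PAE value 0xFFE00000 is exactly 2 MB below the 4 GB wrap, which is why it reads as -2M:

#include <stdio.h>

int main(void)
{
	/* SWITCHER_ADDR for PAE in the hunk above is 0xFFE00000. */
	printf("%#x\n", (unsigned int)(-2 * 1024 * 1024));	/* prints 0xffe00000 */
	return 0;
}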
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h index 33600a66755f..ba0eed8aa1a6 100644 --- a/arch/x86/include/asm/lguest_hcall.h +++ b/arch/x86/include/asm/lguest_hcall.h | |||
@@ -30,27 +30,27 @@ | |||
30 | #include <asm/hw_irq.h> | 30 | #include <asm/hw_irq.h> |
31 | #include <asm/kvm_para.h> | 31 | #include <asm/kvm_para.h> |
32 | 32 | ||
33 | /*G:030 But first, how does our Guest contact the Host to ask for privileged | 33 | /*G:030 |
34 | * But first, how does our Guest contact the Host to ask for privileged | ||
34 | * operations? There are two ways: the direct way is to make a "hypercall", | 35 | * operations? There are two ways: the direct way is to make a "hypercall", |
35 | * to make requests of the Host Itself. | 36 | * to make requests of the Host Itself. |
36 | * | 37 | * |
37 | * We use the KVM hypercall mechanism. Seventeen hypercalls are | 38 | * We use the KVM hypercall mechanism, though completely different hypercall |
38 | * available: the hypercall number is put in the %eax register, and the | 39 | * numbers. Seventeen hypercalls are available: the hypercall number is put in |
39 | * arguments (when required) are placed in %ebx, %ecx, %edx and %esi. | 40 | * the %eax register, and the arguments (when required) are placed in %ebx, |
40 | * If a return value makes sense, it's returned in %eax. | 41 | * %ecx, %edx and %esi. If a return value makes sense, it's returned in %eax. |
41 | * | 42 | * |
42 | * Grossly invalid calls result in Sudden Death at the hands of the vengeful | 43 | * Grossly invalid calls result in Sudden Death at the hands of the vengeful |
43 | * Host, rather than returning failure. This reflects Winston Churchill's | 44 | * Host, rather than returning failure. This reflects Winston Churchill's |
44 | * definition of a gentleman: "someone who is only rude intentionally". */ | 45 | * definition of a gentleman: "someone who is only rude intentionally". |
45 | /*:*/ | 46 | :*/ |
46 | 47 | ||
47 | /* Can't use our min() macro here: needs to be a constant */ | 48 | /* Can't use our min() macro here: needs to be a constant */ |
48 | #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32) | 49 | #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32) |
49 | 50 | ||
50 | #define LHCALL_RING_SIZE 64 | 51 | #define LHCALL_RING_SIZE 64 |
51 | struct hcall_args { | 52 | struct hcall_args { |
52 | /* These map directly onto eax, ebx, ecx, edx and esi | 53 | /* These map directly onto eax/ebx/ecx/edx/esi in struct lguest_regs */ |
53 | * in struct lguest_regs */ | ||
54 | unsigned long arg0, arg1, arg2, arg3, arg4; | 54 | unsigned long arg0, arg1, arg2, arg3, arg4; |
55 | }; | 55 | }; |
56 | 56 | ||
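To make the register convention described above concrete, here is a minimal, hedged sketch (not from the patch) of a four-argument hypercall helper in the style of kvm_hypercall4(): the call number goes in %eax, the arguments in %ebx, %ecx, %edx and %esi, and any result comes back in %eax. The opcode bytes are the vmcall instruction used by the KVM hypercall mechanism; the helper name is invented, and the call only does anything useful when executed inside a Guest.

/* Sketch only: mirrors the convention described in the comment above. */
static inline unsigned long example_hcall4(unsigned long nr,
					   unsigned long arg1,
					   unsigned long arg2,
					   unsigned long arg3,
					   unsigned long arg4)
{
	unsigned long ret;

	asm volatile(".byte 0x0f,0x01,0xc1"	/* vmcall */
		     : "=a"(ret)
		     : "a"(nr), "b"(arg1), "c"(arg2), "d"(arg3), "S"(arg4)
		     : "memory");
	return ret;
}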
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index f2bf1f73d468..d677fa9ca650 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -22,7 +22,8 @@ | |||
22 | * | 22 | * |
23 | * So how does the kernel know it's a Guest? We'll see that later, but let's | 23 | * So how does the kernel know it's a Guest? We'll see that later, but let's |
24 | * just say that we end up here where we replace the native functions various | 24 | * just say that we end up here where we replace the native functions various |
25 | * "paravirt" structures with our Guest versions, then boot like normal. :*/ | 25 | * "paravirt" structures with our Guest versions, then boot like normal. |
26 | :*/ | ||
26 | 27 | ||
27 | /* | 28 | /* |
28 | * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation. | 29 | * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation. |
@@ -74,7 +75,8 @@ | |||
74 | * | 75 | * |
75 | * The Guest in our tale is a simple creature: identical to the Host but | 76 | * The Guest in our tale is a simple creature: identical to the Host but |
76 | * behaving in simplified but equivalent ways. In particular, the Guest is the | 77 | * behaving in simplified but equivalent ways. In particular, the Guest is the |
77 | * same kernel as the Host (or at least, built from the same source code). :*/ | 78 | * same kernel as the Host (or at least, built from the same source code). |
79 | :*/ | ||
78 | 80 | ||
79 | struct lguest_data lguest_data = { | 81 | struct lguest_data lguest_data = { |
80 | .hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF }, | 82 | .hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF }, |
@@ -85,7 +87,8 @@ struct lguest_data lguest_data = { | |||
85 | .syscall_vec = SYSCALL_VECTOR, | 87 | .syscall_vec = SYSCALL_VECTOR, |
86 | }; | 88 | }; |
87 | 89 | ||
88 | /*G:037 async_hcall() is pretty simple: I'm quite proud of it really. We have a | 90 | /*G:037 |
91 | * async_hcall() is pretty simple: I'm quite proud of it really. We have a | ||
89 | * ring buffer of stored hypercalls which the Host will run through next time we | 92 | * ring buffer of stored hypercalls which the Host will run through next time we |
90 | * do a normal hypercall. Each entry in the ring has 5 slots for the hypercall | 93 | * do a normal hypercall. Each entry in the ring has 5 slots for the hypercall |
91 | * arguments, and a "hcall_status" word which is 0 if the call is ready to go, | 94 | * arguments, and a "hcall_status" word which is 0 if the call is ready to go, |
@@ -94,7 +97,8 @@ struct lguest_data lguest_data = { | |||
94 | * If we come around to a slot which hasn't been finished, then the table is | 97 | * If we come around to a slot which hasn't been finished, then the table is |
95 | * full and we just make the hypercall directly. This has the nice side | 98 | * full and we just make the hypercall directly. This has the nice side |
96 | * effect of causing the Host to run all the stored calls in the ring buffer | 99 | * effect of causing the Host to run all the stored calls in the ring buffer |
97 | * which empties it for next time! */ | 100 | * which empties it for next time! |
101 | */ | ||
98 | static void async_hcall(unsigned long call, unsigned long arg1, | 102 | static void async_hcall(unsigned long call, unsigned long arg1, |
99 | unsigned long arg2, unsigned long arg3, | 103 | unsigned long arg2, unsigned long arg3, |
100 | unsigned long arg4) | 104 | unsigned long arg4) |
@@ -103,9 +107,11 @@ static void async_hcall(unsigned long call, unsigned long arg1, | |||
103 | static unsigned int next_call; | 107 | static unsigned int next_call; |
104 | unsigned long flags; | 108 | unsigned long flags; |
105 | 109 | ||
106 | /* Disable interrupts if not already disabled: we don't want an | 110 | /* |
111 | * Disable interrupts if not already disabled: we don't want an | ||
107 | * interrupt handler making a hypercall while we're already doing | 112 | * interrupt handler making a hypercall while we're already doing |
108 | * one! */ | 113 | * one! |
114 | */ | ||
109 | local_irq_save(flags); | 115 | local_irq_save(flags); |
110 | if (lguest_data.hcall_status[next_call] != 0xFF) { | 116 | if (lguest_data.hcall_status[next_call] != 0xFF) { |
111 | /* Table full, so do normal hcall which will flush table. */ | 117 | /* Table full, so do normal hcall which will flush table. */ |
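The stored-hypercall ring described in the hunks above boils down to a fixed ring whose slots carry a status byte: 0xFF means the slot is free, 0 means a call is queued and waiting for the Host. A small userspace sketch of that idea follows; the names and the printf stand-in for the real hypercall are made up for illustration.

#include <stdio.h>
#include <string.h>

#define RING_SIZE 64

struct pending_call {
	unsigned long call, arg1, arg2, arg3, arg4;
};

static struct pending_call ring[RING_SIZE];
static unsigned char hcall_status[RING_SIZE];	/* 0xFF = free, 0 = queued */
static unsigned int next_call;

/* Stand-in for an immediate hypercall; the Host would also drain the ring. */
static void direct_call(unsigned long call)
{
	printf("direct call %lu (stored calls get flushed too)\n", call);
	memset(hcall_status, 0xFF, sizeof(hcall_status));
}

static void queue_call(unsigned long call, unsigned long a1,
		       unsigned long a2, unsigned long a3, unsigned long a4)
{
	if (hcall_status[next_call] != 0xFF) {
		/* Next slot not consumed yet: table full, so call directly. */
		direct_call(call);
		return;
	}
	ring[next_call] = (struct pending_call){ call, a1, a2, a3, a4 };
	hcall_status[next_call] = 0;		/* mark the slot ready to go */
	next_call = (next_call + 1) % RING_SIZE;
}

int main(void)
{
	memset(hcall_status, 0xFF, sizeof(hcall_status));	/* all slots free */
	for (unsigned long i = 0; i < 70; i++)
		queue_call(i, 0, 0, 0, 0);	/* the 65th call finds the ring full */
	return 0;
}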
@@ -125,8 +131,9 @@ static void async_hcall(unsigned long call, unsigned long arg1, | |||
125 | local_irq_restore(flags); | 131 | local_irq_restore(flags); |
126 | } | 132 | } |
127 | 133 | ||
128 | /*G:035 Notice the lazy_hcall() above, rather than hcall(). This is our first | 134 | /*G:035 |
129 | * real optimization trick! | 135 | * Notice the lazy_hcall() above, rather than hcall(). This is our first real |
136 | * optimization trick! | ||
130 | * | 137 | * |
131 | * When lazy_mode is set, it means we're allowed to defer all hypercalls and do | 138 | * When lazy_mode is set, it means we're allowed to defer all hypercalls and do |
132 | * them as a batch when lazy_mode is eventually turned off. Because hypercalls | 139 | * them as a batch when lazy_mode is eventually turned off. Because hypercalls |
@@ -136,7 +143,8 @@ static void async_hcall(unsigned long call, unsigned long arg1, | |||
136 | * lguest_leave_lazy_mode(). | 143 | * lguest_leave_lazy_mode(). |
137 | * | 144 | * |
138 | * So, when we're in lazy mode, we call async_hcall() to store the call for | 145 | * So, when we're in lazy mode, we call async_hcall() to store the call for |
139 | * future processing: */ | 146 | * future processing: |
147 | */ | ||
140 | static void lazy_hcall1(unsigned long call, | 148 | static void lazy_hcall1(unsigned long call, |
141 | unsigned long arg1) | 149 | unsigned long arg1) |
142 | { | 150 | { |
@@ -146,6 +154,7 @@ static void lazy_hcall1(unsigned long call, | |||
146 | async_hcall(call, arg1, 0, 0, 0); | 154 | async_hcall(call, arg1, 0, 0, 0); |
147 | } | 155 | } |
148 | 156 | ||
157 | /* You can imagine what lazy_hcall2, 3 and 4 look like. :*/ | ||
149 | static void lazy_hcall2(unsigned long call, | 158 | static void lazy_hcall2(unsigned long call, |
150 | unsigned long arg1, | 159 | unsigned long arg1, |
151 | unsigned long arg2) | 160 | unsigned long arg2) |
@@ -181,8 +190,10 @@ static void lazy_hcall4(unsigned long call, | |||
181 | } | 190 | } |
182 | #endif | 191 | #endif |
183 | 192 | ||
184 | /* When lazy mode is turned off reset the per-cpu lazy mode variable and then | 193 | /*G:036 |
185 | * issue the do-nothing hypercall to flush any stored calls. */ | 194 | * When lazy mode is turned off reset the per-cpu lazy mode variable and then |
195 | * issue the do-nothing hypercall to flush any stored calls. | ||
196 | :*/ | ||
186 | static void lguest_leave_lazy_mmu_mode(void) | 197 | static void lguest_leave_lazy_mmu_mode(void) |
187 | { | 198 | { |
188 | kvm_hypercall0(LHCALL_FLUSH_ASYNC); | 199 | kvm_hypercall0(LHCALL_FLUSH_ASYNC); |
@@ -208,9 +219,11 @@ static void lguest_end_context_switch(struct task_struct *next) | |||
208 | * check there before it tries to deliver an interrupt. | 219 | * check there before it tries to deliver an interrupt. |
209 | */ | 220 | */ |
210 | 221 | ||
211 | /* save_flags() is expected to return the processor state (ie. "flags"). The | 222 | /* |
223 | * save_flags() is expected to return the processor state (ie. "flags"). The | ||
212 | * flags word contains all kind of stuff, but in practice Linux only cares | 224 | * flags word contains all kind of stuff, but in practice Linux only cares |
213 | * about the interrupt flag. Our "save_flags()" just returns that. */ | 225 | * about the interrupt flag. Our "save_flags()" just returns that. |
226 | */ | ||
214 | static unsigned long save_fl(void) | 227 | static unsigned long save_fl(void) |
215 | { | 228 | { |
216 | return lguest_data.irq_enabled; | 229 | return lguest_data.irq_enabled; |
@@ -222,13 +235,15 @@ static void irq_disable(void) | |||
222 | lguest_data.irq_enabled = 0; | 235 | lguest_data.irq_enabled = 0; |
223 | } | 236 | } |
224 | 237 | ||
225 | /* Let's pause a moment. Remember how I said these are called so often? | 238 | /* |
239 | * Let's pause a moment. Remember how I said these are called so often? | ||
226 | * Jeremy Fitzhardinge optimized them so hard early in 2009 that he had to | 240 | * Jeremy Fitzhardinge optimized them so hard early in 2009 that he had to |
227 | * break some rules. In particular, these functions are assumed to save their | 241 | * break some rules. In particular, these functions are assumed to save their |
228 | * own registers if they need to: normal C functions assume they can trash the | 242 | * own registers if they need to: normal C functions assume they can trash the |
229 | * eax register. To use normal C functions, we use | 243 | * eax register. To use normal C functions, we use |
230 | * PV_CALLEE_SAVE_REGS_THUNK(), which pushes %eax onto the stack, calls the | 244 | * PV_CALLEE_SAVE_REGS_THUNK(), which pushes %eax onto the stack, calls the |
231 | * C function, then restores it. */ | 245 | * C function, then restores it. |
246 | */ | ||
232 | PV_CALLEE_SAVE_REGS_THUNK(save_fl); | 247 | PV_CALLEE_SAVE_REGS_THUNK(save_fl); |
233 | PV_CALLEE_SAVE_REGS_THUNK(irq_disable); | 248 | PV_CALLEE_SAVE_REGS_THUNK(irq_disable); |
234 | /*:*/ | 249 | /*:*/ |
@@ -237,18 +252,18 @@ PV_CALLEE_SAVE_REGS_THUNK(irq_disable); | |||
237 | extern void lg_irq_enable(void); | 252 | extern void lg_irq_enable(void); |
238 | extern void lg_restore_fl(unsigned long flags); | 253 | extern void lg_restore_fl(unsigned long flags); |
239 | 254 | ||
240 | /*M:003 Note that we don't check for outstanding interrupts when we re-enable | 255 | /*M:003 |
241 | * them (or when we unmask an interrupt). This seems to work for the moment, | 256 | * We could be more efficient in our checking of outstanding interrupts, rather |
242 | * since interrupts are rare and we'll just get the interrupt on the next timer | 257 | * than using a branch. One way would be to put the "irq_enabled" field in a |
243 | * tick, but now we can run with CONFIG_NO_HZ, we should revisit this. One way | 258 | * page by itself, and have the Host write-protect it when an interrupt comes |
244 | * would be to put the "irq_enabled" field in a page by itself, and have the | 259 | * in when irqs are disabled. There will then be a page fault as soon as |
245 | * Host write-protect it when an interrupt comes in when irqs are disabled. | 260 | * interrupts are re-enabled. |
246 | * There will then be a page fault as soon as interrupts are re-enabled. | ||
247 | * | 261 | * |
248 | * A better method is to implement soft interrupt disable generally for x86: | 262 | * A better method is to implement soft interrupt disable generally for x86: |
249 | * instead of disabling interrupts, we set a flag. If an interrupt does come | 263 | * instead of disabling interrupts, we set a flag. If an interrupt does come |
250 | * in, we then disable them for real. This is uncommon, so we could simply use | 264 | * in, we then disable them for real. This is uncommon, so we could simply use |
251 | * a hypercall for interrupt control and not worry about efficiency. :*/ | 265 | * a hypercall for interrupt control and not worry about efficiency. |
266 | :*/ | ||
252 | 267 | ||
253 | /*G:034 | 268 | /*G:034 |
254 | * The Interrupt Descriptor Table (IDT). | 269 | * The Interrupt Descriptor Table (IDT). |
@@ -261,10 +276,12 @@ extern void lg_restore_fl(unsigned long flags); | |||
261 | static void lguest_write_idt_entry(gate_desc *dt, | 276 | static void lguest_write_idt_entry(gate_desc *dt, |
262 | int entrynum, const gate_desc *g) | 277 | int entrynum, const gate_desc *g) |
263 | { | 278 | { |
264 | /* The gate_desc structure is 8 bytes long: we hand it to the Host in | 279 | /* |
280 | * The gate_desc structure is 8 bytes long: we hand it to the Host in | ||
265 | * two 32-bit chunks. The whole 32-bit kernel used to hand descriptors | 281 | * two 32-bit chunks. The whole 32-bit kernel used to hand descriptors |
266 | * around like this; typesafety wasn't a big concern in Linux's early | 282 | * around like this; typesafety wasn't a big concern in Linux's early |
267 | * years. */ | 283 | * years. |
284 | */ | ||
268 | u32 *desc = (u32 *)g; | 285 | u32 *desc = (u32 *)g; |
269 | /* Keep the local copy up to date. */ | 286 | /* Keep the local copy up to date. */ |
270 | native_write_idt_entry(dt, entrynum, g); | 287 | native_write_idt_entry(dt, entrynum, g); |
@@ -272,9 +289,11 @@ static void lguest_write_idt_entry(gate_desc *dt, | |||
272 | kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]); | 289 | kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]); |
273 | } | 290 | } |
274 | 291 | ||
275 | /* Changing to a different IDT is very rare: we keep the IDT up-to-date every | 292 | /* |
293 | * Changing to a different IDT is very rare: we keep the IDT up-to-date every | ||
276 | * time it is written, so we can simply loop through all entries and tell the | 294 | * time it is written, so we can simply loop through all entries and tell the |
277 | * Host about them. */ | 295 | * Host about them. |
296 | */ | ||
278 | static void lguest_load_idt(const struct desc_ptr *desc) | 297 | static void lguest_load_idt(const struct desc_ptr *desc) |
279 | { | 298 | { |
280 | unsigned int i; | 299 | unsigned int i; |
@@ -305,9 +324,11 @@ static void lguest_load_gdt(const struct desc_ptr *desc) | |||
305 | kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b); | 324 | kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b); |
306 | } | 325 | } |
307 | 326 | ||
308 | /* For a single GDT entry which changes, we do the lazy thing: alter our GDT, | 327 | /* |
328 | * For a single GDT entry which changes, we do the lazy thing: alter our GDT, | ||
309 | * then tell the Host to reload the entire thing. This operation is so rare | 329 | * then tell the Host to reload the entire thing. This operation is so rare |
310 | * that this naive implementation is reasonable. */ | 330 | * that this naive implementation is reasonable. |
331 | */ | ||
311 | static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum, | 332 | static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum, |
312 | const void *desc, int type) | 333 | const void *desc, int type) |
313 | { | 334 | { |
@@ -317,29 +338,36 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum, | |||
317 | dt[entrynum].a, dt[entrynum].b); | 338 | dt[entrynum].a, dt[entrynum].b); |
318 | } | 339 | } |
319 | 340 | ||
320 | /* OK, I lied. There are three "thread local storage" GDT entries which change | 341 | /* |
342 | * OK, I lied. There are three "thread local storage" GDT entries which change | ||
321 | * on every context switch (these three entries are how glibc implements | 343 | * on every context switch (these three entries are how glibc implements |
322 | * __thread variables). So we have a hypercall specifically for this case. */ | 344 | * __thread variables). So we have a hypercall specifically for this case. |
345 | */ | ||
323 | static void lguest_load_tls(struct thread_struct *t, unsigned int cpu) | 346 | static void lguest_load_tls(struct thread_struct *t, unsigned int cpu) |
324 | { | 347 | { |
325 | /* There's one problem which normal hardware doesn't have: the Host | 348 | /* |
349 | * There's one problem which normal hardware doesn't have: the Host | ||
326 | * can't handle us removing entries we're currently using. So we clear | 350 | * can't handle us removing entries we're currently using. So we clear |
327 | * the GS register here: if it's needed it'll be reloaded anyway. */ | 351 | * the GS register here: if it's needed it'll be reloaded anyway. |
352 | */ | ||
328 | lazy_load_gs(0); | 353 | lazy_load_gs(0); |
329 | lazy_hcall2(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu); | 354 | lazy_hcall2(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu); |
330 | } | 355 | } |
331 | 356 | ||
332 | /*G:038 That's enough excitement for now, back to ploughing through each of | 357 | /*G:038 |
333 | * the different pv_ops structures (we're about 1/3 of the way through). | 358 | * That's enough excitement for now, back to ploughing through each of the |
359 | * different pv_ops structures (we're about 1/3 of the way through). | ||
334 | * | 360 | * |
335 | * This is the Local Descriptor Table, another weird Intel thingy. Linux only | 361 | * This is the Local Descriptor Table, another weird Intel thingy. Linux only |
336 | * uses this for some strange applications like Wine. We don't do anything | 362 | * uses this for some strange applications like Wine. We don't do anything |
337 | * here, so they'll get an informative and friendly Segmentation Fault. */ | 363 | * here, so they'll get an informative and friendly Segmentation Fault. |
364 | */ | ||
338 | static void lguest_set_ldt(const void *addr, unsigned entries) | 365 | static void lguest_set_ldt(const void *addr, unsigned entries) |
339 | { | 366 | { |
340 | } | 367 | } |
341 | 368 | ||
342 | /* This loads a GDT entry into the "Task Register": that entry points to a | 369 | /* |
370 | * This loads a GDT entry into the "Task Register": that entry points to a | ||
343 | * structure called the Task State Segment. Some comments scattered through the | 371 | * structure called the Task State Segment. Some comments scattered through the |
344 | * kernel code indicate that this was used for task switching in ages past, along | 372 | * kernel code indicate that this was used for task switching in ages past, along |
345 | * with blood sacrifice and astrology. | 373 | * with blood sacrifice and astrology. |
@@ -347,19 +375,21 @@ static void lguest_set_ldt(const void *addr, unsigned entries) | |||
347 | * Now there's nothing interesting in here that we don't get told elsewhere. | 375 | * Now there's nothing interesting in here that we don't get told elsewhere. |
348 | * But the native version uses the "ltr" instruction, which makes the Host | 376 | * But the native version uses the "ltr" instruction, which makes the Host |
349 | * complain to the Guest about a Segmentation Fault and it'll oops. So we | 377 | * complain to the Guest about a Segmentation Fault and it'll oops. So we |
350 | * override the native version with a do-nothing version. */ | 378 | * override the native version with a do-nothing version. |
379 | */ | ||
351 | static void lguest_load_tr_desc(void) | 380 | static void lguest_load_tr_desc(void) |
352 | { | 381 | { |
353 | } | 382 | } |
354 | 383 | ||
355 | /* The "cpuid" instruction is a way of querying both the CPU identity | 384 | /* |
385 | * The "cpuid" instruction is a way of querying both the CPU identity | ||
356 | * (manufacturer, model, etc) and its features. It was introduced before the | 386 | * (manufacturer, model, etc) and its features. It was introduced before the |
357 | * Pentium in 1993 and keeps getting extended by Intel, AMD and others. | 387 | * Pentium in 1993 and keeps getting extended by Intel, AMD and others. |
358 | * As you might imagine, after a decade and a half of this treatment, it is now a | 388 | * As you might imagine, after a decade and a half of this treatment, it is now a |
359 | * giant ball of hair. Its entry in the current Intel manual runs to 28 pages. | 389 | * giant ball of hair. Its entry in the current Intel manual runs to 28 pages. |
360 | * | 390 | * |
361 | * This instruction even has its own Wikipedia entry. The Wikipedia entry | 391 | * This instruction even has its own Wikipedia entry. The Wikipedia entry |
362 | * has been translated into 4 languages. I am not making this up! | 392 | * has been translated into 5 languages. I am not making this up! |
363 | * | 393 | * |
364 | * We could get funky here and identify ourselves as "GenuineLguest", but | 394 | * We could get funky here and identify ourselves as "GenuineLguest", but |
365 | * instead we just use the real "cpuid" instruction. Then I pretty much turned | 395 | * instead we just use the real "cpuid" instruction. Then I pretty much turned |
@@ -371,7 +401,8 @@ static void lguest_load_tr_desc(void) | |||
371 | * Replacing the cpuid so we can turn features off is great for the kernel, but | 401 | * Replacing the cpuid so we can turn features off is great for the kernel, but |
372 | * anyone (including userspace) can just use the raw "cpuid" instruction and | 402 | * anyone (including userspace) can just use the raw "cpuid" instruction and |
373 | * the Host won't even notice since it isn't privileged. So we try not to get | 403 | * the Host won't even notice since it isn't privileged. So we try not to get |
374 | * too worked up about it. */ | 404 | * too worked up about it. |
405 | */ | ||
375 | static void lguest_cpuid(unsigned int *ax, unsigned int *bx, | 406 | static void lguest_cpuid(unsigned int *ax, unsigned int *bx, |
376 | unsigned int *cx, unsigned int *dx) | 407 | unsigned int *cx, unsigned int *dx) |
377 | { | 408 | { |
@@ -379,43 +410,63 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx, | |||
379 | 410 | ||
380 | native_cpuid(ax, bx, cx, dx); | 411 | native_cpuid(ax, bx, cx, dx); |
381 | switch (function) { | 412 | switch (function) { |
382 | case 0: /* ID and highest CPUID. Futureproof a little by sticking to | 413 | /* |
383 | * older ones. */ | 414 | * CPUID 0 gives the highest legal CPUID number (and the ID string). |
415 | * We futureproof our code a little by sticking to known CPUID values. | ||
416 | */ | ||
417 | case 0: | ||
384 | if (*ax > 5) | 418 | if (*ax > 5) |
385 | *ax = 5; | 419 | *ax = 5; |
386 | break; | 420 | break; |
387 | case 1: /* Basic feature request. */ | 421 | |
388 | /* We only allow kernel to see SSE3, CMPXCHG16B and SSSE3 */ | 422 | /* |
423 | * CPUID 1 is a basic feature request. | ||
424 | * | ||
425 | * CX: we only allow kernel to see SSE3, CMPXCHG16B and SSSE3 | ||
426 | * DX: SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU and PAE. | ||
427 | */ | ||
428 | case 1: | ||
389 | *cx &= 0x00002201; | 429 | *cx &= 0x00002201; |
390 | /* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU, PAE. */ | ||
391 | *dx &= 0x07808151; | 430 | *dx &= 0x07808151; |
392 | /* The Host can do a nice optimization if it knows that the | 431 | /* |
432 | * The Host can do a nice optimization if it knows that the | ||
393 | * kernel mappings (addresses above 0xC0000000 or whatever | 433 | * kernel mappings (addresses above 0xC0000000 or whatever |
394 | * PAGE_OFFSET is set to) haven't changed. But Linux calls | 434 | * PAGE_OFFSET is set to) haven't changed. But Linux calls |
395 | * flush_tlb_user() for both user and kernel mappings unless | 435 | * flush_tlb_user() for both user and kernel mappings unless |
396 | * the Page Global Enable (PGE) feature bit is set. */ | 436 | * the Page Global Enable (PGE) feature bit is set. |
437 | */ | ||
397 | *dx |= 0x00002000; | 438 | *dx |= 0x00002000; |
398 | /* We also lie, and say we're family id 5. 6 or greater | 439 | /* |
440 | * We also lie, and say we're family id 5. 6 or greater | ||
399 | * leads to a rdmsr in early_init_intel which we can't handle. | 441 | * leads to a rdmsr in early_init_intel which we can't handle. |
400 | * Family ID is returned as bits 8-12 in ax. */ | 442 | * Family ID is returned as bits 8-12 in ax. |
443 | */ | ||
401 | *ax &= 0xFFFFF0FF; | 444 | *ax &= 0xFFFFF0FF; |
402 | *ax |= 0x00000500; | 445 | *ax |= 0x00000500; |
403 | break; | 446 | break; |
447 | /* | ||
448 | * 0x80000000 returns the highest Extended Function, so we futureproof | ||
449 | * like we do above by limiting it to known fields. | ||
450 | */ | ||
404 | case 0x80000000: | 451 | case 0x80000000: |
405 | /* Futureproof this a little: if they ask how much extended | ||
406 | * processor information there is, limit it to known fields. */ | ||
407 | if (*ax > 0x80000008) | 452 | if (*ax > 0x80000008) |
408 | *ax = 0x80000008; | 453 | *ax = 0x80000008; |
409 | break; | 454 | break; |
455 | |||
456 | /* | ||
457 | * PAE systems can mark pages as non-executable. Linux calls this the | ||
458 | * NX bit. Intel calls it XD (eXecute Disable), AMD EVP (Enhanced | ||
459 | * Virus Protection). We just turn it off here, since we don't | ||
460 | * support it. | ||
461 | */ | ||
410 | case 0x80000001: | 462 | case 0x80000001: |
411 | /* Here we should fix nx cap depending on host. */ | ||
412 | /* For this version of PAE, we just clear NX bit. */ | ||
413 | *dx &= ~(1 << 20); | 463 | *dx &= ~(1 << 20); |
414 | break; | 464 | break; |
415 | } | 465 | } |
416 | } | 466 | } |
417 | 467 | ||
418 | /* Intel has four control registers, imaginatively named cr0, cr2, cr3 and cr4. | 468 | /* |
469 | * Intel has four control registers, imaginatively named cr0, cr2, cr3 and cr4. | ||
419 | * I assume there's a cr1, but it hasn't bothered us yet, so we'll not bother | 470 | * I assume there's a cr1, but it hasn't bothered us yet, so we'll not bother |
420 | * it. The Host needs to know when the Guest wants to change them, so we have | 471 | * it. The Host needs to know when the Guest wants to change them, so we have |
421 | * a whole series of functions like read_cr0() and write_cr0(). | 472 | * a whole series of functions like read_cr0() and write_cr0(). |
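For the lguest_cpuid() changes earlier in this hunk, the family-ID trick is plain bit masking on the leaf-1 EAX value. A tiny standalone example (the raw value below is invented) shows the effect of the two lines *ax &= 0xFFFFF0FF; *ax |= 0x00000500;:

#include <stdio.h>

int main(void)
{
	unsigned int ax = 0x000306c3;	/* invented raw CPUID.1 EAX value */

	ax &= 0xFFFFF0FF;		/* clear the family field */
	ax |= 0x00000500;		/* claim to be family 5 */

	printf("family = %u\n", (ax >> 8) & 0xF);	/* prints 5 */
	return 0;
}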
@@ -430,7 +481,8 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx, | |||
430 | * name like "FPUTRAP bit" be a little less cryptic? | 481 | * name like "FPUTRAP bit" be a little less cryptic? |
431 | * | 482 | * |
432 | * We store cr0 locally because the Host never changes it. The Guest sometimes | 483 | * We store cr0 locally because the Host never changes it. The Guest sometimes |
433 | * wants to read it and we'd prefer not to bother the Host unnecessarily. */ | 484 | * wants to read it and we'd prefer not to bother the Host unnecessarily. |
485 | */ | ||
434 | static unsigned long current_cr0; | 486 | static unsigned long current_cr0; |
435 | static void lguest_write_cr0(unsigned long val) | 487 | static void lguest_write_cr0(unsigned long val) |
436 | { | 488 | { |
@@ -443,18 +495,22 @@ static unsigned long lguest_read_cr0(void) | |||
443 | return current_cr0; | 495 | return current_cr0; |
444 | } | 496 | } |
445 | 497 | ||
446 | /* Intel provided a special instruction to clear the TS bit for people too cool | 498 | /* |
499 | * Intel provided a special instruction to clear the TS bit for people too cool | ||
447 | * to use write_cr0() to do it. This "clts" instruction is faster, because all | 500 | * to use write_cr0() to do it. This "clts" instruction is faster, because all |
448 | * the vowels have been optimized out. */ | 501 | * the vowels have been optimized out. |
502 | */ | ||
449 | static void lguest_clts(void) | 503 | static void lguest_clts(void) |
450 | { | 504 | { |
451 | lazy_hcall1(LHCALL_TS, 0); | 505 | lazy_hcall1(LHCALL_TS, 0); |
452 | current_cr0 &= ~X86_CR0_TS; | 506 | current_cr0 &= ~X86_CR0_TS; |
453 | } | 507 | } |
454 | 508 | ||
455 | /* cr2 is the virtual address of the last page fault, which the Guest only ever | 509 | /* |
510 | * cr2 is the virtual address of the last page fault, which the Guest only ever | ||
456 | * reads. The Host kindly writes this into our "struct lguest_data", so we | 511 | * reads. The Host kindly writes this into our "struct lguest_data", so we |
457 | * just read it out of there. */ | 512 | * just read it out of there. |
513 | */ | ||
458 | static unsigned long lguest_read_cr2(void) | 514 | static unsigned long lguest_read_cr2(void) |
459 | { | 515 | { |
460 | return lguest_data.cr2; | 516 | return lguest_data.cr2; |
@@ -463,10 +519,12 @@ static unsigned long lguest_read_cr2(void) | |||
463 | /* See lguest_set_pte() below. */ | 519 | /* See lguest_set_pte() below. */ |
464 | static bool cr3_changed = false; | 520 | static bool cr3_changed = false; |
465 | 521 | ||
466 | /* cr3 is the current toplevel pagetable page: the principle is the same as | 522 | /* |
523 | * cr3 is the current toplevel pagetable page: the principle is the same as | ||
467 | * cr0. Keep a local copy, and tell the Host when it changes. The only | 524 | * cr0. Keep a local copy, and tell the Host when it changes. The only |
468 | * difference is that our local copy is in lguest_data because the Host needs | 525 | * difference is that our local copy is in lguest_data because the Host needs |
469 | * to set it upon our initial hypercall. */ | 526 | * to set it upon our initial hypercall. |
527 | */ | ||
470 | static void lguest_write_cr3(unsigned long cr3) | 528 | static void lguest_write_cr3(unsigned long cr3) |
471 | { | 529 | { |
472 | lguest_data.pgdir = cr3; | 530 | lguest_data.pgdir = cr3; |
@@ -511,7 +569,7 @@ static void lguest_write_cr4(unsigned long val) | |||
511 | * cr3 ---> +---------+ | 569 | * cr3 ---> +---------+ |
512 | * | --------->+---------+ | 570 | * | --------->+---------+ |
513 | * | | | PADDR1 | | 571 | * | | | PADDR1 | |
514 | * Top-level | | PADDR2 | | 572 | * Mid-level | | PADDR2 | |
515 | * (PMD) page | | | | 573 | * (PMD) page | | | |
516 | * | | Lower-level | | 574 | * | | Lower-level | |
517 | * | | (PTE) page | | 575 | * | | (PTE) page | |
@@ -531,21 +589,62 @@ static void lguest_write_cr4(unsigned long val) | |||
531 | * Index into top Index into second Offset within page | 589 | * Index into top Index into second Offset within page |
532 | * page directory page pagetable page | 590 | * page directory page pagetable page |
533 | * | 591 | * |
534 | * The kernel spends a lot of time changing both the top-level page directory | 592 | * Now, unfortunately, this isn't the whole story: Intel added Physical Address |
535 | * and lower-level pagetable pages. The Guest doesn't know physical addresses, | 593 | * Extension (PAE) to allow 32 bit systems to use 64GB of memory (ie. 36 bits). |
536 | * so while it maintains these page tables exactly like normal, it also needs | 594 | * These are held in 64-bit page table entries, so we can now only fit 512 |
537 | * to keep the Host informed whenever it makes a change: the Host will create | 595 | * entries in a page, and the neat three-level tree breaks down. |
538 | * the real page tables based on the Guests'. | 596 | * |
597 | * The result is a four level page table: | ||
598 | * | ||
599 | * cr3 --> [ 4 Upper ] | ||
600 | * [ Level ] | ||
601 | * [ Entries ] | ||
602 | * [(PUD Page)]---> +---------+ | ||
603 | * | --------->+---------+ | ||
604 | * | | | PADDR1 | | ||
605 | * Mid-level | | PADDR2 | | ||
606 | * (PMD) page | | | | ||
607 | * | | Lower-level | | ||
608 | * | | (PTE) page | | ||
609 | * | | | | | ||
610 | * .... .... | ||
611 | * | ||
612 | * | ||
613 | * And the virtual address is decoded as: | ||
614 | * | ||
615 | * 1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 | ||
616 | * |<-2->|<--- 9 bits ---->|<---- 9 bits --->|<------ 12 bits ------>| | ||
617 | * Index into Index into mid Index into lower Offset within page | ||
618 | * top entries directory page pagetable page | ||
619 | * | ||
620 | * It's too hard to switch between these two formats at runtime, so Linux only | ||
621 | * supports one or the other depending on whether CONFIG_X86_PAE is set. Many | ||
622 | * distributions turn it on, and not just for people with silly amounts of | ||
623 | * memory: the larger PTE entries allow room for the NX bit, which lets the | ||
624 | * kernel disable execution of pages and increase security. | ||
625 | * | ||
626 | * This was a problem for lguest, which couldn't run on these distributions; | ||
627 | * then Matias Zabaljauregui figured it all out and implemented it, and only a | ||
628 | * handful of puppies were crushed in the process! | ||
629 | * | ||
630 | * Back to our point: the kernel spends a lot of time changing both the | ||
631 | * top-level page directory and lower-level pagetable pages. The Guest doesn't | ||
632 | * know physical addresses, so while it maintains these page tables exactly | ||
633 | * like normal, it also needs to keep the Host informed whenever it makes a | ||
634 | * change: the Host will create the real page tables based on the Guests'. | ||
539 | */ | 635 | */ |
540 | 636 | ||
541 | /* The Guest calls this to set a second-level entry (pte), ie. to map a page | 637 | /* |
542 | * into a process' address space. We set the entry then tell the Host the | 638 | * The Guest calls this after it has set a second-level entry (pte), ie. to map |
543 | * toplevel and address this corresponds to. The Guest uses one pagetable per | 639 | * a page into a process' address space. We tell the Host the toplevel and |
544 | * process, so we need to tell the Host which one we're changing (mm->pgd). */ | 640 | * address this corresponds to. The Guest uses one pagetable per process, so |
641 | * we need to tell the Host which one we're changing (mm->pgd). | ||
642 | */ | ||
545 | static void lguest_pte_update(struct mm_struct *mm, unsigned long addr, | 643 | static void lguest_pte_update(struct mm_struct *mm, unsigned long addr, |
546 | pte_t *ptep) | 644 | pte_t *ptep) |
547 | { | 645 | { |
548 | #ifdef CONFIG_X86_PAE | 646 | #ifdef CONFIG_X86_PAE |
647 | /* PAE needs to hand a 64 bit page table entry, so it uses two args. */ | ||
549 | lazy_hcall4(LHCALL_SET_PTE, __pa(mm->pgd), addr, | 648 | lazy_hcall4(LHCALL_SET_PTE, __pa(mm->pgd), addr, |
550 | ptep->pte_low, ptep->pte_high); | 649 | ptep->pte_low, ptep->pte_high); |
551 | #else | 650 | #else |
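The 2/9/9/12 split in the PAE comment added above (and the 10/10/12 split of the earlier non-PAE diagram) can be checked with plain shifts and masks. A small standalone sketch, using an arbitrary made-up address:

#include <stdio.h>

int main(void)
{
	unsigned int vaddr = 0xC0101ABC;	/* arbitrary example address */

	/* PAE: 2 top-level bits, 9 PMD bits, 9 PTE bits, 12 offset bits. */
	printf("PAE:  top=%u pmd=%u pte=%u offset=0x%03x\n",
	       vaddr >> 30, (vaddr >> 21) & 0x1FF,
	       (vaddr >> 12) & 0x1FF, vaddr & 0xFFF);

	/* Non-PAE: 10 page-directory bits, 10 PTE bits, 12 offset bits. */
	printf("!PAE: pgd=%u pte=%u offset=0x%03x\n",
	       vaddr >> 22, (vaddr >> 12) & 0x3FF, vaddr & 0xFFF);
	return 0;
}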
@@ -553,6 +652,7 @@ static void lguest_pte_update(struct mm_struct *mm, unsigned long addr, | |||
553 | #endif | 652 | #endif |
554 | } | 653 | } |
555 | 654 | ||
655 | /* This is the "set and update" combo-meal-deal version. */ | ||
556 | static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr, | 656 | static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr, |
557 | pte_t *ptep, pte_t pteval) | 657 | pte_t *ptep, pte_t pteval) |
558 | { | 658 | { |
@@ -560,10 +660,13 @@ static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
560 | lguest_pte_update(mm, addr, ptep); | 660 | lguest_pte_update(mm, addr, ptep); |
561 | } | 661 | } |
562 | 662 | ||
563 | /* The Guest calls lguest_set_pud to set a top-level entry and lguest_set_pmd | 663 | /* |
664 | * The Guest calls lguest_set_pud to set a top-level entry and lguest_set_pmd | ||
564 | * to set a middle-level entry when PAE is activated. | 665 | * to set a middle-level entry when PAE is activated. |
666 | * | ||
565 | * Again, we set the entry then tell the Host which page we changed, | 667 | * Again, we set the entry then tell the Host which page we changed, |
566 | * and the index of the entry we changed. */ | 668 | * and the index of the entry we changed. |
669 | */ | ||
567 | #ifdef CONFIG_X86_PAE | 670 | #ifdef CONFIG_X86_PAE |
568 | static void lguest_set_pud(pud_t *pudp, pud_t pudval) | 671 | static void lguest_set_pud(pud_t *pudp, pud_t pudval) |
569 | { | 672 | { |
@@ -582,8 +685,7 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval) | |||
582 | } | 685 | } |
583 | #else | 686 | #else |
584 | 687 | ||
585 | /* The Guest calls lguest_set_pmd to set a top-level entry when PAE is not | 688 | /* The Guest calls lguest_set_pmd to set a top-level entry when !PAE. */ |
586 | * activated. */ | ||
587 | static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval) | 689 | static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval) |
588 | { | 690 | { |
589 | native_set_pmd(pmdp, pmdval); | 691 | native_set_pmd(pmdp, pmdval); |
@@ -592,7 +694,8 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval) | |||
592 | } | 694 | } |
593 | #endif | 695 | #endif |
594 | 696 | ||
595 | /* There are a couple of legacy places where the kernel sets a PTE, but we | 697 | /* |
698 | * There are a couple of legacy places where the kernel sets a PTE, but we | ||
596 | * don't know the top level any more. This is useless for us, since we don't | 699 | * don't know the top level any more. This is useless for us, since we don't |
597 | * know which pagetable is changing or what address, so we just tell the Host | 700 | * know which pagetable is changing or what address, so we just tell the Host |
598 | * to forget all of them. Fortunately, this is very rare. | 701 | * to forget all of them. Fortunately, this is very rare. |
@@ -600,7 +703,8 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval) | |||
600 | * ... except in early boot when the kernel sets up the initial pagetables, | 703 | * ... except in early boot when the kernel sets up the initial pagetables, |
601 | * which makes booting astonishingly slow: 1.83 seconds! So we don't even tell | 704 | * which makes booting astonishingly slow: 1.83 seconds! So we don't even tell |
602 | * the Host anything changed until we've done the first page table switch, | 705 | * the Host anything changed until we've done the first page table switch, |
603 | * which brings boot back to 0.25 seconds. */ | 706 | * which brings boot back to 0.25 seconds. |
707 | */ | ||
604 | static void lguest_set_pte(pte_t *ptep, pte_t pteval) | 708 | static void lguest_set_pte(pte_t *ptep, pte_t pteval) |
605 | { | 709 | { |
606 | native_set_pte(ptep, pteval); | 710 | native_set_pte(ptep, pteval); |
@@ -609,6 +713,11 @@ static void lguest_set_pte(pte_t *ptep, pte_t pteval) | |||
609 | } | 713 | } |
610 | 714 | ||
611 | #ifdef CONFIG_X86_PAE | 715 | #ifdef CONFIG_X86_PAE |
716 | /* | ||
717 | * With 64-bit PTE values, we need to be careful setting them: if we set 32 | ||
718 | * bits at a time, the hardware could see a weird half-set entry. These | ||
719 | * versions ensure we update all 64 bits at once. | ||
720 | */ | ||
612 | static void lguest_set_pte_atomic(pte_t *ptep, pte_t pte) | 721 | static void lguest_set_pte_atomic(pte_t *ptep, pte_t pte) |
613 | { | 722 | { |
614 | native_set_pte_atomic(ptep, pte); | 723 | native_set_pte_atomic(ptep, pte); |
@@ -616,19 +725,21 @@ static void lguest_set_pte_atomic(pte_t *ptep, pte_t pte) | |||
616 | lazy_hcall1(LHCALL_FLUSH_TLB, 1); | 725 | lazy_hcall1(LHCALL_FLUSH_TLB, 1); |
617 | } | 726 | } |
618 | 727 | ||
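As a side note on the comment above about 64-bit PTEs, the stand-alone sketch below (hypothetical code, not part of this patch) shows the half-set-entry hazard it guards against: on little-endian x86 the low word holds the present bit, so a page walk racing between the two stores can see a present entry whose halves belong to different PTEs.

	/* Hypothetical illustration only; never do this for a live PAE PTE. */
	static void half_set_entry_hazard(unsigned long long *ptep,
					  unsigned long long pteval)
	{
		volatile unsigned int *p = (volatile unsigned int *)ptep;

		/* The MMU can walk the table between these two 32-bit stores
		 * and pair the new low word with the stale high word. */
		p[0] = (unsigned int)pteval;
		p[1] = (unsigned int)(pteval >> 32);
	}

The atomic variants above avoid this by storing all 64 bits in one operation (on 32-bit x86 this is commonly done with a cmpxchg8b-based helper) before the Host is told about the change.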
619 | void lguest_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 728 | static void lguest_pte_clear(struct mm_struct *mm, unsigned long addr, |
729 | pte_t *ptep) | ||
620 | { | 730 | { |
621 | native_pte_clear(mm, addr, ptep); | 731 | native_pte_clear(mm, addr, ptep); |
622 | lguest_pte_update(mm, addr, ptep); | 732 | lguest_pte_update(mm, addr, ptep); |
623 | } | 733 | } |
624 | 734 | ||
625 | void lguest_pmd_clear(pmd_t *pmdp) | 735 | static void lguest_pmd_clear(pmd_t *pmdp) |
626 | { | 736 | { |
627 | lguest_set_pmd(pmdp, __pmd(0)); | 737 | lguest_set_pmd(pmdp, __pmd(0)); |
628 | } | 738 | } |
629 | #endif | 739 | #endif |
630 | 740 | ||
631 | /* Unfortunately for Lguest, the pv_mmu_ops for page tables were based on | 741 | /* |
742 | * Unfortunately for Lguest, the pv_mmu_ops for page tables were based on | ||
632 | * native page table operations. On native hardware you can set a new page | 743 | * native page table operations. On native hardware you can set a new page |
633 | * table entry whenever you want, but if you want to remove one you have to do | 744 | * table entry whenever you want, but if you want to remove one you have to do |
634 | * a TLB flush (a TLB is a little cache of page table entries kept by the CPU). | 745 | * a TLB flush (a TLB is a little cache of page table entries kept by the CPU). |
@@ -637,24 +748,29 @@ void lguest_pmd_clear(pmd_t *pmdp) | |||
637 | * called when a valid entry is written, not when it's removed (ie. marked not | 748 | * called when a valid entry is written, not when it's removed (ie. marked not |
638 | * present). Instead, this is where we come when the Guest wants to remove a | 749 | * present). Instead, this is where we come when the Guest wants to remove a |
639 | * page table entry: we tell the Host to set that entry to 0 (ie. the present | 750 | * page table entry: we tell the Host to set that entry to 0 (ie. the present |
640 | * bit is zero). */ | 751 | * bit is zero). |
752 | */ | ||
641 | static void lguest_flush_tlb_single(unsigned long addr) | 753 | static void lguest_flush_tlb_single(unsigned long addr) |
642 | { | 754 | { |
643 | /* Simply set it to zero: if it was not, it will fault back in. */ | 755 | /* Simply set it to zero: if it was not, it will fault back in. */ |
644 | lazy_hcall3(LHCALL_SET_PTE, lguest_data.pgdir, addr, 0); | 756 | lazy_hcall3(LHCALL_SET_PTE, lguest_data.pgdir, addr, 0); |
645 | } | 757 | } |
646 | 758 | ||
647 | /* This is what happens after the Guest has removed a large number of entries. | 759 | /* |
760 | * This is what happens after the Guest has removed a large number of entries. | ||
648 | * This tells the Host that any of the page table entries for userspace might | 761 | * This tells the Host that any of the page table entries for userspace might |
649 | * have changed, ie. virtual addresses below PAGE_OFFSET. */ | 762 | * have changed, ie. virtual addresses below PAGE_OFFSET. |
763 | */ | ||
650 | static void lguest_flush_tlb_user(void) | 764 | static void lguest_flush_tlb_user(void) |
651 | { | 765 | { |
652 | lazy_hcall1(LHCALL_FLUSH_TLB, 0); | 766 | lazy_hcall1(LHCALL_FLUSH_TLB, 0); |
653 | } | 767 | } |
654 | 768 | ||
655 | /* This is called when the kernel page tables have changed. That's not very | 769 | /* |
770 | * This is called when the kernel page tables have changed. That's not very | ||
656 | * common (unless the Guest is using highmem, which makes the Guest extremely | 771 | * common (unless the Guest is using highmem, which makes the Guest extremely |
657 | * slow), so it's worth separating this from the user flushing above. */ | 772 | * slow), so it's worth separating this from the user flushing above. |
773 | */ | ||
658 | static void lguest_flush_tlb_kernel(void) | 774 | static void lguest_flush_tlb_kernel(void) |
659 | { | 775 | { |
660 | lazy_hcall1(LHCALL_FLUSH_TLB, 1); | 776 | lazy_hcall1(LHCALL_FLUSH_TLB, 1); |
@@ -691,26 +807,38 @@ static struct irq_chip lguest_irq_controller = { | |||
691 | .unmask = enable_lguest_irq, | 807 | .unmask = enable_lguest_irq, |
692 | }; | 808 | }; |
693 | 809 | ||
694 | /* This sets up the Interrupt Descriptor Table (IDT) entry for each hardware | 810 | /* |
811 | * This sets up the Interrupt Descriptor Table (IDT) entry for each hardware | ||
695 | * interrupt (except 128, which is used for system calls), and then tells the | 812 | * interrupt (except 128, which is used for system calls), and then tells the |
696 | * Linux infrastructure that each interrupt is controlled by our level-based | 813 | * Linux infrastructure that each interrupt is controlled by our level-based |
697 | * lguest interrupt controller. */ | 814 | * lguest interrupt controller. |
815 | */ | ||
698 | static void __init lguest_init_IRQ(void) | 816 | static void __init lguest_init_IRQ(void) |
699 | { | 817 | { |
700 | unsigned int i; | 818 | unsigned int i; |
701 | 819 | ||
702 | for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) { | 820 | for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) { |
703 | /* Some systems map "vectors" to interrupts weirdly. Lguest has | 821 | /* Some systems map "vectors" to interrupts weirdly. Not us! */ |
704 | * a straightforward 1 to 1 mapping, so force that here. */ | ||
705 | __get_cpu_var(vector_irq)[i] = i - FIRST_EXTERNAL_VECTOR; | 822 | __get_cpu_var(vector_irq)[i] = i - FIRST_EXTERNAL_VECTOR; |
706 | if (i != SYSCALL_VECTOR) | 823 | if (i != SYSCALL_VECTOR) |
707 | set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]); | 824 | set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]); |
708 | } | 825 | } |
709 | /* This call is required to set up for 4k stacks, where we have | 826 | |
710 | * separate stacks for hard and soft interrupts. */ | 827 | /* |
828 | * This call is required to set up for 4k stacks, where we have | ||
829 | * separate stacks for hard and soft interrupts. | ||
830 | */ | ||
711 | irq_ctx_init(smp_processor_id()); | 831 | irq_ctx_init(smp_processor_id()); |
712 | } | 832 | } |
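To make the 1:1 mapping above concrete: on 32-bit x86 FIRST_EXTERNAL_VECTOR is 0x20 and SYSCALL_VECTOR is 0x80 (128), so vector 0x20 becomes irq 0, 0x21 becomes irq 1, and so on; every vector still gets a vector_irq slot, but no IDT gate is installed for 0x80 because that vector is the int 0x80 system-call entry.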
713 | 833 | ||
834 | /* | ||
835 | * With CONFIG_SPARSE_IRQ, interrupt descriptors are allocated as-needed, so | ||
836 | * rather than set them in lguest_init_IRQ we are called here every time an | ||
837 | * lguest device needs an interrupt. | ||
838 | * | ||
839 | * FIXME: irq_to_desc_alloc_node() can fail due to lack of memory, we should | ||
840 | * pass that up! | ||
841 | */ | ||
714 | void lguest_setup_irq(unsigned int irq) | 842 | void lguest_setup_irq(unsigned int irq) |
715 | { | 843 | { |
716 | irq_to_desc_alloc_node(irq, 0); | 844 | irq_to_desc_alloc_node(irq, 0); |
@@ -729,31 +857,39 @@ static unsigned long lguest_get_wallclock(void) | |||
729 | return lguest_data.time.tv_sec; | 857 | return lguest_data.time.tv_sec; |
730 | } | 858 | } |
731 | 859 | ||
732 | /* The TSC is an Intel thing called the Time Stamp Counter. The Host tells us | 860 | /* |
861 | * The TSC is an Intel thing called the Time Stamp Counter. The Host tells us | ||
733 | * what speed it runs at, or 0 if it's unusable as a reliable clock source. | 862 | * what speed it runs at, or 0 if it's unusable as a reliable clock source. |
734 | * This matches what we want here: if we return 0 from this function, the x86 | 863 | * This matches what we want here: if we return 0 from this function, the x86 |
735 | * TSC clock will give up and not register itself. */ | 864 | * TSC clock will give up and not register itself. |
865 | */ | ||
736 | static unsigned long lguest_tsc_khz(void) | 866 | static unsigned long lguest_tsc_khz(void) |
737 | { | 867 | { |
738 | return lguest_data.tsc_khz; | 868 | return lguest_data.tsc_khz; |
739 | } | 869 | } |
740 | 870 | ||
741 | /* If we can't use the TSC, the kernel falls back to our lower-priority | 871 | /* |
742 | * "lguest_clock", where we read the time value given to us by the Host. */ | 872 | * If we can't use the TSC, the kernel falls back to our lower-priority |
873 | * "lguest_clock", where we read the time value given to us by the Host. | ||
874 | */ | ||
743 | static cycle_t lguest_clock_read(struct clocksource *cs) | 875 | static cycle_t lguest_clock_read(struct clocksource *cs) |
744 | { | 876 | { |
745 | unsigned long sec, nsec; | 877 | unsigned long sec, nsec; |
746 | 878 | ||
747 | /* Since the time is in two parts (seconds and nanoseconds), we risk | 879 | /* |
880 | * Since the time is in two parts (seconds and nanoseconds), we risk | ||
748 | * reading it just as it's changing from 99 & 0.999999999 to 100 and 0, | 881 | * reading it just as it's changing from 99 & 0.999999999 to 100 and 0, |
749 | * and getting 99 and 0. As Linux tends to come apart under the stress | 882 | * and getting 99 and 0. As Linux tends to come apart under the stress |
750 | * of time travel, we must be careful: */ | 883 | * of time travel, we must be careful: |
884 | */ | ||
751 | do { | 885 | do { |
752 | /* First we read the seconds part. */ | 886 | /* First we read the seconds part. */ |
753 | sec = lguest_data.time.tv_sec; | 887 | sec = lguest_data.time.tv_sec; |
754 | /* This read memory barrier tells the compiler and the CPU that | 888 | /* |
889 | * This read memory barrier tells the compiler and the CPU that | ||
755 | * this can't be reordered: we have to complete the above | 890 | * this can't be reordered: we have to complete the above |
756 | * before going on. */ | 891 | * before going on. |
892 | */ | ||
757 | rmb(); | 893 | rmb(); |
758 | /* Now we read the nanoseconds part. */ | 894 | /* Now we read the nanoseconds part. */ |
759 | nsec = lguest_data.time.tv_nsec; | 895 | nsec = lguest_data.time.tv_nsec; |
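The retry that closes this loop falls outside the hunk shown, so here is a stand-alone sketch of the whole pattern the comment describes (hypothetical struct and helper names; the kernel uses rmb(), a real read barrier, where this sketch only has a compiler barrier):

	struct two_part_time {
		volatile long tv_sec;
		volatile long tv_nsec;
	};

	static unsigned long long read_two_part_time(const struct two_part_time *t)
	{
		long sec, nsec;

		do {
			sec = t->tv_sec;
			/* Compiler barrier: keep the nsec read after the sec read. */
			__asm__ __volatile__("" ::: "memory");
			nsec = t->tv_nsec;
			__asm__ __volatile__("" ::: "memory");
			/* If the seconds ticked over while we were reading, retry. */
		} while (sec != t->tv_sec);

		return (unsigned long long)sec * 1000000000ULL + nsec;
	}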
@@ -777,9 +913,11 @@ static struct clocksource lguest_clock = { | |||
777 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 913 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
778 | }; | 914 | }; |
779 | 915 | ||
780 | /* We also need a "struct clock_event_device": Linux asks us to set it to go | 916 | /* |
917 | * We also need a "struct clock_event_device": Linux asks us to set it to go | ||
781 | * off some time in the future. Actually, James Morris figured all this out, I | 918 | * off some time in the future. Actually, James Morris figured all this out, I |
782 | * just applied the patch. */ | 919 | * just applied the patch. |
920 | */ | ||
783 | static int lguest_clockevent_set_next_event(unsigned long delta, | 921 | static int lguest_clockevent_set_next_event(unsigned long delta, |
784 | struct clock_event_device *evt) | 922 | struct clock_event_device *evt) |
785 | { | 923 | { |
@@ -829,8 +967,10 @@ static struct clock_event_device lguest_clockevent = { | |||
829 | .max_delta_ns = LG_CLOCK_MAX_DELTA, | 967 | .max_delta_ns = LG_CLOCK_MAX_DELTA, |
830 | }; | 968 | }; |
831 | 969 | ||
832 | /* This is the Guest timer interrupt handler (hardware interrupt 0). We just | 970 | /* |
833 | * call the clockevent infrastructure and it does whatever needs doing. */ | 971 | * This is the Guest timer interrupt handler (hardware interrupt 0). We just |
972 | * call the clockevent infrastructure and it does whatever needs doing. | ||
973 | */ | ||
834 | static void lguest_time_irq(unsigned int irq, struct irq_desc *desc) | 974 | static void lguest_time_irq(unsigned int irq, struct irq_desc *desc) |
835 | { | 975 | { |
836 | unsigned long flags; | 976 | unsigned long flags; |
@@ -841,10 +981,12 @@ static void lguest_time_irq(unsigned int irq, struct irq_desc *desc) | |||
841 | local_irq_restore(flags); | 981 | local_irq_restore(flags); |
842 | } | 982 | } |
843 | 983 | ||
844 | /* At some point in the boot process, we get asked to set up our timing | 984 | /* |
985 | * At some point in the boot process, we get asked to set up our timing | ||
845 | * infrastructure. The kernel doesn't expect timer interrupts before this, but | 986 | * infrastructure. The kernel doesn't expect timer interrupts before this, but |
846 | * we cleverly initialized the "blocked_interrupts" field of "struct | 987 | * we cleverly initialized the "blocked_interrupts" field of "struct |
847 | * lguest_data" so that timer interrupts were blocked until now. */ | 988 | * lguest_data" so that timer interrupts were blocked until now. |
989 | */ | ||
848 | static void lguest_time_init(void) | 990 | static void lguest_time_init(void) |
849 | { | 991 | { |
850 | /* Set up the timer interrupt (0) to go to our simple timer routine */ | 992 | /* Set up the timer interrupt (0) to go to our simple timer routine */ |
@@ -868,14 +1010,16 @@ static void lguest_time_init(void) | |||
868 | * to work. They're pretty simple. | 1010 | * to work. They're pretty simple. |
869 | */ | 1011 | */ |
870 | 1012 | ||
871 | /* The Guest needs to tell the Host what stack it expects traps to use. For | 1013 | /* |
1014 | * The Guest needs to tell the Host what stack it expects traps to use. For | ||
872 | * native hardware, this is part of the Task State Segment mentioned above in | 1015 | * native hardware, this is part of the Task State Segment mentioned above in |
873 | * lguest_load_tr_desc(), but to help hypervisors there's this special call. | 1016 | * lguest_load_tr_desc(), but to help hypervisors there's this special call. |
874 | * | 1017 | * |
875 | * We tell the Host the segment we want to use (__KERNEL_DS is the kernel data | 1018 | * We tell the Host the segment we want to use (__KERNEL_DS is the kernel data |
876 | * segment), the privilege level (we're privilege level 1, the Host is 0 and | 1019 | * segment), the privilege level (we're privilege level 1, the Host is 0 and |
877 | * will not tolerate us trying to use that), the stack pointer, and the number | 1020 | * will not tolerate us trying to use that), the stack pointer, and the number |
878 | * of pages in the stack. */ | 1021 | * of pages in the stack. |
1022 | */ | ||
879 | static void lguest_load_sp0(struct tss_struct *tss, | 1023 | static void lguest_load_sp0(struct tss_struct *tss, |
880 | struct thread_struct *thread) | 1024 | struct thread_struct *thread) |
881 | { | 1025 | { |
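The body of lguest_load_sp0() falls outside this hunk. As a sketch of what the comment describes (reconstructed from memory, so treat the exact call as an assumption), the Guest passes the kernel data segment, the stack pointer, and the stack size in pages to the Host, then keeps the native TSS bookkeeping in step:

	/* Assumed sketch; the real body is not shown in this hunk. */
	static void lguest_load_sp0_sketch(struct tss_struct *tss,
					   struct thread_struct *thread)
	{
		/* Segment (adjusted for privilege level 1), stack top, and
		 * the number of pages in the stack. */
		lazy_hcall3(LHCALL_SET_STACK, __KERNEL_DS | 0x1, thread->sp0,
			    THREAD_SIZE / PAGE_SIZE);
		/* Keep the native copy of sp0 in the TSS up to date too. */
		native_load_sp0(tss, thread);
	}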
@@ -889,7 +1033,8 @@ static void lguest_set_debugreg(int regno, unsigned long value) | |||
889 | /* FIXME: Implement */ | 1033 | /* FIXME: Implement */ |
890 | } | 1034 | } |
891 | 1035 | ||
892 | /* There are times when the kernel wants to make sure that no memory writes are | 1036 | /* |
1037 | * There are times when the kernel wants to make sure that no memory writes are | ||
893 | * caught in the cache (that they've all reached real hardware devices). This | 1038 | * caught in the cache (that they've all reached real hardware devices). This |
894 | * doesn't matter for the Guest which has virtual hardware. | 1039 | * doesn't matter for the Guest which has virtual hardware. |
895 | * | 1040 | * |
@@ -903,11 +1048,13 @@ static void lguest_wbinvd(void) | |||
903 | { | 1048 | { |
904 | } | 1049 | } |
905 | 1050 | ||
906 | /* If the Guest expects to have an Advanced Programmable Interrupt Controller, | 1051 | /* |
1052 | * If the Guest expects to have an Advanced Programmable Interrupt Controller, | ||
907 | * we play dumb by ignoring writes and returning 0 for reads. So it's no | 1053 | * we play dumb by ignoring writes and returning 0 for reads. So it's no |
908 | * longer Programmable nor Controlling anything, and I don't think 8 lines of | 1054 | * longer Programmable nor Controlling anything, and I don't think 8 lines of |
909 | * code qualifies for Advanced. It will also never interrupt anything. It | 1055 | * code qualifies for Advanced. It will also never interrupt anything. It |
910 | * does, however, allow us to get through the Linux boot code. */ | 1056 | * does, however, allow us to get through the Linux boot code. |
1057 | */ | ||
911 | #ifdef CONFIG_X86_LOCAL_APIC | 1058 | #ifdef CONFIG_X86_LOCAL_APIC |
912 | static void lguest_apic_write(u32 reg, u32 v) | 1059 | static void lguest_apic_write(u32 reg, u32 v) |
913 | { | 1060 | { |
@@ -956,11 +1103,13 @@ static void lguest_safe_halt(void) | |||
956 | kvm_hypercall0(LHCALL_HALT); | 1103 | kvm_hypercall0(LHCALL_HALT); |
957 | } | 1104 | } |
958 | 1105 | ||
959 | /* The SHUTDOWN hypercall takes a string to describe what's happening, and | 1106 | /* |
1107 | * The SHUTDOWN hypercall takes a string to describe what's happening, and | ||
960 | * an argument which says whether this to restart (reboot) the Guest or not. | 1108 | * an argument which says whether this is to restart (reboot) the Guest or not. |
961 | * | 1109 | * |
962 | * Note that the Host always prefers that the Guest speak in physical addresses | 1110 | * Note that the Host always prefers that the Guest speak in physical addresses |
963 | * rather than virtual addresses, so we use __pa() here. */ | 1111 | * rather than virtual addresses, so we use __pa() here. |
1112 | */ | ||
964 | static void lguest_power_off(void) | 1113 | static void lguest_power_off(void) |
965 | { | 1114 | { |
966 | kvm_hypercall2(LHCALL_SHUTDOWN, __pa("Power down"), | 1115 | kvm_hypercall2(LHCALL_SHUTDOWN, __pa("Power down"), |
@@ -991,8 +1140,10 @@ static __init char *lguest_memory_setup(void) | |||
991 | * nice to move it back to lguest_init. Patch welcome... */ | 1140 | * nice to move it back to lguest_init. Patch welcome... */ |
992 | atomic_notifier_chain_register(&panic_notifier_list, &paniced); | 1141 | atomic_notifier_chain_register(&panic_notifier_list, &paniced); |
993 | 1142 | ||
994 | /* The Linux bootloader header contains an "e820" memory map: the | 1143 | /* |
995 | * Launcher populated the first entry with our memory limit. */ | 1144 | * The Linux bootloader header contains an "e820" memory map: the |
1145 | * Launcher populated the first entry with our memory limit. | ||
1146 | */ | ||
996 | e820_add_region(boot_params.e820_map[0].addr, | 1147 | e820_add_region(boot_params.e820_map[0].addr, |
997 | boot_params.e820_map[0].size, | 1148 | boot_params.e820_map[0].size, |
998 | boot_params.e820_map[0].type); | 1149 | boot_params.e820_map[0].type); |
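For illustration (hypothetical numbers, not taken from this patch), a Launcher giving the Guest 64 MB of RAM would have filled that first map entry along these lines before booting the kernel:

	/* Hypothetical example of the single e820 entry the Launcher provides. */
	struct launcher_e820_example {
		unsigned long long addr;	/* start of the region */
		unsigned long long size;	/* length in bytes */
		unsigned int type;		/* 1 == usable RAM (E820_RAM) */
	} first_entry = {
		.addr = 0,
		.size = 64 * 1024 * 1024,
		.type = 1,
	};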
@@ -1001,16 +1152,17 @@ static __init char *lguest_memory_setup(void) | |||
1001 | return "LGUEST"; | 1152 | return "LGUEST"; |
1002 | } | 1153 | } |
1003 | 1154 | ||
1004 | /* We will eventually use the virtio console device to produce console output, | 1155 | /* |
1156 | * We will eventually use the virtio console device to produce console output, | ||
1005 | * but before that is set up we use LHCALL_NOTIFY on normal memory to produce | 1157 | * but before that is set up we use LHCALL_NOTIFY on normal memory to produce |
1006 | * console output. */ | 1158 | * console output. |
1159 | */ | ||
1007 | static __init int early_put_chars(u32 vtermno, const char *buf, int count) | 1160 | static __init int early_put_chars(u32 vtermno, const char *buf, int count) |
1008 | { | 1161 | { |
1009 | char scratch[17]; | 1162 | char scratch[17]; |
1010 | unsigned int len = count; | 1163 | unsigned int len = count; |
1011 | 1164 | ||
1012 | /* We use a nul-terminated string, so we have to make a copy. Icky, | 1165 | /* We use a nul-terminated string, so we make a copy. Icky, huh? */ |
1013 | * huh? */ | ||
1014 | if (len > sizeof(scratch) - 1) | 1166 | if (len > sizeof(scratch) - 1) |
1015 | len = sizeof(scratch) - 1; | 1167 | len = sizeof(scratch) - 1; |
1016 | scratch[len] = '\0'; | 1168 | scratch[len] = '\0'; |
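The remainder of early_put_chars() is outside this hunk; the sketch below fills it in from memory (so the NOTIFY call is an assumption): copy the bytes into the nul-terminated scratch buffer and hand the Host its physical address.

	/* Assumed sketch of the complete routine, for orientation only. */
	static int early_put_chars_sketch(unsigned int vtermno, const char *buf,
					  int count)
	{
		char scratch[17];
		unsigned int len = count;

		if (len > sizeof(scratch) - 1)
			len = sizeof(scratch) - 1;
		scratch[len] = '\0';
		memcpy(scratch, buf, len);
		/* Tell the Host where (physically) the string to print lives. */
		kvm_hypercall1(LHCALL_NOTIFY, __pa(scratch));
		return len;
	}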
@@ -1021,8 +1173,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) | |||
1021 | return len; | 1173 | return len; |
1022 | } | 1174 | } |
1023 | 1175 | ||
1024 | /* Rebooting also tells the Host we're finished, but the RESTART flag tells the | 1176 | /* |
1025 | * Launcher to reboot us. */ | 1177 | * Rebooting also tells the Host we're finished, but the RESTART flag tells the |
1178 | * Launcher to reboot us. | ||
1179 | */ | ||
1026 | static void lguest_restart(char *reason) | 1180 | static void lguest_restart(char *reason) |
1027 | { | 1181 | { |
1028 | kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART); | 1182 | kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART); |
@@ -1049,7 +1203,8 @@ static void lguest_restart(char *reason) | |||
1049 | * fit comfortably. | 1203 | * fit comfortably. |
1050 | * | 1204 | * |
1051 | * First we need assembly templates of each of the patchable Guest operations, | 1205 | * First we need assembly templates of each of the patchable Guest operations, |
1052 | * and these are in i386_head.S. */ | 1206 | * and these are in i386_head.S. |
1207 | */ | ||
1053 | 1208 | ||
1054 | /*G:060 We construct a table from the assembler templates: */ | 1209 | /*G:060 We construct a table from the assembler templates: */ |
1055 | static const struct lguest_insns | 1210 | static const struct lguest_insns |
@@ -1060,9 +1215,11 @@ static const struct lguest_insns | |||
1060 | [PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf }, | 1215 | [PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf }, |
1061 | }; | 1216 | }; |
1062 | 1217 | ||
1063 | /* Now our patch routine is fairly simple (based on the native one in | 1218 | /* |
1219 | * Now our patch routine is fairly simple (based on the native one in | ||
1064 | * paravirt.c). If we have a replacement, we copy it in and return how much of | 1220 | * paravirt.c). If we have a replacement, we copy it in and return how much of |
1065 | * the available space we used. */ | 1221 | * the available space we used. |
1222 | */ | ||
1066 | static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf, | 1223 | static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf, |
1067 | unsigned long addr, unsigned len) | 1224 | unsigned long addr, unsigned len) |
1068 | { | 1225 | { |
@@ -1074,8 +1231,7 @@ static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf, | |||
1074 | 1231 | ||
1075 | insn_len = lguest_insns[type].end - lguest_insns[type].start; | 1232 | insn_len = lguest_insns[type].end - lguest_insns[type].start; |
1076 | 1233 | ||
1077 | /* Similarly if we can't fit replacement (shouldn't happen, but let's | 1234 | /* Similarly if it can't fit (doesn't happen, but let's be thorough). */ |
1078 | * be thorough). */ | ||
1079 | if (len < insn_len) | 1235 | if (len < insn_len) |
1080 | return paravirt_patch_default(type, clobber, ibuf, addr, len); | 1236 | return paravirt_patch_default(type, clobber, ibuf, addr, len); |
1081 | 1237 | ||
@@ -1084,22 +1240,28 @@ static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf, | |||
1084 | return insn_len; | 1240 | return insn_len; |
1085 | } | 1241 | } |
1086 | 1242 | ||
1087 | /*G:029 Once we get to lguest_init(), we know we're a Guest. The various | 1243 | /*G:029 |
1244 | * Once we get to lguest_init(), we know we're a Guest. The various | ||
1088 | * pv_ops structures in the kernel provide points for (almost) every routine we | 1245 | * pv_ops structures in the kernel provide points for (almost) every routine we |
1089 | * have to override to avoid privileged instructions. */ | 1246 | * have to override to avoid privileged instructions. |
1247 | */ | ||
1090 | __init void lguest_init(void) | 1248 | __init void lguest_init(void) |
1091 | { | 1249 | { |
1092 | /* We're under lguest, paravirt is enabled, and we're running at | 1250 | /* We're under lguest. */ |
1093 | * privilege level 1, not 0 as normal. */ | ||
1094 | pv_info.name = "lguest"; | 1251 | pv_info.name = "lguest"; |
1252 | /* Paravirt is enabled. */ | ||
1095 | pv_info.paravirt_enabled = 1; | 1253 | pv_info.paravirt_enabled = 1; |
1254 | /* We're running at privilege level 1, not 0 as normal. */ | ||
1096 | pv_info.kernel_rpl = 1; | 1255 | pv_info.kernel_rpl = 1; |
1256 | /* Everyone except Xen runs with this set. */ | ||
1097 | pv_info.shared_kernel_pmd = 1; | 1257 | pv_info.shared_kernel_pmd = 1; |
1098 | 1258 | ||
1099 | /* We set up all the lguest overrides for sensitive operations. These | 1259 | /* |
1100 | * are detailed with the operations themselves. */ | 1260 | * We set up all the lguest overrides for sensitive operations. These |
1261 | * are detailed with the operations themselves. | ||
1262 | */ | ||
1101 | 1263 | ||
1102 | /* interrupt-related operations */ | 1264 | /* Interrupt-related operations */ |
1103 | pv_irq_ops.init_IRQ = lguest_init_IRQ; | 1265 | pv_irq_ops.init_IRQ = lguest_init_IRQ; |
1104 | pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl); | 1266 | pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl); |
1105 | pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(lg_restore_fl); | 1267 | pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(lg_restore_fl); |
@@ -1107,11 +1269,11 @@ __init void lguest_init(void) | |||
1107 | pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(lg_irq_enable); | 1269 | pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(lg_irq_enable); |
1108 | pv_irq_ops.safe_halt = lguest_safe_halt; | 1270 | pv_irq_ops.safe_halt = lguest_safe_halt; |
1109 | 1271 | ||
1110 | /* init-time operations */ | 1272 | /* Setup operations */ |
1111 | pv_init_ops.memory_setup = lguest_memory_setup; | 1273 | pv_init_ops.memory_setup = lguest_memory_setup; |
1112 | pv_init_ops.patch = lguest_patch; | 1274 | pv_init_ops.patch = lguest_patch; |
1113 | 1275 | ||
1114 | /* Intercepts of various cpu instructions */ | 1276 | /* Intercepts of various CPU instructions */ |
1115 | pv_cpu_ops.load_gdt = lguest_load_gdt; | 1277 | pv_cpu_ops.load_gdt = lguest_load_gdt; |
1116 | pv_cpu_ops.cpuid = lguest_cpuid; | 1278 | pv_cpu_ops.cpuid = lguest_cpuid; |
1117 | pv_cpu_ops.load_idt = lguest_load_idt; | 1279 | pv_cpu_ops.load_idt = lguest_load_idt; |
@@ -1132,7 +1294,7 @@ __init void lguest_init(void) | |||
1132 | pv_cpu_ops.start_context_switch = paravirt_start_context_switch; | 1294 | pv_cpu_ops.start_context_switch = paravirt_start_context_switch; |
1133 | pv_cpu_ops.end_context_switch = lguest_end_context_switch; | 1295 | pv_cpu_ops.end_context_switch = lguest_end_context_switch; |
1134 | 1296 | ||
1135 | /* pagetable management */ | 1297 | /* Pagetable management */ |
1136 | pv_mmu_ops.write_cr3 = lguest_write_cr3; | 1298 | pv_mmu_ops.write_cr3 = lguest_write_cr3; |
1137 | pv_mmu_ops.flush_tlb_user = lguest_flush_tlb_user; | 1299 | pv_mmu_ops.flush_tlb_user = lguest_flush_tlb_user; |
1138 | pv_mmu_ops.flush_tlb_single = lguest_flush_tlb_single; | 1300 | pv_mmu_ops.flush_tlb_single = lguest_flush_tlb_single; |
@@ -1154,54 +1316,71 @@ __init void lguest_init(void) | |||
1154 | pv_mmu_ops.pte_update_defer = lguest_pte_update; | 1316 | pv_mmu_ops.pte_update_defer = lguest_pte_update; |
1155 | 1317 | ||
1156 | #ifdef CONFIG_X86_LOCAL_APIC | 1318 | #ifdef CONFIG_X86_LOCAL_APIC |
1157 | /* apic read/write intercepts */ | 1319 | /* APIC read/write intercepts */ |
1158 | set_lguest_basic_apic_ops(); | 1320 | set_lguest_basic_apic_ops(); |
1159 | #endif | 1321 | #endif |
1160 | 1322 | ||
1161 | /* time operations */ | 1323 | /* Time operations */ |
1162 | pv_time_ops.get_wallclock = lguest_get_wallclock; | 1324 | pv_time_ops.get_wallclock = lguest_get_wallclock; |
1163 | pv_time_ops.time_init = lguest_time_init; | 1325 | pv_time_ops.time_init = lguest_time_init; |
1164 | pv_time_ops.get_tsc_khz = lguest_tsc_khz; | 1326 | pv_time_ops.get_tsc_khz = lguest_tsc_khz; |
1165 | 1327 | ||
1166 | /* Now is a good time to look at the implementations of these functions | 1328 | /* |
1167 | * before returning to the rest of lguest_init(). */ | 1329 | * Now is a good time to look at the implementations of these functions |
1330 | * before returning to the rest of lguest_init(). | ||
1331 | */ | ||
1168 | 1332 | ||
1169 | /*G:070 Now we've seen all the paravirt_ops, we return to | 1333 | /*G:070 |
1334 | * Now we've seen all the paravirt_ops, we return to | ||
1170 | * lguest_init() where the rest of the fairly chaotic boot setup | 1335 | * lguest_init() where the rest of the fairly chaotic boot setup |
1171 | * occurs. */ | 1336 | * occurs. |
1337 | */ | ||
1172 | 1338 | ||
1173 | /* The stack protector is a weird thing where gcc places a canary | 1339 | /* |
1340 | * The stack protector is a weird thing where gcc places a canary | ||
1174 | * value on the stack and then checks it on return. This file is | 1341 | * value on the stack and then checks it on return. This file is |
1175 | * compiled with -fno-stack-protector it, so we got this far without | 1342 | * compiled with -fno-stack-protector, so we got this far without |
1176 | * problems. The value of the canary is kept at offset 20 from the | 1343 | * problems. The value of the canary is kept at offset 20 from the |
1177 | * %gs register, so we need to set that up before calling C functions | 1344 | * %gs register, so we need to set that up before calling C functions |
1178 | * in other files. */ | 1345 | * in other files. |
1346 | */ | ||
1179 | setup_stack_canary_segment(0); | 1347 | setup_stack_canary_segment(0); |
1180 | /* We could just call load_stack_canary_segment(), but we might as | 1348 | |
1181 | * call switch_to_new_gdt() which loads the whole table and sets up | 1349 | /* |
1182 | * the per-cpu segment descriptor register %fs as well. */ | 1350 | * We could just call load_stack_canary_segment(), but we might as well |
1351 | * call switch_to_new_gdt() which loads the whole table and sets up the | ||
1352 | * per-cpu segment descriptor register %fs as well. | ||
1353 | */ | ||
1183 | switch_to_new_gdt(0); | 1354 | switch_to_new_gdt(0); |
1184 | 1355 | ||
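A tiny illustration of why offset 20 matters (assumed, matching the comment above): gcc's 32-bit stack-protector code reads the canary as %gs:20, so %gs has to point at a segment laid out that way before any protected C function runs.

	/* Illustrative only: roughly the access gcc emits for the canary. */
	static unsigned long read_stack_canary_sketch(void)
	{
		unsigned long canary;

		asm("movl %%gs:20, %0" : "=r" (canary));
		return canary;
	}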
1185 | /* As described in head_32.S, we map the first 128M of memory. */ | 1356 | /* We actually boot with all memory mapped, but let's say 128MB. */ |
1186 | max_pfn_mapped = (128*1024*1024) >> PAGE_SHIFT; | 1357 | max_pfn_mapped = (128*1024*1024) >> PAGE_SHIFT; |
1187 | 1358 | ||
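As a quick check of the constant above: with 4 KB pages (PAGE_SHIFT == 12), (128*1024*1024) >> 12 = 134217728 / 4096 = 32768, so max_pfn_mapped covers the first 32768 page frames.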
1188 | /* The Host<->Guest Switcher lives at the top of our address space, and | 1359 | /* |
1360 | * The Host<->Guest Switcher lives at the top of our address space, and | ||
1189 | * the Host told us how big it is when we made LGUEST_INIT hypercall: | 1361 | * the Host told us how big it is when we made LGUEST_INIT hypercall: |
1190 | * it put the answer in lguest_data.reserve_mem */ | 1362 | * it put the answer in lguest_data.reserve_mem |
1363 | */ | ||
1191 | reserve_top_address(lguest_data.reserve_mem); | 1364 | reserve_top_address(lguest_data.reserve_mem); |
1192 | 1365 | ||
1193 | /* If we don't initialize the lock dependency checker now, it crashes | 1366 | /* |
1194 | * paravirt_disable_iospace. */ | 1367 | * If we don't initialize the lock dependency checker now, it crashes |
1368 | * paravirt_disable_iospace. | ||
1369 | */ | ||
1195 | lockdep_init(); | 1370 | lockdep_init(); |
1196 | 1371 | ||
1197 | /* The IDE code spends about 3 seconds probing for disks: if we reserve | 1372 | /* |
1373 | * The IDE code spends about 3 seconds probing for disks: if we reserve | ||
1198 | * all the I/O ports up front it can't get them and so doesn't probe. | 1374 | * all the I/O ports up front it can't get them and so doesn't probe. |
1199 | * Other device drivers are similar (but less severe). This cuts the | 1375 | * Other device drivers are similar (but less severe). This cuts the |
1200 | * kernel boot time on my machine from 4.1 seconds to 0.45 seconds. */ | 1376 | * kernel boot time on my machine from 4.1 seconds to 0.45 seconds. |
1377 | */ | ||
1201 | paravirt_disable_iospace(); | 1378 | paravirt_disable_iospace(); |
1202 | 1379 | ||
1203 | /* This is messy CPU setup stuff which the native boot code does before | 1380 | /* |
1204 | * start_kernel, so we have to do, too: */ | 1381 | * This is messy CPU setup stuff which the native boot code does before |
1204 | * start_kernel, so we have to do, too: | 1382 | * start_kernel, so we have to do it, too: |
1383 | */ | ||
1205 | cpu_detect(&new_cpu_data); | 1384 | cpu_detect(&new_cpu_data); |
1206 | /* head.S usually sets up the first capability word, so do it here. */ | 1385 | /* head.S usually sets up the first capability word, so do it here. */ |
1207 | new_cpu_data.x86_capability[0] = cpuid_edx(1); | 1386 | new_cpu_data.x86_capability[0] = cpuid_edx(1); |
@@ -1218,22 +1397,28 @@ __init void lguest_init(void) | |||
1218 | acpi_ht = 0; | 1397 | acpi_ht = 0; |
1219 | #endif | 1398 | #endif |
1220 | 1399 | ||
1221 | /* We set the preferred console to "hvc". This is the "hypervisor | 1400 | /* |
1401 | * We set the preferred console to "hvc". This is the "hypervisor | ||
1222 | * virtual console" driver written by the PowerPC people, which we also | 1402 | * virtual console" driver written by the PowerPC people, which we also |
1223 | * adapted for lguest's use. */ | 1403 | * adapted for lguest's use. |
1404 | */ | ||
1224 | add_preferred_console("hvc", 0, NULL); | 1405 | add_preferred_console("hvc", 0, NULL); |
1225 | 1406 | ||
1226 | /* Register our very early console. */ | 1407 | /* Register our very early console. */ |
1227 | virtio_cons_early_init(early_put_chars); | 1408 | virtio_cons_early_init(early_put_chars); |
1228 | 1409 | ||
1229 | /* Last of all, we set the power management poweroff hook to point to | 1410 | /* |
1411 | * Last of all, we set the power management poweroff hook to point to | ||
1230 | * the Guest routine to power off, and the reboot hook to our restart | 1412 | * the Guest routine to power off, and the reboot hook to our restart |
1231 | * routine. */ | 1413 | * routine. |
1414 | */ | ||
1232 | pm_power_off = lguest_power_off; | 1415 | pm_power_off = lguest_power_off; |
1233 | machine_ops.restart = lguest_restart; | 1416 | machine_ops.restart = lguest_restart; |
1234 | 1417 | ||
1235 | /* Now we're set up, call i386_start_kernel() in head32.c and we proceed | 1418 | /* |
1236 | * to boot as normal. It never returns. */ | 1419 | * Now we're set up, call i386_start_kernel() in head32.c and we proceed |
1420 | * to boot as normal. It never returns. | ||
1421 | */ | ||
1237 | i386_start_kernel(); | 1422 | i386_start_kernel(); |
1238 | } | 1423 | } |
1239 | /* | 1424 | /* |
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S index a9c8cfe61cd4..27eac0faee48 100644 --- a/arch/x86/lguest/i386_head.S +++ b/arch/x86/lguest/i386_head.S | |||
@@ -5,7 +5,8 @@ | |||
5 | #include <asm/thread_info.h> | 5 | #include <asm/thread_info.h> |
6 | #include <asm/processor-flags.h> | 6 | #include <asm/processor-flags.h> |
7 | 7 | ||
8 | /*G:020 Our story starts with the kernel booting into startup_32 in | 8 | /*G:020 |
9 | * Our story starts with the kernel booting into startup_32 in | ||
9 | * arch/x86/kernel/head_32.S. It expects a boot header, which is created by | 10 | * arch/x86/kernel/head_32.S. It expects a boot header, which is created by |
10 | * the bootloader (the Launcher in our case). | 11 | * the bootloader (the Launcher in our case). |
11 | * | 12 | * |
@@ -21,11 +22,14 @@ | |||
21 | * data without remembering to subtract __PAGE_OFFSET! | 22 | * data without remembering to subtract __PAGE_OFFSET! |
22 | * | 23 | * |
23 | * The .section line puts this code in .init.text so it will be discarded after | 24 | * The .section line puts this code in .init.text so it will be discarded after |
24 | * boot. */ | 25 | * boot. |
26 | */ | ||
25 | .section .init.text, "ax", @progbits | 27 | .section .init.text, "ax", @progbits |
26 | ENTRY(lguest_entry) | 28 | ENTRY(lguest_entry) |
27 | /* We make the "initialization" hypercall now to tell the Host about | 29 | /* |
28 | * us, and also find out where it put our page tables. */ | 30 | * We make the "initialization" hypercall now to tell the Host about |
31 | * us, and also find out where it put our page tables. | ||
32 | */ | ||
29 | movl $LHCALL_LGUEST_INIT, %eax | 33 | movl $LHCALL_LGUEST_INIT, %eax |
30 | movl $lguest_data - __PAGE_OFFSET, %ebx | 34 | movl $lguest_data - __PAGE_OFFSET, %ebx |
31 | .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */ | 35 | .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */ |
@@ -33,13 +37,14 @@ ENTRY(lguest_entry) | |||
33 | /* Set up the initial stack so we can run C code. */ | 37 | /* Set up the initial stack so we can run C code. */ |
34 | movl $(init_thread_union+THREAD_SIZE),%esp | 38 | movl $(init_thread_union+THREAD_SIZE),%esp |
35 | 39 | ||
36 | /* Jumps are relative, and we're running __PAGE_OFFSET too low at the | 40 | /* Jumps are relative: we're running __PAGE_OFFSET too low. */ |
37 | * moment. */ | ||
38 | jmp lguest_init+__PAGE_OFFSET | 41 | jmp lguest_init+__PAGE_OFFSET |
39 | 42 | ||
40 | /*G:055 We create a macro which puts the assembler code between lgstart_ and | 43 | /*G:055 |
41 | * lgend_ markers. These templates are put in the .text section: they can't be | 44 | * We create a macro which puts the assembler code between lgstart_ and lgend_ |
42 | * discarded after boot as we may need to patch modules, too. */ | 45 | * markers. These templates are put in the .text section: they can't be |
46 | * discarded after boot as we may need to patch modules, too. | ||
47 | */ | ||
43 | .text | 48 | .text |
44 | #define LGUEST_PATCH(name, insns...) \ | 49 | #define LGUEST_PATCH(name, insns...) \ |
45 | lgstart_##name: insns; lgend_##name:; \ | 50 | lgstart_##name: insns; lgend_##name:; \ |
@@ -48,83 +53,103 @@ ENTRY(lguest_entry) | |||
48 | LGUEST_PATCH(cli, movl $0, lguest_data+LGUEST_DATA_irq_enabled) | 53 | LGUEST_PATCH(cli, movl $0, lguest_data+LGUEST_DATA_irq_enabled) |
49 | LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax) | 54 | LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax) |
50 | 55 | ||
51 | /*G:033 But using those wrappers is inefficient (we'll see why that doesn't | 56 | /*G:033 |
52 | * matter for save_fl and irq_disable later). If we write our routines | 57 | * But using those wrappers is inefficient (we'll see why that doesn't matter |
53 | * carefully in assembler, we can avoid clobbering any registers and avoid | 58 | * for save_fl and irq_disable later). If we write our routines carefully in |
54 | * jumping through the wrapper functions. | 59 | * assembler, we can avoid clobbering any registers and avoid jumping through |
60 | * the wrapper functions. | ||
55 | * | 61 | * |
56 | * I skipped over our first piece of assembler, but this one is worth studying | 62 | * I skipped over our first piece of assembler, but this one is worth studying |
57 | * in a bit more detail so I'll describe in easy stages. First, the routine | 63 | * in a bit more detail so I'll describe in easy stages. First, the routine to |
58 | * to enable interrupts: */ | 64 | * enable interrupts: |
65 | */ | ||
59 | ENTRY(lg_irq_enable) | 66 | ENTRY(lg_irq_enable) |
60 | /* The reverse of irq_disable, this sets lguest_data.irq_enabled to | 67 | /* |
61 | * X86_EFLAGS_IF (ie. "Interrupts enabled"). */ | 68 | * The reverse of irq_disable, this sets lguest_data.irq_enabled to |
69 | * X86_EFLAGS_IF (ie. "Interrupts enabled"). | ||
70 | */ | ||
62 | movl $X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_enabled | 71 | movl $X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_enabled |
63 | /* But now we need to check if the Host wants to know: there might have | 72 | /* |
73 | * But now we need to check if the Host wants to know: there might have | ||
64 | * been interrupts waiting to be delivered, in which case it will have | 74 | * been interrupts waiting to be delivered, in which case it will have |
65 | * set lguest_data.irq_pending to X86_EFLAGS_IF. If it's not zero, we | 75 | * set lguest_data.irq_pending to X86_EFLAGS_IF. If it's not zero, we |
66 | * jump to send_interrupts, otherwise we're done. */ | 76 | * jump to send_interrupts, otherwise we're done. |
77 | */ | ||
67 | testl $0, lguest_data+LGUEST_DATA_irq_pending | 78 | testl $0, lguest_data+LGUEST_DATA_irq_pending |
68 | jnz send_interrupts | 79 | jnz send_interrupts |
69 | /* One cool thing about x86 is that you can do many things without using | 80 | /* |
81 | * One cool thing about x86 is that you can do many things without using | ||
70 | * a register. In this case, the normal path hasn't needed to save or | 82 | * a register. In this case, the normal path hasn't needed to save or |
71 | * restore any registers at all! */ | 83 | * restore any registers at all! |
84 | */ | ||
72 | ret | 85 | ret |
73 | send_interrupts: | 86 | send_interrupts: |
74 | /* OK, now we need a register: eax is used for the hypercall number, | 87 | /* |
88 | * OK, now we need a register: eax is used for the hypercall number, | ||
75 | * which is LHCALL_SEND_INTERRUPTS. | 89 | * which is LHCALL_SEND_INTERRUPTS. |
76 | * | 90 | * |
77 | * We used not to bother with this pending detection at all, which was | 91 | * We used not to bother with this pending detection at all, which was |
78 | * much simpler. Sooner or later the Host would realize it had to | 92 | * much simpler. Sooner or later the Host would realize it had to |
79 | * send us an interrupt. But that turns out to make performance 7 | 93 | * send us an interrupt. But that turns out to make performance 7 |
80 | * times worse on a simple tcp benchmark. So now we do this the hard | 94 | * times worse on a simple tcp benchmark. So now we do this the hard |
81 | * way. */ | 95 | * way. |
96 | */ | ||
82 | pushl %eax | 97 | pushl %eax |
83 | movl $LHCALL_SEND_INTERRUPTS, %eax | 98 | movl $LHCALL_SEND_INTERRUPTS, %eax |
84 | /* This is a vmcall instruction (same thing that KVM uses). Older | 99 | /* |
100 | * This is a vmcall instruction (same thing that KVM uses). Older | ||
85 | * assembler versions might not know the "vmcall" instruction, so we | 101 | * assembler versions might not know the "vmcall" instruction, so we |
86 | * create one manually here. */ | 102 | * create one manually here. |
103 | */ | ||
87 | .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */ | 104 | .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */ |
105 | /* Put eax back the way we found it. */ | ||
88 | popl %eax | 106 | popl %eax |
89 | ret | 107 | ret |
90 | 108 | ||
91 | /* Finally, the "popf" or "restore flags" routine. The %eax register holds the | 109 | /* |
110 | * Finally, the "popf" or "restore flags" routine. The %eax register holds the | ||
92 | * flags (in practice, either X86_EFLAGS_IF or 0): if it's X86_EFLAGS_IF we're | 111 | * flags (in practice, either X86_EFLAGS_IF or 0): if it's X86_EFLAGS_IF we're |
93 | * enabling interrupts again, if it's 0 we're leaving them off. */ | 112 | * enabling interrupts again, if it's 0 we're leaving them off. |
113 | */ | ||
94 | ENTRY(lg_restore_fl) | 114 | ENTRY(lg_restore_fl) |
95 | /* This is just "lguest_data.irq_enabled = flags;" */ | 115 | /* This is just "lguest_data.irq_enabled = flags;" */ |
96 | movl %eax, lguest_data+LGUEST_DATA_irq_enabled | 116 | movl %eax, lguest_data+LGUEST_DATA_irq_enabled |
97 | /* Now, if the %eax value has enabled interrupts and | 117 | /* |
118 | * Now, if the %eax value has enabled interrupts and | ||
98 | * lguest_data.irq_pending is set, we want to tell the Host so it can | 119 | * lguest_data.irq_pending is set, we want to tell the Host so it can |
99 | * deliver any outstanding interrupts. Fortunately, both values will | 120 | * deliver any outstanding interrupts. Fortunately, both values will |
100 | * be X86_EFLAGS_IF (ie. 512) in that case, and the "testl" | 121 | * be X86_EFLAGS_IF (ie. 512) in that case, and the "testl" |
101 | * instruction will AND them together for us. If both are set, we | 122 | * instruction will AND them together for us. If both are set, we |
102 | * jump to send_interrupts. */ | 123 | * jump to send_interrupts. |
124 | */ | ||
103 | testl lguest_data+LGUEST_DATA_irq_pending, %eax | 125 | testl lguest_data+LGUEST_DATA_irq_pending, %eax |
104 | jnz send_interrupts | 126 | jnz send_interrupts |
105 | /* Again, the normal path has used no extra registers. Clever, huh? */ | 127 | /* Again, the normal path has used no extra registers. Clever, huh? */ |
106 | ret | 128 | ret |
129 | /*:*/ | ||
107 | 130 | ||
108 | /* These demark the EIP range where host should never deliver interrupts. */ | 131 | /* These demark the EIP range where host should never deliver interrupts. */ |
109 | .global lguest_noirq_start | 132 | .global lguest_noirq_start |
110 | .global lguest_noirq_end | 133 | .global lguest_noirq_end |
111 | 134 | ||
112 | /*M:004 When the Host reflects a trap or injects an interrupt into the Guest, | 135 | /*M:004 |
113 | * it sets the eflags interrupt bit on the stack based on | 136 | * When the Host reflects a trap or injects an interrupt into the Guest, it |
114 | * lguest_data.irq_enabled, so the Guest iret logic does the right thing when | 137 | * sets the eflags interrupt bit on the stack based on lguest_data.irq_enabled, |
115 | * restoring it. However, when the Host sets the Guest up for direct traps, | 138 | * so the Guest iret logic does the right thing when restoring it. However, |
116 | * such as system calls, the processor is the one to push eflags onto the | 139 | * when the Host sets the Guest up for direct traps, such as system calls, the |
117 | * stack, and the interrupt bit will be 1 (in reality, interrupts are always | 140 | * processor is the one to push eflags onto the stack, and the interrupt bit |
118 | * enabled in the Guest). | 141 | * will be 1 (in reality, interrupts are always enabled in the Guest). |
119 | * | 142 | * |
120 | * This turns out to be harmless: the only trap which should happen under Linux | 143 | * This turns out to be harmless: the only trap which should happen under Linux |
121 | * with interrupts disabled is Page Fault (due to our lazy mapping of vmalloc | 144 | * with interrupts disabled is Page Fault (due to our lazy mapping of vmalloc |
122 | * regions), which has to be reflected through the Host anyway. If another | 145 | * regions), which has to be reflected through the Host anyway. If another |
123 | * trap *does* go off when interrupts are disabled, the Guest will panic, and | 146 | * trap *does* go off when interrupts are disabled, the Guest will panic, and |
124 | * we'll never get to this iret! :*/ | 147 | * we'll never get to this iret! |
148 | :*/ | ||
125 | 149 | ||
126 | /*G:045 There is one final paravirt_op that the Guest implements, and glancing | 150 | /*G:045 |
127 | * at it you can see why I left it to last. It's *cool*! It's in *assembler*! | 151 | * There is one final paravirt_op that the Guest implements, and glancing at it |
152 | * you can see why I left it to last. It's *cool*! It's in *assembler*! | ||
128 | * | 153 | * |
129 | * The "iret" instruction is used to return from an interrupt or trap. The | 154 | * The "iret" instruction is used to return from an interrupt or trap. The |
130 | * stack looks like this: | 155 | * stack looks like this: |
@@ -148,15 +173,18 @@ ENTRY(lg_restore_fl) | |||
148 | * return to userspace or wherever. Our solution to this is to surround the | 173 | * return to userspace or wherever. Our solution to this is to surround the |
149 | * code with lguest_noirq_start: and lguest_noirq_end: labels. We tell the | 174 | * code with lguest_noirq_start: and lguest_noirq_end: labels. We tell the |
150 | * Host that it is *never* to interrupt us there, even if interrupts seem to be | 175 | * Host that it is *never* to interrupt us there, even if interrupts seem to be |
151 | * enabled. */ | 176 | * enabled. |
177 | */ | ||
152 | ENTRY(lguest_iret) | 178 | ENTRY(lguest_iret) |
153 | pushl %eax | 179 | pushl %eax |
154 | movl 12(%esp), %eax | 180 | movl 12(%esp), %eax |
155 | lguest_noirq_start: | 181 | lguest_noirq_start: |
156 | /* Note the %ss: segment prefix here. Normal data accesses use the | 182 | /* |
183 | * Note the %ss: segment prefix here. Normal data accesses use the | ||
157 | * "ds" segment, but that will have already been restored for whatever | 184 | * "ds" segment, but that will have already been restored for whatever |
158 | * we're returning to (such as userspace): we can't trust it. The %ss: | 185 | * we're returning to (such as userspace): we can't trust it. The %ss: |
159 | * prefix makes sure we use the stack segment, which is still valid. */ | 186 | * prefix makes sure we use the stack segment, which is still valid. |
187 | */ | ||
160 | movl %eax,%ss:lguest_data+LGUEST_DATA_irq_enabled | 188 | movl %eax,%ss:lguest_data+LGUEST_DATA_irq_enabled |
161 | popl %eax | 189 | popl %eax |
162 | iret | 190 | iret |
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index 58f621e81919..2112ed55e7ea 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c | |||
@@ -103,6 +103,7 @@ EXPORT_SYMBOL(kmap); | |||
103 | EXPORT_SYMBOL(kunmap); | 103 | EXPORT_SYMBOL(kunmap); |
104 | EXPORT_SYMBOL(kmap_atomic); | 104 | EXPORT_SYMBOL(kmap_atomic); |
105 | EXPORT_SYMBOL(kunmap_atomic); | 105 | EXPORT_SYMBOL(kunmap_atomic); |
106 | EXPORT_SYMBOL(kmap_atomic_prot); | ||
106 | 107 | ||
107 | void __init set_highmem_pages_init(void) | 108 | void __init set_highmem_pages_init(void) |
108 | { | 109 | { |
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 01574a066534..42159a28f433 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
@@ -397,6 +397,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { | |||
397 | }, | 397 | }, |
398 | }, | 398 | }, |
399 | { | 399 | { |
400 | .callback = init_set_sci_en_on_resume, | ||
401 | .ident = "Hewlett-Packard HP G7000 Notebook PC", | ||
402 | .matches = { | ||
403 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
404 | DMI_MATCH(DMI_PRODUCT_NAME, "HP G7000 Notebook PC"), | ||
405 | }, | ||
406 | }, | ||
407 | { | ||
400 | .callback = init_old_suspend_ordering, | 408 | .callback = init_old_suspend_ordering, |
401 | .ident = "Panasonic CF51-2L", | 409 | .ident = "Panasonic CF51-2L", |
402 | .matches = { | 410 | .matches = { |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 336eb1ed73cc..958c1fa41900 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -515,10 +515,14 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
515 | { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */ | 515 | { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */ |
516 | { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */ | 516 | { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */ |
517 | { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */ | 517 | { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */ |
518 | { PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */ | ||
519 | { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */ | ||
518 | { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */ | 520 | { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */ |
519 | { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */ | 521 | { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */ |
522 | { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */ | ||
520 | { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ | 523 | { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ |
521 | { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ | 524 | { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ |
525 | { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ | ||
522 | 526 | ||
523 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ | 527 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ |
524 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, | 528 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index d0a14cf2bd74..56b8a3ff1286 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c | |||
@@ -596,9 +596,12 @@ static const struct ich_laptop ich_laptop[] = { | |||
596 | { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */ | 596 | { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */ |
597 | { 0x27DF, 0x1025, 0x0102 }, /* ICH7 on Acer 5602aWLMi */ | 597 | { 0x27DF, 0x1025, 0x0102 }, /* ICH7 on Acer 5602aWLMi */ |
598 | { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */ | 598 | { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */ |
599 | { 0x27DF, 0x1028, 0x02b0 }, /* ICH7 on unknown Dell */ | ||
599 | { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */ | 600 | { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */ |
600 | { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */ | 601 | { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */ |
602 | { 0x27DF, 0x103C, 0x361a }, /* ICH7 on unknown HP */ | ||
601 | { 0x27DF, 0x1071, 0xD221 }, /* ICH7 on Hercules EC-900 */ | 603 | { 0x27DF, 0x1071, 0xD221 }, /* ICH7 on Hercules EC-900 */ |
604 | { 0x27DF, 0x152D, 0x0778 }, /* ICH7 on unknown Intel */ | ||
602 | { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */ | 605 | { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */ |
603 | { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */ | 606 | { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */ |
604 | { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */ | 607 | { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */ |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 2c6aedaef718..8ac98ff16d7d 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -1515,6 +1515,7 @@ static int ata_hpa_resize(struct ata_device *dev) | |||
1515 | 1515 | ||
1516 | return rc; | 1516 | return rc; |
1517 | } | 1517 | } |
1518 | dev->n_native_sectors = native_sectors; | ||
1518 | 1519 | ||
1519 | /* nothing to do? */ | 1520 | /* nothing to do? */ |
1520 | if (native_sectors <= sectors || !ata_ignore_hpa) { | 1521 | if (native_sectors <= sectors || !ata_ignore_hpa) { |
@@ -4099,6 +4100,7 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, | |||
4099 | unsigned int readid_flags) | 4100 | unsigned int readid_flags) |
4100 | { | 4101 | { |
4101 | u64 n_sectors = dev->n_sectors; | 4102 | u64 n_sectors = dev->n_sectors; |
4103 | u64 n_native_sectors = dev->n_native_sectors; | ||
4102 | int rc; | 4104 | int rc; |
4103 | 4105 | ||
4104 | if (!ata_dev_enabled(dev)) | 4106 | if (!ata_dev_enabled(dev)) |
@@ -4128,16 +4130,30 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, | |||
4128 | /* verify n_sectors hasn't changed */ | 4130 | /* verify n_sectors hasn't changed */ |
4129 | if (dev->class == ATA_DEV_ATA && n_sectors && | 4131 | if (dev->class == ATA_DEV_ATA && n_sectors && |
4130 | dev->n_sectors != n_sectors) { | 4132 | dev->n_sectors != n_sectors) { |
4131 | ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch " | 4133 | ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch " |
4132 | "%llu != %llu\n", | 4134 | "%llu != %llu\n", |
4133 | (unsigned long long)n_sectors, | 4135 | (unsigned long long)n_sectors, |
4134 | (unsigned long long)dev->n_sectors); | 4136 | (unsigned long long)dev->n_sectors); |
4135 | 4137 | /* | |
4136 | /* restore original n_sectors */ | 4138 | * Something could have caused HPA to be unlocked |
4137 | dev->n_sectors = n_sectors; | 4139 | * involuntarily. If n_native_sectors hasn't changed |
4138 | 4140 | * and the new size matches it, keep the device. | |
4139 | rc = -ENODEV; | 4141 | */ |
4140 | goto fail; | 4142 | if (dev->n_native_sectors == n_native_sectors && |
4143 | dev->n_sectors > n_sectors && | ||
4144 | dev->n_sectors == n_native_sectors) { | ||
4145 | ata_dev_printk(dev, KERN_WARNING, | ||
4146 | "new n_sectors matches native, probably " | ||
4147 | "late HPA unlock, continuing\n"); | ||
4148 | /* keep using the old n_sectors */ | ||
4149 | dev->n_sectors = n_sectors; | ||
4150 | } else { | ||
4151 | /* restore original n_[native]_sectors and fail */ | ||
4152 | dev->n_native_sectors = n_native_sectors; | ||
4153 | dev->n_sectors = n_sectors; | ||
4154 | rc = -ENODEV; | ||
4155 | goto fail; | ||
4156 | } | ||
4141 | } | 4157 | } |
4142 | 4158 | ||
4143 | return 0; | 4159 | return 0; |
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 1a07c061f644..79711b64054b 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
@@ -2327,7 +2327,7 @@ int ata_eh_reset(struct ata_link *link, int classify, | |||
2327 | struct ata_port *ap = link->ap; | 2327 | struct ata_port *ap = link->ap; |
2328 | struct ata_link *slave = ap->slave_link; | 2328 | struct ata_link *slave = ap->slave_link; |
2329 | struct ata_eh_context *ehc = &link->eh_context; | 2329 | struct ata_eh_context *ehc = &link->eh_context; |
2330 | struct ata_eh_context *sehc = &slave->eh_context; | 2330 | struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL; |
2331 | unsigned int *classes = ehc->classes; | 2331 | unsigned int *classes = ehc->classes; |
2332 | unsigned int lflags = link->flags; | 2332 | unsigned int lflags = link->flags; |
2333 | int verbose = !(ehc->i.flags & ATA_EHI_QUIET); | 2333 | int verbose = !(ehc->i.flags & ATA_EHI_QUIET); |
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c index 8561a9f195c1..5702affcb325 100644 --- a/drivers/ata/pata_at91.c +++ b/drivers/ata/pata_at91.c | |||
@@ -26,9 +26,7 @@ | |||
26 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
27 | #include <linux/ata_platform.h> | 27 | #include <linux/ata_platform.h> |
28 | 28 | ||
29 | #include <mach/at91sam9260_matrix.h> | ||
30 | #include <mach/at91sam9_smc.h> | 29 | #include <mach/at91sam9_smc.h> |
31 | #include <mach/at91sam9260.h> | ||
32 | #include <mach/board.h> | 30 | #include <mach/board.h> |
33 | #include <mach/gpio.h> | 31 | #include <mach/gpio.h> |
34 | 32 | ||
@@ -44,65 +42,62 @@ struct at91_ide_info { | |||
44 | unsigned long mode; | 42 | unsigned long mode; |
45 | unsigned int cs; | 43 | unsigned int cs; |
46 | 44 | ||
45 | struct clk *mck; | ||
46 | |||
47 | void __iomem *ide_addr; | 47 | void __iomem *ide_addr; |
48 | void __iomem *alt_addr; | 48 | void __iomem *alt_addr; |
49 | }; | 49 | }; |
50 | 50 | ||
51 | const struct ata_timing initial_timing = | 51 | static const struct ata_timing initial_timing = |
52 | {XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0}; | 52 | {XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0}; |
53 | 53 | ||
54 | static unsigned int calc_mck_cycles(unsigned int ns, unsigned int mck_hz) | 54 | static unsigned long calc_mck_cycles(unsigned long ns, unsigned long mck_hz) |
55 | { | 55 | { |
56 | unsigned long mul; | 56 | unsigned long mul; |
57 | 57 | ||
58 | /* | 58 | /* |
59 | * cycles = x [nsec] * f [Hz] / 10^9 [ns in sec] = | 59 | * cycles = x [nsec] * f [Hz] / 10^9 [ns in sec] = |
60 | * x * (f / 1_000_000_000) = | 60 | * x * (f / 1_000_000_000) = |
61 | * x * ((f * 65536) / 1_000_000_000) / 65536 = | 61 | * x * ((f * 65536) / 1_000_000_000) / 65536 = |
62 | * x * (((f / 10_000) * 65536) / 100_000) / 65536 = | 62 | * x * (((f / 10_000) * 65536) / 100_000) / 65536 = |
63 | */ | 63 | */ |
64 | 64 | ||
65 | mul = (mck_hz / 10000) << 16; | 65 | mul = (mck_hz / 10000) << 16; |
66 | mul /= 100000; | 66 | mul /= 100000; |
67 | 67 | ||
68 | return (ns * mul + 65536) >> 16; /* rounding */ | 68 | return (ns * mul + 65536) >> 16; /* rounding */ |
69 | } | 69 | } |
70 | 70 | ||
71 | static void set_smc_mode(struct at91_ide_info *info) | 71 | static void set_smc_mode(struct at91_ide_info *info) |
72 | { | 72 | { |
73 | at91_sys_write(AT91_SMC_MODE(info->cs), info->mode); | 73 | at91_sys_write(AT91_SMC_MODE(info->cs), info->mode); |
74 | return; | 74 | return; |
75 | } | 75 | } |
76 | 76 | ||
77 | static void set_smc_timing(struct device *dev, | 77 | static void set_smc_timing(struct device *dev, |
78 | struct at91_ide_info *info, const struct ata_timing *ata) | 78 | struct at91_ide_info *info, const struct ata_timing *ata) |
79 | { | 79 | { |
80 | int read_cycle, write_cycle, active, recover; | 80 | unsigned long read_cycle, write_cycle, active, recover; |
81 | int nrd_setup, nrd_pulse, nrd_recover; | 81 | unsigned long nrd_setup, nrd_pulse, nrd_recover; |
82 | int nwe_setup, nwe_pulse; | 82 | unsigned long nwe_setup, nwe_pulse; |
83 | 83 | ||
84 | int ncs_write_setup, ncs_write_pulse; | 84 | unsigned long ncs_write_setup, ncs_write_pulse; |
85 | int ncs_read_setup, ncs_read_pulse; | 85 | unsigned long ncs_read_setup, ncs_read_pulse; |
86 | 86 | ||
87 | unsigned int mck_hz; | 87 | unsigned long mck_hz; |
88 | struct clk *mck; | ||
89 | 88 | ||
90 | read_cycle = ata->cyc8b; | 89 | read_cycle = ata->cyc8b; |
91 | nrd_setup = ata->setup; | 90 | nrd_setup = ata->setup; |
92 | nrd_pulse = ata->act8b; | 91 | nrd_pulse = ata->act8b; |
93 | nrd_recover = ata->rec8b; | 92 | nrd_recover = ata->rec8b; |
94 | 93 | ||
95 | mck = clk_get(NULL, "mck"); | 94 | mck_hz = clk_get_rate(info->mck); |
96 | BUG_ON(IS_ERR(mck)); | ||
97 | mck_hz = clk_get_rate(mck); | ||
98 | 95 | ||
99 | read_cycle = calc_mck_cycles(read_cycle, mck_hz); | 96 | read_cycle = calc_mck_cycles(read_cycle, mck_hz); |
100 | nrd_setup = calc_mck_cycles(nrd_setup, mck_hz); | 97 | nrd_setup = calc_mck_cycles(nrd_setup, mck_hz); |
101 | nrd_pulse = calc_mck_cycles(nrd_pulse, mck_hz); | 98 | nrd_pulse = calc_mck_cycles(nrd_pulse, mck_hz); |
102 | nrd_recover = calc_mck_cycles(nrd_recover, mck_hz); | 99 | nrd_recover = calc_mck_cycles(nrd_recover, mck_hz); |
103 | 100 | ||
104 | clk_put(mck); | ||
105 | |||
106 | active = nrd_setup + nrd_pulse; | 101 | active = nrd_setup + nrd_pulse; |
107 | recover = read_cycle - active; | 102 | recover = read_cycle - active; |
108 | 103 | ||
@@ -121,13 +116,13 @@ static void set_smc_timing(struct device *dev, | |||
121 | ncs_write_setup = ncs_read_setup; | 116 | ncs_write_setup = ncs_read_setup; |
122 | ncs_write_pulse = ncs_read_pulse; | 117 | ncs_write_pulse = ncs_read_pulse; |
123 | 118 | ||
124 | dev_dbg(dev, "ATA timings: nrd_setup = %d nrd_pulse = %d nrd_cycle = %d\n", | 119 | dev_dbg(dev, "ATA timings: nrd_setup = %lu nrd_pulse = %lu nrd_cycle = %lu\n", |
125 | nrd_setup, nrd_pulse, read_cycle); | 120 | nrd_setup, nrd_pulse, read_cycle); |
126 | dev_dbg(dev, "ATA timings: nwe_setup = %d nwe_pulse = %d nwe_cycle = %d\n", | 121 | dev_dbg(dev, "ATA timings: nwe_setup = %lu nwe_pulse = %lu nwe_cycle = %lu\n", |
127 | nwe_setup, nwe_pulse, write_cycle); | 122 | nwe_setup, nwe_pulse, write_cycle); |
128 | dev_dbg(dev, "ATA timings: ncs_read_setup = %d ncs_read_pulse = %d\n", | 123 | dev_dbg(dev, "ATA timings: ncs_read_setup = %lu ncs_read_pulse = %lu\n", |
129 | ncs_read_setup, ncs_read_pulse); | 124 | ncs_read_setup, ncs_read_pulse); |
130 | dev_dbg(dev, "ATA timings: ncs_write_setup = %d ncs_write_pulse = %d\n", | 125 | dev_dbg(dev, "ATA timings: ncs_write_setup = %lu ncs_write_pulse = %lu\n", |
131 | ncs_write_setup, ncs_write_pulse); | 126 | ncs_write_setup, ncs_write_pulse); |
132 | 127 | ||
133 | at91_sys_write(AT91_SMC_SETUP(info->cs), | 128 | at91_sys_write(AT91_SMC_SETUP(info->cs), |
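
calc_mck_cycles() in the hunk above turns a nanosecond delay into master-clock cycles without a 64-bit divide by folding the clock rate into a 16.16 fixed-point multiplier. A small userspace sketch of the same arithmetic; the 133 MHz clock and 290 ns pulse are just example numbers:

    #include <stdio.h>

    /*
     * Convert a delay in nanoseconds to master-clock cycles using a 16.16
     * fixed-point multiplier: cycles = ns * (mck_hz / 1e9).  Adding 65536
     * (1.0 in 16.16) before the shift biases the result up, never down.
     */
    static unsigned long ns_to_mck_cycles(unsigned long ns, unsigned long mck_hz)
    {
        unsigned long mul;

        mul = (mck_hz / 10000) << 16;   /* (f / 10^4) as a 16.16 value */
        mul /= 100000;                  /* now (f / 10^9) as a 16.16 value */

        return (ns * mul + 65536) >> 16;
    }

    int main(void)
    {
        /* a 290 ns pulse at a 133 MHz master clock -> about 39 cycles */
        printf("%lu cycles\n", ns_to_mck_cycles(290, 133000000UL));
        return 0;
    }
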
@@ -217,6 +212,7 @@ static int __devinit pata_at91_probe(struct platform_device *pdev) | |||
217 | struct resource *mem_res; | 212 | struct resource *mem_res; |
218 | struct ata_host *host; | 213 | struct ata_host *host; |
219 | struct ata_port *ap; | 214 | struct ata_port *ap; |
215 | |||
220 | int irq_flags = 0; | 216 | int irq_flags = 0; |
221 | int irq = 0; | 217 | int irq = 0; |
222 | int ret; | 218 | int ret; |
@@ -261,6 +257,13 @@ static int __devinit pata_at91_probe(struct platform_device *pdev) | |||
261 | return -ENOMEM; | 257 | return -ENOMEM; |
262 | } | 258 | } |
263 | 259 | ||
260 | info->mck = clk_get(NULL, "mck"); | ||
261 | |||
262 | if (IS_ERR(info->mck)) { | ||
263 | dev_err(dev, "failed to get access to mck clock\n"); | ||
264 | return -ENODEV; | ||
265 | } | ||
266 | |||
264 | info->cs = board->chipselect; | 267 | info->cs = board->chipselect; |
265 | info->mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | | 268 | info->mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | |
266 | AT91_SMC_EXNWMODE_READY | AT91_SMC_BAT_SELECT | | 269 | AT91_SMC_EXNWMODE_READY | AT91_SMC_BAT_SELECT | |
@@ -304,6 +307,7 @@ err_alt_ioremap: | |||
304 | devm_iounmap(dev, info->ide_addr); | 307 | devm_iounmap(dev, info->ide_addr); |
305 | 308 | ||
306 | err_ide_ioremap: | 309 | err_ide_ioremap: |
310 | clk_put(info->mck); | ||
307 | kfree(info); | 311 | kfree(info); |
308 | 312 | ||
309 | return ret; | 313 | return ret; |
@@ -326,6 +330,7 @@ static int __devexit pata_at91_remove(struct platform_device *pdev) | |||
326 | 330 | ||
327 | devm_iounmap(dev, info->ide_addr); | 331 | devm_iounmap(dev, info->ide_addr); |
328 | devm_iounmap(dev, info->alt_addr); | 332 | devm_iounmap(dev, info->alt_addr); |
333 | clk_put(info->mck); | ||
329 | 334 | ||
330 | kfree(info); | 335 | kfree(info); |
331 | return 0; | 336 | return 0; |
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 8d9343accf3c..abdd19fe990a 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c | |||
@@ -653,7 +653,8 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance) | |||
653 | 653 | ||
654 | ap = host->ports[i]; | 654 | ap = host->ports[i]; |
655 | ocd = ap->dev->platform_data; | 655 | ocd = ap->dev->platform_data; |
656 | if (!ap || (ap->flags & ATA_FLAG_DISABLED)) | 656 | |
657 | if (ap->flags & ATA_FLAG_DISABLED) | ||
657 | continue; | 658 | continue; |
658 | 659 | ||
659 | ocd = ap->dev->platform_data; | 660 | ocd = ap->dev->platform_data; |
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c index f4d009ed50ac..dc99e26f8e5b 100644 --- a/drivers/ata/pata_pcmcia.c +++ b/drivers/ata/pata_pcmcia.c | |||
@@ -411,6 +411,7 @@ static struct pcmcia_device_id pcmcia_devices[] = { | |||
411 | PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9), | 411 | PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9), |
412 | PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591), | 412 | PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591), |
413 | PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728), | 413 | PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728), |
414 | PCMCIA_DEVICE_PROD_ID12("CNF ", "CD-ROM", 0x46d7db81, 0x66536591), | ||
414 | PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591), | 415 | PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591), |
415 | PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4), | 416 | PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4), |
416 | PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde), | 417 | PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde), |
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 23714aefb825..c19417e02208 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
@@ -2514,7 +2514,7 @@ static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) | |||
2514 | char *when = "idle"; | 2514 | char *when = "idle"; |
2515 | 2515 | ||
2516 | ata_ehi_clear_desc(ehi); | 2516 | ata_ehi_clear_desc(ehi); |
2517 | if (!ap || (ap->flags & ATA_FLAG_DISABLED)) { | 2517 | if (ap->flags & ATA_FLAG_DISABLED) { |
2518 | when = "disabled"; | 2518 | when = "disabled"; |
2519 | } else if (edma_was_enabled) { | 2519 | } else if (edma_was_enabled) { |
2520 | when = "EDMA enabled"; | 2520 | when = "EDMA enabled"; |
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index 030ec079b184..35bd5cc7f285 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c | |||
@@ -532,7 +532,7 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance) | |||
532 | struct ata_port *ap = host->ports[i]; | 532 | struct ata_port *ap = host->ports[i]; |
533 | u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2); | 533 | u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2); |
534 | 534 | ||
535 | if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED)) | 535 | if (unlikely(ap->flags & ATA_FLAG_DISABLED)) |
536 | continue; | 536 | continue; |
537 | 537 | ||
538 | /* turn off SATA_IRQ if not supported */ | 538 | /* turn off SATA_IRQ if not supported */ |
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index f285f441fab9..7376367bcb80 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c | |||
@@ -180,7 +180,6 @@ static ssize_t firmware_loading_store(struct device *dev, | |||
180 | goto err; | 180 | goto err; |
181 | } | 181 | } |
182 | /* Pages will be freed by vfree() */ | 182 | /* Pages will be freed by vfree() */ |
183 | fw_priv->pages = NULL; | ||
184 | fw_priv->page_array_size = 0; | 183 | fw_priv->page_array_size = 0; |
185 | fw_priv->nr_pages = 0; | 184 | fw_priv->nr_pages = 0; |
186 | complete(&fw_priv->completion); | 185 | complete(&fw_priv->completion); |
diff --git a/drivers/base/sys.c b/drivers/base/sys.c index 79a9ae5238ac..0d903909af7e 100644 --- a/drivers/base/sys.c +++ b/drivers/base/sys.c | |||
@@ -275,9 +275,9 @@ int sysdev_register(struct sys_device *sysdev) | |||
275 | drv->add(sysdev); | 275 | drv->add(sysdev); |
276 | } | 276 | } |
277 | mutex_unlock(&sysdev_drivers_lock); | 277 | mutex_unlock(&sysdev_drivers_lock); |
278 | kobject_uevent(&sysdev->kobj, KOBJ_ADD); | ||
278 | } | 279 | } |
279 | 280 | ||
280 | kobject_uevent(&sysdev->kobj, KOBJ_ADD); | ||
281 | return error; | 281 | return error; |
282 | } | 282 | } |
283 | 283 | ||
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c index ff47907ff1bf..973be2f44195 100644 --- a/drivers/char/n_tty.c +++ b/drivers/char/n_tty.c | |||
@@ -1583,6 +1583,7 @@ static int n_tty_open(struct tty_struct *tty) | |||
1583 | 1583 | ||
1584 | static inline int input_available_p(struct tty_struct *tty, int amt) | 1584 | static inline int input_available_p(struct tty_struct *tty, int amt) |
1585 | { | 1585 | { |
1586 | tty_flush_to_ldisc(tty); | ||
1586 | if (tty->icanon) { | 1587 | if (tty->icanon) { |
1587 | if (tty->canon_data) | 1588 | if (tty->canon_data) |
1588 | return 1; | 1589 | return 1; |
diff --git a/drivers/char/pty.c b/drivers/char/pty.c index 3850a68f265a..6e6942c45f5b 100644 --- a/drivers/char/pty.c +++ b/drivers/char/pty.c | |||
@@ -52,7 +52,6 @@ static void pty_close(struct tty_struct *tty, struct file *filp) | |||
52 | return; | 52 | return; |
53 | tty->link->packet = 0; | 53 | tty->link->packet = 0; |
54 | set_bit(TTY_OTHER_CLOSED, &tty->link->flags); | 54 | set_bit(TTY_OTHER_CLOSED, &tty->link->flags); |
55 | tty_flip_buffer_push(tty->link); | ||
56 | wake_up_interruptible(&tty->link->read_wait); | 55 | wake_up_interruptible(&tty->link->read_wait); |
57 | wake_up_interruptible(&tty->link->write_wait); | 56 | wake_up_interruptible(&tty->link->write_wait); |
58 | if (tty->driver->subtype == PTY_TYPE_MASTER) { | 57 | if (tty->driver->subtype == PTY_TYPE_MASTER) { |
@@ -208,7 +207,6 @@ static int pty_open(struct tty_struct *tty, struct file *filp) | |||
208 | clear_bit(TTY_OTHER_CLOSED, &tty->link->flags); | 207 | clear_bit(TTY_OTHER_CLOSED, &tty->link->flags); |
209 | set_bit(TTY_THROTTLED, &tty->flags); | 208 | set_bit(TTY_THROTTLED, &tty->flags); |
210 | retval = 0; | 209 | retval = 0; |
211 | tty->low_latency = 1; | ||
212 | out: | 210 | out: |
213 | return retval; | 211 | return retval; |
214 | } | 212 | } |
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index 0db35857e4d8..5d7a02f63e1c 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c | |||
@@ -35,7 +35,6 @@ | |||
35 | #include <linux/spinlock.h> | 35 | #include <linux/spinlock.h> |
36 | #include <linux/vt_kern.h> | 36 | #include <linux/vt_kern.h> |
37 | #include <linux/workqueue.h> | 37 | #include <linux/workqueue.h> |
38 | #include <linux/kexec.h> | ||
39 | #include <linux/hrtimer.h> | 38 | #include <linux/hrtimer.h> |
40 | #include <linux/oom.h> | 39 | #include <linux/oom.h> |
41 | 40 | ||
@@ -124,9 +123,12 @@ static struct sysrq_key_op sysrq_unraw_op = { | |||
124 | static void sysrq_handle_crash(int key, struct tty_struct *tty) | 123 | static void sysrq_handle_crash(int key, struct tty_struct *tty) |
125 | { | 124 | { |
126 | char *killer = NULL; | 125 | char *killer = NULL; |
126 | |||
127 | panic_on_oops = 1; /* force panic */ | ||
128 | wmb(); | ||
127 | *killer = 1; | 129 | *killer = 1; |
128 | } | 130 | } |
129 | static struct sysrq_key_op sysrq_crashdump_op = { | 131 | static struct sysrq_key_op sysrq_crash_op = { |
130 | .handler = sysrq_handle_crash, | 132 | .handler = sysrq_handle_crash, |
131 | .help_msg = "Crash", | 133 | .help_msg = "Crash", |
132 | .action_msg = "Trigger a crash", | 134 | .action_msg = "Trigger a crash", |
@@ -401,7 +403,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = { | |||
401 | */ | 403 | */ |
402 | NULL, /* a */ | 404 | NULL, /* a */ |
403 | &sysrq_reboot_op, /* b */ | 405 | &sysrq_reboot_op, /* b */ |
404 | &sysrq_crashdump_op, /* c & ibm_emac driver debug */ | 406 | &sysrq_crash_op, /* c & ibm_emac driver debug */ |
405 | &sysrq_showlocks_op, /* d */ | 407 | &sysrq_showlocks_op, /* d */ |
406 | &sysrq_term_op, /* e */ | 408 | &sysrq_term_op, /* e */ |
407 | &sysrq_moom_op, /* f */ | 409 | &sysrq_moom_op, /* f */ |
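
sysrq_handle_crash() now forces panic_on_oops itself, orders the store with wmb(), and then takes a deliberate NULL-pointer write so the resulting oops is promoted to a panic regardless of the boot parameters. A crude userspace illustration of the same "set the policy flag, then fault on purpose" sequence; running it simply segfaults:

    #include <stdio.h>

    static volatile int panic_on_oops;  /* stand-in for the kernel flag */

    /*
     * Crude stand-in for the SysRq 'c' handler: make sure the policy flag is
     * visible before the fault is taken, then fault on purpose.
     */
    static void trigger_crash(void)
    {
        char *killer = NULL;

        panic_on_oops = 1;          /* force the oops to become a panic */
        __sync_synchronize();       /* full barrier, standing in for wmb() */
        *killer = 1;                /* deliberate NULL dereference */
    }

    int main(void)
    {
        puts("about to fault on purpose");
        trigger_crash();            /* does not return */
        return 0;
    }
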
diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c index 810ee25d66a4..3108991c5c8b 100644 --- a/drivers/char/tty_buffer.c +++ b/drivers/char/tty_buffer.c | |||
@@ -462,6 +462,19 @@ static void flush_to_ldisc(struct work_struct *work) | |||
462 | } | 462 | } |
463 | 463 | ||
464 | /** | 464 | /** |
465 | * tty_flush_to_ldisc | ||
466 | * @tty: tty to push | ||
467 | * | ||
468 | * Push the terminal flip buffers to the line discipline. | ||
469 | * | ||
470 | * Must not be called from IRQ context. | ||
471 | */ | ||
472 | void tty_flush_to_ldisc(struct tty_struct *tty) | ||
473 | { | ||
474 | flush_to_ldisc(&tty->buf.work.work); | ||
475 | } | ||
476 | |||
477 | /** | ||
465 | * tty_flip_buffer_push - terminal | 478 | * tty_flip_buffer_push - terminal |
466 | * @tty: tty to push | 479 | * @tty: tty to push |
467 | * | 480 | * |
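
tty_flush_to_ldisc() runs the deferred flip-buffer work synchronously so a reader sees bytes that are still queued, which is what lets the pty low_latency hack above be dropped: n_tty's input_available_p() now flushes before testing for data. A toy sketch of that "flush the deferred buffer before polling" pattern, with hypothetical names rather than the tty structures:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for a tty with a deferred "flip" buffer. */
    struct fake_tty {
        char   pending[64];     /* bytes queued by the driver side */
        size_t pending_len;
        char   ready[64];       /* bytes already visible to the reader */
        size_t ready_len;
    };

    /* Stand-in for tty_flush_to_ldisc(): move queued bytes over right now
     * instead of waiting for the deferred work to run. */
    static void fake_flush_to_ldisc(struct fake_tty *tty)
    {
        memcpy(tty->ready + tty->ready_len, tty->pending, tty->pending_len);
        tty->ready_len += tty->pending_len;
        tty->pending_len = 0;
    }

    /* Stand-in for input_available_p(): flush first, then test for data. */
    static bool fake_input_available(struct fake_tty *tty)
    {
        fake_flush_to_ldisc(tty);
        return tty->ready_len > 0;
    }

    int main(void)
    {
        struct fake_tty tty = { .pending = "hi", .pending_len = 2 };

        printf("input available: %s\n", fake_input_available(&tty) ? "yes" : "no");
        return 0;
    }
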
diff --git a/drivers/char/vr41xx_giu.c b/drivers/char/vr41xx_giu.c deleted file mode 100644 index e69de29bb2d1..000000000000 --- a/drivers/char/vr41xx_giu.c +++ /dev/null | |||
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c index 2406c2ce2844..d4ec60593176 100644 --- a/drivers/edac/x38_edac.c +++ b/drivers/edac/x38_edac.c | |||
@@ -30,7 +30,7 @@ | |||
30 | /* Intel X38 register addresses - device 0 function 0 - DRAM Controller */ | 30 | /* Intel X38 register addresses - device 0 function 0 - DRAM Controller */ |
31 | 31 | ||
32 | #define X38_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */ | 32 | #define X38_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */ |
33 | #define X38_MCHBAR_HIGH 0x4b | 33 | #define X38_MCHBAR_HIGH 0x4c |
34 | #define X38_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */ | 34 | #define X38_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */ |
35 | #define X38_MMR_WINDOW_SIZE 16384 | 35 | #define X38_MMR_WINDOW_SIZE 16384 |
36 | 36 | ||
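
The one-byte change above matters because the MCH BAR is a 64-bit value split across two 32-bit config dwords; reading the high half from 0x4b instead of 0x4c would shift misaligned bytes into the upper bits. A sketch of how the two halves are presumably combined, using the mask from the defines above (the helper and sample values are illustrative, not the edac driver's code):

    #include <stdint.h>
    #include <stdio.h>

    #define X38_MCHBAR_LOW   0x48              /* low dword of the BAR */
    #define X38_MCHBAR_HIGH  0x4c              /* high dword, the next config dword */
    #define X38_MCHBAR_MASK  0xfffffc000ULL    /* usable address bits 35:14 */

    /* Combine the two config-space dwords into the MMIO base address. */
    static uint64_t mchbar_base(uint32_t low, uint32_t high)
    {
        return (((uint64_t)high << 32) | low) & X38_MCHBAR_MASK;
    }

    int main(void)
    {
        /* illustrative values only: a BAR at physical 0x3fe000000 */
        printf("MCHBAR base = 0x%llx\n",
               (unsigned long long)mchbar_base(0xfe000000u, 0x3u));
        return 0;
    }
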
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 5fae1e074b4b..013d38059943 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile | |||
@@ -13,7 +13,8 @@ radeon-$(CONFIG_DRM_RADEON_KMS) += radeon_device.o radeon_kms.o \ | |||
13 | radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \ | 13 | radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \ |
14 | radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \ | 14 | radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \ |
15 | radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ | 15 | radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ |
16 | rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o | 16 | rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o \ |
17 | radeon_test.o | ||
17 | 18 | ||
18 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o | 19 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o |
19 | 20 | ||
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index c0080cc9bf8d..74d034f77c6b 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -31,6 +31,132 @@ | |||
31 | #include "atom.h" | 31 | #include "atom.h" |
32 | #include "atom-bits.h" | 32 | #include "atom-bits.h" |
33 | 33 | ||
34 | static void atombios_overscan_setup(struct drm_crtc *crtc, | ||
35 | struct drm_display_mode *mode, | ||
36 | struct drm_display_mode *adjusted_mode) | ||
37 | { | ||
38 | struct drm_device *dev = crtc->dev; | ||
39 | struct radeon_device *rdev = dev->dev_private; | ||
40 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
41 | SET_CRTC_OVERSCAN_PS_ALLOCATION args; | ||
42 | int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan); | ||
43 | int a1, a2; | ||
44 | |||
45 | memset(&args, 0, sizeof(args)); | ||
46 | |||
47 | args.usOverscanRight = 0; | ||
48 | args.usOverscanLeft = 0; | ||
49 | args.usOverscanBottom = 0; | ||
50 | args.usOverscanTop = 0; | ||
51 | args.ucCRTC = radeon_crtc->crtc_id; | ||
52 | |||
53 | switch (radeon_crtc->rmx_type) { | ||
54 | case RMX_CENTER: | ||
55 | args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; | ||
56 | args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; | ||
57 | args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; | ||
58 | args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; | ||
59 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
60 | break; | ||
61 | case RMX_ASPECT: | ||
62 | a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; | ||
63 | a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; | ||
64 | |||
65 | if (a1 > a2) { | ||
66 | args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; | ||
67 | args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; | ||
68 | } else if (a2 > a1) { | ||
69 | args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; | ||
70 | args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; | ||
71 | } | ||
72 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
73 | break; | ||
74 | case RMX_FULL: | ||
75 | default: | ||
76 | args.usOverscanRight = 0; | ||
77 | args.usOverscanLeft = 0; | ||
78 | args.usOverscanBottom = 0; | ||
79 | args.usOverscanTop = 0; | ||
80 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
81 | break; | ||
82 | } | ||
83 | } | ||
84 | |||
85 | static void atombios_scaler_setup(struct drm_crtc *crtc) | ||
86 | { | ||
87 | struct drm_device *dev = crtc->dev; | ||
88 | struct radeon_device *rdev = dev->dev_private; | ||
89 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
90 | ENABLE_SCALER_PS_ALLOCATION args; | ||
91 | int index = GetIndexIntoMasterTable(COMMAND, EnableScaler); | ||
92 | /* fixme - fill in enc_priv for atom dac */ | ||
93 | enum radeon_tv_std tv_std = TV_STD_NTSC; | ||
94 | |||
95 | if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id) | ||
96 | return; | ||
97 | |||
98 | memset(&args, 0, sizeof(args)); | ||
99 | |||
100 | args.ucScaler = radeon_crtc->crtc_id; | ||
101 | |||
102 | if (radeon_crtc->devices & (ATOM_DEVICE_TV_SUPPORT)) { | ||
103 | switch (tv_std) { | ||
104 | case TV_STD_NTSC: | ||
105 | default: | ||
106 | args.ucTVStandard = ATOM_TV_NTSC; | ||
107 | break; | ||
108 | case TV_STD_PAL: | ||
109 | args.ucTVStandard = ATOM_TV_PAL; | ||
110 | break; | ||
111 | case TV_STD_PAL_M: | ||
112 | args.ucTVStandard = ATOM_TV_PALM; | ||
113 | break; | ||
114 | case TV_STD_PAL_60: | ||
115 | args.ucTVStandard = ATOM_TV_PAL60; | ||
116 | break; | ||
117 | case TV_STD_NTSC_J: | ||
118 | args.ucTVStandard = ATOM_TV_NTSCJ; | ||
119 | break; | ||
120 | case TV_STD_SCART_PAL: | ||
121 | args.ucTVStandard = ATOM_TV_PAL; /* ??? */ | ||
122 | break; | ||
123 | case TV_STD_SECAM: | ||
124 | args.ucTVStandard = ATOM_TV_SECAM; | ||
125 | break; | ||
126 | case TV_STD_PAL_CN: | ||
127 | args.ucTVStandard = ATOM_TV_PALCN; | ||
128 | break; | ||
129 | } | ||
130 | args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; | ||
131 | } else if (radeon_crtc->devices & (ATOM_DEVICE_CV_SUPPORT)) { | ||
132 | args.ucTVStandard = ATOM_TV_CV; | ||
133 | args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; | ||
134 | } else { | ||
135 | switch (radeon_crtc->rmx_type) { | ||
136 | case RMX_FULL: | ||
137 | args.ucEnable = ATOM_SCALER_EXPANSION; | ||
138 | break; | ||
139 | case RMX_CENTER: | ||
140 | args.ucEnable = ATOM_SCALER_CENTER; | ||
141 | break; | ||
142 | case RMX_ASPECT: | ||
143 | args.ucEnable = ATOM_SCALER_EXPANSION; | ||
144 | break; | ||
145 | default: | ||
146 | if (ASIC_IS_AVIVO(rdev)) | ||
147 | args.ucEnable = ATOM_SCALER_DISABLE; | ||
148 | else | ||
149 | args.ucEnable = ATOM_SCALER_CENTER; | ||
150 | break; | ||
151 | } | ||
152 | } | ||
153 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
154 | if (radeon_crtc->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT) | ||
155 | && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) { | ||
156 | atom_rv515_force_tv_scaler(rdev); | ||
157 | } | ||
158 | } | ||
159 | |||
34 | static void atombios_lock_crtc(struct drm_crtc *crtc, int lock) | 160 | static void atombios_lock_crtc(struct drm_crtc *crtc, int lock) |
35 | { | 161 | { |
36 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 162 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
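
For RMX_ASPECT, atombios_overscan_setup() above compares the cross products a1 = native_height x panel_width and a2 = panel_height x native_width: whichever is larger says which axis needs black borders, and the border is half of the pixels left over after scaling. A rough standalone sketch of that arithmetic (it only prints the computed border and is not the driver code):

    #include <stdio.h>

    /*
     * Given a native mode and the panel's scaled mode, work out how much
     * black border keeps the native aspect ratio.
     */
    static void aspect_borders(int mode_h, int mode_v, int adj_h, int adj_v)
    {
        long a1 = (long)mode_v * adj_h;     /* native height x panel width  */
        long a2 = (long)adj_v * mode_h;     /* panel height  x native width */

        if (a1 > a2) {
            /* image is narrower than the panel: pillarbox left/right */
            int border = (adj_h - (int)(a2 / mode_v)) / 2;
            printf("left/right border: %d px\n", border);
        } else if (a2 > a1) {
            /* image is shorter than the panel: letterbox top/bottom */
            int border = (adj_v - (int)(a1 / mode_h)) / 2;
            printf("top/bottom border: %d px\n", border);
        } else {
            printf("aspect ratios match, no border needed\n");
        }
    }

    int main(void)
    {
        /* a 1280x1024 (5:4) mode centred on a 1680x1050 (16:10) panel */
        aspect_borders(1280, 1024, 1680, 1050);
        return 0;
    }
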
@@ -203,6 +329,12 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
203 | if (ASIC_IS_AVIVO(rdev)) { | 329 | if (ASIC_IS_AVIVO(rdev)) { |
204 | uint32_t ss_cntl; | 330 | uint32_t ss_cntl; |
205 | 331 | ||
332 | if ((rdev->family == CHIP_RS600) || | ||
333 | (rdev->family == CHIP_RS690) || | ||
334 | (rdev->family == CHIP_RS740)) | ||
335 | pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV | | ||
336 | RADEON_PLL_PREFER_CLOSEST_LOWER); | ||
337 | |||
206 | if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ | 338 | if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ |
207 | pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; | 339 | pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; |
208 | else | 340 | else |
@@ -321,7 +453,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
321 | struct drm_gem_object *obj; | 453 | struct drm_gem_object *obj; |
322 | struct drm_radeon_gem_object *obj_priv; | 454 | struct drm_radeon_gem_object *obj_priv; |
323 | uint64_t fb_location; | 455 | uint64_t fb_location; |
324 | uint32_t fb_format, fb_pitch_pixels; | 456 | uint32_t fb_format, fb_pitch_pixels, tiling_flags; |
325 | 457 | ||
326 | if (!crtc->fb) | 458 | if (!crtc->fb) |
327 | return -EINVAL; | 459 | return -EINVAL; |
@@ -358,7 +490,14 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
358 | return -EINVAL; | 490 | return -EINVAL; |
359 | } | 491 | } |
360 | 492 | ||
361 | /* TODO tiling */ | 493 | radeon_object_get_tiling_flags(obj->driver_private, |
494 | &tiling_flags, NULL); | ||
495 | if (tiling_flags & RADEON_TILING_MACRO) | ||
496 | fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE; | ||
497 | |||
498 | if (tiling_flags & RADEON_TILING_MICRO) | ||
499 | fb_format |= AVIVO_D1GRPH_TILED; | ||
500 | |||
362 | if (radeon_crtc->crtc_id == 0) | 501 | if (radeon_crtc->crtc_id == 0) |
363 | WREG32(AVIVO_D1VGA_CONTROL, 0); | 502 | WREG32(AVIVO_D1VGA_CONTROL, 0); |
364 | else | 503 | else |
@@ -509,6 +648,9 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, | |||
509 | radeon_crtc_set_base(crtc, x, y, old_fb); | 648 | radeon_crtc_set_base(crtc, x, y, old_fb); |
510 | radeon_legacy_atom_set_surface(crtc); | 649 | radeon_legacy_atom_set_surface(crtc); |
511 | } | 650 | } |
651 | atombios_overscan_setup(crtc, mode, adjusted_mode); | ||
652 | atombios_scaler_setup(crtc); | ||
653 | radeon_bandwidth_update(rdev); | ||
512 | return 0; | 654 | return 0; |
513 | } | 655 | } |
514 | 656 | ||
@@ -516,6 +658,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, | |||
516 | struct drm_display_mode *mode, | 658 | struct drm_display_mode *mode, |
517 | struct drm_display_mode *adjusted_mode) | 659 | struct drm_display_mode *adjusted_mode) |
518 | { | 660 | { |
661 | if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) | ||
662 | return false; | ||
519 | return true; | 663 | return true; |
520 | } | 664 | } |
521 | 665 | ||
@@ -548,148 +692,3 @@ void radeon_atombios_init_crtc(struct drm_device *dev, | |||
548 | AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL; | 692 | AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL; |
549 | drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs); | 693 | drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs); |
550 | } | 694 | } |
551 | |||
552 | void radeon_init_disp_bw_avivo(struct drm_device *dev, | ||
553 | struct drm_display_mode *mode1, | ||
554 | uint32_t pixel_bytes1, | ||
555 | struct drm_display_mode *mode2, | ||
556 | uint32_t pixel_bytes2) | ||
557 | { | ||
558 | struct radeon_device *rdev = dev->dev_private; | ||
559 | fixed20_12 min_mem_eff; | ||
560 | fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff; | ||
561 | fixed20_12 sclk_ff, mclk_ff; | ||
562 | uint32_t dc_lb_memory_split, temp; | ||
563 | |||
564 | min_mem_eff.full = rfixed_const_8(0); | ||
565 | if (rdev->disp_priority == 2) { | ||
566 | uint32_t mc_init_misc_lat_timer = 0; | ||
567 | if (rdev->family == CHIP_RV515) | ||
568 | mc_init_misc_lat_timer = | ||
569 | RREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER); | ||
570 | else if (rdev->family == CHIP_RS690) | ||
571 | mc_init_misc_lat_timer = | ||
572 | RREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER); | ||
573 | |||
574 | mc_init_misc_lat_timer &= | ||
575 | ~(R300_MC_DISP1R_INIT_LAT_MASK << | ||
576 | R300_MC_DISP1R_INIT_LAT_SHIFT); | ||
577 | mc_init_misc_lat_timer &= | ||
578 | ~(R300_MC_DISP0R_INIT_LAT_MASK << | ||
579 | R300_MC_DISP0R_INIT_LAT_SHIFT); | ||
580 | |||
581 | if (mode2) | ||
582 | mc_init_misc_lat_timer |= | ||
583 | (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); | ||
584 | if (mode1) | ||
585 | mc_init_misc_lat_timer |= | ||
586 | (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); | ||
587 | |||
588 | if (rdev->family == CHIP_RV515) | ||
589 | WREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER, | ||
590 | mc_init_misc_lat_timer); | ||
591 | else if (rdev->family == CHIP_RS690) | ||
592 | WREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER, | ||
593 | mc_init_misc_lat_timer); | ||
594 | } | ||
595 | |||
596 | /* | ||
597 | * determine is there is enough bw for current mode | ||
598 | */ | ||
599 | temp_ff.full = rfixed_const(100); | ||
600 | mclk_ff.full = rfixed_const(rdev->clock.default_mclk); | ||
601 | mclk_ff.full = rfixed_div(mclk_ff, temp_ff); | ||
602 | sclk_ff.full = rfixed_const(rdev->clock.default_sclk); | ||
603 | sclk_ff.full = rfixed_div(sclk_ff, temp_ff); | ||
604 | |||
605 | temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); | ||
606 | temp_ff.full = rfixed_const(temp); | ||
607 | mem_bw.full = rfixed_mul(mclk_ff, temp_ff); | ||
608 | mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); | ||
609 | |||
610 | pix_clk.full = 0; | ||
611 | pix_clk2.full = 0; | ||
612 | peak_disp_bw.full = 0; | ||
613 | if (mode1) { | ||
614 | temp_ff.full = rfixed_const(1000); | ||
615 | pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ | ||
616 | pix_clk.full = rfixed_div(pix_clk, temp_ff); | ||
617 | temp_ff.full = rfixed_const(pixel_bytes1); | ||
618 | peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); | ||
619 | } | ||
620 | if (mode2) { | ||
621 | temp_ff.full = rfixed_const(1000); | ||
622 | pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ | ||
623 | pix_clk2.full = rfixed_div(pix_clk2, temp_ff); | ||
624 | temp_ff.full = rfixed_const(pixel_bytes2); | ||
625 | peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); | ||
626 | } | ||
627 | |||
628 | if (peak_disp_bw.full >= mem_bw.full) { | ||
629 | DRM_ERROR | ||
630 | ("You may not have enough display bandwidth for current mode\n" | ||
631 | "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); | ||
632 | printk("peak disp bw %d, mem_bw %d\n", | ||
633 | rfixed_trunc(peak_disp_bw), rfixed_trunc(mem_bw)); | ||
634 | } | ||
635 | |||
636 | /* | ||
637 | * Line Buffer Setup | ||
638 | * There is a single line buffer shared by both display controllers. | ||
639 | * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between the display | ||
640 | * controllers. The partitioning can either be done manually or via one of four | ||
641 | * preset allocations specified in bits 1:0: | ||
642 | * 0 - line buffer is divided in half and shared between each display controller | ||
643 | * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 | ||
644 | * 2 - D1 gets the whole buffer | ||
645 | * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 | ||
646 | * Setting bit 2 of DC_LB_MEMORY_SPLIT controls switches to manual allocation mode. | ||
647 | * In manual allocation mode, D1 always starts at 0, D1 end/2 is specified in bits | ||
648 | * 14:4; D2 allocation follows D1. | ||
649 | */ | ||
650 | |||
651 | /* is auto or manual better ? */ | ||
652 | dc_lb_memory_split = | ||
653 | RREG32(AVIVO_DC_LB_MEMORY_SPLIT) & ~AVIVO_DC_LB_MEMORY_SPLIT_MASK; | ||
654 | dc_lb_memory_split &= ~AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE; | ||
655 | #if 1 | ||
656 | /* auto */ | ||
657 | if (mode1 && mode2) { | ||
658 | if (mode1->hdisplay > mode2->hdisplay) { | ||
659 | if (mode1->hdisplay > 2560) | ||
660 | dc_lb_memory_split |= | ||
661 | AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; | ||
662 | else | ||
663 | dc_lb_memory_split |= | ||
664 | AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | ||
665 | } else if (mode2->hdisplay > mode1->hdisplay) { | ||
666 | if (mode2->hdisplay > 2560) | ||
667 | dc_lb_memory_split |= | ||
668 | AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; | ||
669 | else | ||
670 | dc_lb_memory_split |= | ||
671 | AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | ||
672 | } else | ||
673 | dc_lb_memory_split |= | ||
674 | AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | ||
675 | } else if (mode1) { | ||
676 | dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY; | ||
677 | } else if (mode2) { | ||
678 | dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; | ||
679 | } | ||
680 | #else | ||
681 | /* manual */ | ||
682 | dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE; | ||
683 | dc_lb_memory_split &= | ||
684 | ~(AVIVO_DC_LB_DISP1_END_ADR_MASK << | ||
685 | AVIVO_DC_LB_DISP1_END_ADR_SHIFT); | ||
686 | if (mode1) { | ||
687 | dc_lb_memory_split |= | ||
688 | ((((mode1->hdisplay / 2) + 64) & AVIVO_DC_LB_DISP1_END_ADR_MASK) | ||
689 | << AVIVO_DC_LB_DISP1_END_ADR_SHIFT); | ||
690 | } else if (mode2) { | ||
691 | dc_lb_memory_split |= (0 << AVIVO_DC_LB_DISP1_END_ADR_SHIFT); | ||
692 | } | ||
693 | #endif | ||
694 | WREG32(AVIVO_DC_LB_MEMORY_SPLIT, dc_lb_memory_split); | ||
695 | } | ||
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index c550932a108f..05a44896dffb 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -110,7 +110,7 @@ int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
110 | if (i < 0 || i > rdev->gart.num_gpu_pages) { | 110 | if (i < 0 || i > rdev->gart.num_gpu_pages) { |
111 | return -EINVAL; | 111 | return -EINVAL; |
112 | } | 112 | } |
113 | rdev->gart.table.ram.ptr[i] = cpu_to_le32((uint32_t)addr); | 113 | rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr)); |
114 | return 0; | 114 | return 0; |
115 | } | 115 | } |
116 | 116 | ||
@@ -173,8 +173,12 @@ void r100_mc_setup(struct radeon_device *rdev) | |||
173 | DRM_ERROR("Failed to register debugfs file for R100 MC !\n"); | 173 | DRM_ERROR("Failed to register debugfs file for R100 MC !\n"); |
174 | } | 174 | } |
175 | /* Write VRAM size in case we are limiting it */ | 175 | /* Write VRAM size in case we are limiting it */ |
176 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); | 176 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); |
177 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 177 | /* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM, |
178 | * if the aperture is 64MB but we have 32MB VRAM | ||
179 | * we report only 32MB VRAM but we have to set MC_FB_LOCATION | ||
180 | * to 64MB, otherwise the gpu accidentally dies */ | ||
181 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; | ||
178 | tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); | 182 | tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); |
179 | tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); | 183 | tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); |
180 | WREG32(RADEON_MC_FB_LOCATION, tmp); | 184 | WREG32(RADEON_MC_FB_LOCATION, tmp); |
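
The comment explains the quirk: on RN50/M6/M7 boards the aperture can be 64MB while only 32MB of VRAM is reported, so the MC frame-buffer window is sized from mc_vram_size (the aperture-sized value) while CONFIG_MEMSIZE advertises real_vram_size. A rough sketch of the start/top packing; the field layout is assumed from the REG_SET usage above and the helper is illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Pack the frame-buffer window into a start/top register pair, both in
     * 64 KiB units (hence the >> 16): start in the low half, top in the high
     * half.  Field layout assumed, not taken from the register headers.
     */
    static uint32_t fb_location(uint32_t vram_location, uint64_t mc_vram_size)
    {
        uint32_t start = vram_location >> 16;
        uint32_t top = (uint32_t)((vram_location + mc_vram_size - 1) >> 16);

        return (top << 16) | start;
    }

    int main(void)
    {
        /* a 32 MiB chip behind a 64 MiB aperture: program the MC for 64 MiB */
        uint64_t mc_vram_size = 64ULL << 20;

        printf("MC_FB_LOCATION = 0x%08x\n", fb_location(0, mc_vram_size));
        return 0;
    }
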
@@ -215,7 +219,6 @@ int r100_mc_init(struct radeon_device *rdev) | |||
215 | r100_pci_gart_disable(rdev); | 219 | r100_pci_gart_disable(rdev); |
216 | 220 | ||
217 | /* Setup GPU memory space */ | 221 | /* Setup GPU memory space */ |
218 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
219 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | 222 | rdev->mc.gtt_location = 0xFFFFFFFFUL; |
220 | if (rdev->flags & RADEON_IS_AGP) { | 223 | if (rdev->flags & RADEON_IS_AGP) { |
221 | r = radeon_agp_init(rdev); | 224 | r = radeon_agp_init(rdev); |
@@ -753,6 +756,102 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p, | |||
753 | } | 756 | } |
754 | 757 | ||
755 | /** | 758 | /** |
759 | * r100_cs_packet_next_vline() - parse userspace VLINE packet | ||
760 | * @parser: parser structure holding parsing context. | ||
761 | * | ||
762 | * Userspace sends a special sequence for VLINE waits. | ||
763 | * PACKET0 - VLINE_START_END + value | ||
764 | * PACKET0 - WAIT_UNTIL + value | ||
765 | * RELOC (P3) - crtc_id in reloc. | ||
766 | * | ||
767 | * This function parses this and relocates the VLINE START END | ||
768 | * and WAIT UNTIL packets to the correct crtc. | ||
769 | * It also detects a switched off crtc and nulls out the | ||
770 | * wait in that case. | ||
771 | */ | ||
772 | int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) | ||
773 | { | ||
774 | struct radeon_cs_chunk *ib_chunk; | ||
775 | struct drm_mode_object *obj; | ||
776 | struct drm_crtc *crtc; | ||
777 | struct radeon_crtc *radeon_crtc; | ||
778 | struct radeon_cs_packet p3reloc, waitreloc; | ||
779 | int crtc_id; | ||
780 | int r; | ||
781 | uint32_t header, h_idx, reg; | ||
782 | |||
783 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | ||
784 | |||
785 | /* parse the wait until */ | ||
786 | r = r100_cs_packet_parse(p, &waitreloc, p->idx); | ||
787 | if (r) | ||
788 | return r; | ||
789 | |||
790 | /* check it's a wait until and only 1 count */ | ||
791 | if (waitreloc.reg != RADEON_WAIT_UNTIL || | ||
792 | waitreloc.count != 0) { | ||
793 | DRM_ERROR("vline wait had illegal wait until segment\n"); | ||
794 | r = -EINVAL; | ||
795 | return r; | ||
796 | } | ||
797 | |||
798 | if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) { | ||
799 | DRM_ERROR("vline wait had illegal wait until\n"); | ||
800 | r = -EINVAL; | ||
801 | return r; | ||
802 | } | ||
803 | |||
804 | /* jump over the NOP */ | ||
805 | r = r100_cs_packet_parse(p, &p3reloc, p->idx); | ||
806 | if (r) | ||
807 | return r; | ||
808 | |||
809 | h_idx = p->idx - 2; | ||
810 | p->idx += waitreloc.count; | ||
811 | p->idx += p3reloc.count; | ||
812 | |||
813 | header = ib_chunk->kdata[h_idx]; | ||
814 | crtc_id = ib_chunk->kdata[h_idx + 5]; | ||
815 | reg = ib_chunk->kdata[h_idx] >> 2; | ||
816 | mutex_lock(&p->rdev->ddev->mode_config.mutex); | ||
817 | obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); | ||
818 | if (!obj) { | ||
819 | DRM_ERROR("cannot find crtc %d\n", crtc_id); | ||
820 | r = -EINVAL; | ||
821 | goto out; | ||
822 | } | ||
823 | crtc = obj_to_crtc(obj); | ||
824 | radeon_crtc = to_radeon_crtc(crtc); | ||
825 | crtc_id = radeon_crtc->crtc_id; | ||
826 | |||
827 | if (!crtc->enabled) { | ||
828 | /* if the CRTC isn't enabled - we need to nop out the wait until */ | ||
829 | ib_chunk->kdata[h_idx + 2] = PACKET2(0); | ||
830 | ib_chunk->kdata[h_idx + 3] = PACKET2(0); | ||
831 | } else if (crtc_id == 1) { | ||
832 | switch (reg) { | ||
833 | case AVIVO_D1MODE_VLINE_START_END: | ||
834 | header &= R300_CP_PACKET0_REG_MASK; | ||
835 | header |= AVIVO_D2MODE_VLINE_START_END >> 2; | ||
836 | break; | ||
837 | case RADEON_CRTC_GUI_TRIG_VLINE: | ||
838 | header &= R300_CP_PACKET0_REG_MASK; | ||
839 | header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2; | ||
840 | break; | ||
841 | default: | ||
842 | DRM_ERROR("unknown crtc reloc\n"); | ||
843 | r = -EINVAL; | ||
844 | goto out; | ||
845 | } | ||
846 | ib_chunk->kdata[h_idx] = header; | ||
847 | ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; | ||
848 | } | ||
849 | out: | ||
850 | mutex_unlock(&p->rdev->ddev->mode_config.mutex); | ||
851 | return r; | ||
852 | } | ||
853 | |||
854 | /** | ||
756 | * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3 | 855 | * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3 |
757 | * @parser: parser structure holding parsing context. | 856 | * @parser: parser structure holding parsing context. |
758 | * @data: pointer to relocation data | 857 | * @data: pointer to relocation data |
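
The kernel-doc above spells out the three-packet sequence userspace emits for a vline wait (VLINE_START_END write, WAIT_UNTIL write, reloc carrying the crtc_id); the parser either NOPs the wait out for a disabled CRTC or rewrites the PACKET0 headers so the writes land on CRTC1's registers. A simplified sketch of that header rewrite; the mask and register offsets below are made-up stand-ins, not the real radeon values:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Made-up stand-ins: the low bits of a PACKET0 header carry (reg >> 2),
     * the upper bits carry the opcode and count.  None of these values are
     * the real radeon register offsets or masks.
     */
    #define PKT0_KEEP_MASK          0xffff0000u  /* keep opcode/count bits */
    #define VLINE_START_END_CRTC0   0x1a8cu
    #define VLINE_START_END_CRTC1   0x1b8cu

    /* Rewrite a PACKET0 header so the same write lands on CRTC1's register. */
    static uint32_t retarget_to_crtc1(uint32_t header)
    {
        header &= PKT0_KEEP_MASK;               /* drop the old register index */
        header |= VLINE_START_END_CRTC1 >> 2;   /* splice in the new one */
        return header;
    }

    int main(void)
    {
        uint32_t header = VLINE_START_END_CRTC0 >> 2;  /* opcode bits left at 0 */

        printf("header 0x%08x -> 0x%08x\n", header, retarget_to_crtc1(header));
        return 0;
    }
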
@@ -814,6 +913,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
814 | unsigned idx; | 913 | unsigned idx; |
815 | bool onereg; | 914 | bool onereg; |
816 | int r; | 915 | int r; |
916 | u32 tile_flags = 0; | ||
817 | 917 | ||
818 | ib = p->ib->ptr; | 918 | ib = p->ib->ptr; |
819 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | 919 | ib_chunk = &p->chunks[p->chunk_ib_idx]; |
@@ -825,6 +925,15 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
825 | } | 925 | } |
826 | for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { | 926 | for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { |
827 | switch (reg) { | 927 | switch (reg) { |
928 | case RADEON_CRTC_GUI_TRIG_VLINE: | ||
929 | r = r100_cs_packet_parse_vline(p); | ||
930 | if (r) { | ||
931 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
932 | idx, reg); | ||
933 | r100_cs_dump_packet(p, pkt); | ||
934 | return r; | ||
935 | } | ||
936 | break; | ||
828 | /* FIXME: only allow PACKET3 blit? easier to check for out of | 937 | /* FIXME: only allow PACKET3 blit? easier to check for out of |
829 | * range access */ | 938 | * range access */ |
830 | case RADEON_DST_PITCH_OFFSET: | 939 | case RADEON_DST_PITCH_OFFSET: |
@@ -838,7 +947,20 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
838 | } | 947 | } |
839 | tmp = ib_chunk->kdata[idx] & 0x003fffff; | 948 | tmp = ib_chunk->kdata[idx] & 0x003fffff; |
840 | tmp += (((u32)reloc->lobj.gpu_offset) >> 10); | 949 | tmp += (((u32)reloc->lobj.gpu_offset) >> 10); |
841 | ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp; | 950 | |
951 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | ||
952 | tile_flags |= RADEON_DST_TILE_MACRO; | ||
953 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { | ||
954 | if (reg == RADEON_SRC_PITCH_OFFSET) { | ||
955 | DRM_ERROR("Cannot src blit from microtiled surface\n"); | ||
956 | r100_cs_dump_packet(p, pkt); | ||
957 | return -EINVAL; | ||
958 | } | ||
959 | tile_flags |= RADEON_DST_TILE_MICRO; | ||
960 | } | ||
961 | |||
962 | tmp |= tile_flags; | ||
963 | ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp; | ||
842 | break; | 964 | break; |
843 | case RADEON_RB3D_DEPTHOFFSET: | 965 | case RADEON_RB3D_DEPTHOFFSET: |
844 | case RADEON_RB3D_COLOROFFSET: | 966 | case RADEON_RB3D_COLOROFFSET: |
@@ -869,6 +991,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
869 | case R300_TX_OFFSET_0+52: | 991 | case R300_TX_OFFSET_0+52: |
870 | case R300_TX_OFFSET_0+56: | 992 | case R300_TX_OFFSET_0+56: |
871 | case R300_TX_OFFSET_0+60: | 993 | case R300_TX_OFFSET_0+60: |
994 | /* rn50 has no 3D engine so fail on any 3d setup */ | ||
995 | if (ASIC_IS_RN50(p->rdev)) { | ||
996 | DRM_ERROR("attempt to use RN50 3D engine failed\n"); | ||
997 | return -EINVAL; | ||
998 | } | ||
872 | r = r100_cs_packet_next_reloc(p, &reloc); | 999 | r = r100_cs_packet_next_reloc(p, &reloc); |
873 | if (r) { | 1000 | if (r) { |
874 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | 1001 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
@@ -878,6 +1005,25 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
878 | } | 1005 | } |
879 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); | 1006 | ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); |
880 | break; | 1007 | break; |
1008 | case R300_RB3D_COLORPITCH0: | ||
1009 | case RADEON_RB3D_COLORPITCH: | ||
1010 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1011 | if (r) { | ||
1012 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
1013 | idx, reg); | ||
1014 | r100_cs_dump_packet(p, pkt); | ||
1015 | return r; | ||
1016 | } | ||
1017 | |||
1018 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | ||
1019 | tile_flags |= RADEON_COLOR_TILE_ENABLE; | ||
1020 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | ||
1021 | tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; | ||
1022 | |||
1023 | tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); | ||
1024 | tmp |= tile_flags; | ||
1025 | ib[idx] = tmp; | ||
1026 | break; | ||
881 | default: | 1027 | default: |
882 | /* FIXME: we don't want to allow anyothers packet */ | 1028 | /* FIXME: we don't want to allow anyothers packet */ |
883 | break; | 1029 | break; |
@@ -1256,29 +1402,100 @@ static void r100_vram_get_type(struct radeon_device *rdev) | |||
1256 | } | 1402 | } |
1257 | } | 1403 | } |
1258 | 1404 | ||
1259 | void r100_vram_info(struct radeon_device *rdev) | 1405 | static u32 r100_get_accessible_vram(struct radeon_device *rdev) |
1260 | { | 1406 | { |
1261 | r100_vram_get_type(rdev); | 1407 | u32 aper_size; |
1408 | u8 byte; | ||
1409 | |||
1410 | aper_size = RREG32(RADEON_CONFIG_APER_SIZE); | ||
1411 | |||
1412 | /* Set HDP_APER_CNTL only on cards that are known not to be broken, | ||
1413 | * that is has the 2nd generation multifunction PCI interface | ||
1414 | */ | ||
1415 | if (rdev->family == CHIP_RV280 || | ||
1416 | rdev->family >= CHIP_RV350) { | ||
1417 | WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL, | ||
1418 | ~RADEON_HDP_APER_CNTL); | ||
1419 | DRM_INFO("Generation 2 PCI interface, using max accessible memory\n"); | ||
1420 | return aper_size * 2; | ||
1421 | } | ||
1422 | |||
1423 | /* Older cards have all sorts of funny issues to deal with. First | ||
1424 | * check if it's a multifunction card by reading the PCI config | ||
1425 | * header type... Limit those to one aperture size | ||
1426 | */ | ||
1427 | pci_read_config_byte(rdev->pdev, 0xe, &byte); | ||
1428 | if (byte & 0x80) { | ||
1429 | DRM_INFO("Generation 1 PCI interface in multifunction mode\n"); | ||
1430 | DRM_INFO("Limiting VRAM to one aperture\n"); | ||
1431 | return aper_size; | ||
1432 | } | ||
1433 | |||
1434 | /* Single function older card. We read HDP_APER_CNTL to see how the BIOS | ||
1435 | * have set it up. We don't write this as it's broken on some ASICs but | ||
1436 | * we expect the BIOS to have done the right thing (might be too optimistic...) | ||
1437 | */ | ||
1438 | if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL) | ||
1439 | return aper_size * 2; | ||
1440 | return aper_size; | ||
1441 | } | ||
1442 | |||
1443 | void r100_vram_init_sizes(struct radeon_device *rdev) | ||
1444 | { | ||
1445 | u64 config_aper_size; | ||
1446 | u32 accessible; | ||
1447 | |||
1448 | config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); | ||
1262 | 1449 | ||
1263 | if (rdev->flags & RADEON_IS_IGP) { | 1450 | if (rdev->flags & RADEON_IS_IGP) { |
1264 | uint32_t tom; | 1451 | uint32_t tom; |
1265 | /* read NB_TOM to get the amount of ram stolen for the GPU */ | 1452 | /* read NB_TOM to get the amount of ram stolen for the GPU */ |
1266 | tom = RREG32(RADEON_NB_TOM); | 1453 | tom = RREG32(RADEON_NB_TOM); |
1267 | rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); | 1454 | rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); |
1268 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); | 1455 | /* for IGPs we need to keep VRAM where it was put by the BIOS */ |
1456 | rdev->mc.vram_location = (tom & 0xffff) << 16; | ||
1457 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); | ||
1458 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | ||
1269 | } else { | 1459 | } else { |
1270 | rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | 1460 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
1271 | /* Some production boards of m6 will report 0 | 1461 | /* Some production boards of m6 will report 0 |
1272 | * if it's 8 MB | 1462 | * if it's 8 MB |
1273 | */ | 1463 | */ |
1274 | if (rdev->mc.vram_size == 0) { | 1464 | if (rdev->mc.real_vram_size == 0) { |
1275 | rdev->mc.vram_size = 8192 * 1024; | 1465 | rdev->mc.real_vram_size = 8192 * 1024; |
1276 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); | 1466 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); |
1277 | } | 1467 | } |
1468 | /* let driver place VRAM */ | ||
1469 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
1470 | /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - | ||
1471 | * Novell bug 204882 + along with lots of ubuntu ones */ | ||
1472 | if (config_aper_size > rdev->mc.real_vram_size) | ||
1473 | rdev->mc.mc_vram_size = config_aper_size; | ||
1474 | else | ||
1475 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | ||
1278 | } | 1476 | } |
1279 | 1477 | ||
1478 | /* work out accessible VRAM */ | ||
1479 | accessible = r100_get_accessible_vram(rdev); | ||
1480 | |||
1280 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | 1481 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
1281 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | 1482 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); |
1483 | |||
1484 | if (accessible > rdev->mc.aper_size) | ||
1485 | accessible = rdev->mc.aper_size; | ||
1486 | |||
1487 | if (rdev->mc.mc_vram_size > rdev->mc.aper_size) | ||
1488 | rdev->mc.mc_vram_size = rdev->mc.aper_size; | ||
1489 | |||
1490 | if (rdev->mc.real_vram_size > rdev->mc.aper_size) | ||
1491 | rdev->mc.real_vram_size = rdev->mc.aper_size; | ||
1492 | } | ||
1493 | |||
1494 | void r100_vram_info(struct radeon_device *rdev) | ||
1495 | { | ||
1496 | r100_vram_get_type(rdev); | ||
1497 | |||
1498 | r100_vram_init_sizes(rdev); | ||
1282 | } | 1499 | } |
1283 | 1500 | ||
1284 | 1501 | ||
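
r100_vram_init_sizes() finishes by clamping everything to what the CPU can actually reach through the PCI aperture: the "accessible" figure from r100_get_accessible_vram() (up to twice the aperture on generation-2 parts), mc_vram_size and real_vram_size are all capped at aper_size. A small standalone sketch of that clamping, with made-up sizes:

    #include <stdint.h>
    #include <stdio.h>

    struct vram_sizes {
        uint64_t real_vram_size;    /* memory actually on the board */
        uint64_t mc_vram_size;      /* size the memory controller covers */
        uint64_t aper_size;         /* PCI aperture (BAR) size */
    };

    /* Cap every size at the aperture, mirroring the order used above. */
    static void clamp_to_aperture(struct vram_sizes *s, uint64_t accessible)
    {
        if (accessible > s->aper_size)
            accessible = s->aper_size;
        if (s->mc_vram_size > s->aper_size)
            s->mc_vram_size = s->aper_size;
        if (s->real_vram_size > s->aper_size)
            s->real_vram_size = s->aper_size;

        printf("accessible %llu MiB, mc %llu MiB, real %llu MiB\n",
               (unsigned long long)(accessible >> 20),
               (unsigned long long)(s->mc_vram_size >> 20),
               (unsigned long long)(s->real_vram_size >> 20));
    }

    int main(void)
    {
        /* a 256 MiB card behind a 128 MiB aperture */
        struct vram_sizes s = {
            .real_vram_size = 256ULL << 20,
            .mc_vram_size   = 256ULL << 20,
            .aper_size      = 128ULL << 20,
        };

        clamp_to_aperture(&s, 256ULL << 20);
        return 0;
    }
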
@@ -1533,3 +1750,530 @@ int r100_debugfs_mc_info_init(struct radeon_device *rdev) | |||
1533 | return 0; | 1750 | return 0; |
1534 | #endif | 1751 | #endif |
1535 | } | 1752 | } |
1753 | |||
1754 | int r100_set_surface_reg(struct radeon_device *rdev, int reg, | ||
1755 | uint32_t tiling_flags, uint32_t pitch, | ||
1756 | uint32_t offset, uint32_t obj_size) | ||
1757 | { | ||
1758 | int surf_index = reg * 16; | ||
1759 | int flags = 0; | ||
1760 | |||
1761 | /* r100/r200 divide by 16 */ | ||
1762 | if (rdev->family < CHIP_R300) | ||
1763 | flags = pitch / 16; | ||
1764 | else | ||
1765 | flags = pitch / 8; | ||
1766 | |||
1767 | if (rdev->family <= CHIP_RS200) { | ||
1768 | if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) | ||
1769 | == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) | ||
1770 | flags |= RADEON_SURF_TILE_COLOR_BOTH; | ||
1771 | if (tiling_flags & RADEON_TILING_MACRO) | ||
1772 | flags |= RADEON_SURF_TILE_COLOR_MACRO; | ||
1773 | } else if (rdev->family <= CHIP_RV280) { | ||
1774 | if (tiling_flags & (RADEON_TILING_MACRO)) | ||
1775 | flags |= R200_SURF_TILE_COLOR_MACRO; | ||
1776 | if (tiling_flags & RADEON_TILING_MICRO) | ||
1777 | flags |= R200_SURF_TILE_COLOR_MICRO; | ||
1778 | } else { | ||
1779 | if (tiling_flags & RADEON_TILING_MACRO) | ||
1780 | flags |= R300_SURF_TILE_MACRO; | ||
1781 | if (tiling_flags & RADEON_TILING_MICRO) | ||
1782 | flags |= R300_SURF_TILE_MICRO; | ||
1783 | } | ||
1784 | |||
1785 | DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); | ||
1786 | WREG32(RADEON_SURFACE0_INFO + surf_index, flags); | ||
1787 | WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); | ||
1788 | WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1); | ||
1789 | return 0; | ||
1790 | } | ||
1791 | |||
1792 | void r100_clear_surface_reg(struct radeon_device *rdev, int reg) | ||
1793 | { | ||
1794 | int surf_index = reg * 16; | ||
1795 | WREG32(RADEON_SURFACE0_INFO + surf_index, 0); | ||
1796 | } | ||
1797 | |||
1798 | void r100_bandwidth_update(struct radeon_device *rdev) | ||
1799 | { | ||
1800 | fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff; | ||
1801 | fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff; | ||
1802 | fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; | ||
1803 | uint32_t temp, data, mem_trcd, mem_trp, mem_tras; | ||
1804 | fixed20_12 memtcas_ff[8] = { | ||
1805 | fixed_init(1), | ||
1806 | fixed_init(2), | ||
1807 | fixed_init(3), | ||
1808 | fixed_init(0), | ||
1809 | fixed_init_half(1), | ||
1810 | fixed_init_half(2), | ||
1811 | fixed_init(0), | ||
1812 | }; | ||
1813 | fixed20_12 memtcas_rs480_ff[8] = { | ||
1814 | fixed_init(0), | ||
1815 | fixed_init(1), | ||
1816 | fixed_init(2), | ||
1817 | fixed_init(3), | ||
1818 | fixed_init(0), | ||
1819 | fixed_init_half(1), | ||
1820 | fixed_init_half(2), | ||
1821 | fixed_init_half(3), | ||
1822 | }; | ||
1823 | fixed20_12 memtcas2_ff[8] = { | ||
1824 | fixed_init(0), | ||
1825 | fixed_init(1), | ||
1826 | fixed_init(2), | ||
1827 | fixed_init(3), | ||
1828 | fixed_init(4), | ||
1829 | fixed_init(5), | ||
1830 | fixed_init(6), | ||
1831 | fixed_init(7), | ||
1832 | }; | ||
1833 | fixed20_12 memtrbs[8] = { | ||
1834 | fixed_init(1), | ||
1835 | fixed_init_half(1), | ||
1836 | fixed_init(2), | ||
1837 | fixed_init_half(2), | ||
1838 | fixed_init(3), | ||
1839 | fixed_init_half(3), | ||
1840 | fixed_init(4), | ||
1841 | fixed_init_half(4) | ||
1842 | }; | ||
1843 | fixed20_12 memtrbs_r4xx[8] = { | ||
1844 | fixed_init(4), | ||
1845 | fixed_init(5), | ||
1846 | fixed_init(6), | ||
1847 | fixed_init(7), | ||
1848 | fixed_init(8), | ||
1849 | fixed_init(9), | ||
1850 | fixed_init(10), | ||
1851 | fixed_init(11) | ||
1852 | }; | ||
1853 | fixed20_12 min_mem_eff; | ||
1854 | fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; | ||
1855 | fixed20_12 cur_latency_mclk, cur_latency_sclk; | ||
1856 | fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate, | ||
1857 | disp_drain_rate2, read_return_rate; | ||
1858 | fixed20_12 time_disp1_drop_priority; | ||
1859 | int c; | ||
1860 | int cur_size = 16; /* in octawords */ | ||
1861 | int critical_point = 0, critical_point2; | ||
1862 | /* uint32_t read_return_rate, time_disp1_drop_priority; */ | ||
1863 | int stop_req, max_stop_req; | ||
1864 | struct drm_display_mode *mode1 = NULL; | ||
1865 | struct drm_display_mode *mode2 = NULL; | ||
1866 | uint32_t pixel_bytes1 = 0; | ||
1867 | uint32_t pixel_bytes2 = 0; | ||
1868 | |||
1869 | if (rdev->mode_info.crtcs[0]->base.enabled) { | ||
1870 | mode1 = &rdev->mode_info.crtcs[0]->base.mode; | ||
1871 | pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; | ||
1872 | } | ||
1873 | if (rdev->mode_info.crtcs[1]->base.enabled) { | ||
1874 | mode2 = &rdev->mode_info.crtcs[1]->base.mode; | ||
1875 | pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; | ||
1876 | } | ||
1877 | |||
1878 | min_mem_eff.full = rfixed_const_8(0); | ||
1879 | /* get modes */ | ||
1880 | if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { | ||
1881 | uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); | ||
1882 | mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT); | ||
1883 | mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT); | ||
1884 | /* check crtc enables */ | ||
1885 | if (mode2) | ||
1886 | mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); | ||
1887 | if (mode1) | ||
1888 | mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); | ||
1889 | WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer); | ||
1890 | } | ||
1891 | |||
1892 | /* | ||
1893 | * determine if there is enough bw for current mode | ||
1894 | */ | ||
1895 | mclk_ff.full = rfixed_const(rdev->clock.default_mclk); | ||
1896 | temp_ff.full = rfixed_const(100); | ||
1897 | mclk_ff.full = rfixed_div(mclk_ff, temp_ff); | ||
1898 | sclk_ff.full = rfixed_const(rdev->clock.default_sclk); | ||
1899 | sclk_ff.full = rfixed_div(sclk_ff, temp_ff); | ||
1900 | |||
1901 | temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); | ||
1902 | temp_ff.full = rfixed_const(temp); | ||
1903 | mem_bw.full = rfixed_mul(mclk_ff, temp_ff); | ||
1904 | |||
1905 | pix_clk.full = 0; | ||
1906 | pix_clk2.full = 0; | ||
1907 | peak_disp_bw.full = 0; | ||
1908 | if (mode1) { | ||
1909 | temp_ff.full = rfixed_const(1000); | ||
1910 | pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ | ||
1911 | pix_clk.full = rfixed_div(pix_clk, temp_ff); | ||
1912 | temp_ff.full = rfixed_const(pixel_bytes1); | ||
1913 | peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); | ||
1914 | } | ||
1915 | if (mode2) { | ||
1916 | temp_ff.full = rfixed_const(1000); | ||
1917 | pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ | ||
1918 | pix_clk2.full = rfixed_div(pix_clk2, temp_ff); | ||
1919 | temp_ff.full = rfixed_const(pixel_bytes2); | ||
1920 | peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); | ||
1921 | } | ||
1922 | |||
1923 | mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); | ||
1924 | if (peak_disp_bw.full >= mem_bw.full) { | ||
1925 | DRM_ERROR("You may not have enough display bandwidth for the current mode\n" | ||
1926 | "If you have a flickering problem, try to lower the resolution, refresh rate, or color depth\n"); | ||
1927 | } | ||
1928 | |||
1929 | /* Get values from the EXT_MEM_CNTL register...converting its contents. */ | ||
1930 | temp = RREG32(RADEON_MEM_TIMING_CNTL); | ||
1931 | if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */ | ||
1932 | mem_trcd = ((temp >> 2) & 0x3) + 1; | ||
1933 | mem_trp = ((temp & 0x3)) + 1; | ||
1934 | mem_tras = ((temp & 0x70) >> 4) + 1; | ||
1935 | } else if (rdev->family == CHIP_R300 || | ||
1936 | rdev->family == CHIP_R350) { /* r300, r350 */ | ||
1937 | mem_trcd = (temp & 0x7) + 1; | ||
1938 | mem_trp = ((temp >> 8) & 0x7) + 1; | ||
1939 | mem_tras = ((temp >> 11) & 0xf) + 4; | ||
1940 | } else if (rdev->family == CHIP_RV350 || | ||
1941 | rdev->family <= CHIP_RV380) { | ||
1942 | /* rv3x0 */ | ||
1943 | mem_trcd = (temp & 0x7) + 3; | ||
1944 | mem_trp = ((temp >> 8) & 0x7) + 3; | ||
1945 | mem_tras = ((temp >> 11) & 0xf) + 6; | ||
1946 | } else if (rdev->family == CHIP_R420 || | ||
1947 | rdev->family == CHIP_R423 || | ||
1948 | rdev->family == CHIP_RV410) { | ||
1949 | /* r4xx */ | ||
1950 | mem_trcd = (temp & 0xf) + 3; | ||
1951 | if (mem_trcd > 15) | ||
1952 | mem_trcd = 15; | ||
1953 | mem_trp = ((temp >> 8) & 0xf) + 3; | ||
1954 | if (mem_trp > 15) | ||
1955 | mem_trp = 15; | ||
1956 | mem_tras = ((temp >> 12) & 0x1f) + 6; | ||
1957 | if (mem_tras > 31) | ||
1958 | mem_tras = 31; | ||
1959 | } else { /* RV200, R200 */ | ||
1960 | mem_trcd = (temp & 0x7) + 1; | ||
1961 | mem_trp = ((temp >> 8) & 0x7) + 1; | ||
1962 | mem_tras = ((temp >> 12) & 0xf) + 4; | ||
1963 | } | ||
1964 | /* convert to FF */ | ||
1965 | trcd_ff.full = rfixed_const(mem_trcd); | ||
1966 | trp_ff.full = rfixed_const(mem_trp); | ||
1967 | tras_ff.full = rfixed_const(mem_tras); | ||
1968 | |||
1969 | /* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */ | ||
1970 | temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); | ||
1971 | data = (temp & (7 << 20)) >> 20; | ||
1972 | if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) { | ||
1973 | if (rdev->family == CHIP_RS480) /* don't think rs400 */ | ||
1974 | tcas_ff = memtcas_rs480_ff[data]; | ||
1975 | else | ||
1976 | tcas_ff = memtcas_ff[data]; | ||
1977 | } else | ||
1978 | tcas_ff = memtcas2_ff[data]; | ||
1979 | |||
1980 | if (rdev->family == CHIP_RS400 || | ||
1981 | rdev->family == CHIP_RS480) { | ||
1982 | /* extra cas latency stored in bits 23-25: 0-4 clocks */ | ||
1983 | data = (temp >> 23) & 0x7; | ||
1984 | if (data < 5) | ||
1985 | tcas_ff.full += rfixed_const(data); | ||
1986 | } | ||
1987 | |||
1988 | if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { | ||
1989 | /* on the R300, Tcas is included in Trbs. | ||
1990 | */ | ||
1991 | temp = RREG32(RADEON_MEM_CNTL); | ||
1992 | data = (R300_MEM_NUM_CHANNELS_MASK & temp); | ||
1993 | if (data == 1) { | ||
1994 | if (R300_MEM_USE_CD_CH_ONLY & temp) { | ||
1995 | temp = RREG32(R300_MC_IND_INDEX); | ||
1996 | temp &= ~R300_MC_IND_ADDR_MASK; | ||
1997 | temp |= R300_MC_READ_CNTL_CD_mcind; | ||
1998 | WREG32(R300_MC_IND_INDEX, temp); | ||
1999 | temp = RREG32(R300_MC_IND_DATA); | ||
2000 | data = (R300_MEM_RBS_POSITION_C_MASK & temp); | ||
2001 | } else { | ||
2002 | temp = RREG32(R300_MC_READ_CNTL_AB); | ||
2003 | data = (R300_MEM_RBS_POSITION_A_MASK & temp); | ||
2004 | } | ||
2005 | } else { | ||
2006 | temp = RREG32(R300_MC_READ_CNTL_AB); | ||
2007 | data = (R300_MEM_RBS_POSITION_A_MASK & temp); | ||
2008 | } | ||
2009 | if (rdev->family == CHIP_RV410 || | ||
2010 | rdev->family == CHIP_R420 || | ||
2011 | rdev->family == CHIP_R423) | ||
2012 | trbs_ff = memtrbs_r4xx[data]; | ||
2013 | else | ||
2014 | trbs_ff = memtrbs[data]; | ||
2015 | tcas_ff.full += trbs_ff.full; | ||
2016 | } | ||
2017 | |||
2018 | sclk_eff_ff.full = sclk_ff.full; | ||
2019 | |||
2020 | if (rdev->flags & RADEON_IS_AGP) { | ||
2021 | fixed20_12 agpmode_ff; | ||
2022 | agpmode_ff.full = rfixed_const(radeon_agpmode); | ||
2023 | temp_ff.full = rfixed_const_666(16); | ||
2024 | sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff); | ||
2025 | } | ||
2026 | /* TODO PCIE lanes may affect this - agpmode == 16?? */ | ||
2027 | |||
2028 | if (ASIC_IS_R300(rdev)) { | ||
2029 | sclk_delay_ff.full = rfixed_const(250); | ||
2030 | } else { | ||
2031 | if ((rdev->family == CHIP_RV100) || | ||
2032 | rdev->flags & RADEON_IS_IGP) { | ||
2033 | if (rdev->mc.vram_is_ddr) | ||
2034 | sclk_delay_ff.full = rfixed_const(41); | ||
2035 | else | ||
2036 | sclk_delay_ff.full = rfixed_const(33); | ||
2037 | } else { | ||
2038 | if (rdev->mc.vram_width == 128) | ||
2039 | sclk_delay_ff.full = rfixed_const(57); | ||
2040 | else | ||
2041 | sclk_delay_ff.full = rfixed_const(41); | ||
2042 | } | ||
2043 | } | ||
2044 | |||
2045 | mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff); | ||
2046 | |||
2047 | if (rdev->mc.vram_is_ddr) { | ||
2048 | if (rdev->mc.vram_width == 32) { | ||
2049 | k1.full = rfixed_const(40); | ||
2050 | c = 3; | ||
2051 | } else { | ||
2052 | k1.full = rfixed_const(20); | ||
2053 | c = 1; | ||
2054 | } | ||
2055 | } else { | ||
2056 | k1.full = rfixed_const(40); | ||
2057 | c = 3; | ||
2058 | } | ||
2059 | |||
2060 | temp_ff.full = rfixed_const(2); | ||
2061 | mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff); | ||
2062 | temp_ff.full = rfixed_const(c); | ||
2063 | mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff); | ||
2064 | temp_ff.full = rfixed_const(4); | ||
2065 | mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff); | ||
2066 | mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff); | ||
2067 | mc_latency_mclk.full += k1.full; | ||
2068 | |||
2069 | mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff); | ||
2070 | mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff); | ||
2071 | |||
2072 | /* | ||
2073 | HW cursor time assuming worst case of full size colour cursor. | ||
2074 | */ | ||
2075 | temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); | ||
2076 | temp_ff.full += trcd_ff.full; | ||
2077 | if (temp_ff.full < tras_ff.full) | ||
2078 | temp_ff.full = tras_ff.full; | ||
2079 | cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff); | ||
2080 | |||
2081 | temp_ff.full = rfixed_const(cur_size); | ||
2082 | cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff); | ||
2083 | /* | ||
2084 | Find the total latency for the display data. | ||
2085 | */ | ||
2086 | disp_latency_overhead.full = rfixed_const(80); | ||
2087 | disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff); | ||
2088 | mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; | ||
2089 | mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; | ||
2090 | |||
2091 | if (mc_latency_mclk.full > mc_latency_sclk.full) | ||
2092 | disp_latency.full = mc_latency_mclk.full; | ||
2093 | else | ||
2094 | disp_latency.full = mc_latency_sclk.full; | ||
2095 | |||
2096 | /* setup Max GRPH_STOP_REQ default value */ | ||
2097 | if (ASIC_IS_RV100(rdev)) | ||
2098 | max_stop_req = 0x5c; | ||
2099 | else | ||
2100 | max_stop_req = 0x7c; | ||
2101 | |||
2102 | if (mode1) { | ||
2103 | /* CRTC1 | ||
2104 | Set GRPH_BUFFER_CNTL register using h/w defined optimal values. | ||
2105 | GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ] | ||
2106 | */ | ||
2107 | stop_req = mode1->hdisplay * pixel_bytes1 / 16; | ||
2108 | |||
2109 | if (stop_req > max_stop_req) | ||
2110 | stop_req = max_stop_req; | ||
2111 | |||
2112 | /* | ||
2113 | Find the drain rate of the display buffer. | ||
2114 | */ | ||
2115 | temp_ff.full = rfixed_const((16/pixel_bytes1)); | ||
2116 | disp_drain_rate.full = rfixed_div(pix_clk, temp_ff); | ||
2117 | |||
2118 | /* | ||
2119 | Find the critical point of the display buffer. | ||
2120 | */ | ||
2121 | crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency); | ||
2122 | crit_point_ff.full += rfixed_const_half(0); | ||
2123 | |||
2124 | critical_point = rfixed_trunc(crit_point_ff); | ||
2125 | |||
2126 | if (rdev->disp_priority == 2) { | ||
2127 | critical_point = 0; | ||
2128 | } | ||
2129 | |||
2130 | /* | ||
2131 | The critical point should never be above max_stop_req-4. Setting | ||
2132 | GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time. | ||
2133 | */ | ||
2134 | if (max_stop_req - critical_point < 4) | ||
2135 | critical_point = 0; | ||
2136 | |||
2137 | if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) { | ||
2138 | /* some R300 cards have a problem with this set to 0 when CRTC2 is enabled. */ | ||
2139 | critical_point = 0x10; | ||
2140 | } | ||
2141 | |||
2142 | temp = RREG32(RADEON_GRPH_BUFFER_CNTL); | ||
2143 | temp &= ~(RADEON_GRPH_STOP_REQ_MASK); | ||
2144 | temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); | ||
2145 | temp &= ~(RADEON_GRPH_START_REQ_MASK); | ||
2146 | if ((rdev->family == CHIP_R350) && | ||
2147 | (stop_req > 0x15)) { | ||
2148 | stop_req -= 0x10; | ||
2149 | } | ||
2150 | temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); | ||
2151 | temp |= RADEON_GRPH_BUFFER_SIZE; | ||
2152 | temp &= ~(RADEON_GRPH_CRITICAL_CNTL | | ||
2153 | RADEON_GRPH_CRITICAL_AT_SOF | | ||
2154 | RADEON_GRPH_STOP_CNTL); | ||
2155 | /* | ||
2156 | Write the result into the register. | ||
2157 | */ | ||
2158 | WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) | | ||
2159 | (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT))); | ||
2160 | |||
2161 | #if 0 | ||
2162 | if ((rdev->family == CHIP_RS400) || | ||
2163 | (rdev->family == CHIP_RS480)) { | ||
2164 | /* attempt to program RS400 disp regs correctly ??? */ | ||
2165 | temp = RREG32(RS400_DISP1_REG_CNTL); | ||
2166 | temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK | | ||
2167 | RS400_DISP1_STOP_REQ_LEVEL_MASK); | ||
2168 | WREG32(RS400_DISP1_REQ_CNTL1, (temp | | ||
2169 | (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) | | ||
2170 | (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); | ||
2171 | temp = RREG32(RS400_DMIF_MEM_CNTL1); | ||
2172 | temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK | | ||
2173 | RS400_DISP1_CRITICAL_POINT_STOP_MASK); | ||
2174 | WREG32(RS400_DMIF_MEM_CNTL1, (temp | | ||
2175 | (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) | | ||
2176 | (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT))); | ||
2177 | } | ||
2178 | #endif | ||
2179 | |||
2180 | DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n", | ||
2181 | /* (unsigned int)info->SavedReg->grph_buffer_cntl, */ | ||
2182 | (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); | ||
2183 | } | ||
2184 | |||
2185 | if (mode2) { | ||
2186 | u32 grph2_cntl; | ||
2187 | stop_req = mode2->hdisplay * pixel_bytes2 / 16; | ||
2188 | |||
2189 | if (stop_req > max_stop_req) | ||
2190 | stop_req = max_stop_req; | ||
2191 | |||
2192 | /* | ||
2193 | Find the drain rate of the display buffer. | ||
2194 | */ | ||
2195 | temp_ff.full = rfixed_const((16/pixel_bytes2)); | ||
2196 | disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff); | ||
2197 | |||
2198 | grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); | ||
2199 | grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); | ||
2200 | grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); | ||
2201 | grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK); | ||
2202 | if ((rdev->family == CHIP_R350) && | ||
2203 | (stop_req > 0x15)) { | ||
2204 | stop_req -= 0x10; | ||
2205 | } | ||
2206 | grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); | ||
2207 | grph2_cntl |= RADEON_GRPH_BUFFER_SIZE; | ||
2208 | grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL | | ||
2209 | RADEON_GRPH_CRITICAL_AT_SOF | | ||
2210 | RADEON_GRPH_STOP_CNTL); | ||
2211 | |||
2212 | if ((rdev->family == CHIP_RS100) || | ||
2213 | (rdev->family == CHIP_RS200)) | ||
2214 | critical_point2 = 0; | ||
2215 | else { | ||
2216 | temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; | ||
2217 | temp_ff.full = rfixed_const(temp); | ||
2218 | temp_ff.full = rfixed_mul(mclk_ff, temp_ff); | ||
2219 | if (sclk_ff.full < temp_ff.full) | ||
2220 | temp_ff.full = sclk_ff.full; | ||
2221 | |||
2222 | read_return_rate.full = temp_ff.full; | ||
2223 | |||
2224 | if (mode1) { | ||
2225 | temp_ff.full = read_return_rate.full - disp_drain_rate.full; | ||
2226 | time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff); | ||
2227 | } else { | ||
2228 | time_disp1_drop_priority.full = 0; | ||
2229 | } | ||
2230 | crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; | ||
2231 | crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2); | ||
2232 | crit_point_ff.full += rfixed_const_half(0); | ||
2233 | |||
2234 | critical_point2 = rfixed_trunc(crit_point_ff); | ||
2235 | |||
2236 | if (rdev->disp_priority == 2) { | ||
2237 | critical_point2 = 0; | ||
2238 | } | ||
2239 | |||
2240 | if (max_stop_req - critical_point2 < 4) | ||
2241 | critical_point2 = 0; | ||
2242 | |||
2243 | } | ||
2244 | |||
2245 | if (critical_point2 == 0 && rdev->family == CHIP_R300) { | ||
2246 | /* some R300 cards have a problem with this set to 0 */ | ||
2247 | critical_point2 = 0x10; | ||
2248 | } | ||
2249 | |||
2250 | WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) | | ||
2251 | (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT))); | ||
2252 | |||
2253 | if ((rdev->family == CHIP_RS400) || | ||
2254 | (rdev->family == CHIP_RS480)) { | ||
2255 | #if 0 | ||
2256 | /* attempt to program RS400 disp2 regs correctly ??? */ | ||
2257 | temp = RREG32(RS400_DISP2_REQ_CNTL1); | ||
2258 | temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK | | ||
2259 | RS400_DISP2_STOP_REQ_LEVEL_MASK); | ||
2260 | WREG32(RS400_DISP2_REQ_CNTL1, (temp | | ||
2261 | (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) | | ||
2262 | (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); | ||
2263 | temp = RREG32(RS400_DISP2_REQ_CNTL2); | ||
2264 | temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK | | ||
2265 | RS400_DISP2_CRITICAL_POINT_STOP_MASK); | ||
2266 | WREG32(RS400_DISP2_REQ_CNTL2, (temp | | ||
2267 | (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) | | ||
2268 | (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT))); | ||
2269 | #endif | ||
2270 | WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC); | ||
2271 | WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000); | ||
2272 | WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC); | ||
2273 | WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC); | ||
2274 | } | ||
2275 | |||
2276 | DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n", | ||
2277 | (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); | ||
2278 | } | ||
2279 | } | ||
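
The bandwidth and latency math in r100_bandwidth_update() above is carried out entirely in the driver's fixed20_12 format, a 20.12 fixed-point value stored in the .full member. As a rough, self-contained sketch of the semantics those helpers are assumed to provide (the kernel's own definitions live in radeon_fixed.h and may differ in rounding details), the arithmetic boils down to:

    #include <stdint.h>

    /* 20.12 fixed point: upper 20 bits integer part, lower 12 bits fraction.
     * Helpers take fixed20_12 operands and return the raw .full value,
     * matching how the code above assigns their results. */
    typedef struct { uint32_t full; } fixed20_12;

    static uint32_t rfixed_const(uint32_t a)      /* integer -> 20.12 */
    {
        return a << 12;
    }

    static uint32_t rfixed_mul(fixed20_12 a, fixed20_12 b)
    {
        /* widen to 64 bits so the intermediate product cannot overflow */
        return (uint32_t)(((uint64_t)a.full * b.full) >> 12);
    }

    static uint32_t rfixed_div(fixed20_12 a, fixed20_12 b)
    {
        return (uint32_t)(((uint64_t)a.full << 12) / b.full);
    }

    static uint32_t rfixed_trunc(fixed20_12 a)    /* 20.12 -> integer */
    {
        return a.full >> 12;
    }

Under these semantics, the rfixed_const_half(0) added to crit_point_ff just before rfixed_trunc() appears to be the usual add-one-half-then-truncate trick, turning the final conversion into round-to-nearest rather than round-down.
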
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index e2ed5bc08170..9c8d41534a5d 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -30,6 +30,8 @@ | |||
30 | #include "drm.h" | 30 | #include "drm.h" |
31 | #include "radeon_reg.h" | 31 | #include "radeon_reg.h" |
32 | #include "radeon.h" | 32 | #include "radeon.h" |
33 | #include "radeon_drm.h" | ||
34 | #include "radeon_share.h" | ||
33 | 35 | ||
34 | /* r300,r350,rv350,rv370,rv380 depends on : */ | 36 | /* r300,r350,rv350,rv370,rv380 depends on : */ |
35 | void r100_hdp_reset(struct radeon_device *rdev); | 37 | void r100_hdp_reset(struct radeon_device *rdev); |
@@ -44,6 +46,7 @@ int r100_gui_wait_for_idle(struct radeon_device *rdev); | |||
44 | int r100_cs_packet_parse(struct radeon_cs_parser *p, | 46 | int r100_cs_packet_parse(struct radeon_cs_parser *p, |
45 | struct radeon_cs_packet *pkt, | 47 | struct radeon_cs_packet *pkt, |
46 | unsigned idx); | 48 | unsigned idx); |
49 | int r100_cs_packet_parse_vline(struct radeon_cs_parser *p); | ||
47 | int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, | 50 | int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, |
48 | struct radeon_cs_reloc **cs_reloc); | 51 | struct radeon_cs_reloc **cs_reloc); |
49 | int r100_cs_parse_packet0(struct radeon_cs_parser *p, | 52 | int r100_cs_parse_packet0(struct radeon_cs_parser *p, |
@@ -150,8 +153,13 @@ int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
150 | if (i < 0 || i > rdev->gart.num_gpu_pages) { | 153 | if (i < 0 || i > rdev->gart.num_gpu_pages) { |
151 | return -EINVAL; | 154 | return -EINVAL; |
152 | } | 155 | } |
153 | addr = (((u32)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 4) | 0xC; | 156 | addr = (lower_32_bits(addr) >> 8) | |
154 | writel(cpu_to_le32(addr), ((void __iomem *)ptr) + (i * 4)); | 157 | ((upper_32_bits(addr) & 0xff) << 24) | |
158 | 0xc; | ||
159 | /* on x86 we want this to be CPU endian; on powerpc | ||
160 | * without HW swappers it'll get swapped on the way | ||
161 | * into VRAM - so no need for cpu_to_le32 on VRAM tables */ | ||
162 | writel(addr, ((void __iomem *)ptr) + (i * 4)); | ||
155 | return 0; | 163 | return 0; |
156 | } | 164 | } |
157 | 165 | ||
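
The reworked page-table write in rv370_pcie_gart_set_page() above appears to pack a 40-bit page address into a single 32-bit entry: address bits 8-31 land in entry bits 0-23, address bits 32-39 in entry bits 24-31, and 0xc supplies the flag bits in the low nibble (pages are 4 KiB aligned, so address bits 0-11 are zero and nothing useful is discarded). A quick standalone check of that arithmetic, using a made-up address purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* same packing expression as the patched rv370_pcie_gart_set_page() */
    static uint32_t pack_pcie_gart_entry(uint64_t addr)
    {
        return ((uint32_t)addr >> 8) |
               (((uint32_t)(addr >> 32) & 0xff) << 24) |
               0xcu;
    }

    int main(void)
    {
        uint64_t addr = 0x123456000ULL;   /* hypothetical 4 KiB-aligned address above 4 GiB */
        printf("0x%08x\n", pack_pcie_gart_entry(addr));   /* prints 0x0123456c */
        return 0;
    }
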
@@ -579,10 +587,8 @@ void r300_vram_info(struct radeon_device *rdev) | |||
579 | } else { | 587 | } else { |
580 | rdev->mc.vram_width = 64; | 588 | rdev->mc.vram_width = 64; |
581 | } | 589 | } |
582 | rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | ||
583 | 590 | ||
584 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | 591 | r100_vram_init_sizes(rdev); |
585 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | ||
586 | } | 592 | } |
587 | 593 | ||
588 | 594 | ||
@@ -970,7 +976,7 @@ static inline void r300_cs_track_clear(struct r300_cs_track *track) | |||
970 | 976 | ||
971 | static const unsigned r300_reg_safe_bm[159] = { | 977 | static const unsigned r300_reg_safe_bm[159] = { |
972 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 978 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
973 | 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF, | 979 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
974 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 980 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
975 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 981 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
976 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 982 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
@@ -1019,7 +1025,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1019 | struct radeon_cs_reloc *reloc; | 1025 | struct radeon_cs_reloc *reloc; |
1020 | struct r300_cs_track *track; | 1026 | struct r300_cs_track *track; |
1021 | volatile uint32_t *ib; | 1027 | volatile uint32_t *ib; |
1022 | uint32_t tmp; | 1028 | uint32_t tmp, tile_flags = 0; |
1023 | unsigned i; | 1029 | unsigned i; |
1024 | int r; | 1030 | int r; |
1025 | 1031 | ||
@@ -1027,6 +1033,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1027 | ib_chunk = &p->chunks[p->chunk_ib_idx]; | 1033 | ib_chunk = &p->chunks[p->chunk_ib_idx]; |
1028 | track = (struct r300_cs_track*)p->track; | 1034 | track = (struct r300_cs_track*)p->track; |
1029 | switch(reg) { | 1035 | switch(reg) { |
1036 | case AVIVO_D1MODE_VLINE_START_END: | ||
1037 | case RADEON_CRTC_GUI_TRIG_VLINE: | ||
1038 | r = r100_cs_packet_parse_vline(p); | ||
1039 | if (r) { | ||
1040 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
1041 | idx, reg); | ||
1042 | r100_cs_dump_packet(p, pkt); | ||
1043 | return r; | ||
1044 | } | ||
1045 | break; | ||
1030 | case RADEON_DST_PITCH_OFFSET: | 1046 | case RADEON_DST_PITCH_OFFSET: |
1031 | case RADEON_SRC_PITCH_OFFSET: | 1047 | case RADEON_SRC_PITCH_OFFSET: |
1032 | r = r100_cs_packet_next_reloc(p, &reloc); | 1048 | r = r100_cs_packet_next_reloc(p, &reloc); |
@@ -1038,7 +1054,19 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1038 | } | 1054 | } |
1039 | tmp = ib_chunk->kdata[idx] & 0x003fffff; | 1055 | tmp = ib_chunk->kdata[idx] & 0x003fffff; |
1040 | tmp += (((u32)reloc->lobj.gpu_offset) >> 10); | 1056 | tmp += (((u32)reloc->lobj.gpu_offset) >> 10); |
1041 | ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp; | 1057 | |
1058 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | ||
1059 | tile_flags |= RADEON_DST_TILE_MACRO; | ||
1060 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { | ||
1061 | if (reg == RADEON_SRC_PITCH_OFFSET) { | ||
1062 | DRM_ERROR("Cannot src blit from microtiled surface\n"); | ||
1063 | r100_cs_dump_packet(p, pkt); | ||
1064 | return -EINVAL; | ||
1065 | } | ||
1066 | tile_flags |= RADEON_DST_TILE_MICRO; | ||
1067 | } | ||
1068 | tmp |= tile_flags; | ||
1069 | ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp; | ||
1042 | break; | 1070 | break; |
1043 | case R300_RB3D_COLOROFFSET0: | 1071 | case R300_RB3D_COLOROFFSET0: |
1044 | case R300_RB3D_COLOROFFSET1: | 1072 | case R300_RB3D_COLOROFFSET1: |
@@ -1127,6 +1155,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1127 | /* RB3D_COLORPITCH1 */ | 1155 | /* RB3D_COLORPITCH1 */ |
1128 | /* RB3D_COLORPITCH2 */ | 1156 | /* RB3D_COLORPITCH2 */ |
1129 | /* RB3D_COLORPITCH3 */ | 1157 | /* RB3D_COLORPITCH3 */ |
1158 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1159 | if (r) { | ||
1160 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
1161 | idx, reg); | ||
1162 | r100_cs_dump_packet(p, pkt); | ||
1163 | return r; | ||
1164 | } | ||
1165 | |||
1166 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | ||
1167 | tile_flags |= R300_COLOR_TILE_ENABLE; | ||
1168 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | ||
1169 | tile_flags |= R300_COLOR_MICROTILE_ENABLE; | ||
1170 | |||
1171 | tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); | ||
1172 | tmp |= tile_flags; | ||
1173 | ib[idx] = tmp; | ||
1174 | |||
1130 | i = (reg - 0x4E38) >> 2; | 1175 | i = (reg - 0x4E38) >> 2; |
1131 | track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE; | 1176 | track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE; |
1132 | switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) { | 1177 | switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) { |
@@ -1182,6 +1227,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
1182 | break; | 1227 | break; |
1183 | case 0x4F24: | 1228 | case 0x4F24: |
1184 | /* ZB_DEPTHPITCH */ | 1229 | /* ZB_DEPTHPITCH */ |
1230 | r = r100_cs_packet_next_reloc(p, &reloc); | ||
1231 | if (r) { | ||
1232 | DRM_ERROR("No reloc for ib[%d]=0x%04X\n", | ||
1233 | idx, reg); | ||
1234 | r100_cs_dump_packet(p, pkt); | ||
1235 | return r; | ||
1236 | } | ||
1237 | |||
1238 | if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) | ||
1239 | tile_flags |= R300_DEPTHMACROTILE_ENABLE; | ||
1240 | if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) | ||
1241 | tile_flags |= R300_DEPTHMICROTILE_TILED; | ||
1242 | |||
1243 | tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); | ||
1244 | tmp |= tile_flags; | ||
1245 | ib[idx] = tmp; | ||
1246 | |||
1185 | track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC; | 1247 | track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC; |
1186 | break; | 1248 | break; |
1187 | case 0x4104: | 1249 | case 0x4104: |
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h index 70f48609515e..4b7afef35a65 100644 --- a/drivers/gpu/drm/radeon/r300_reg.h +++ b/drivers/gpu/drm/radeon/r300_reg.h | |||
@@ -27,7 +27,9 @@ | |||
27 | #ifndef _R300_REG_H_ | 27 | #ifndef _R300_REG_H_ |
28 | #define _R300_REG_H_ | 28 | #define _R300_REG_H_ |
29 | 29 | ||
30 | 30 | #define R300_SURF_TILE_MACRO (1<<16) | |
31 | #define R300_SURF_TILE_MICRO (2<<16) | ||
32 | #define R300_SURF_TILE_BOTH (3<<16) | ||
31 | 33 | ||
32 | 34 | ||
33 | #define R300_MC_INIT_MISC_LAT_TIMER 0x180 | 35 | #define R300_MC_INIT_MISC_LAT_TIMER 0x180 |
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h index 9070a1c2ce23..036691b38cb7 100644 --- a/drivers/gpu/drm/radeon/r500_reg.h +++ b/drivers/gpu/drm/radeon/r500_reg.h | |||
@@ -445,6 +445,7 @@ | |||
445 | #define AVIVO_D1MODE_DATA_FORMAT 0x6528 | 445 | #define AVIVO_D1MODE_DATA_FORMAT 0x6528 |
446 | # define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0) | 446 | # define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0) |
447 | #define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C | 447 | #define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C |
448 | #define AVIVO_D1MODE_VLINE_START_END 0x6538 | ||
448 | #define AVIVO_D1MODE_VIEWPORT_START 0x6580 | 449 | #define AVIVO_D1MODE_VIEWPORT_START 0x6580 |
449 | #define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584 | 450 | #define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584 |
450 | #define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588 | 451 | #define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588 |
@@ -496,6 +497,7 @@ | |||
496 | #define AVIVO_D2CUR_SIZE 0x6c10 | 497 | #define AVIVO_D2CUR_SIZE 0x6c10 |
497 | #define AVIVO_D2CUR_POSITION 0x6c14 | 498 | #define AVIVO_D2CUR_POSITION 0x6c14 |
498 | 499 | ||
500 | #define AVIVO_D2MODE_VLINE_START_END 0x6d38 | ||
499 | #define AVIVO_D2MODE_VIEWPORT_START 0x6d80 | 501 | #define AVIVO_D2MODE_VIEWPORT_START 0x6d80 |
500 | #define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84 | 502 | #define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84 |
501 | #define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88 | 503 | #define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88 |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 570a244bd88b..09fb0b6ec7dd 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include "drmP.h" | 28 | #include "drmP.h" |
29 | #include "radeon_reg.h" | 29 | #include "radeon_reg.h" |
30 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | #include "radeon_share.h" | ||
31 | 32 | ||
32 | /* r520,rv530,rv560,rv570,r580 depends on : */ | 33 | /* r520,rv530,rv560,rv570,r580 depends on : */ |
33 | void r100_hdp_reset(struct radeon_device *rdev); | 34 | void r100_hdp_reset(struct radeon_device *rdev); |
@@ -94,8 +95,8 @@ int r520_mc_init(struct radeon_device *rdev) | |||
94 | "programming pipes. Bad things might happen.\n"); | 95 | "programming pipes. Bad things might happen.\n"); |
95 | } | 96 | } |
96 | /* Write VRAM size in case we are limiting it */ | 97 | /* Write VRAM size in case we are limiting it */ |
97 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); | 98 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); |
98 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 99 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
99 | tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16); | 100 | tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16); |
100 | tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16); | 101 | tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16); |
101 | WREG32_MC(R520_MC_FB_LOCATION, tmp); | 102 | WREG32_MC(R520_MC_FB_LOCATION, tmp); |
@@ -226,9 +227,20 @@ static void r520_vram_get_type(struct radeon_device *rdev) | |||
226 | 227 | ||
227 | void r520_vram_info(struct radeon_device *rdev) | 228 | void r520_vram_info(struct radeon_device *rdev) |
228 | { | 229 | { |
230 | fixed20_12 a; | ||
231 | |||
229 | r520_vram_get_type(rdev); | 232 | r520_vram_get_type(rdev); |
230 | rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | ||
231 | 233 | ||
232 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | 234 | r100_vram_init_sizes(rdev); |
233 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | 235 | /* FIXME: we should enforce default clock in case GPU is not in |
236 | * default setup | ||
237 | */ | ||
238 | a.full = rfixed_const(100); | ||
239 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); | ||
240 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
241 | } | ||
242 | |||
243 | void r520_bandwidth_update(struct radeon_device *rdev) | ||
244 | { | ||
245 | rv515_bandwidth_avivo_update(rdev); | ||
234 | } | 246 | } |
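
The pm.sclk seeding added to r520_vram_info() above feeds the same 20.12 fixed-point machinery used by the bandwidth code earlier in the patch. Assuming default_sclk follows the driver's usual convention of being stored in 10 kHz units, dividing by rfixed_const(100) leaves the engine clock expressed in MHz: a hypothetical default_sclk of 35000 becomes rfixed_const(35000) = 35000 << 12, and the division by 100.0 yields 350 << 12, i.e. 350.0 MHz in 20.12 form.
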
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index c45559fc97fd..538cd907df69 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -67,7 +67,7 @@ int r600_mc_init(struct radeon_device *rdev) | |||
67 | "programming pipes. Bad things might happen.\n"); | 67 | "programming pipes. Bad things might happen.\n"); |
68 | } | 68 | } |
69 | 69 | ||
70 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 70 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
71 | tmp = REG_SET(R600_MC_FB_TOP, tmp >> 24); | 71 | tmp = REG_SET(R600_MC_FB_TOP, tmp >> 24); |
72 | tmp |= REG_SET(R600_MC_FB_BASE, rdev->mc.vram_location >> 24); | 72 | tmp |= REG_SET(R600_MC_FB_BASE, rdev->mc.vram_location >> 24); |
73 | WREG32(R600_MC_VM_FB_LOCATION, tmp); | 73 | WREG32(R600_MC_VM_FB_LOCATION, tmp); |
@@ -140,7 +140,8 @@ void r600_vram_get_type(struct radeon_device *rdev) | |||
140 | void r600_vram_info(struct radeon_device *rdev) | 140 | void r600_vram_info(struct radeon_device *rdev) |
141 | { | 141 | { |
142 | r600_vram_get_type(rdev); | 142 | r600_vram_get_type(rdev); |
143 | rdev->mc.vram_size = RREG32(R600_CONFIG_MEMSIZE); | 143 | rdev->mc.real_vram_size = RREG32(R600_CONFIG_MEMSIZE); |
144 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | ||
144 | 145 | ||
145 | /* Could aper size report 0 ? */ | 146 | /* Could aper size report 0 ? */ |
146 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | 147 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index d61f2fc61df5..b1d945b8ed6c 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -64,6 +64,7 @@ extern int radeon_agpmode; | |||
64 | extern int radeon_vram_limit; | 64 | extern int radeon_vram_limit; |
65 | extern int radeon_gart_size; | 65 | extern int radeon_gart_size; |
66 | extern int radeon_benchmarking; | 66 | extern int radeon_benchmarking; |
67 | extern int radeon_testing; | ||
67 | extern int radeon_connector_table; | 68 | extern int radeon_connector_table; |
68 | 69 | ||
69 | /* | 70 | /* |
@@ -113,6 +114,7 @@ enum radeon_family { | |||
113 | CHIP_RV770, | 114 | CHIP_RV770, |
114 | CHIP_RV730, | 115 | CHIP_RV730, |
115 | CHIP_RV710, | 116 | CHIP_RV710, |
117 | CHIP_RS880, | ||
116 | CHIP_LAST, | 118 | CHIP_LAST, |
117 | }; | 119 | }; |
118 | 120 | ||
@@ -201,6 +203,14 @@ int radeon_fence_wait_last(struct radeon_device *rdev); | |||
201 | struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence); | 203 | struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence); |
202 | void radeon_fence_unref(struct radeon_fence **fence); | 204 | void radeon_fence_unref(struct radeon_fence **fence); |
203 | 205 | ||
206 | /* | ||
207 | * Tiling registers | ||
208 | */ | ||
209 | struct radeon_surface_reg { | ||
210 | struct radeon_object *robj; | ||
211 | }; | ||
212 | |||
213 | #define RADEON_GEM_MAX_SURFACES 8 | ||
204 | 214 | ||
205 | /* | 215 | /* |
206 | * Radeon buffer. | 216 | * Radeon buffer. |
@@ -213,6 +223,7 @@ struct radeon_object_list { | |||
213 | uint64_t gpu_offset; | 223 | uint64_t gpu_offset; |
214 | unsigned rdomain; | 224 | unsigned rdomain; |
215 | unsigned wdomain; | 225 | unsigned wdomain; |
226 | uint32_t tiling_flags; | ||
216 | }; | 227 | }; |
217 | 228 | ||
218 | int radeon_object_init(struct radeon_device *rdev); | 229 | int radeon_object_init(struct radeon_device *rdev); |
@@ -242,8 +253,15 @@ void radeon_object_list_clean(struct list_head *head); | |||
242 | int radeon_object_fbdev_mmap(struct radeon_object *robj, | 253 | int radeon_object_fbdev_mmap(struct radeon_object *robj, |
243 | struct vm_area_struct *vma); | 254 | struct vm_area_struct *vma); |
244 | unsigned long radeon_object_size(struct radeon_object *robj); | 255 | unsigned long radeon_object_size(struct radeon_object *robj); |
245 | 256 | void radeon_object_clear_surface_reg(struct radeon_object *robj); | |
246 | 257 | int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, | |
258 | bool force_drop); | ||
259 | void radeon_object_set_tiling_flags(struct radeon_object *robj, | ||
260 | uint32_t tiling_flags, uint32_t pitch); | ||
261 | void radeon_object_get_tiling_flags(struct radeon_object *robj, uint32_t *tiling_flags, uint32_t *pitch); | ||
262 | void radeon_bo_move_notify(struct ttm_buffer_object *bo, | ||
263 | struct ttm_mem_reg *mem); | ||
264 | void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); | ||
247 | /* | 265 | /* |
248 | * GEM objects. | 266 | * GEM objects. |
249 | */ | 267 | */ |
@@ -315,8 +333,11 @@ struct radeon_mc { | |||
315 | unsigned gtt_location; | 333 | unsigned gtt_location; |
316 | unsigned gtt_size; | 334 | unsigned gtt_size; |
317 | unsigned vram_location; | 335 | unsigned vram_location; |
318 | unsigned vram_size; | 336 | /* for some chips with <= 32MB we need to lie |
337 | * about vram size near mc fb location */ | ||
338 | unsigned mc_vram_size; | ||
319 | unsigned vram_width; | 339 | unsigned vram_width; |
340 | unsigned real_vram_size; | ||
320 | int vram_mtrr; | 341 | int vram_mtrr; |
321 | bool vram_is_ddr; | 342 | bool vram_is_ddr; |
322 | }; | 343 | }; |
@@ -474,6 +495,39 @@ struct radeon_wb { | |||
474 | uint64_t gpu_addr; | 495 | uint64_t gpu_addr; |
475 | }; | 496 | }; |
476 | 497 | ||
498 | /** | ||
499 | * struct radeon_pm - power management data | ||
500 | * @max_bandwidth: maximum bandwidth the gpu has (MByte/s) | ||
501 | * @igp_sideport_mclk: sideport memory clock MHz (rs690,rs740,rs780,rs880) | ||
502 | * @igp_system_mclk: system clock MHz (rs690,rs740,rs780,rs880) | ||
503 | * @igp_ht_link_clk: ht link clock MHz (rs690,rs740,rs780,rs880) | ||
504 | * @igp_ht_link_width: ht link width in bits (rs690,rs740,rs780,rs880) | ||
505 | * @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP) | ||
506 | * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP) | ||
507 | * @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP) | ||
508 | * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP) | ||
509 | * @sclk: GPU clock MHz (core bandwidth depends on this clock) | ||
510 | * @needed_bandwidth: current bandwidth needs | ||
511 | * | ||
512 | * It keeps track of various data needed to make power management decisions. | ||
513 | * Bandwidth need is used to determine the minimum clock of the GPU and memory. | ||
514 | * The equation between gpu/memory clock and available bandwidth is hw dependent | ||
515 | * (type of memory, bus size, efficiency, ...) | ||
516 | */ | ||
517 | struct radeon_pm { | ||
518 | fixed20_12 max_bandwidth; | ||
519 | fixed20_12 igp_sideport_mclk; | ||
520 | fixed20_12 igp_system_mclk; | ||
521 | fixed20_12 igp_ht_link_clk; | ||
522 | fixed20_12 igp_ht_link_width; | ||
523 | fixed20_12 k8_bandwidth; | ||
524 | fixed20_12 sideport_bandwidth; | ||
525 | fixed20_12 ht_bandwidth; | ||
526 | fixed20_12 core_bandwidth; | ||
527 | fixed20_12 sclk; | ||
528 | fixed20_12 needed_bandwidth; | ||
529 | }; | ||
530 | |||
477 | 531 | ||
478 | /* | 532 | /* |
479 | * Benchmarking | 533 | * Benchmarking |
@@ -482,6 +536,12 @@ void radeon_benchmark(struct radeon_device *rdev); | |||
482 | 536 | ||
483 | 537 | ||
484 | /* | 538 | /* |
539 | * Testing | ||
540 | */ | ||
541 | void radeon_test_moves(struct radeon_device *rdev); | ||
542 | |||
543 | |||
544 | /* | ||
485 | * Debugfs | 545 | * Debugfs |
486 | */ | 546 | */ |
487 | int radeon_debugfs_add_files(struct radeon_device *rdev, | 547 | int radeon_debugfs_add_files(struct radeon_device *rdev, |
@@ -535,6 +595,11 @@ struct radeon_asic { | |||
535 | void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); | 595 | void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); |
536 | void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); | 596 | void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); |
537 | void (*set_clock_gating)(struct radeon_device *rdev, int enable); | 597 | void (*set_clock_gating)(struct radeon_device *rdev, int enable); |
598 | int (*set_surface_reg)(struct radeon_device *rdev, int reg, | ||
599 | uint32_t tiling_flags, uint32_t pitch, | ||
600 | uint32_t offset, uint32_t obj_size); | ||
601 | int (*clear_surface_reg)(struct radeon_device *rdev, int reg); | ||
602 | void (*bandwidth_update)(struct radeon_device *rdev); | ||
538 | }; | 603 | }; |
539 | 604 | ||
540 | union radeon_asic_config { | 605 | union radeon_asic_config { |
@@ -566,6 +631,10 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
566 | int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | 631 | int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, |
567 | struct drm_file *filp); | 632 | struct drm_file *filp); |
568 | int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); | 633 | int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); |
634 | int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, | ||
635 | struct drm_file *filp); | ||
636 | int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, | ||
637 | struct drm_file *filp); | ||
569 | 638 | ||
570 | 639 | ||
571 | /* | 640 | /* |
@@ -594,8 +663,8 @@ struct radeon_device { | |||
594 | struct radeon_object *fbdev_robj; | 663 | struct radeon_object *fbdev_robj; |
595 | struct radeon_framebuffer *fbdev_rfb; | 664 | struct radeon_framebuffer *fbdev_rfb; |
596 | /* Register mmio */ | 665 | /* Register mmio */ |
597 | unsigned long rmmio_base; | 666 | resource_size_t rmmio_base; |
598 | unsigned long rmmio_size; | 667 | resource_size_t rmmio_size; |
599 | void *rmmio; | 668 | void *rmmio; |
600 | radeon_rreg_t mm_rreg; | 669 | radeon_rreg_t mm_rreg; |
601 | radeon_wreg_t mm_wreg; | 670 | radeon_wreg_t mm_wreg; |
@@ -619,11 +688,14 @@ struct radeon_device { | |||
619 | struct radeon_irq irq; | 688 | struct radeon_irq irq; |
620 | struct radeon_asic *asic; | 689 | struct radeon_asic *asic; |
621 | struct radeon_gem gem; | 690 | struct radeon_gem gem; |
691 | struct radeon_pm pm; | ||
622 | struct mutex cs_mutex; | 692 | struct mutex cs_mutex; |
623 | struct radeon_wb wb; | 693 | struct radeon_wb wb; |
624 | bool gpu_lockup; | 694 | bool gpu_lockup; |
625 | bool shutdown; | 695 | bool shutdown; |
626 | bool suspend; | 696 | bool suspend; |
697 | bool need_dma32; | ||
698 | struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; | ||
627 | }; | 699 | }; |
628 | 700 | ||
629 | int radeon_device_init(struct radeon_device *rdev, | 701 | int radeon_device_init(struct radeon_device *rdev, |
@@ -670,6 +742,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); | |||
670 | /* | 742 | /* |
671 | * ASICs helpers. | 743 | * ASICs helpers. |
672 | */ | 744 | */ |
745 | #define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \ | ||
746 | (rdev->pdev->device == 0x5969)) | ||
673 | #define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \ | 747 | #define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \ |
674 | (rdev->family == CHIP_RV200) || \ | 748 | (rdev->family == CHIP_RV200) || \ |
675 | (rdev->family == CHIP_RS100) || \ | 749 | (rdev->family == CHIP_RS100) || \ |
@@ -796,5 +870,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) | |||
796 | #define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) | 870 | #define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) |
797 | #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) | 871 | #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) |
798 | #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) | 872 | #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) |
873 | #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) | ||
874 | #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) | ||
875 | #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev)) | ||
799 | 876 | ||
800 | #endif | 877 | #endif |
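
The new set_surface_reg/clear_surface_reg/bandwidth_update hooks declared here (and wired up per chip family in radeon_asic.h below) follow the driver's existing function-pointer dispatch: callers never name a family-specific function, they go through the rdev->asic table via the wrapper macros at the bottom of this header. A minimal sketch of that shape, with hypothetical names rather than the real radeon types:

    #include <stdint.h>

    struct device_ctx;                        /* forward declaration */

    struct asic_ops {                         /* per-family function table */
        void (*bandwidth_update)(struct device_ctx *ctx);
        int  (*set_surface_reg)(struct device_ctx *ctx, int reg,
                                uint32_t tiling_flags, uint32_t pitch,
                                uint32_t offset, uint32_t obj_size);
    };

    struct device_ctx {
        const struct asic_ops *asic;          /* filled in at probe time */
    };

    /* wrapper hides the indirection, mirroring radeon_bandwidth_update() above */
    #define ctx_bandwidth_update(ctx)  ((ctx)->asic->bandwidth_update((ctx)))
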
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index e2e567395df8..9a75876e0c3b 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -71,6 +71,11 @@ int r100_copy_blit(struct radeon_device *rdev, | |||
71 | uint64_t dst_offset, | 71 | uint64_t dst_offset, |
72 | unsigned num_pages, | 72 | unsigned num_pages, |
73 | struct radeon_fence *fence); | 73 | struct radeon_fence *fence); |
74 | int r100_set_surface_reg(struct radeon_device *rdev, int reg, | ||
75 | uint32_t tiling_flags, uint32_t pitch, | ||
76 | uint32_t offset, uint32_t obj_size); | ||
77 | int r100_clear_surface_reg(struct radeon_device *rdev, int reg); | ||
78 | void r100_bandwidth_update(struct radeon_device *rdev); | ||
74 | 79 | ||
75 | static struct radeon_asic r100_asic = { | 80 | static struct radeon_asic r100_asic = { |
76 | .init = &r100_init, | 81 | .init = &r100_init, |
@@ -100,6 +105,9 @@ static struct radeon_asic r100_asic = { | |||
100 | .set_memory_clock = NULL, | 105 | .set_memory_clock = NULL, |
101 | .set_pcie_lanes = NULL, | 106 | .set_pcie_lanes = NULL, |
102 | .set_clock_gating = &radeon_legacy_set_clock_gating, | 107 | .set_clock_gating = &radeon_legacy_set_clock_gating, |
108 | .set_surface_reg = r100_set_surface_reg, | ||
109 | .clear_surface_reg = r100_clear_surface_reg, | ||
110 | .bandwidth_update = &r100_bandwidth_update, | ||
103 | }; | 111 | }; |
104 | 112 | ||
105 | 113 | ||
@@ -128,6 +136,7 @@ int r300_copy_dma(struct radeon_device *rdev, | |||
128 | uint64_t dst_offset, | 136 | uint64_t dst_offset, |
129 | unsigned num_pages, | 137 | unsigned num_pages, |
130 | struct radeon_fence *fence); | 138 | struct radeon_fence *fence); |
139 | |||
131 | static struct radeon_asic r300_asic = { | 140 | static struct radeon_asic r300_asic = { |
132 | .init = &r300_init, | 141 | .init = &r300_init, |
133 | .errata = &r300_errata, | 142 | .errata = &r300_errata, |
@@ -156,6 +165,9 @@ static struct radeon_asic r300_asic = { | |||
156 | .set_memory_clock = NULL, | 165 | .set_memory_clock = NULL, |
157 | .set_pcie_lanes = &rv370_set_pcie_lanes, | 166 | .set_pcie_lanes = &rv370_set_pcie_lanes, |
158 | .set_clock_gating = &radeon_legacy_set_clock_gating, | 167 | .set_clock_gating = &radeon_legacy_set_clock_gating, |
168 | .set_surface_reg = r100_set_surface_reg, | ||
169 | .clear_surface_reg = r100_clear_surface_reg, | ||
170 | .bandwidth_update = &r100_bandwidth_update, | ||
159 | }; | 171 | }; |
160 | 172 | ||
161 | /* | 173 | /* |
@@ -193,6 +205,9 @@ static struct radeon_asic r420_asic = { | |||
193 | .set_memory_clock = &radeon_atom_set_memory_clock, | 205 | .set_memory_clock = &radeon_atom_set_memory_clock, |
194 | .set_pcie_lanes = &rv370_set_pcie_lanes, | 206 | .set_pcie_lanes = &rv370_set_pcie_lanes, |
195 | .set_clock_gating = &radeon_atom_set_clock_gating, | 207 | .set_clock_gating = &radeon_atom_set_clock_gating, |
208 | .set_surface_reg = r100_set_surface_reg, | ||
209 | .clear_surface_reg = r100_clear_surface_reg, | ||
210 | .bandwidth_update = &r100_bandwidth_update, | ||
196 | }; | 211 | }; |
197 | 212 | ||
198 | 213 | ||
@@ -237,6 +252,9 @@ static struct radeon_asic rs400_asic = { | |||
237 | .set_memory_clock = NULL, | 252 | .set_memory_clock = NULL, |
238 | .set_pcie_lanes = NULL, | 253 | .set_pcie_lanes = NULL, |
239 | .set_clock_gating = &radeon_legacy_set_clock_gating, | 254 | .set_clock_gating = &radeon_legacy_set_clock_gating, |
255 | .set_surface_reg = r100_set_surface_reg, | ||
256 | .clear_surface_reg = r100_clear_surface_reg, | ||
257 | .bandwidth_update = &r100_bandwidth_update, | ||
240 | }; | 258 | }; |
241 | 259 | ||
242 | 260 | ||
@@ -254,6 +272,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev); | |||
254 | int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | 272 | int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
255 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 273 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
256 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 274 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
275 | void rs600_bandwidth_update(struct radeon_device *rdev); | ||
257 | static struct radeon_asic rs600_asic = { | 276 | static struct radeon_asic rs600_asic = { |
258 | .init = &r300_init, | 277 | .init = &r300_init, |
259 | .errata = &rs600_errata, | 278 | .errata = &rs600_errata, |
@@ -282,6 +301,7 @@ static struct radeon_asic rs600_asic = { | |||
282 | .set_memory_clock = &radeon_atom_set_memory_clock, | 301 | .set_memory_clock = &radeon_atom_set_memory_clock, |
283 | .set_pcie_lanes = NULL, | 302 | .set_pcie_lanes = NULL, |
284 | .set_clock_gating = &radeon_atom_set_clock_gating, | 303 | .set_clock_gating = &radeon_atom_set_clock_gating, |
304 | .bandwidth_update = &rs600_bandwidth_update, | ||
285 | }; | 305 | }; |
286 | 306 | ||
287 | 307 | ||
@@ -294,6 +314,7 @@ int rs690_mc_init(struct radeon_device *rdev); | |||
294 | void rs690_mc_fini(struct radeon_device *rdev); | 314 | void rs690_mc_fini(struct radeon_device *rdev); |
295 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 315 | uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
296 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 316 | void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
317 | void rs690_bandwidth_update(struct radeon_device *rdev); | ||
297 | static struct radeon_asic rs690_asic = { | 318 | static struct radeon_asic rs690_asic = { |
298 | .init = &r300_init, | 319 | .init = &r300_init, |
299 | .errata = &rs690_errata, | 320 | .errata = &rs690_errata, |
@@ -322,6 +343,9 @@ static struct radeon_asic rs690_asic = { | |||
322 | .set_memory_clock = &radeon_atom_set_memory_clock, | 343 | .set_memory_clock = &radeon_atom_set_memory_clock, |
323 | .set_pcie_lanes = NULL, | 344 | .set_pcie_lanes = NULL, |
324 | .set_clock_gating = &radeon_atom_set_clock_gating, | 345 | .set_clock_gating = &radeon_atom_set_clock_gating, |
346 | .set_surface_reg = r100_set_surface_reg, | ||
347 | .clear_surface_reg = r100_clear_surface_reg, | ||
348 | .bandwidth_update = &rs690_bandwidth_update, | ||
325 | }; | 349 | }; |
326 | 350 | ||
327 | 351 | ||
@@ -339,6 +363,7 @@ void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | |||
339 | void rv515_ring_start(struct radeon_device *rdev); | 363 | void rv515_ring_start(struct radeon_device *rdev); |
340 | uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg); | 364 | uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg); |
341 | void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 365 | void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
366 | void rv515_bandwidth_update(struct radeon_device *rdev); | ||
342 | static struct radeon_asic rv515_asic = { | 367 | static struct radeon_asic rv515_asic = { |
343 | .init = &rv515_init, | 368 | .init = &rv515_init, |
344 | .errata = &rv515_errata, | 369 | .errata = &rv515_errata, |
@@ -367,6 +392,9 @@ static struct radeon_asic rv515_asic = { | |||
367 | .set_memory_clock = &radeon_atom_set_memory_clock, | 392 | .set_memory_clock = &radeon_atom_set_memory_clock, |
368 | .set_pcie_lanes = &rv370_set_pcie_lanes, | 393 | .set_pcie_lanes = &rv370_set_pcie_lanes, |
369 | .set_clock_gating = &radeon_atom_set_clock_gating, | 394 | .set_clock_gating = &radeon_atom_set_clock_gating, |
395 | .set_surface_reg = r100_set_surface_reg, | ||
396 | .clear_surface_reg = r100_clear_surface_reg, | ||
397 | .bandwidth_update = &rv515_bandwidth_update, | ||
370 | }; | 398 | }; |
371 | 399 | ||
372 | 400 | ||
@@ -377,6 +405,7 @@ void r520_errata(struct radeon_device *rdev); | |||
377 | void r520_vram_info(struct radeon_device *rdev); | 405 | void r520_vram_info(struct radeon_device *rdev); |
378 | int r520_mc_init(struct radeon_device *rdev); | 406 | int r520_mc_init(struct radeon_device *rdev); |
379 | void r520_mc_fini(struct radeon_device *rdev); | 407 | void r520_mc_fini(struct radeon_device *rdev); |
408 | void r520_bandwidth_update(struct radeon_device *rdev); | ||
380 | static struct radeon_asic r520_asic = { | 409 | static struct radeon_asic r520_asic = { |
381 | .init = &rv515_init, | 410 | .init = &rv515_init, |
382 | .errata = &r520_errata, | 411 | .errata = &r520_errata, |
@@ -405,6 +434,9 @@ static struct radeon_asic r520_asic = { | |||
405 | .set_memory_clock = &radeon_atom_set_memory_clock, | 434 | .set_memory_clock = &radeon_atom_set_memory_clock, |
406 | .set_pcie_lanes = &rv370_set_pcie_lanes, | 435 | .set_pcie_lanes = &rv370_set_pcie_lanes, |
407 | .set_clock_gating = &radeon_atom_set_clock_gating, | 436 | .set_clock_gating = &radeon_atom_set_clock_gating, |
437 | .set_surface_reg = r100_set_surface_reg, | ||
438 | .clear_surface_reg = r100_clear_surface_reg, | ||
439 | .bandwidth_update = &r520_bandwidth_update, | ||
408 | }; | 440 | }; |
409 | 441 | ||
410 | /* | 442 | /* |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 1f5a1a490984..fcfe5c02d744 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -103,7 +103,8 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device | |||
103 | static bool radeon_atom_apply_quirks(struct drm_device *dev, | 103 | static bool radeon_atom_apply_quirks(struct drm_device *dev, |
104 | uint32_t supported_device, | 104 | uint32_t supported_device, |
105 | int *connector_type, | 105 | int *connector_type, |
106 | struct radeon_i2c_bus_rec *i2c_bus) | 106 | struct radeon_i2c_bus_rec *i2c_bus, |
107 | uint8_t *line_mux) | ||
107 | { | 108 | { |
108 | 109 | ||
109 | /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ | 110 | /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ |
@@ -127,8 +128,10 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
127 | if ((dev->pdev->device == 0x5653) && | 128 | if ((dev->pdev->device == 0x5653) && |
128 | (dev->pdev->subsystem_vendor == 0x1462) && | 129 | (dev->pdev->subsystem_vendor == 0x1462) && |
129 | (dev->pdev->subsystem_device == 0x0291)) { | 130 | (dev->pdev->subsystem_device == 0x0291)) { |
130 | if (*connector_type == DRM_MODE_CONNECTOR_LVDS) | 131 | if (*connector_type == DRM_MODE_CONNECTOR_LVDS) { |
131 | i2c_bus->valid = false; | 132 | i2c_bus->valid = false; |
133 | *line_mux = 53; | ||
134 | } | ||
132 | } | 135 | } |
133 | 136 | ||
134 | /* Funky macbooks */ | 137 | /* Funky macbooks */ |
@@ -526,7 +529,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
526 | 529 | ||
527 | if (!radeon_atom_apply_quirks | 530 | if (!radeon_atom_apply_quirks |
528 | (dev, (1 << i), &bios_connectors[i].connector_type, | 531 | (dev, (1 << i), &bios_connectors[i].connector_type, |
529 | &bios_connectors[i].ddc_bus)) | 532 | &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux)) |
530 | continue; | 533 | continue; |
531 | 534 | ||
532 | bios_connectors[i].valid = true; | 535 | bios_connectors[i].valid = true; |
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index c44403a2ca76..2e938f7496fb 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c | |||
@@ -63,7 +63,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
63 | if (r) { | 63 | if (r) { |
64 | goto out_cleanup; | 64 | goto out_cleanup; |
65 | } | 65 | } |
66 | r = radeon_copy_dma(rdev, saddr, daddr, size >> 14, fence); | 66 | r = radeon_copy_dma(rdev, saddr, daddr, size / 4096, fence); |
67 | if (r) { | 67 | if (r) { |
68 | goto out_cleanup; | 68 | goto out_cleanup; |
69 | } | 69 | } |
@@ -88,7 +88,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
88 | if (r) { | 88 | if (r) { |
89 | goto out_cleanup; | 89 | goto out_cleanup; |
90 | } | 90 | } |
91 | r = radeon_copy_blit(rdev, saddr, daddr, size >> 14, fence); | 91 | r = radeon_copy_blit(rdev, saddr, daddr, size / 4096, fence); |
92 | if (r) { | 92 | if (r) { |
93 | goto out_cleanup; | 93 | goto out_cleanup; |
94 | } | 94 | } |
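
The benchmark change above is a units fix: the copy helpers take a page count (num_pages in the r100_copy_blit prototype shown earlier), and a GPU page here is 4096 bytes, so a buffer of size bytes is size / 4096 pages. The old size >> 14 divided by 16384 instead; for a 1 MiB test buffer that is 64 rather than 256 pages, so the benchmark was silently copying (and timing) only a quarter of the data. The 4 KiB page size is inferred from the new divisor, not stated in this hunk.
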
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index b843f9bdfb14..a169067efc4e 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -127,17 +127,23 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | |||
127 | sizeof(struct drm_radeon_cs_chunk))) { | 127 | sizeof(struct drm_radeon_cs_chunk))) { |
128 | return -EFAULT; | 128 | return -EFAULT; |
129 | } | 129 | } |
130 | p->chunks[i].length_dw = user_chunk.length_dw; | ||
131 | p->chunks[i].kdata = NULL; | ||
130 | p->chunks[i].chunk_id = user_chunk.chunk_id; | 132 | p->chunks[i].chunk_id = user_chunk.chunk_id; |
133 | |||
131 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) { | 134 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) { |
132 | p->chunk_relocs_idx = i; | 135 | p->chunk_relocs_idx = i; |
133 | } | 136 | } |
134 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { | 137 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { |
135 | p->chunk_ib_idx = i; | 138 | p->chunk_ib_idx = i; |
139 | /* zero length IB isn't useful */ | ||
140 | if (p->chunks[i].length_dw == 0) | ||
141 | return -EINVAL; | ||
136 | } | 142 | } |
143 | |||
137 | p->chunks[i].length_dw = user_chunk.length_dw; | 144 | p->chunks[i].length_dw = user_chunk.length_dw; |
138 | cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; | 145 | cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; |
139 | 146 | ||
140 | p->chunks[i].kdata = NULL; | ||
141 | size = p->chunks[i].length_dw * sizeof(uint32_t); | 147 | size = p->chunks[i].length_dw * sizeof(uint32_t); |
142 | p->chunks[i].kdata = kzalloc(size, GFP_KERNEL); | 148 | p->chunks[i].kdata = kzalloc(size, GFP_KERNEL); |
143 | if (p->chunks[i].kdata == NULL) { | 149 | if (p->chunks[i].kdata == NULL) { |
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 5232441f119b..b13c79e38bc0 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c | |||
@@ -111,9 +111,11 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, | |||
111 | 111 | ||
112 | if (ASIC_IS_AVIVO(rdev)) | 112 | if (ASIC_IS_AVIVO(rdev)) |
113 | WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); | 113 | WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); |
114 | else | 114 | else { |
115 | radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr; | ||
115 | /* offset is from DISP(2)_BASE_ADDRESS */ | 116 | /* offset is from DISP(2)_BASE_ADDRESS */ |
116 | WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, gpu_addr); | 117 | WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset); |
118 | } | ||
117 | } | 119 | } |
118 | 120 | ||
119 | int radeon_crtc_cursor_set(struct drm_crtc *crtc, | 121 | int radeon_crtc_cursor_set(struct drm_crtc *crtc, |
@@ -245,6 +247,9 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, | |||
245 | (RADEON_CUR_LOCK | 247 | (RADEON_CUR_LOCK |
246 | | ((xorigin ? 0 : x) << 16) | 248 | | ((xorigin ? 0 : x) << 16) |
247 | | (yorigin ? 0 : y))); | 249 | | (yorigin ? 0 : y))); |
250 | /* offset is from DISP(2)_BASE_ADDRESS */ | ||
251 | WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + | ||
252 | (yorigin * 256))); | ||
248 | } | 253 | } |
249 | radeon_lock_cursor(crtc, false); | 254 | radeon_lock_cursor(crtc, false); |
250 | 255 | ||
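
The cursor changes above hinge on the pre-AVIVO CUR_OFFSET register taking an offset relative to DISP(2)_BASE_ADDRESS rather than an absolute GPU address, which is why the set path now stores gpu_addr - legacy_display_base_addr instead of writing gpu_addr directly. The yorigin * 256 term added to the move path looks like per-line clipping: if the hardware cursor is 64 pixels wide at 4 bytes per pixel, one cursor line is 64 * 4 = 256 bytes, so skipping yorigin clipped lines advances the programmed offset by yorigin * 256. The 64-pixel ARGB layout is an inference from the 256-byte stride, not something this hunk states.
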
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index f97563db4e59..a162ade74b7f 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -48,6 +48,8 @@ static void radeon_surface_init(struct radeon_device *rdev) | |||
48 | i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO), | 48 | i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO), |
49 | 0); | 49 | 0); |
50 | } | 50 | } |
51 | /* enable surfaces */ | ||
52 | WREG32(RADEON_SURFACE_CNTL, 0); | ||
51 | } | 53 | } |
52 | } | 54 | } |
53 | 55 | ||
@@ -119,7 +121,7 @@ int radeon_mc_setup(struct radeon_device *rdev) | |||
119 | if (rdev->mc.vram_location != 0xFFFFFFFFUL) { | 121 | if (rdev->mc.vram_location != 0xFFFFFFFFUL) { |
120 | /* vram location was already set up; try to put gtt after | 122 | /* vram location was already set up; try to put gtt after |
121 | * it if it fits */ | 123 | * it if it fits */ |
122 | tmp = rdev->mc.vram_location + rdev->mc.vram_size; | 124 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size; |
123 | tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); | 125 | tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); |
124 | if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) { | 126 | if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) { |
125 | rdev->mc.gtt_location = tmp; | 127 | rdev->mc.gtt_location = tmp; |
@@ -134,13 +136,13 @@ int radeon_mc_setup(struct radeon_device *rdev) | |||
134 | } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) { | 136 | } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) { |
135 | /* gtt location was already set up; try to put vram before | 137 | /* gtt location was already set up; try to put vram before |
136 | * it if it fits */ | 138 | * it if it fits */ |
137 | if (rdev->mc.vram_size < rdev->mc.gtt_location) { | 139 | if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) { |
138 | rdev->mc.vram_location = 0; | 140 | rdev->mc.vram_location = 0; |
139 | } else { | 141 | } else { |
140 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size; | 142 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size; |
141 | tmp += (rdev->mc.vram_size - 1); | 143 | tmp += (rdev->mc.mc_vram_size - 1); |
142 | tmp &= ~(rdev->mc.vram_size - 1); | 144 | tmp &= ~(rdev->mc.mc_vram_size - 1); |
143 | if ((0xFFFFFFFFUL - tmp) >= rdev->mc.vram_size) { | 145 | if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) { |
144 | rdev->mc.vram_location = tmp; | 146 | rdev->mc.vram_location = tmp; |
145 | } else { | 147 | } else { |
146 | printk(KERN_ERR "[drm] vram too big to fit " | 148 | printk(KERN_ERR "[drm] vram too big to fit " |
@@ -150,12 +152,14 @@ int radeon_mc_setup(struct radeon_device *rdev) | |||
150 | } | 152 | } |
151 | } else { | 153 | } else { |
152 | rdev->mc.vram_location = 0; | 154 | rdev->mc.vram_location = 0; |
153 | rdev->mc.gtt_location = rdev->mc.vram_size; | 155 | rdev->mc.gtt_location = rdev->mc.mc_vram_size; |
154 | } | 156 | } |
155 | DRM_INFO("radeon: VRAM %uM\n", rdev->mc.vram_size >> 20); | 157 | DRM_INFO("radeon: VRAM %uM\n", rdev->mc.real_vram_size >> 20); |
156 | DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n", | 158 | DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n", |
157 | rdev->mc.vram_location, | 159 | rdev->mc.vram_location, |
158 | rdev->mc.vram_location + rdev->mc.vram_size - 1); | 160 | rdev->mc.vram_location + rdev->mc.mc_vram_size - 1); |
161 | if (rdev->mc.real_vram_size != rdev->mc.mc_vram_size) | ||
162 | DRM_INFO("radeon: VRAM less than aperture workaround enabled\n"); | ||
159 | DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20); | 163 | DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20); |
160 | DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n", | 164 | DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n", |
161 | rdev->mc.gtt_location, | 165 | rdev->mc.gtt_location, |
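The MC setup above places the GTT right after VRAM by rounding the end address up to a multiple of the (power-of-two) GTT size with the usual mask trick. A minimal standalone illustration, assuming example sizes:

#include <stdint.h>
#include <stdio.h>

/* Round addr up to the next multiple of size (size must be a power of two),
 * the same trick the MC setup code uses to place the GTT after VRAM. */
static uint32_t align_up(uint32_t addr, uint32_t size)
{
    return (addr + size - 1) & ~(size - 1);
}

int main(void)
{
    uint32_t vram_end = 0x0C000000;          /* example: VRAM ends at 192 MB */
    uint32_t gtt_size = 512u * 1024 * 1024;  /* example: 512 MB GTT          */
    printf("GTT location: 0x%08x\n", align_up(vram_end, gtt_size));
    return 0;
}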
@@ -450,6 +454,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
450 | uint32_t flags) | 454 | uint32_t flags) |
451 | { | 455 | { |
452 | int r, ret; | 456 | int r, ret; |
457 | int dma_bits; | ||
453 | 458 | ||
454 | DRM_INFO("radeon: Initializing kernel modesetting.\n"); | 459 | DRM_INFO("radeon: Initializing kernel modesetting.\n"); |
455 | rdev->shutdown = false; | 460 | rdev->shutdown = false; |
@@ -492,8 +497,20 @@ int radeon_device_init(struct radeon_device *rdev, | |||
492 | return r; | 497 | return r; |
493 | } | 498 | } |
494 | 499 | ||
495 | /* Report DMA addressing limitation */ | 500 | /* set DMA mask + need_dma32 flags. |
496 | r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(32)); | 501 | * PCIE - can handle 40-bits. |
502 | * IGP - can handle 40-bits (in theory) | ||
503 | * AGP - generally dma32 is safest | ||
504 | * PCI - only dma32 | ||
505 | */ | ||
506 | rdev->need_dma32 = false; | ||
507 | if (rdev->flags & RADEON_IS_AGP) | ||
508 | rdev->need_dma32 = true; | ||
509 | if (rdev->flags & RADEON_IS_PCI) | ||
510 | rdev->need_dma32 = true; | ||
511 | |||
512 | dma_bits = rdev->need_dma32 ? 32 : 40; | ||
513 | r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); | ||
497 | if (r) { | 514 | if (r) { |
498 | printk(KERN_WARNING "radeon: No suitable DMA available.\n"); | 515 | printk(KERN_WARNING "radeon: No suitable DMA available.\n"); |
499 | } | 516 | } |
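The new comment spells out the DMA-addressing policy: PCIE and IGP parts can (at least in theory) use 40-bit addressing, while AGP and plain PCI stay at 32 bits. A hedged standalone sketch of that selection logic (the enum and values here are illustrative, not the driver's types):

#include <stdbool.h>
#include <stdio.h>

enum bus_type { BUS_PCIE, BUS_IGP, BUS_AGP, BUS_PCI };

/* Map a bus type to a DMA mask width, mirroring the policy in the comment
 * above; the real driver tests rdev->flags instead of an enum. */
static int dma_bits_for(enum bus_type bus, bool *need_dma32)
{
    *need_dma32 = (bus == BUS_AGP || bus == BUS_PCI);
    return *need_dma32 ? 32 : 40;
}

int main(void)
{
    bool need_dma32;
    int bits = dma_bits_for(BUS_AGP, &need_dma32);
    printf("need_dma32=%d, mask=%d bits\n", need_dma32, bits);
    return 0;
}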
@@ -546,27 +563,22 @@ int radeon_device_init(struct radeon_device *rdev, | |||
546 | radeon_combios_asic_init(rdev->ddev); | 563 | radeon_combios_asic_init(rdev->ddev); |
547 | } | 564 | } |
548 | } | 565 | } |
566 | /* Initialize clocks */ | ||
567 | r = radeon_clocks_init(rdev); | ||
568 | if (r) { | ||
569 | return r; | ||
570 | } | ||
549 | /* Get vram information */ | 571 | /* Get vram information */ |
550 | radeon_vram_info(rdev); | 572 | radeon_vram_info(rdev); |
551 | /* Device is severly broken if aper size > vram size. | 573 | |
552 | * for RN50/M6/M7 - Novell bug 204882 ? | ||
553 | */ | ||
554 | if (rdev->mc.vram_size < rdev->mc.aper_size) { | ||
555 | rdev->mc.aper_size = rdev->mc.vram_size; | ||
556 | } | ||
557 | /* Add an MTRR for the VRAM */ | 574 | /* Add an MTRR for the VRAM */ |
558 | rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, | 575 | rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, |
559 | MTRR_TYPE_WRCOMB, 1); | 576 | MTRR_TYPE_WRCOMB, 1); |
560 | DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n", | 577 | DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n", |
561 | rdev->mc.vram_size >> 20, | 578 | rdev->mc.real_vram_size >> 20, |
562 | (unsigned)rdev->mc.aper_size >> 20); | 579 | (unsigned)rdev->mc.aper_size >> 20); |
563 | DRM_INFO("RAM width %dbits %cDR\n", | 580 | DRM_INFO("RAM width %dbits %cDR\n", |
564 | rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S'); | 581 | rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S'); |
565 | /* Initialize clocks */ | ||
566 | r = radeon_clocks_init(rdev); | ||
567 | if (r) { | ||
568 | return r; | ||
569 | } | ||
570 | /* Initialize memory controller (also test AGP) */ | 582 | /* Initialize memory controller (also test AGP) */ |
571 | r = radeon_mc_init(rdev); | 583 | r = radeon_mc_init(rdev); |
572 | if (r) { | 584 | if (r) { |
@@ -626,6 +638,9 @@ int radeon_device_init(struct radeon_device *rdev, | |||
626 | if (!ret) { | 638 | if (!ret) { |
627 | DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); | 639 | DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); |
628 | } | 640 | } |
641 | if (radeon_testing) { | ||
642 | radeon_test_moves(rdev); | ||
643 | } | ||
629 | if (radeon_benchmarking) { | 644 | if (radeon_benchmarking) { |
630 | radeon_benchmark(rdev); | 645 | radeon_benchmark(rdev); |
631 | } | 646 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 3efcf1a526be..a8fa1bb84cf7 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -187,6 +187,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index) | |||
187 | 187 | ||
188 | drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); | 188 | drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); |
189 | radeon_crtc->crtc_id = index; | 189 | radeon_crtc->crtc_id = index; |
190 | rdev->mode_info.crtcs[index] = radeon_crtc; | ||
190 | 191 | ||
191 | radeon_crtc->mode_set.crtc = &radeon_crtc->base; | 192 | radeon_crtc->mode_set.crtc = &radeon_crtc->base; |
192 | radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1); | 193 | radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1); |
@@ -491,7 +492,11 @@ void radeon_compute_pll(struct radeon_pll *pll, | |||
491 | tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; | 492 | tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; |
492 | current_freq = radeon_div(tmp, ref_div * post_div); | 493 | current_freq = radeon_div(tmp, ref_div * post_div); |
493 | 494 | ||
494 | error = abs(current_freq - freq); | 495 | if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { |
496 | error = freq - current_freq; | ||
497 | error = error < 0 ? 0xffffffff : error; | ||
498 | } else | ||
499 | error = abs(current_freq - freq); | ||
495 | vco_diff = abs(vco - best_vco); | 500 | vco_diff = abs(vco - best_vco); |
496 | 501 | ||
497 | if ((best_vco == 0 && error < best_error) || | 502 | if ((best_vco == 0 && error < best_error) || |
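With RADEON_PLL_PREFER_CLOSEST_LOWER set, any candidate frequency above the target is effectively rejected by mapping its error to 0xffffffff, so the search settles on the closest frequency at or below the request. A standalone illustration of that error metric, using made-up frequencies:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Error metric when the caller prefers the closest frequency that does not
 * exceed the target: overshooting is treated as "infinitely" bad. */
static uint32_t pll_error(int32_t target, int32_t current, int prefer_lower)
{
    if (prefer_lower) {
        int32_t e = target - current;
        return e < 0 ? 0xffffffffu : (uint32_t)e;
    }
    return (uint32_t)abs(current - target);
}

int main(void)
{
    /* 100 kHz over the target is worse than 300 kHz under it */
    printf("%u vs %u\n",
           pll_error(270000, 270100, 1),   /* -> 0xffffffff */
           pll_error(270000, 269700, 1));  /* -> 300        */
    return 0;
}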
@@ -657,36 +662,51 @@ void radeon_modeset_fini(struct radeon_device *rdev) | |||
657 | } | 662 | } |
658 | } | 663 | } |
659 | 664 | ||
660 | void radeon_init_disp_bandwidth(struct drm_device *dev) | 665 | bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, |
666 | struct drm_display_mode *mode, | ||
667 | struct drm_display_mode *adjusted_mode) | ||
661 | { | 668 | { |
662 | struct radeon_device *rdev = dev->dev_private; | 669 | struct drm_device *dev = crtc->dev; |
663 | struct drm_display_mode *modes[2]; | 670 | struct drm_encoder *encoder; |
664 | int pixel_bytes[2]; | 671 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
665 | struct drm_crtc *crtc; | 672 | struct radeon_encoder *radeon_encoder; |
666 | 673 | bool first = true; | |
667 | pixel_bytes[0] = pixel_bytes[1] = 0; | ||
668 | modes[0] = modes[1] = NULL; | ||
669 | |||
670 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
671 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
672 | 674 | ||
673 | if (crtc->enabled && crtc->fb) { | 675 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
674 | modes[radeon_crtc->crtc_id] = &crtc->mode; | 676 | radeon_encoder = to_radeon_encoder(encoder); |
675 | pixel_bytes[radeon_crtc->crtc_id] = crtc->fb->bits_per_pixel / 8; | 677 | if (encoder->crtc != crtc) |
678 | continue; | ||
679 | if (first) { | ||
680 | radeon_crtc->rmx_type = radeon_encoder->rmx_type; | ||
681 | radeon_crtc->devices = radeon_encoder->devices; | ||
682 | memcpy(&radeon_crtc->native_mode, | ||
683 | &radeon_encoder->native_mode, | ||
684 | sizeof(struct radeon_native_mode)); | ||
685 | first = false; | ||
686 | } else { | ||
687 | if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) { | ||
688 | /* WARNING: Right now this can't happen but | ||
689 | * in the future we need to check that the scaling | ||
690 | * is consistent across the different encoders | ||
691 | * (i.e. all encoders can work with the same | ||
692 | * scaling). | ||
693 | */ | ||
694 | DRM_ERROR("Scaling not consistent accross encoder.\n"); | ||
695 | return false; | ||
696 | } | ||
676 | } | 697 | } |
677 | } | 698 | } |
678 | 699 | if (radeon_crtc->rmx_type != RMX_OFF) { | |
679 | if (ASIC_IS_AVIVO(rdev)) { | 700 | fixed20_12 a, b; |
680 | radeon_init_disp_bw_avivo(dev, | 701 | a.full = rfixed_const(crtc->mode.vdisplay); |
681 | modes[0], | 702 | b.full = rfixed_const(radeon_crtc->native_mode.panel_xres); |
682 | pixel_bytes[0], | 703 | radeon_crtc->vsc.full = rfixed_div(a, b); |
683 | modes[1], | 704 | a.full = rfixed_const(crtc->mode.hdisplay); |
684 | pixel_bytes[1]); | 705 | b.full = rfixed_const(radeon_crtc->native_mode.panel_yres); |
706 | radeon_crtc->hsc.full = rfixed_div(a, b); | ||
685 | } else { | 707 | } else { |
686 | radeon_init_disp_bw_legacy(dev, | 708 | radeon_crtc->vsc.full = rfixed_const(1); |
687 | modes[0], | 709 | radeon_crtc->hsc.full = rfixed_const(1); |
688 | pixel_bytes[0], | ||
689 | modes[1], | ||
690 | pixel_bytes[1]); | ||
691 | } | 710 | } |
711 | return true; | ||
692 | } | 712 | } |
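radeon_crtc_scaling_mode_fixup stores the scaling factors as 20.12 fixed-point ratios between the requested mode and the panel's native mode. A rough standalone sketch of 20.12 construction and division (illustrative only; the driver's rfixed_* helpers may round differently):

#include <stdint.h>
#include <stdio.h>

/* 20.12 fixed point, loosely modelled on the driver's fixed20_12 type. */
typedef struct { uint32_t full; } fixed20_12;

static fixed20_12 fx_const(uint32_t v)
{
    return (fixed20_12){ v << 12 };
}

static fixed20_12 fx_div(fixed20_12 a, fixed20_12 b)
{
    uint64_t tmp = ((uint64_t)a.full << 12) / b.full;
    return (fixed20_12){ (uint32_t)tmp };
}

int main(void)
{
    /* e.g. a 768-line mode scaled onto a 1024-line native panel */
    fixed20_12 scale = fx_div(fx_const(768), fx_const(1024));
    printf("scale = %u/4096 (~%.3f)\n", scale.full, scale.full / 4096.0);
    return 0;
}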
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 84ba69f48784..3cfcee17dc56 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -89,6 +89,7 @@ int radeon_agpmode = 0; | |||
89 | int radeon_vram_limit = 0; | 89 | int radeon_vram_limit = 0; |
90 | int radeon_gart_size = 512; /* default gart size */ | 90 | int radeon_gart_size = 512; /* default gart size */ |
91 | int radeon_benchmarking = 0; | 91 | int radeon_benchmarking = 0; |
92 | int radeon_testing = 0; | ||
92 | int radeon_connector_table = 0; | 93 | int radeon_connector_table = 0; |
93 | #endif | 94 | #endif |
94 | 95 | ||
@@ -117,6 +118,9 @@ module_param_named(gartsize, radeon_gart_size, int, 0600); | |||
117 | MODULE_PARM_DESC(benchmark, "Run benchmark"); | 118 | MODULE_PARM_DESC(benchmark, "Run benchmark"); |
118 | module_param_named(benchmark, radeon_benchmarking, int, 0444); | 119 | module_param_named(benchmark, radeon_benchmarking, int, 0444); |
119 | 120 | ||
121 | MODULE_PARM_DESC(test, "Run tests"); | ||
122 | module_param_named(test, radeon_testing, int, 0444); | ||
123 | |||
120 | MODULE_PARM_DESC(connector_table, "Force connector table"); | 124 | MODULE_PARM_DESC(connector_table, "Force connector table"); |
121 | module_param_named(connector_table, radeon_connector_table, int, 0444); | 125 | module_param_named(connector_table, radeon_connector_table, int, 0444); |
122 | #endif | 126 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index c8ef0d14ffab..0a92706eac19 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -154,7 +154,6 @@ void radeon_rmx_mode_fixup(struct drm_encoder *encoder, | |||
154 | 154 | ||
155 | if (mode->hdisplay < native_mode->panel_xres || | 155 | if (mode->hdisplay < native_mode->panel_xres || |
156 | mode->vdisplay < native_mode->panel_yres) { | 156 | mode->vdisplay < native_mode->panel_yres) { |
157 | radeon_encoder->flags |= RADEON_USE_RMX; | ||
158 | if (ASIC_IS_AVIVO(rdev)) { | 157 | if (ASIC_IS_AVIVO(rdev)) { |
159 | adjusted_mode->hdisplay = native_mode->panel_xres; | 158 | adjusted_mode->hdisplay = native_mode->panel_xres; |
160 | adjusted_mode->vdisplay = native_mode->panel_yres; | 159 | adjusted_mode->vdisplay = native_mode->panel_yres; |
@@ -197,15 +196,13 @@ void radeon_rmx_mode_fixup(struct drm_encoder *encoder, | |||
197 | } | 196 | } |
198 | } | 197 | } |
199 | 198 | ||
199 | |||
200 | static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, | 200 | static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, |
201 | struct drm_display_mode *mode, | 201 | struct drm_display_mode *mode, |
202 | struct drm_display_mode *adjusted_mode) | 202 | struct drm_display_mode *adjusted_mode) |
203 | { | 203 | { |
204 | |||
205 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 204 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
206 | 205 | ||
207 | radeon_encoder->flags &= ~RADEON_USE_RMX; | ||
208 | |||
209 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 206 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
210 | 207 | ||
211 | if (radeon_encoder->rmx_type != RMX_OFF) | 208 | if (radeon_encoder->rmx_type != RMX_OFF) |
@@ -808,234 +805,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) | |||
808 | 805 | ||
809 | } | 806 | } |
810 | 807 | ||
811 | static void atom_rv515_force_tv_scaler(struct radeon_device *rdev) | ||
812 | { | ||
813 | |||
814 | WREG32(0x659C, 0x0); | ||
815 | WREG32(0x6594, 0x705); | ||
816 | WREG32(0x65A4, 0x10001); | ||
817 | WREG32(0x65D8, 0x0); | ||
818 | WREG32(0x65B0, 0x0); | ||
819 | WREG32(0x65C0, 0x0); | ||
820 | WREG32(0x65D4, 0x0); | ||
821 | WREG32(0x6578, 0x0); | ||
822 | WREG32(0x657C, 0x841880A8); | ||
823 | WREG32(0x6578, 0x1); | ||
824 | WREG32(0x657C, 0x84208680); | ||
825 | WREG32(0x6578, 0x2); | ||
826 | WREG32(0x657C, 0xBFF880B0); | ||
827 | WREG32(0x6578, 0x100); | ||
828 | WREG32(0x657C, 0x83D88088); | ||
829 | WREG32(0x6578, 0x101); | ||
830 | WREG32(0x657C, 0x84608680); | ||
831 | WREG32(0x6578, 0x102); | ||
832 | WREG32(0x657C, 0xBFF080D0); | ||
833 | WREG32(0x6578, 0x200); | ||
834 | WREG32(0x657C, 0x83988068); | ||
835 | WREG32(0x6578, 0x201); | ||
836 | WREG32(0x657C, 0x84A08680); | ||
837 | WREG32(0x6578, 0x202); | ||
838 | WREG32(0x657C, 0xBFF080F8); | ||
839 | WREG32(0x6578, 0x300); | ||
840 | WREG32(0x657C, 0x83588058); | ||
841 | WREG32(0x6578, 0x301); | ||
842 | WREG32(0x657C, 0x84E08660); | ||
843 | WREG32(0x6578, 0x302); | ||
844 | WREG32(0x657C, 0xBFF88120); | ||
845 | WREG32(0x6578, 0x400); | ||
846 | WREG32(0x657C, 0x83188040); | ||
847 | WREG32(0x6578, 0x401); | ||
848 | WREG32(0x657C, 0x85008660); | ||
849 | WREG32(0x6578, 0x402); | ||
850 | WREG32(0x657C, 0xBFF88150); | ||
851 | WREG32(0x6578, 0x500); | ||
852 | WREG32(0x657C, 0x82D88030); | ||
853 | WREG32(0x6578, 0x501); | ||
854 | WREG32(0x657C, 0x85408640); | ||
855 | WREG32(0x6578, 0x502); | ||
856 | WREG32(0x657C, 0xBFF88180); | ||
857 | WREG32(0x6578, 0x600); | ||
858 | WREG32(0x657C, 0x82A08018); | ||
859 | WREG32(0x6578, 0x601); | ||
860 | WREG32(0x657C, 0x85808620); | ||
861 | WREG32(0x6578, 0x602); | ||
862 | WREG32(0x657C, 0xBFF081B8); | ||
863 | WREG32(0x6578, 0x700); | ||
864 | WREG32(0x657C, 0x82608010); | ||
865 | WREG32(0x6578, 0x701); | ||
866 | WREG32(0x657C, 0x85A08600); | ||
867 | WREG32(0x6578, 0x702); | ||
868 | WREG32(0x657C, 0x800081F0); | ||
869 | WREG32(0x6578, 0x800); | ||
870 | WREG32(0x657C, 0x8228BFF8); | ||
871 | WREG32(0x6578, 0x801); | ||
872 | WREG32(0x657C, 0x85E085E0); | ||
873 | WREG32(0x6578, 0x802); | ||
874 | WREG32(0x657C, 0xBFF88228); | ||
875 | WREG32(0x6578, 0x10000); | ||
876 | WREG32(0x657C, 0x82A8BF00); | ||
877 | WREG32(0x6578, 0x10001); | ||
878 | WREG32(0x657C, 0x82A08CC0); | ||
879 | WREG32(0x6578, 0x10002); | ||
880 | WREG32(0x657C, 0x8008BEF8); | ||
881 | WREG32(0x6578, 0x10100); | ||
882 | WREG32(0x657C, 0x81F0BF28); | ||
883 | WREG32(0x6578, 0x10101); | ||
884 | WREG32(0x657C, 0x83608CA0); | ||
885 | WREG32(0x6578, 0x10102); | ||
886 | WREG32(0x657C, 0x8018BED0); | ||
887 | WREG32(0x6578, 0x10200); | ||
888 | WREG32(0x657C, 0x8148BF38); | ||
889 | WREG32(0x6578, 0x10201); | ||
890 | WREG32(0x657C, 0x84408C80); | ||
891 | WREG32(0x6578, 0x10202); | ||
892 | WREG32(0x657C, 0x8008BEB8); | ||
893 | WREG32(0x6578, 0x10300); | ||
894 | WREG32(0x657C, 0x80B0BF78); | ||
895 | WREG32(0x6578, 0x10301); | ||
896 | WREG32(0x657C, 0x85008C20); | ||
897 | WREG32(0x6578, 0x10302); | ||
898 | WREG32(0x657C, 0x8020BEA0); | ||
899 | WREG32(0x6578, 0x10400); | ||
900 | WREG32(0x657C, 0x8028BF90); | ||
901 | WREG32(0x6578, 0x10401); | ||
902 | WREG32(0x657C, 0x85E08BC0); | ||
903 | WREG32(0x6578, 0x10402); | ||
904 | WREG32(0x657C, 0x8018BE90); | ||
905 | WREG32(0x6578, 0x10500); | ||
906 | WREG32(0x657C, 0xBFB8BFB0); | ||
907 | WREG32(0x6578, 0x10501); | ||
908 | WREG32(0x657C, 0x86C08B40); | ||
909 | WREG32(0x6578, 0x10502); | ||
910 | WREG32(0x657C, 0x8010BE90); | ||
911 | WREG32(0x6578, 0x10600); | ||
912 | WREG32(0x657C, 0xBF58BFC8); | ||
913 | WREG32(0x6578, 0x10601); | ||
914 | WREG32(0x657C, 0x87A08AA0); | ||
915 | WREG32(0x6578, 0x10602); | ||
916 | WREG32(0x657C, 0x8010BE98); | ||
917 | WREG32(0x6578, 0x10700); | ||
918 | WREG32(0x657C, 0xBF10BFF0); | ||
919 | WREG32(0x6578, 0x10701); | ||
920 | WREG32(0x657C, 0x886089E0); | ||
921 | WREG32(0x6578, 0x10702); | ||
922 | WREG32(0x657C, 0x8018BEB0); | ||
923 | WREG32(0x6578, 0x10800); | ||
924 | WREG32(0x657C, 0xBED8BFE8); | ||
925 | WREG32(0x6578, 0x10801); | ||
926 | WREG32(0x657C, 0x89408940); | ||
927 | WREG32(0x6578, 0x10802); | ||
928 | WREG32(0x657C, 0xBFE8BED8); | ||
929 | WREG32(0x6578, 0x20000); | ||
930 | WREG32(0x657C, 0x80008000); | ||
931 | WREG32(0x6578, 0x20001); | ||
932 | WREG32(0x657C, 0x90008000); | ||
933 | WREG32(0x6578, 0x20002); | ||
934 | WREG32(0x657C, 0x80008000); | ||
935 | WREG32(0x6578, 0x20003); | ||
936 | WREG32(0x657C, 0x80008000); | ||
937 | WREG32(0x6578, 0x20100); | ||
938 | WREG32(0x657C, 0x80108000); | ||
939 | WREG32(0x6578, 0x20101); | ||
940 | WREG32(0x657C, 0x8FE0BF70); | ||
941 | WREG32(0x6578, 0x20102); | ||
942 | WREG32(0x657C, 0xBFE880C0); | ||
943 | WREG32(0x6578, 0x20103); | ||
944 | WREG32(0x657C, 0x80008000); | ||
945 | WREG32(0x6578, 0x20200); | ||
946 | WREG32(0x657C, 0x8018BFF8); | ||
947 | WREG32(0x6578, 0x20201); | ||
948 | WREG32(0x657C, 0x8F80BF08); | ||
949 | WREG32(0x6578, 0x20202); | ||
950 | WREG32(0x657C, 0xBFD081A0); | ||
951 | WREG32(0x6578, 0x20203); | ||
952 | WREG32(0x657C, 0xBFF88000); | ||
953 | WREG32(0x6578, 0x20300); | ||
954 | WREG32(0x657C, 0x80188000); | ||
955 | WREG32(0x6578, 0x20301); | ||
956 | WREG32(0x657C, 0x8EE0BEC0); | ||
957 | WREG32(0x6578, 0x20302); | ||
958 | WREG32(0x657C, 0xBFB082A0); | ||
959 | WREG32(0x6578, 0x20303); | ||
960 | WREG32(0x657C, 0x80008000); | ||
961 | WREG32(0x6578, 0x20400); | ||
962 | WREG32(0x657C, 0x80188000); | ||
963 | WREG32(0x6578, 0x20401); | ||
964 | WREG32(0x657C, 0x8E00BEA0); | ||
965 | WREG32(0x6578, 0x20402); | ||
966 | WREG32(0x657C, 0xBF8883C0); | ||
967 | WREG32(0x6578, 0x20403); | ||
968 | WREG32(0x657C, 0x80008000); | ||
969 | WREG32(0x6578, 0x20500); | ||
970 | WREG32(0x657C, 0x80188000); | ||
971 | WREG32(0x6578, 0x20501); | ||
972 | WREG32(0x657C, 0x8D00BE90); | ||
973 | WREG32(0x6578, 0x20502); | ||
974 | WREG32(0x657C, 0xBF588500); | ||
975 | WREG32(0x6578, 0x20503); | ||
976 | WREG32(0x657C, 0x80008008); | ||
977 | WREG32(0x6578, 0x20600); | ||
978 | WREG32(0x657C, 0x80188000); | ||
979 | WREG32(0x6578, 0x20601); | ||
980 | WREG32(0x657C, 0x8BC0BE98); | ||
981 | WREG32(0x6578, 0x20602); | ||
982 | WREG32(0x657C, 0xBF308660); | ||
983 | WREG32(0x6578, 0x20603); | ||
984 | WREG32(0x657C, 0x80008008); | ||
985 | WREG32(0x6578, 0x20700); | ||
986 | WREG32(0x657C, 0x80108000); | ||
987 | WREG32(0x6578, 0x20701); | ||
988 | WREG32(0x657C, 0x8A80BEB0); | ||
989 | WREG32(0x6578, 0x20702); | ||
990 | WREG32(0x657C, 0xBF0087C0); | ||
991 | WREG32(0x6578, 0x20703); | ||
992 | WREG32(0x657C, 0x80008008); | ||
993 | WREG32(0x6578, 0x20800); | ||
994 | WREG32(0x657C, 0x80108000); | ||
995 | WREG32(0x6578, 0x20801); | ||
996 | WREG32(0x657C, 0x8920BED0); | ||
997 | WREG32(0x6578, 0x20802); | ||
998 | WREG32(0x657C, 0xBED08920); | ||
999 | WREG32(0x6578, 0x20803); | ||
1000 | WREG32(0x657C, 0x80008010); | ||
1001 | WREG32(0x6578, 0x30000); | ||
1002 | WREG32(0x657C, 0x90008000); | ||
1003 | WREG32(0x6578, 0x30001); | ||
1004 | WREG32(0x657C, 0x80008000); | ||
1005 | WREG32(0x6578, 0x30100); | ||
1006 | WREG32(0x657C, 0x8FE0BF90); | ||
1007 | WREG32(0x6578, 0x30101); | ||
1008 | WREG32(0x657C, 0xBFF880A0); | ||
1009 | WREG32(0x6578, 0x30200); | ||
1010 | WREG32(0x657C, 0x8F60BF40); | ||
1011 | WREG32(0x6578, 0x30201); | ||
1012 | WREG32(0x657C, 0xBFE88180); | ||
1013 | WREG32(0x6578, 0x30300); | ||
1014 | WREG32(0x657C, 0x8EC0BF00); | ||
1015 | WREG32(0x6578, 0x30301); | ||
1016 | WREG32(0x657C, 0xBFC88280); | ||
1017 | WREG32(0x6578, 0x30400); | ||
1018 | WREG32(0x657C, 0x8DE0BEE0); | ||
1019 | WREG32(0x6578, 0x30401); | ||
1020 | WREG32(0x657C, 0xBFA083A0); | ||
1021 | WREG32(0x6578, 0x30500); | ||
1022 | WREG32(0x657C, 0x8CE0BED0); | ||
1023 | WREG32(0x6578, 0x30501); | ||
1024 | WREG32(0x657C, 0xBF7884E0); | ||
1025 | WREG32(0x6578, 0x30600); | ||
1026 | WREG32(0x657C, 0x8BA0BED8); | ||
1027 | WREG32(0x6578, 0x30601); | ||
1028 | WREG32(0x657C, 0xBF508640); | ||
1029 | WREG32(0x6578, 0x30700); | ||
1030 | WREG32(0x657C, 0x8A60BEE8); | ||
1031 | WREG32(0x6578, 0x30701); | ||
1032 | WREG32(0x657C, 0xBF2087A0); | ||
1033 | WREG32(0x6578, 0x30800); | ||
1034 | WREG32(0x657C, 0x8900BF00); | ||
1035 | WREG32(0x6578, 0x30801); | ||
1036 | WREG32(0x657C, 0xBF008900); | ||
1037 | } | ||
1038 | |||
1039 | static void | 808 | static void |
1040 | atombios_yuv_setup(struct drm_encoder *encoder, bool enable) | 809 | atombios_yuv_setup(struct drm_encoder *encoder, bool enable) |
1041 | { | 810 | { |
@@ -1074,129 +843,6 @@ atombios_yuv_setup(struct drm_encoder *encoder, bool enable) | |||
1074 | } | 843 | } |
1075 | 844 | ||
1076 | static void | 845 | static void |
1077 | atombios_overscan_setup(struct drm_encoder *encoder, | ||
1078 | struct drm_display_mode *mode, | ||
1079 | struct drm_display_mode *adjusted_mode) | ||
1080 | { | ||
1081 | struct drm_device *dev = encoder->dev; | ||
1082 | struct radeon_device *rdev = dev->dev_private; | ||
1083 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1084 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | ||
1085 | SET_CRTC_OVERSCAN_PS_ALLOCATION args; | ||
1086 | int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan); | ||
1087 | |||
1088 | memset(&args, 0, sizeof(args)); | ||
1089 | |||
1090 | args.usOverscanRight = 0; | ||
1091 | args.usOverscanLeft = 0; | ||
1092 | args.usOverscanBottom = 0; | ||
1093 | args.usOverscanTop = 0; | ||
1094 | args.ucCRTC = radeon_crtc->crtc_id; | ||
1095 | |||
1096 | if (radeon_encoder->flags & RADEON_USE_RMX) { | ||
1097 | if (radeon_encoder->rmx_type == RMX_FULL) { | ||
1098 | args.usOverscanRight = 0; | ||
1099 | args.usOverscanLeft = 0; | ||
1100 | args.usOverscanBottom = 0; | ||
1101 | args.usOverscanTop = 0; | ||
1102 | } else if (radeon_encoder->rmx_type == RMX_CENTER) { | ||
1103 | args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; | ||
1104 | args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; | ||
1105 | args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; | ||
1106 | args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; | ||
1107 | } else if (radeon_encoder->rmx_type == RMX_ASPECT) { | ||
1108 | int a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; | ||
1109 | int a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; | ||
1110 | |||
1111 | if (a1 > a2) { | ||
1112 | args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; | ||
1113 | args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; | ||
1114 | } else if (a2 > a1) { | ||
1115 | args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; | ||
1116 | args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; | ||
1117 | } | ||
1118 | } | ||
1119 | } | ||
1120 | |||
1121 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1122 | |||
1123 | } | ||
1124 | |||
1125 | static void | ||
1126 | atombios_scaler_setup(struct drm_encoder *encoder) | ||
1127 | { | ||
1128 | struct drm_device *dev = encoder->dev; | ||
1129 | struct radeon_device *rdev = dev->dev_private; | ||
1130 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1131 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | ||
1132 | ENABLE_SCALER_PS_ALLOCATION args; | ||
1133 | int index = GetIndexIntoMasterTable(COMMAND, EnableScaler); | ||
1134 | /* fixme - fill in enc_priv for atom dac */ | ||
1135 | enum radeon_tv_std tv_std = TV_STD_NTSC; | ||
1136 | |||
1137 | if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id) | ||
1138 | return; | ||
1139 | |||
1140 | memset(&args, 0, sizeof(args)); | ||
1141 | |||
1142 | args.ucScaler = radeon_crtc->crtc_id; | ||
1143 | |||
1144 | if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) { | ||
1145 | switch (tv_std) { | ||
1146 | case TV_STD_NTSC: | ||
1147 | default: | ||
1148 | args.ucTVStandard = ATOM_TV_NTSC; | ||
1149 | break; | ||
1150 | case TV_STD_PAL: | ||
1151 | args.ucTVStandard = ATOM_TV_PAL; | ||
1152 | break; | ||
1153 | case TV_STD_PAL_M: | ||
1154 | args.ucTVStandard = ATOM_TV_PALM; | ||
1155 | break; | ||
1156 | case TV_STD_PAL_60: | ||
1157 | args.ucTVStandard = ATOM_TV_PAL60; | ||
1158 | break; | ||
1159 | case TV_STD_NTSC_J: | ||
1160 | args.ucTVStandard = ATOM_TV_NTSCJ; | ||
1161 | break; | ||
1162 | case TV_STD_SCART_PAL: | ||
1163 | args.ucTVStandard = ATOM_TV_PAL; /* ??? */ | ||
1164 | break; | ||
1165 | case TV_STD_SECAM: | ||
1166 | args.ucTVStandard = ATOM_TV_SECAM; | ||
1167 | break; | ||
1168 | case TV_STD_PAL_CN: | ||
1169 | args.ucTVStandard = ATOM_TV_PALCN; | ||
1170 | break; | ||
1171 | } | ||
1172 | args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; | ||
1173 | } else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) { | ||
1174 | args.ucTVStandard = ATOM_TV_CV; | ||
1175 | args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; | ||
1176 | } else if (radeon_encoder->flags & RADEON_USE_RMX) { | ||
1177 | if (radeon_encoder->rmx_type == RMX_FULL) | ||
1178 | args.ucEnable = ATOM_SCALER_EXPANSION; | ||
1179 | else if (radeon_encoder->rmx_type == RMX_CENTER) | ||
1180 | args.ucEnable = ATOM_SCALER_CENTER; | ||
1181 | else if (radeon_encoder->rmx_type == RMX_ASPECT) | ||
1182 | args.ucEnable = ATOM_SCALER_EXPANSION; | ||
1183 | } else { | ||
1184 | if (ASIC_IS_AVIVO(rdev)) | ||
1185 | args.ucEnable = ATOM_SCALER_DISABLE; | ||
1186 | else | ||
1187 | args.ucEnable = ATOM_SCALER_CENTER; | ||
1188 | } | ||
1189 | |||
1190 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1191 | |||
1192 | if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT) | ||
1193 | && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) { | ||
1194 | atom_rv515_force_tv_scaler(rdev); | ||
1195 | } | ||
1196 | |||
1197 | } | ||
1198 | |||
1199 | static void | ||
1200 | radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | 846 | radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) |
1201 | { | 847 | { |
1202 | struct drm_device *dev = encoder->dev; | 848 | struct drm_device *dev = encoder->dev; |
@@ -1448,8 +1094,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1448 | radeon_encoder->pixel_clock = adjusted_mode->clock; | 1094 | radeon_encoder->pixel_clock = adjusted_mode->clock; |
1449 | 1095 | ||
1450 | radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); | 1096 | radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); |
1451 | atombios_overscan_setup(encoder, mode, adjusted_mode); | ||
1452 | atombios_scaler_setup(encoder); | ||
1453 | atombios_set_encoder_crtc_source(encoder); | 1097 | atombios_set_encoder_crtc_source(encoder); |
1454 | 1098 | ||
1455 | if (ASIC_IS_AVIVO(rdev)) { | 1099 | if (ASIC_IS_AVIVO(rdev)) { |
@@ -1667,6 +1311,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su | |||
1667 | 1311 | ||
1668 | radeon_encoder->encoder_id = encoder_id; | 1312 | radeon_encoder->encoder_id = encoder_id; |
1669 | radeon_encoder->devices = supported_device; | 1313 | radeon_encoder->devices = supported_device; |
1314 | radeon_encoder->rmx_type = RMX_OFF; | ||
1670 | 1315 | ||
1671 | switch (radeon_encoder->encoder_id) { | 1316 | switch (radeon_encoder->encoder_id) { |
1672 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | 1317 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: |
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 9e8f191eb64a..3206c0ad7b6c 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -101,9 +101,10 @@ static int radeonfb_setcolreg(unsigned regno, | |||
101 | break; | 101 | break; |
102 | case 24: | 102 | case 24: |
103 | case 32: | 103 | case 32: |
104 | fb->pseudo_palette[regno] = ((red & 0xff00) << 8) | | 104 | fb->pseudo_palette[regno] = |
105 | (green & 0xff00) | | 105 | (((red >> 8) & 0xff) << info->var.red.offset) | |
106 | ((blue & 0xff00) >> 8); | 106 | (((green >> 8) & 0xff) << info->var.green.offset) | |
107 | (((blue >> 8) & 0xff) << info->var.blue.offset); | ||
107 | break; | 108 | break; |
108 | } | 109 | } |
109 | } | 110 | } |
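The setcolreg change packs the truecolor pseudo-palette entry using the channel offsets reported in info->var instead of hard-coded 16/8/0 positions, which also covers the big-endian layouts added below. A standalone sketch of the packing, with example offsets:

#include <stdint.h>
#include <stdio.h>

/* Pack an 8-bit-per-channel pixel using the framebuffer's reported channel
 * offsets; the offsets passed in main() are examples, not driver values. */
static uint32_t pack_pixel(uint16_t red, uint16_t green, uint16_t blue,
                           int red_off, int green_off, int blue_off)
{
    return ((uint32_t)((red   >> 8) & 0xff) << red_off)   |
           ((uint32_t)((green >> 8) & 0xff) << green_off) |
           ((uint32_t)((blue  >> 8) & 0xff) << blue_off);
}

int main(void)
{
    /* little-endian XRGB8888: red at 16, green at 8, blue at 0 */
    printf("0x%08x\n", pack_pixel(0xffff, 0x8080, 0x0000, 16, 8, 0));
    return 0;
}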
@@ -154,6 +155,7 @@ static int radeonfb_check_var(struct fb_var_screeninfo *var, | |||
154 | var->transp.length = 0; | 155 | var->transp.length = 0; |
155 | var->transp.offset = 0; | 156 | var->transp.offset = 0; |
156 | break; | 157 | break; |
158 | #ifdef __LITTLE_ENDIAN | ||
157 | case 15: | 159 | case 15: |
158 | var->red.offset = 10; | 160 | var->red.offset = 10; |
159 | var->green.offset = 5; | 161 | var->green.offset = 5; |
@@ -194,6 +196,28 @@ static int radeonfb_check_var(struct fb_var_screeninfo *var, | |||
194 | var->transp.length = 8; | 196 | var->transp.length = 8; |
195 | var->transp.offset = 24; | 197 | var->transp.offset = 24; |
196 | break; | 198 | break; |
199 | #else | ||
200 | case 24: | ||
201 | var->red.offset = 8; | ||
202 | var->green.offset = 16; | ||
203 | var->blue.offset = 24; | ||
204 | var->red.length = 8; | ||
205 | var->green.length = 8; | ||
206 | var->blue.length = 8; | ||
207 | var->transp.length = 0; | ||
208 | var->transp.offset = 0; | ||
209 | break; | ||
210 | case 32: | ||
211 | var->red.offset = 8; | ||
212 | var->green.offset = 16; | ||
213 | var->blue.offset = 24; | ||
214 | var->red.length = 8; | ||
215 | var->green.length = 8; | ||
216 | var->blue.length = 8; | ||
217 | var->transp.length = 8; | ||
218 | var->transp.offset = 0; | ||
219 | break; | ||
220 | #endif | ||
197 | default: | 221 | default: |
198 | return -EINVAL; | 222 | return -EINVAL; |
199 | } | 223 | } |
@@ -447,10 +471,10 @@ static struct notifier_block paniced = { | |||
447 | .notifier_call = radeonfb_panic, | 471 | .notifier_call = radeonfb_panic, |
448 | }; | 472 | }; |
449 | 473 | ||
450 | static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp) | 474 | static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) |
451 | { | 475 | { |
452 | int aligned = width; | 476 | int aligned = width; |
453 | int align_large = (ASIC_IS_AVIVO(rdev)); | 477 | int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; |
454 | int pitch_mask = 0; | 478 | int pitch_mask = 0; |
455 | 479 | ||
456 | switch (bpp / 8) { | 480 | switch (bpp / 8) { |
@@ -488,12 +512,13 @@ int radeonfb_create(struct radeon_device *rdev, | |||
488 | u64 fb_gpuaddr; | 512 | u64 fb_gpuaddr; |
489 | void *fbptr = NULL; | 513 | void *fbptr = NULL; |
490 | unsigned long tmp; | 514 | unsigned long tmp; |
515 | bool fb_tiled = false; /* useful for testing */ | ||
491 | 516 | ||
492 | mode_cmd.width = surface_width; | 517 | mode_cmd.width = surface_width; |
493 | mode_cmd.height = surface_height; | 518 | mode_cmd.height = surface_height; |
494 | mode_cmd.bpp = 32; | 519 | mode_cmd.bpp = 32; |
495 | /* need to align pitch with crtc limits */ | 520 | /* need to align pitch with crtc limits */ |
496 | mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp) * ((mode_cmd.bpp + 1) / 8); | 521 | mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); |
497 | mode_cmd.depth = 24; | 522 | mode_cmd.depth = 24; |
498 | 523 | ||
499 | size = mode_cmd.pitch * mode_cmd.height; | 524 | size = mode_cmd.pitch * mode_cmd.height; |
@@ -511,6 +536,8 @@ int radeonfb_create(struct radeon_device *rdev, | |||
511 | } | 536 | } |
512 | robj = gobj->driver_private; | 537 | robj = gobj->driver_private; |
513 | 538 | ||
539 | if (fb_tiled) | ||
540 | radeon_object_set_tiling_flags(robj, RADEON_TILING_MACRO|RADEON_TILING_SURFACE, mode_cmd.pitch); | ||
514 | mutex_lock(&rdev->ddev->struct_mutex); | 541 | mutex_lock(&rdev->ddev->struct_mutex); |
515 | fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); | 542 | fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); |
516 | if (fb == NULL) { | 543 | if (fb == NULL) { |
@@ -539,6 +566,9 @@ int radeonfb_create(struct radeon_device *rdev, | |||
539 | } | 566 | } |
540 | rfbdev = info->par; | 567 | rfbdev = info->par; |
541 | 568 | ||
569 | if (fb_tiled) | ||
570 | radeon_object_check_tiling(robj, 0, 0); | ||
571 | |||
542 | ret = radeon_object_kmap(robj, &fbptr); | 572 | ret = radeon_object_kmap(robj, &fbptr); |
543 | if (ret) { | 573 | if (ret) { |
544 | goto out_unref; | 574 | goto out_unref; |
@@ -572,6 +602,11 @@ int radeonfb_create(struct radeon_device *rdev, | |||
572 | info->var.width = -1; | 602 | info->var.width = -1; |
573 | info->var.xres = fb_width; | 603 | info->var.xres = fb_width; |
574 | info->var.yres = fb_height; | 604 | info->var.yres = fb_height; |
605 | |||
606 | /* setup aperture base/size for vesafb takeover */ | ||
607 | info->aperture_base = rdev->ddev->mode_config.fb_base; | ||
608 | info->aperture_size = rdev->mc.real_vram_size; | ||
609 | |||
575 | info->fix.mmio_start = 0; | 610 | info->fix.mmio_start = 0; |
576 | info->fix.mmio_len = 0; | 611 | info->fix.mmio_len = 0; |
577 | info->pixmap.size = 64*1024; | 612 | info->pixmap.size = 64*1024; |
@@ -600,6 +635,7 @@ int radeonfb_create(struct radeon_device *rdev, | |||
600 | info->var.transp.offset = 0; | 635 | info->var.transp.offset = 0; |
601 | info->var.transp.length = 0; | 636 | info->var.transp.length = 0; |
602 | break; | 637 | break; |
638 | #ifdef __LITTLE_ENDIAN | ||
603 | case 15: | 639 | case 15: |
604 | info->var.red.offset = 10; | 640 | info->var.red.offset = 10; |
605 | info->var.green.offset = 5; | 641 | info->var.green.offset = 5; |
@@ -639,7 +675,29 @@ int radeonfb_create(struct radeon_device *rdev, | |||
639 | info->var.transp.offset = 24; | 675 | info->var.transp.offset = 24; |
640 | info->var.transp.length = 8; | 676 | info->var.transp.length = 8; |
641 | break; | 677 | break; |
678 | #else | ||
679 | case 24: | ||
680 | info->var.red.offset = 8; | ||
681 | info->var.green.offset = 16; | ||
682 | info->var.blue.offset = 24; | ||
683 | info->var.red.length = 8; | ||
684 | info->var.green.length = 8; | ||
685 | info->var.blue.length = 8; | ||
686 | info->var.transp.offset = 0; | ||
687 | info->var.transp.length = 0; | ||
688 | break; | ||
689 | case 32: | ||
690 | info->var.red.offset = 8; | ||
691 | info->var.green.offset = 16; | ||
692 | info->var.blue.offset = 24; | ||
693 | info->var.red.length = 8; | ||
694 | info->var.green.length = 8; | ||
695 | info->var.blue.length = 8; | ||
696 | info->var.transp.offset = 0; | ||
697 | info->var.transp.length = 8; | ||
698 | break; | ||
642 | default: | 699 | default: |
700 | #endif | ||
643 | break; | 701 | break; |
644 | } | 702 | } |
645 | 703 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 96afbf5ae2ad..b4e48dd2e859 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
@@ -195,7 +195,7 @@ retry: | |||
195 | r = wait_event_interruptible_timeout(rdev->fence_drv.queue, | 195 | r = wait_event_interruptible_timeout(rdev->fence_drv.queue, |
196 | radeon_fence_signaled(fence), timeout); | 196 | radeon_fence_signaled(fence), timeout); |
197 | if (unlikely(r == -ERESTARTSYS)) { | 197 | if (unlikely(r == -ERESTARTSYS)) { |
198 | return -ERESTART; | 198 | return -EBUSY; |
199 | } | 199 | } |
200 | } else { | 200 | } else { |
201 | r = wait_event_timeout(rdev->fence_drv.queue, | 201 | r = wait_event_timeout(rdev->fence_drv.queue, |
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index d343a15316ec..2977539880fb 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
@@ -177,7 +177,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, | |||
177 | return -ENOMEM; | 177 | return -ENOMEM; |
178 | } | 178 | } |
179 | rdev->gart.pages[p] = pagelist[i]; | 179 | rdev->gart.pages[p] = pagelist[i]; |
180 | page_base = (uint32_t)rdev->gart.pages_addr[p]; | 180 | page_base = rdev->gart.pages_addr[p]; |
181 | for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) { | 181 | for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) { |
182 | radeon_gart_set_page(rdev, t, page_base); | 182 | radeon_gart_set_page(rdev, t, page_base); |
183 | page_base += 4096; | 183 | page_base += 4096; |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index eb516034235d..cded5180c752 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -157,9 +157,9 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, | |||
157 | struct radeon_device *rdev = dev->dev_private; | 157 | struct radeon_device *rdev = dev->dev_private; |
158 | struct drm_radeon_gem_info *args = data; | 158 | struct drm_radeon_gem_info *args = data; |
159 | 159 | ||
160 | args->vram_size = rdev->mc.vram_size; | 160 | args->vram_size = rdev->mc.real_vram_size; |
161 | /* FIXME: report something that makes sense */ | 161 | /* FIXME: report something that makes sense */ |
162 | args->vram_visible = rdev->mc.vram_size - (4 * 1024 * 1024); | 162 | args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024); |
163 | args->gart_size = rdev->mc.gtt_size; | 163 | args->gart_size = rdev->mc.gtt_size; |
164 | return 0; | 164 | return 0; |
165 | } | 165 | } |
@@ -285,3 +285,44 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | |||
285 | mutex_unlock(&dev->struct_mutex); | 285 | mutex_unlock(&dev->struct_mutex); |
286 | return r; | 286 | return r; |
287 | } | 287 | } |
288 | |||
289 | int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, | ||
290 | struct drm_file *filp) | ||
291 | { | ||
292 | struct drm_radeon_gem_set_tiling *args = data; | ||
293 | struct drm_gem_object *gobj; | ||
294 | struct radeon_object *robj; | ||
295 | int r = 0; | ||
296 | |||
297 | DRM_DEBUG("%d \n", args->handle); | ||
298 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | ||
299 | if (gobj == NULL) | ||
300 | return -EINVAL; | ||
301 | robj = gobj->driver_private; | ||
302 | radeon_object_set_tiling_flags(robj, args->tiling_flags, args->pitch); | ||
303 | mutex_lock(&dev->struct_mutex); | ||
304 | drm_gem_object_unreference(gobj); | ||
305 | mutex_unlock(&dev->struct_mutex); | ||
306 | return r; | ||
307 | } | ||
308 | |||
309 | int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, | ||
310 | struct drm_file *filp) | ||
311 | { | ||
312 | struct drm_radeon_gem_get_tiling *args = data; | ||
313 | struct drm_gem_object *gobj; | ||
314 | struct radeon_object *robj; | ||
315 | int r = 0; | ||
316 | |||
317 | DRM_DEBUG("\n"); | ||
318 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | ||
319 | if (gobj == NULL) | ||
320 | return -EINVAL; | ||
321 | robj = gobj->driver_private; | ||
322 | radeon_object_get_tiling_flags(robj, &args->tiling_flags, | ||
323 | &args->pitch); | ||
324 | mutex_lock(&dev->struct_mutex); | ||
325 | drm_gem_object_unreference(gobj); | ||
326 | mutex_unlock(&dev->struct_mutex); | ||
327 | return r; | ||
328 | } | ||
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 4612a7c146d1..937a2f1cdb46 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -291,5 +291,7 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = { | |||
291 | DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH), | 291 | DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH), |
292 | DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH), | 292 | DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH), |
293 | DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH), | 293 | DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH), |
294 | DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH), | ||
295 | DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH), | ||
294 | }; | 296 | }; |
295 | int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); | 297 | int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 8086ecf7f03d..7d06dc98a42a 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
@@ -29,6 +29,171 @@ | |||
29 | #include "radeon_fixed.h" | 29 | #include "radeon_fixed.h" |
30 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | 31 | ||
32 | static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, | ||
33 | struct drm_display_mode *mode, | ||
34 | struct drm_display_mode *adjusted_mode) | ||
35 | { | ||
36 | struct drm_device *dev = crtc->dev; | ||
37 | struct radeon_device *rdev = dev->dev_private; | ||
38 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
39 | int xres = mode->hdisplay; | ||
40 | int yres = mode->vdisplay; | ||
41 | bool hscale = true, vscale = true; | ||
42 | int hsync_wid; | ||
43 | int vsync_wid; | ||
44 | int hsync_start; | ||
45 | int blank_width; | ||
46 | u32 scale, inc, crtc_more_cntl; | ||
47 | u32 fp_horz_stretch, fp_vert_stretch, fp_horz_vert_active; | ||
48 | u32 fp_h_sync_strt_wid, fp_crtc_h_total_disp; | ||
49 | u32 fp_v_sync_strt_wid, fp_crtc_v_total_disp; | ||
50 | struct radeon_native_mode *native_mode = &radeon_crtc->native_mode; | ||
51 | |||
52 | fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) & | ||
53 | (RADEON_VERT_STRETCH_RESERVED | | ||
54 | RADEON_VERT_AUTO_RATIO_INC); | ||
55 | fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) & | ||
56 | (RADEON_HORZ_FP_LOOP_STRETCH | | ||
57 | RADEON_HORZ_AUTO_RATIO_INC); | ||
58 | |||
59 | crtc_more_cntl = 0; | ||
60 | if ((rdev->family == CHIP_RS100) || | ||
61 | (rdev->family == CHIP_RS200)) { | ||
62 | /* This is to work around the asic bug for RMX; some versions | ||
63 | of the BIOS don't have this register initialized correctly. */ | ||
64 | crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN; | ||
65 | } | ||
66 | |||
67 | |||
68 | fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff) | ||
69 | | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); | ||
70 | |||
71 | hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; | ||
72 | if (!hsync_wid) | ||
73 | hsync_wid = 1; | ||
74 | hsync_start = mode->crtc_hsync_start - 8; | ||
75 | |||
76 | fp_h_sync_strt_wid = ((hsync_start & 0x1fff) | ||
77 | | ((hsync_wid & 0x3f) << 16) | ||
78 | | ((mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
79 | ? RADEON_CRTC_H_SYNC_POL | ||
80 | : 0)); | ||
81 | |||
82 | fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff) | ||
83 | | ((mode->crtc_vdisplay - 1) << 16)); | ||
84 | |||
85 | vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; | ||
86 | if (!vsync_wid) | ||
87 | vsync_wid = 1; | ||
88 | |||
89 | fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff) | ||
90 | | ((vsync_wid & 0x1f) << 16) | ||
91 | | ((mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
92 | ? RADEON_CRTC_V_SYNC_POL | ||
93 | : 0)); | ||
94 | |||
95 | fp_horz_vert_active = 0; | ||
96 | |||
97 | if (native_mode->panel_xres == 0 || | ||
98 | native_mode->panel_yres == 0) { | ||
99 | hscale = false; | ||
100 | vscale = false; | ||
101 | } else { | ||
102 | if (xres > native_mode->panel_xres) | ||
103 | xres = native_mode->panel_xres; | ||
104 | if (yres > native_mode->panel_yres) | ||
105 | yres = native_mode->panel_yres; | ||
106 | |||
107 | if (xres == native_mode->panel_xres) | ||
108 | hscale = false; | ||
109 | if (yres == native_mode->panel_yres) | ||
110 | vscale = false; | ||
111 | } | ||
112 | |||
113 | switch (radeon_crtc->rmx_type) { | ||
114 | case RMX_FULL: | ||
115 | case RMX_ASPECT: | ||
116 | if (!hscale) | ||
117 | fp_horz_stretch |= ((xres/8-1) << 16); | ||
118 | else { | ||
119 | inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0; | ||
120 | scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX) | ||
121 | / native_mode->panel_xres + 1; | ||
122 | fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) | | ||
123 | RADEON_HORZ_STRETCH_BLEND | | ||
124 | RADEON_HORZ_STRETCH_ENABLE | | ||
125 | ((native_mode->panel_xres/8-1) << 16)); | ||
126 | } | ||
127 | |||
128 | if (!vscale) | ||
129 | fp_vert_stretch |= ((yres-1) << 12); | ||
130 | else { | ||
131 | inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0; | ||
132 | scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX) | ||
133 | / native_mode->panel_yres + 1; | ||
134 | fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) | | ||
135 | RADEON_VERT_STRETCH_ENABLE | | ||
136 | RADEON_VERT_STRETCH_BLEND | | ||
137 | ((native_mode->panel_yres-1) << 12)); | ||
138 | } | ||
139 | break; | ||
140 | case RMX_CENTER: | ||
141 | fp_horz_stretch |= ((xres/8-1) << 16); | ||
142 | fp_vert_stretch |= ((yres-1) << 12); | ||
143 | |||
144 | crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN | | ||
145 | RADEON_CRTC_AUTO_VERT_CENTER_EN); | ||
146 | |||
147 | blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8; | ||
148 | if (blank_width > 110) | ||
149 | blank_width = 110; | ||
150 | |||
151 | fp_crtc_h_total_disp = (((blank_width) & 0x3ff) | ||
152 | | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); | ||
153 | |||
154 | hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; | ||
155 | if (!hsync_wid) | ||
156 | hsync_wid = 1; | ||
157 | |||
158 | fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff) | ||
159 | | ((hsync_wid & 0x3f) << 16) | ||
160 | | ((mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
161 | ? RADEON_CRTC_H_SYNC_POL | ||
162 | : 0)); | ||
163 | |||
164 | fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff) | ||
165 | | ((mode->crtc_vdisplay - 1) << 16)); | ||
166 | |||
167 | vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; | ||
168 | if (!vsync_wid) | ||
169 | vsync_wid = 1; | ||
170 | |||
171 | fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff) | ||
172 | | ((vsync_wid & 0x1f) << 16) | ||
173 | | ((mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
174 | ? RADEON_CRTC_V_SYNC_POL | ||
175 | : 0))); | ||
176 | |||
177 | fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) | | ||
178 | (((native_mode->panel_xres / 8) & 0x1ff) << 16)); | ||
179 | break; | ||
180 | case RMX_OFF: | ||
181 | default: | ||
182 | fp_horz_stretch |= ((xres/8-1) << 16); | ||
183 | fp_vert_stretch |= ((yres-1) << 12); | ||
184 | break; | ||
185 | } | ||
186 | |||
187 | WREG32(RADEON_FP_HORZ_STRETCH, fp_horz_stretch); | ||
188 | WREG32(RADEON_FP_VERT_STRETCH, fp_vert_stretch); | ||
189 | WREG32(RADEON_CRTC_MORE_CNTL, crtc_more_cntl); | ||
190 | WREG32(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active); | ||
191 | WREG32(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid); | ||
192 | WREG32(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid); | ||
193 | WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp); | ||
194 | WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp); | ||
195 | } | ||
196 | |||
32 | void radeon_restore_common_regs(struct drm_device *dev) | 197 | void radeon_restore_common_regs(struct drm_device *dev) |
33 | { | 198 | { |
34 | /* don't need this yet */ | 199 | /* don't need this yet */ |
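In the RMX_FULL/RMX_ASPECT case above, the horizontal stretch ratio is computed as ((xres + inc) * RATIO_MAX) / panel_xres + 1. A worked standalone example (the RATIO_MAX value of 4096 is an assumption standing in for RADEON_HORZ_STRETCH_RATIO_MAX, and the resolutions are arbitrary):

#include <stdint.h>
#include <stdio.h>

#define RATIO_MAX 4096u  /* assumed stand-in for RADEON_HORZ_STRETCH_RATIO_MAX */

int main(void)
{
    uint32_t xres = 800, panel_xres = 1024, inc = 0;

    /* same arithmetic as the RMX_FULL/RMX_ASPECT branch in the hunk above */
    uint32_t scale = ((xres + inc) * RATIO_MAX) / panel_xres + 1;

    printf("scale = %u (ratio ~%.3f)\n", scale, (double)scale / RATIO_MAX);
    return 0;
}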
@@ -235,6 +400,7 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
235 | uint64_t base; | 400 | uint64_t base; |
236 | uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; | 401 | uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; |
237 | uint32_t crtc_pitch, pitch_pixels; | 402 | uint32_t crtc_pitch, pitch_pixels; |
403 | uint32_t tiling_flags; | ||
238 | 404 | ||
239 | DRM_DEBUG("\n"); | 405 | DRM_DEBUG("\n"); |
240 | 406 | ||
@@ -244,7 +410,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
244 | if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) { | 410 | if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) { |
245 | return -EINVAL; | 411 | return -EINVAL; |
246 | } | 412 | } |
247 | crtc_offset = (u32)base; | 413 | /* if scanout was in GTT this really wouldn't work */ |
414 | /* crtc offset is from display base addr not FB location */ | ||
415 | radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location; | ||
416 | |||
417 | base -= radeon_crtc->legacy_display_base_addr; | ||
418 | |||
248 | crtc_offset_cntl = 0; | 419 | crtc_offset_cntl = 0; |
249 | 420 | ||
250 | pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8); | 421 | pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8); |
@@ -253,8 +424,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
253 | (crtc->fb->bits_per_pixel * 8)); | 424 | (crtc->fb->bits_per_pixel * 8)); |
254 | crtc_pitch |= crtc_pitch << 16; | 425 | crtc_pitch |= crtc_pitch << 16; |
255 | 426 | ||
256 | /* TODO tiling */ | 427 | radeon_object_get_tiling_flags(obj->driver_private, |
257 | if (0) { | 428 | &tiling_flags, NULL); |
429 | if (tiling_flags & RADEON_TILING_MICRO) | ||
430 | DRM_ERROR("trying to scanout microtiled buffer\n"); | ||
431 | |||
432 | if (tiling_flags & RADEON_TILING_MACRO) { | ||
258 | if (ASIC_IS_R300(rdev)) | 433 | if (ASIC_IS_R300(rdev)) |
259 | crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | | 434 | crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | |
260 | R300_CRTC_MICRO_TILE_BUFFER_DIS | | 435 | R300_CRTC_MICRO_TILE_BUFFER_DIS | |
@@ -270,15 +445,13 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
270 | crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN; | 445 | crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN; |
271 | } | 446 | } |
272 | 447 | ||
273 | 448 | if (tiling_flags & RADEON_TILING_MACRO) { | |
274 | /* TODO more tiling */ | ||
275 | if (0) { | ||
276 | if (ASIC_IS_R300(rdev)) { | 449 | if (ASIC_IS_R300(rdev)) { |
277 | crtc_tile_x0_y0 = x | (y << 16); | 450 | crtc_tile_x0_y0 = x | (y << 16); |
278 | base &= ~0x7ff; | 451 | base &= ~0x7ff; |
279 | } else { | 452 | } else { |
280 | int byteshift = crtc->fb->bits_per_pixel >> 4; | 453 | int byteshift = crtc->fb->bits_per_pixel >> 4; |
281 | int tile_addr = (((y >> 3) * crtc->fb->width + x) >> (8 - byteshift)) << 11; | 454 | int tile_addr = (((y >> 3) * pitch_pixels + x) >> (8 - byteshift)) << 11; |
282 | base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8); | 455 | base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8); |
283 | crtc_offset_cntl |= (y % 16); | 456 | crtc_offset_cntl |= (y % 16); |
284 | } | 457 | } |
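For macro-tiled scanout on pre-R300 parts, the base offset is folded through the tile layout using pitch_pixels rather than the framebuffer width, as fixed above. A standalone worked example of that offset computation, with arbitrary x/y, pitch and bpp values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t x = 100, y = 50;
    uint32_t pitch_pixels = 1280;
    uint32_t bits_per_pixel = 32;

    /* same arithmetic as the pre-R300 branch in the hunk above */
    uint32_t byteshift = bits_per_pixel >> 4;                 /* 2 for 32 bpp */
    uint32_t tile_addr = (((y >> 3) * pitch_pixels + x) >> (8 - byteshift)) << 11;
    uint32_t offset = tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8);

    printf("tile_addr=0x%x, offset=0x%x\n", tile_addr, offset);
    return 0;
}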
@@ -303,11 +476,9 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
303 | 476 | ||
304 | base &= ~7; | 477 | base &= ~7; |
305 | 478 | ||
306 | /* update sarea TODO */ | ||
307 | |||
308 | crtc_offset = (u32)base; | 479 | crtc_offset = (u32)base; |
309 | 480 | ||
310 | WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, rdev->mc.vram_location); | 481 | WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, radeon_crtc->legacy_display_base_addr); |
311 | 482 | ||
312 | if (ASIC_IS_R300(rdev)) { | 483 | if (ASIC_IS_R300(rdev)) { |
313 | if (radeon_crtc->crtc_id) | 484 | if (radeon_crtc->crtc_id) |
@@ -751,6 +922,8 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc, | |||
751 | struct drm_display_mode *mode, | 922 | struct drm_display_mode *mode, |
752 | struct drm_display_mode *adjusted_mode) | 923 | struct drm_display_mode *adjusted_mode) |
753 | { | 924 | { |
925 | if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) | ||
926 | return false; | ||
754 | return true; | 927 | return true; |
755 | } | 928 | } |
756 | 929 | ||
@@ -759,16 +932,25 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc, | |||
759 | struct drm_display_mode *adjusted_mode, | 932 | struct drm_display_mode *adjusted_mode, |
760 | int x, int y, struct drm_framebuffer *old_fb) | 933 | int x, int y, struct drm_framebuffer *old_fb) |
761 | { | 934 | { |
762 | 935 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | |
763 | DRM_DEBUG("\n"); | 936 | struct drm_device *dev = crtc->dev; |
937 | struct radeon_device *rdev = dev->dev_private; | ||
764 | 938 | ||
765 | /* TODO TV */ | 939 | /* TODO TV */ |
766 | |||
767 | radeon_crtc_set_base(crtc, x, y, old_fb); | 940 | radeon_crtc_set_base(crtc, x, y, old_fb); |
768 | radeon_set_crtc_timing(crtc, adjusted_mode); | 941 | radeon_set_crtc_timing(crtc, adjusted_mode); |
769 | radeon_set_pll(crtc, adjusted_mode); | 942 | radeon_set_pll(crtc, adjusted_mode); |
770 | radeon_init_disp_bandwidth(crtc->dev); | 943 | radeon_bandwidth_update(rdev); |
771 | 944 | if (radeon_crtc->crtc_id == 0) { | |
945 | radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode); | ||
946 | } else { | ||
947 | if (radeon_crtc->rmx_type != RMX_OFF) { | ||
948 | /* FIXME: only the first crtc has rmx; what should we | ||
949 | * do? | ||
950 | */ | ||
951 | DRM_ERROR("Mode needs scaling but only the first crtc can do that.\n"); | ||
952 | } | ||
953 | } | ||
772 | return 0; | 954 | return 0; |
773 | } | 955 | } |
774 | 956 | ||
@@ -799,478 +981,3 @@ void radeon_legacy_init_crtc(struct drm_device *dev, | |||
799 | radeon_crtc->crtc_offset = RADEON_CRTC2_H_TOTAL_DISP - RADEON_CRTC_H_TOTAL_DISP; | 981 | radeon_crtc->crtc_offset = RADEON_CRTC2_H_TOTAL_DISP - RADEON_CRTC_H_TOTAL_DISP; |
800 | drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs); | 982 | drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs); |
801 | } | 983 | } |
802 | |||
803 | void radeon_init_disp_bw_legacy(struct drm_device *dev, | ||
804 | struct drm_display_mode *mode1, | ||
805 | uint32_t pixel_bytes1, | ||
806 | struct drm_display_mode *mode2, | ||
807 | uint32_t pixel_bytes2) | ||
808 | { | ||
809 | struct radeon_device *rdev = dev->dev_private; | ||
810 | fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff; | ||
811 | fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff; | ||
812 | fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; | ||
813 | uint32_t temp, data, mem_trcd, mem_trp, mem_tras; | ||
814 | fixed20_12 memtcas_ff[8] = { | ||
815 | fixed_init(1), | ||
816 | fixed_init(2), | ||
817 | fixed_init(3), | ||
818 | fixed_init(0), | ||
819 | fixed_init_half(1), | ||
820 | fixed_init_half(2), | ||
821 | fixed_init(0), | ||
822 | }; | ||
823 | fixed20_12 memtcas_rs480_ff[8] = { | ||
824 | fixed_init(0), | ||
825 | fixed_init(1), | ||
826 | fixed_init(2), | ||
827 | fixed_init(3), | ||
828 | fixed_init(0), | ||
829 | fixed_init_half(1), | ||
830 | fixed_init_half(2), | ||
831 | fixed_init_half(3), | ||
832 | }; | ||
833 | fixed20_12 memtcas2_ff[8] = { | ||
834 | fixed_init(0), | ||
835 | fixed_init(1), | ||
836 | fixed_init(2), | ||
837 | fixed_init(3), | ||
838 | fixed_init(4), | ||
839 | fixed_init(5), | ||
840 | fixed_init(6), | ||
841 | fixed_init(7), | ||
842 | }; | ||
843 | fixed20_12 memtrbs[8] = { | ||
844 | fixed_init(1), | ||
845 | fixed_init_half(1), | ||
846 | fixed_init(2), | ||
847 | fixed_init_half(2), | ||
848 | fixed_init(3), | ||
849 | fixed_init_half(3), | ||
850 | fixed_init(4), | ||
851 | fixed_init_half(4) | ||
852 | }; | ||
853 | fixed20_12 memtrbs_r4xx[8] = { | ||
854 | fixed_init(4), | ||
855 | fixed_init(5), | ||
856 | fixed_init(6), | ||
857 | fixed_init(7), | ||
858 | fixed_init(8), | ||
859 | fixed_init(9), | ||
860 | fixed_init(10), | ||
861 | fixed_init(11) | ||
862 | }; | ||
863 | fixed20_12 min_mem_eff; | ||
864 | fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; | ||
865 | fixed20_12 cur_latency_mclk, cur_latency_sclk; | ||
866 | fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate, | ||
867 | disp_drain_rate2, read_return_rate; | ||
868 | fixed20_12 time_disp1_drop_priority; | ||
869 | int c; | ||
870 | int cur_size = 16; /* in octawords */ | ||
871 | int critical_point = 0, critical_point2; | ||
872 | /* uint32_t read_return_rate, time_disp1_drop_priority; */ | ||
873 | int stop_req, max_stop_req; | ||
874 | |||
875 | min_mem_eff.full = rfixed_const_8(0); | ||
876 | /* get modes */ | ||
877 | if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { | ||
878 | uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); | ||
879 | mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT); | ||
880 | mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT); | ||
881 | /* check crtc enables */ | ||
882 | if (mode2) | ||
883 | mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); | ||
884 | if (mode1) | ||
885 | mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); | ||
886 | WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer); | ||
887 | } | ||
888 | |||
889 | /* | ||
890 | * determine if there is enough bandwidth for the current mode | ||
891 | */ | ||
892 | mclk_ff.full = rfixed_const(rdev->clock.default_mclk); | ||
893 | temp_ff.full = rfixed_const(100); | ||
894 | mclk_ff.full = rfixed_div(mclk_ff, temp_ff); | ||
895 | sclk_ff.full = rfixed_const(rdev->clock.default_sclk); | ||
896 | sclk_ff.full = rfixed_div(sclk_ff, temp_ff); | ||
897 | |||
898 | temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); | ||
899 | temp_ff.full = rfixed_const(temp); | ||
900 | mem_bw.full = rfixed_mul(mclk_ff, temp_ff); | ||
901 | |||
902 | pix_clk.full = 0; | ||
903 | pix_clk2.full = 0; | ||
904 | peak_disp_bw.full = 0; | ||
905 | if (mode1) { | ||
906 | temp_ff.full = rfixed_const(1000); | ||
907 | pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ | ||
908 | pix_clk.full = rfixed_div(pix_clk, temp_ff); | ||
909 | temp_ff.full = rfixed_const(pixel_bytes1); | ||
910 | peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); | ||
911 | } | ||
912 | if (mode2) { | ||
913 | temp_ff.full = rfixed_const(1000); | ||
914 | pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ | ||
915 | pix_clk2.full = rfixed_div(pix_clk2, temp_ff); | ||
916 | temp_ff.full = rfixed_const(pixel_bytes2); | ||
917 | peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); | ||
918 | } | ||
919 | |||
920 | mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); | ||
921 | if (peak_disp_bw.full >= mem_bw.full) { | ||
922 | DRM_ERROR("You may not have enough display bandwidth for the current mode\n" | ||
923 | "If you see flickering, try lowering the resolution, refresh rate, or color depth\n"); | ||
924 | } | ||
925 | |||
926 | /* Get values from the EXT_MEM_CNTL register...converting its contents. */ | ||
927 | temp = RREG32(RADEON_MEM_TIMING_CNTL); | ||
928 | if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */ | ||
929 | mem_trcd = ((temp >> 2) & 0x3) + 1; | ||
930 | mem_trp = ((temp & 0x3)) + 1; | ||
931 | mem_tras = ((temp & 0x70) >> 4) + 1; | ||
932 | } else if (rdev->family == CHIP_R300 || | ||
933 | rdev->family == CHIP_R350) { /* r300, r350 */ | ||
934 | mem_trcd = (temp & 0x7) + 1; | ||
935 | mem_trp = ((temp >> 8) & 0x7) + 1; | ||
936 | mem_tras = ((temp >> 11) & 0xf) + 4; | ||
937 | } else if (rdev->family == CHIP_RV350 || | ||
938 | rdev->family <= CHIP_RV380) { | ||
939 | /* rv3x0 */ | ||
940 | mem_trcd = (temp & 0x7) + 3; | ||
941 | mem_trp = ((temp >> 8) & 0x7) + 3; | ||
942 | mem_tras = ((temp >> 11) & 0xf) + 6; | ||
943 | } else if (rdev->family == CHIP_R420 || | ||
944 | rdev->family == CHIP_R423 || | ||
945 | rdev->family == CHIP_RV410) { | ||
946 | /* r4xx */ | ||
947 | mem_trcd = (temp & 0xf) + 3; | ||
948 | if (mem_trcd > 15) | ||
949 | mem_trcd = 15; | ||
950 | mem_trp = ((temp >> 8) & 0xf) + 3; | ||
951 | if (mem_trp > 15) | ||
952 | mem_trp = 15; | ||
953 | mem_tras = ((temp >> 12) & 0x1f) + 6; | ||
954 | if (mem_tras > 31) | ||
955 | mem_tras = 31; | ||
956 | } else { /* RV200, R200 */ | ||
957 | mem_trcd = (temp & 0x7) + 1; | ||
958 | mem_trp = ((temp >> 8) & 0x7) + 1; | ||
959 | mem_tras = ((temp >> 12) & 0xf) + 4; | ||
960 | } | ||
961 | /* convert to FF */ | ||
962 | trcd_ff.full = rfixed_const(mem_trcd); | ||
963 | trp_ff.full = rfixed_const(mem_trp); | ||
964 | tras_ff.full = rfixed_const(mem_tras); | ||
965 | |||
966 | /* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */ | ||
967 | temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); | ||
968 | data = (temp & (7 << 20)) >> 20; | ||
969 | if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) { | ||
970 | if (rdev->family == CHIP_RS480) /* don't think rs400 */ | ||
971 | tcas_ff = memtcas_rs480_ff[data]; | ||
972 | else | ||
973 | tcas_ff = memtcas_ff[data]; | ||
974 | } else | ||
975 | tcas_ff = memtcas2_ff[data]; | ||
976 | |||
977 | if (rdev->family == CHIP_RS400 || | ||
978 | rdev->family == CHIP_RS480) { | ||
979 | /* extra cas latency stored in bits 23-25 0-4 clocks */ | ||
980 | data = (temp >> 23) & 0x7; | ||
981 | if (data < 5) | ||
982 | tcas_ff.full += rfixed_const(data); | ||
983 | } | ||
984 | |||
985 | if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { | ||
986 | /* on the R300, Tcas is included in Trbs. | ||
987 | */ | ||
988 | temp = RREG32(RADEON_MEM_CNTL); | ||
989 | data = (R300_MEM_NUM_CHANNELS_MASK & temp); | ||
990 | if (data == 1) { | ||
991 | if (R300_MEM_USE_CD_CH_ONLY & temp) { | ||
992 | temp = RREG32(R300_MC_IND_INDEX); | ||
993 | temp &= ~R300_MC_IND_ADDR_MASK; | ||
994 | temp |= R300_MC_READ_CNTL_CD_mcind; | ||
995 | WREG32(R300_MC_IND_INDEX, temp); | ||
996 | temp = RREG32(R300_MC_IND_DATA); | ||
997 | data = (R300_MEM_RBS_POSITION_C_MASK & temp); | ||
998 | } else { | ||
999 | temp = RREG32(R300_MC_READ_CNTL_AB); | ||
1000 | data = (R300_MEM_RBS_POSITION_A_MASK & temp); | ||
1001 | } | ||
1002 | } else { | ||
1003 | temp = RREG32(R300_MC_READ_CNTL_AB); | ||
1004 | data = (R300_MEM_RBS_POSITION_A_MASK & temp); | ||
1005 | } | ||
1006 | if (rdev->family == CHIP_RV410 || | ||
1007 | rdev->family == CHIP_R420 || | ||
1008 | rdev->family == CHIP_R423) | ||
1009 | trbs_ff = memtrbs_r4xx[data]; | ||
1010 | else | ||
1011 | trbs_ff = memtrbs[data]; | ||
1012 | tcas_ff.full += trbs_ff.full; | ||
1013 | } | ||
1014 | |||
1015 | sclk_eff_ff.full = sclk_ff.full; | ||
1016 | |||
1017 | if (rdev->flags & RADEON_IS_AGP) { | ||
1018 | fixed20_12 agpmode_ff; | ||
1019 | agpmode_ff.full = rfixed_const(radeon_agpmode); | ||
1020 | temp_ff.full = rfixed_const_666(16); | ||
1021 | sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff); | ||
1022 | } | ||
1023 | /* TODO PCIE lanes may affect this - agpmode == 16?? */ | ||
1024 | |||
1025 | if (ASIC_IS_R300(rdev)) { | ||
1026 | sclk_delay_ff.full = rfixed_const(250); | ||
1027 | } else { | ||
1028 | if ((rdev->family == CHIP_RV100) || | ||
1029 | rdev->flags & RADEON_IS_IGP) { | ||
1030 | if (rdev->mc.vram_is_ddr) | ||
1031 | sclk_delay_ff.full = rfixed_const(41); | ||
1032 | else | ||
1033 | sclk_delay_ff.full = rfixed_const(33); | ||
1034 | } else { | ||
1035 | if (rdev->mc.vram_width == 128) | ||
1036 | sclk_delay_ff.full = rfixed_const(57); | ||
1037 | else | ||
1038 | sclk_delay_ff.full = rfixed_const(41); | ||
1039 | } | ||
1040 | } | ||
1041 | |||
1042 | mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff); | ||
1043 | |||
1044 | if (rdev->mc.vram_is_ddr) { | ||
1045 | if (rdev->mc.vram_width == 32) { | ||
1046 | k1.full = rfixed_const(40); | ||
1047 | c = 3; | ||
1048 | } else { | ||
1049 | k1.full = rfixed_const(20); | ||
1050 | c = 1; | ||
1051 | } | ||
1052 | } else { | ||
1053 | k1.full = rfixed_const(40); | ||
1054 | c = 3; | ||
1055 | } | ||
1056 | |||
1057 | temp_ff.full = rfixed_const(2); | ||
1058 | mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff); | ||
1059 | temp_ff.full = rfixed_const(c); | ||
1060 | mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff); | ||
1061 | temp_ff.full = rfixed_const(4); | ||
1062 | mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff); | ||
1063 | mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff); | ||
1064 | mc_latency_mclk.full += k1.full; | ||
1065 | |||
1066 | mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff); | ||
1067 | mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff); | ||
1068 | |||
1069 | /* | ||
1070 | HW cursor time assuming worst case of full size colour cursor. | ||
1071 | */ | ||
1072 | temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); | ||
1073 | temp_ff.full += trcd_ff.full; | ||
1074 | if (temp_ff.full < tras_ff.full) | ||
1075 | temp_ff.full = tras_ff.full; | ||
1076 | cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff); | ||
1077 | |||
1078 | temp_ff.full = rfixed_const(cur_size); | ||
1079 | cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff); | ||
1080 | /* | ||
1081 | Find the total latency for the display data. | ||
1082 | */ | ||
1083 | disp_latency_overhead.full = rfixed_const(80); | ||
1084 | disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff); | ||
1085 | mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; | ||
1086 | mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; | ||
1087 | |||
1088 | if (mc_latency_mclk.full > mc_latency_sclk.full) | ||
1089 | disp_latency.full = mc_latency_mclk.full; | ||
1090 | else | ||
1091 | disp_latency.full = mc_latency_sclk.full; | ||
1092 | |||
1093 | /* setup Max GRPH_STOP_REQ default value */ | ||
1094 | if (ASIC_IS_RV100(rdev)) | ||
1095 | max_stop_req = 0x5c; | ||
1096 | else | ||
1097 | max_stop_req = 0x7c; | ||
1098 | |||
1099 | if (mode1) { | ||
1100 | /* CRTC1 | ||
1101 | Set GRPH_BUFFER_CNTL register using h/w defined optimal values. | ||
1102 | GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ] | ||
1103 | */ | ||
1104 | stop_req = mode1->hdisplay * pixel_bytes1 / 16; | ||
1105 | |||
1106 | if (stop_req > max_stop_req) | ||
1107 | stop_req = max_stop_req; | ||
1108 | |||
1109 | /* | ||
1110 | Find the drain rate of the display buffer. | ||
1111 | */ | ||
1112 | temp_ff.full = rfixed_const((16/pixel_bytes1)); | ||
1113 | disp_drain_rate.full = rfixed_div(pix_clk, temp_ff); | ||
1114 | |||
1115 | /* | ||
1116 | Find the critical point of the display buffer. | ||
1117 | */ | ||
1118 | crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency); | ||
1119 | crit_point_ff.full += rfixed_const_half(0); | ||
1120 | |||
1121 | critical_point = rfixed_trunc(crit_point_ff); | ||
1122 | |||
1123 | if (rdev->disp_priority == 2) { | ||
1124 | critical_point = 0; | ||
1125 | } | ||
1126 | |||
1127 | /* | ||
1128 | The critical point should never be above max_stop_req-4. Setting | ||
1129 | GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time. | ||
1130 | */ | ||
1131 | if (max_stop_req - critical_point < 4) | ||
1132 | critical_point = 0; | ||
1133 | |||
1134 | if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) { | ||
1135 | /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/ | ||
1136 | critical_point = 0x10; | ||
1137 | } | ||
1138 | |||
1139 | temp = RREG32(RADEON_GRPH_BUFFER_CNTL); | ||
1140 | temp &= ~(RADEON_GRPH_STOP_REQ_MASK); | ||
1141 | temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); | ||
1142 | temp &= ~(RADEON_GRPH_START_REQ_MASK); | ||
1143 | if ((rdev->family == CHIP_R350) && | ||
1144 | (stop_req > 0x15)) { | ||
1145 | stop_req -= 0x10; | ||
1146 | } | ||
1147 | temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); | ||
1148 | temp |= RADEON_GRPH_BUFFER_SIZE; | ||
1149 | temp &= ~(RADEON_GRPH_CRITICAL_CNTL | | ||
1150 | RADEON_GRPH_CRITICAL_AT_SOF | | ||
1151 | RADEON_GRPH_STOP_CNTL); | ||
1152 | /* | ||
1153 | Write the result into the register. | ||
1154 | */ | ||
1155 | WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) | | ||
1156 | (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT))); | ||
1157 | |||
1158 | #if 0 | ||
1159 | if ((rdev->family == CHIP_RS400) || | ||
1160 | (rdev->family == CHIP_RS480)) { | ||
1161 | /* attempt to program RS400 disp regs correctly ??? */ | ||
1162 | temp = RREG32(RS400_DISP1_REG_CNTL); | ||
1163 | temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK | | ||
1164 | RS400_DISP1_STOP_REQ_LEVEL_MASK); | ||
1165 | WREG32(RS400_DISP1_REQ_CNTL1, (temp | | ||
1166 | (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) | | ||
1167 | (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); | ||
1168 | temp = RREG32(RS400_DMIF_MEM_CNTL1); | ||
1169 | temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK | | ||
1170 | RS400_DISP1_CRITICAL_POINT_STOP_MASK); | ||
1171 | WREG32(RS400_DMIF_MEM_CNTL1, (temp | | ||
1172 | (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) | | ||
1173 | (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT))); | ||
1174 | } | ||
1175 | #endif | ||
1176 | |||
1177 | DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n", | ||
1178 | /* (unsigned int)info->SavedReg->grph_buffer_cntl, */ | ||
1179 | (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); | ||
1180 | } | ||
1181 | |||
1182 | if (mode2) { | ||
1183 | u32 grph2_cntl; | ||
1184 | stop_req = mode2->hdisplay * pixel_bytes2 / 16; | ||
1185 | |||
1186 | if (stop_req > max_stop_req) | ||
1187 | stop_req = max_stop_req; | ||
1188 | |||
1189 | /* | ||
1190 | Find the drain rate of the display buffer. | ||
1191 | */ | ||
1192 | temp_ff.full = rfixed_const((16/pixel_bytes2)); | ||
1193 | disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff); | ||
1194 | |||
1195 | grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); | ||
1196 | grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); | ||
1197 | grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); | ||
1198 | grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK); | ||
1199 | if ((rdev->family == CHIP_R350) && | ||
1200 | (stop_req > 0x15)) { | ||
1201 | stop_req -= 0x10; | ||
1202 | } | ||
1203 | grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); | ||
1204 | grph2_cntl |= RADEON_GRPH_BUFFER_SIZE; | ||
1205 | grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL | | ||
1206 | RADEON_GRPH_CRITICAL_AT_SOF | | ||
1207 | RADEON_GRPH_STOP_CNTL); | ||
1208 | |||
1209 | if ((rdev->family == CHIP_RS100) || | ||
1210 | (rdev->family == CHIP_RS200)) | ||
1211 | critical_point2 = 0; | ||
1212 | else { | ||
1213 | temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; | ||
1214 | temp_ff.full = rfixed_const(temp); | ||
1215 | temp_ff.full = rfixed_mul(mclk_ff, temp_ff); | ||
1216 | if (sclk_ff.full < temp_ff.full) | ||
1217 | temp_ff.full = sclk_ff.full; | ||
1218 | |||
1219 | read_return_rate.full = temp_ff.full; | ||
1220 | |||
1221 | if (mode1) { | ||
1222 | temp_ff.full = read_return_rate.full - disp_drain_rate.full; | ||
1223 | time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff); | ||
1224 | } else { | ||
1225 | time_disp1_drop_priority.full = 0; | ||
1226 | } | ||
1227 | crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; | ||
1228 | crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2); | ||
1229 | crit_point_ff.full += rfixed_const_half(0); | ||
1230 | |||
1231 | critical_point2 = rfixed_trunc(crit_point_ff); | ||
1232 | |||
1233 | if (rdev->disp_priority == 2) { | ||
1234 | critical_point2 = 0; | ||
1235 | } | ||
1236 | |||
1237 | if (max_stop_req - critical_point2 < 4) | ||
1238 | critical_point2 = 0; | ||
1239 | |||
1240 | } | ||
1241 | |||
1242 | if (critical_point2 == 0 && rdev->family == CHIP_R300) { | ||
1243 | /* some R300 cards have problem with this set to 0 */ | ||
1244 | critical_point2 = 0x10; | ||
1245 | } | ||
1246 | |||
1247 | WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) | | ||
1248 | (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT))); | ||
1249 | |||
1250 | if ((rdev->family == CHIP_RS400) || | ||
1251 | (rdev->family == CHIP_RS480)) { | ||
1252 | #if 0 | ||
1253 | /* attempt to program RS400 disp2 regs correctly ??? */ | ||
1254 | temp = RREG32(RS400_DISP2_REQ_CNTL1); | ||
1255 | temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK | | ||
1256 | RS400_DISP2_STOP_REQ_LEVEL_MASK); | ||
1257 | WREG32(RS400_DISP2_REQ_CNTL1, (temp | | ||
1258 | (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) | | ||
1259 | (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); | ||
1260 | temp = RREG32(RS400_DISP2_REQ_CNTL2); | ||
1261 | temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK | | ||
1262 | RS400_DISP2_CRITICAL_POINT_STOP_MASK); | ||
1263 | WREG32(RS400_DISP2_REQ_CNTL2, (temp | | ||
1264 | (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) | | ||
1265 | (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT))); | ||
1266 | #endif | ||
1267 | WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC); | ||
1268 | WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000); | ||
1269 | WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC); | ||
1270 | WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC); | ||
1271 | } | ||
1272 | |||
1273 | DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n", | ||
1274 | (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); | ||
1275 | } | ||
1276 | } | ||
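The bandwidth code above (now reached through radeon_bandwidth_update) is built on the rfixed_* helpers from radeon_fixed.h. Below is a rough user-space model of that arithmetic, assuming a 20.12 fixed-point layout as the type name fixed20_12 suggests; the clock, depth and latency figures are invented sample numbers, and the rounding details of the real helpers may differ.

#include <stdio.h>
#include <stdint.h>

/* Minimal 20.12 fixed-point model of the rfixed_* helpers (an assumption,
 * not the radeon_fixed.h implementation). */
typedef struct { uint32_t full; } fixed20_12;

static fixed20_12 rfixed_const(uint32_t v)      { return (fixed20_12){ v << 12 }; }
static fixed20_12 rfixed_const_half(uint32_t v) { return (fixed20_12){ (v << 12) + 2048 }; }
static fixed20_12 rfixed_mul(fixed20_12 a, fixed20_12 b)
{
	return (fixed20_12){ (uint32_t)(((uint64_t)a.full * b.full + 2048) >> 12) };
}
static fixed20_12 rfixed_div(fixed20_12 a, fixed20_12 b)
{
	return (fixed20_12){ (uint32_t)(((uint64_t)a.full << 12) / b.full) };
}
static uint32_t rfixed_trunc(fixed20_12 a) { return a.full >> 12; }

int main(void)
{
	/* Sample numbers: 135 MHz dot clock, 4 bytes/pixel, 0.2 us latency. */
	fixed20_12 pix_clk = rfixed_div(rfixed_const(135000), rfixed_const(1000));
	fixed20_12 drain   = rfixed_div(pix_clk, rfixed_const(16 / 4));
	fixed20_12 latency = rfixed_div(rfixed_const(200), rfixed_const(1000));
	fixed20_12 crit    = rfixed_mul(drain, latency);

	crit.full += rfixed_const_half(0).full;	/* round to nearest, as above */
	/* 33.75 * 0.2 + 0.5 is roughly 7.25, truncated to 7. */
	printf("critical point = %u\n", rfixed_trunc(crit));
	return 0;
}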
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 2c2f42de1d4c..34d0f58eb944 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
@@ -30,170 +30,6 @@ | |||
30 | #include "atom.h" | 30 | #include "atom.h" |
31 | 31 | ||
32 | 32 | ||
33 | static void radeon_legacy_rmx_mode_set(struct drm_encoder *encoder, | ||
34 | struct drm_display_mode *mode, | ||
35 | struct drm_display_mode *adjusted_mode) | ||
36 | { | ||
37 | struct drm_device *dev = encoder->dev; | ||
38 | struct radeon_device *rdev = dev->dev_private; | ||
39 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
40 | int xres = mode->hdisplay; | ||
41 | int yres = mode->vdisplay; | ||
42 | bool hscale = true, vscale = true; | ||
43 | int hsync_wid; | ||
44 | int vsync_wid; | ||
45 | int hsync_start; | ||
46 | uint32_t scale, inc; | ||
47 | uint32_t fp_horz_stretch, fp_vert_stretch, crtc_more_cntl, fp_horz_vert_active; | ||
48 | uint32_t fp_h_sync_strt_wid, fp_v_sync_strt_wid, fp_crtc_h_total_disp, fp_crtc_v_total_disp; | ||
49 | struct radeon_native_mode *native_mode = &radeon_encoder->native_mode; | ||
50 | |||
51 | DRM_DEBUG("\n"); | ||
52 | |||
53 | fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) & | ||
54 | (RADEON_VERT_STRETCH_RESERVED | | ||
55 | RADEON_VERT_AUTO_RATIO_INC); | ||
56 | fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) & | ||
57 | (RADEON_HORZ_FP_LOOP_STRETCH | | ||
58 | RADEON_HORZ_AUTO_RATIO_INC); | ||
59 | |||
60 | crtc_more_cntl = 0; | ||
61 | if ((rdev->family == CHIP_RS100) || | ||
62 | (rdev->family == CHIP_RS200)) { | ||
63 | /* This works around an ASIC bug for RMX; some versions | ||
64 | of the BIOS don't have this register initialized correctly. */ | ||
65 | crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN; | ||
66 | } | ||
67 | |||
68 | |||
69 | fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff) | ||
70 | | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); | ||
71 | |||
72 | hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; | ||
73 | if (!hsync_wid) | ||
74 | hsync_wid = 1; | ||
75 | hsync_start = mode->crtc_hsync_start - 8; | ||
76 | |||
77 | fp_h_sync_strt_wid = ((hsync_start & 0x1fff) | ||
78 | | ((hsync_wid & 0x3f) << 16) | ||
79 | | ((mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
80 | ? RADEON_CRTC_H_SYNC_POL | ||
81 | : 0)); | ||
82 | |||
83 | fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff) | ||
84 | | ((mode->crtc_vdisplay - 1) << 16)); | ||
85 | |||
86 | vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; | ||
87 | if (!vsync_wid) | ||
88 | vsync_wid = 1; | ||
89 | |||
90 | fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff) | ||
91 | | ((vsync_wid & 0x1f) << 16) | ||
92 | | ((mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
93 | ? RADEON_CRTC_V_SYNC_POL | ||
94 | : 0)); | ||
95 | |||
96 | fp_horz_vert_active = 0; | ||
97 | |||
98 | if (native_mode->panel_xres == 0 || | ||
99 | native_mode->panel_yres == 0) { | ||
100 | hscale = false; | ||
101 | vscale = false; | ||
102 | } else { | ||
103 | if (xres > native_mode->panel_xres) | ||
104 | xres = native_mode->panel_xres; | ||
105 | if (yres > native_mode->panel_yres) | ||
106 | yres = native_mode->panel_yres; | ||
107 | |||
108 | if (xres == native_mode->panel_xres) | ||
109 | hscale = false; | ||
110 | if (yres == native_mode->panel_yres) | ||
111 | vscale = false; | ||
112 | } | ||
113 | |||
114 | if (radeon_encoder->flags & RADEON_USE_RMX) { | ||
115 | if (radeon_encoder->rmx_type != RMX_CENTER) { | ||
116 | if (!hscale) | ||
117 | fp_horz_stretch |= ((xres/8-1) << 16); | ||
118 | else { | ||
119 | inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0; | ||
120 | scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX) | ||
121 | / native_mode->panel_xres + 1; | ||
122 | fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) | | ||
123 | RADEON_HORZ_STRETCH_BLEND | | ||
124 | RADEON_HORZ_STRETCH_ENABLE | | ||
125 | ((native_mode->panel_xres/8-1) << 16)); | ||
126 | } | ||
127 | |||
128 | if (!vscale) | ||
129 | fp_vert_stretch |= ((yres-1) << 12); | ||
130 | else { | ||
131 | inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0; | ||
132 | scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX) | ||
133 | / native_mode->panel_yres + 1; | ||
134 | fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) | | ||
135 | RADEON_VERT_STRETCH_ENABLE | | ||
136 | RADEON_VERT_STRETCH_BLEND | | ||
137 | ((native_mode->panel_yres-1) << 12)); | ||
138 | } | ||
139 | } else if (radeon_encoder->rmx_type == RMX_CENTER) { | ||
140 | int blank_width; | ||
141 | |||
142 | fp_horz_stretch |= ((xres/8-1) << 16); | ||
143 | fp_vert_stretch |= ((yres-1) << 12); | ||
144 | |||
145 | crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN | | ||
146 | RADEON_CRTC_AUTO_VERT_CENTER_EN); | ||
147 | |||
148 | blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8; | ||
149 | if (blank_width > 110) | ||
150 | blank_width = 110; | ||
151 | |||
152 | fp_crtc_h_total_disp = (((blank_width) & 0x3ff) | ||
153 | | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); | ||
154 | |||
155 | hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; | ||
156 | if (!hsync_wid) | ||
157 | hsync_wid = 1; | ||
158 | |||
159 | fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff) | ||
160 | | ((hsync_wid & 0x3f) << 16) | ||
161 | | ((mode->flags & DRM_MODE_FLAG_NHSYNC) | ||
162 | ? RADEON_CRTC_H_SYNC_POL | ||
163 | : 0)); | ||
164 | |||
165 | fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff) | ||
166 | | ((mode->crtc_vdisplay - 1) << 16)); | ||
167 | |||
168 | vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; | ||
169 | if (!vsync_wid) | ||
170 | vsync_wid = 1; | ||
171 | |||
172 | fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff) | ||
173 | | ((vsync_wid & 0x1f) << 16) | ||
174 | | ((mode->flags & DRM_MODE_FLAG_NVSYNC) | ||
175 | ? RADEON_CRTC_V_SYNC_POL | ||
176 | : 0))); | ||
177 | |||
178 | fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) | | ||
179 | (((native_mode->panel_xres / 8) & 0x1ff) << 16)); | ||
180 | } | ||
181 | } else { | ||
182 | fp_horz_stretch |= ((xres/8-1) << 16); | ||
183 | fp_vert_stretch |= ((yres-1) << 12); | ||
184 | } | ||
185 | |||
186 | WREG32(RADEON_FP_HORZ_STRETCH, fp_horz_stretch); | ||
187 | WREG32(RADEON_FP_VERT_STRETCH, fp_vert_stretch); | ||
188 | WREG32(RADEON_CRTC_MORE_CNTL, crtc_more_cntl); | ||
189 | WREG32(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active); | ||
190 | WREG32(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid); | ||
191 | WREG32(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid); | ||
192 | WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp); | ||
193 | WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp); | ||
194 | |||
195 | } | ||
196 | |||
197 | static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) | 33 | static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) |
198 | { | 34 | { |
199 | struct drm_device *dev = encoder->dev; | 35 | struct drm_device *dev = encoder->dev; |
@@ -287,9 +123,6 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, | |||
287 | 123 | ||
288 | DRM_DEBUG("\n"); | 124 | DRM_DEBUG("\n"); |
289 | 125 | ||
290 | if (radeon_crtc->crtc_id == 0) | ||
291 | radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); | ||
292 | |||
293 | lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); | 126 | lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); |
294 | lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; | 127 | lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; |
295 | 128 | ||
@@ -318,7 +151,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, | |||
318 | 151 | ||
319 | if (radeon_crtc->crtc_id == 0) { | 152 | if (radeon_crtc->crtc_id == 0) { |
320 | if (ASIC_IS_R300(rdev)) { | 153 | if (ASIC_IS_R300(rdev)) { |
321 | if (radeon_encoder->flags & RADEON_USE_RMX) | 154 | if (radeon_encoder->rmx_type != RMX_OFF) |
322 | lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX; | 155 | lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX; |
323 | } else | 156 | } else |
324 | lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2; | 157 | lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2; |
@@ -350,8 +183,6 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder, | |||
350 | 183 | ||
351 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 184 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
352 | 185 | ||
353 | radeon_encoder->flags &= ~RADEON_USE_RMX; | ||
354 | |||
355 | if (radeon_encoder->rmx_type != RMX_OFF) | 186 | if (radeon_encoder->rmx_type != RMX_OFF) |
356 | radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); | 187 | radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); |
357 | 188 | ||
@@ -455,9 +286,6 @@ static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder, | |||
455 | 286 | ||
456 | DRM_DEBUG("\n"); | 287 | DRM_DEBUG("\n"); |
457 | 288 | ||
458 | if (radeon_crtc->crtc_id == 0) | ||
459 | radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); | ||
460 | |||
461 | if (radeon_crtc->crtc_id == 0) { | 289 | if (radeon_crtc->crtc_id == 0) { |
462 | if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) { | 290 | if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) { |
463 | disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) & | 291 | disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) & |
@@ -653,9 +481,6 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, | |||
653 | 481 | ||
654 | DRM_DEBUG("\n"); | 482 | DRM_DEBUG("\n"); |
655 | 483 | ||
656 | if (radeon_crtc->crtc_id == 0) | ||
657 | radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); | ||
658 | |||
659 | tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL); | 484 | tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL); |
660 | tmp &= 0xfffff; | 485 | tmp &= 0xfffff; |
661 | if (rdev->family == CHIP_RV280) { | 486 | if (rdev->family == CHIP_RV280) { |
@@ -711,7 +536,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, | |||
711 | if (radeon_crtc->crtc_id == 0) { | 536 | if (radeon_crtc->crtc_id == 0) { |
712 | if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { | 537 | if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { |
713 | fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; | 538 | fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; |
714 | if (radeon_encoder->flags & RADEON_USE_RMX) | 539 | if (radeon_encoder->rmx_type != RMX_OFF) |
715 | fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX; | 540 | fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX; |
716 | else | 541 | else |
717 | fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1; | 542 | fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1; |
@@ -820,9 +645,6 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, | |||
820 | 645 | ||
821 | DRM_DEBUG("\n"); | 646 | DRM_DEBUG("\n"); |
822 | 647 | ||
823 | if (radeon_crtc->crtc_id == 0) | ||
824 | radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); | ||
825 | |||
826 | if (rdev->is_atom_bios) { | 648 | if (rdev->is_atom_bios) { |
827 | radeon_encoder->pixel_clock = adjusted_mode->clock; | 649 | radeon_encoder->pixel_clock = adjusted_mode->clock; |
828 | atombios_external_tmds_setup(encoder, ATOM_ENABLE); | 650 | atombios_external_tmds_setup(encoder, ATOM_ENABLE); |
@@ -856,7 +678,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, | |||
856 | if (radeon_crtc->crtc_id == 0) { | 678 | if (radeon_crtc->crtc_id == 0) { |
857 | if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) { | 679 | if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) { |
858 | fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK; | 680 | fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK; |
859 | if (radeon_encoder->flags & RADEON_USE_RMX) | 681 | if (radeon_encoder->rmx_type != RMX_OFF) |
860 | fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX; | 682 | fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX; |
861 | else | 683 | else |
862 | fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1; | 684 | fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1; |
@@ -1014,9 +836,6 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, | |||
1014 | 836 | ||
1015 | DRM_DEBUG("\n"); | 837 | DRM_DEBUG("\n"); |
1016 | 838 | ||
1017 | if (radeon_crtc->crtc_id == 0) | ||
1018 | radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); | ||
1019 | |||
1020 | if (rdev->family != CHIP_R200) { | 839 | if (rdev->family != CHIP_R200) { |
1021 | tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); | 840 | tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); |
1022 | if (rdev->family == CHIP_R420 || | 841 | if (rdev->family == CHIP_R420 || |
@@ -1243,6 +1062,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t | |||
1243 | 1062 | ||
1244 | radeon_encoder->encoder_id = encoder_id; | 1063 | radeon_encoder->encoder_id = encoder_id; |
1245 | radeon_encoder->devices = supported_device; | 1064 | radeon_encoder->devices = supported_device; |
1065 | radeon_encoder->rmx_type = RMX_OFF; | ||
1246 | 1066 | ||
1247 | switch (radeon_encoder->encoder_id) { | 1067 | switch (radeon_encoder->encoder_id) { |
1248 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | 1068 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: |
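With radeon_legacy_rmx_mode_set() gone from the encoder file, the core of it is the stretch-ratio formula. A standalone sketch of that computation, assuming RADEON_HORZ_STRETCH_RATIO_MAX is 4096 (the real value lives in radeon_reg.h) and using an invented 1024-wide mode on a 1280-wide panel:

#include <stdio.h>

#define STRETCH_RATIO_MAX 4096	/* assumed value of RADEON_HORZ_STRETCH_RATIO_MAX */

int main(void)
{
	unsigned xres = 1024, panel_xres = 1280;	/* sample mode vs. panel */
	unsigned inc = 0;				/* RADEON_HORZ_AUTO_RATIO_INC clear */

	/* Same formula as the removed radeon_legacy_rmx_mode_set(). */
	unsigned scale = ((xres + inc) * STRETCH_RATIO_MAX) / panel_xres + 1;

	/* 1024 * 4096 / 1280 + 1 = 3277: the source covers about 0.8 of the
	 * panel width, so the hardware stretches it by roughly 1.25x. */
	printf("horizontal stretch ratio field = %u\n", scale);
	return 0;
}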
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 9173b687462b..3b09a1f2d8f9 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -36,6 +36,9 @@ | |||
36 | #include <linux/i2c.h> | 36 | #include <linux/i2c.h> |
37 | #include <linux/i2c-id.h> | 37 | #include <linux/i2c-id.h> |
38 | #include <linux/i2c-algo-bit.h> | 38 | #include <linux/i2c-algo-bit.h> |
39 | #include "radeon_fixed.h" | ||
40 | |||
41 | struct radeon_device; | ||
39 | 42 | ||
40 | #define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base) | 43 | #define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base) |
41 | #define to_radeon_connector(x) container_of(x, struct radeon_connector, base) | 44 | #define to_radeon_connector(x) container_of(x, struct radeon_connector, base) |
@@ -124,6 +127,7 @@ struct radeon_tmds_pll { | |||
124 | #define RADEON_PLL_PREFER_LOW_POST_DIV (1 << 8) | 127 | #define RADEON_PLL_PREFER_LOW_POST_DIV (1 << 8) |
125 | #define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) | 128 | #define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) |
126 | #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) | 129 | #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) |
130 | #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) | ||
127 | 131 | ||
128 | struct radeon_pll { | 132 | struct radeon_pll { |
129 | uint16_t reference_freq; | 133 | uint16_t reference_freq; |
@@ -170,6 +174,18 @@ struct radeon_mode_info { | |||
170 | struct atom_context *atom_context; | 174 | struct atom_context *atom_context; |
171 | enum radeon_connector_table connector_table; | 175 | enum radeon_connector_table connector_table; |
172 | bool mode_config_initialized; | 176 | bool mode_config_initialized; |
177 | struct radeon_crtc *crtcs[2]; | ||
178 | }; | ||
179 | |||
180 | struct radeon_native_mode { | ||
181 | /* preferred mode */ | ||
182 | uint32_t panel_xres, panel_yres; | ||
183 | uint32_t hoverplus, hsync_width; | ||
184 | uint32_t hblank; | ||
185 | uint32_t voverplus, vsync_width; | ||
186 | uint32_t vblank; | ||
187 | uint32_t dotclock; | ||
188 | uint32_t flags; | ||
173 | }; | 189 | }; |
174 | 190 | ||
175 | struct radeon_crtc { | 191 | struct radeon_crtc { |
@@ -185,19 +201,13 @@ struct radeon_crtc { | |||
185 | uint64_t cursor_addr; | 201 | uint64_t cursor_addr; |
186 | int cursor_width; | 202 | int cursor_width; |
187 | int cursor_height; | 203 | int cursor_height; |
188 | }; | 204 | uint32_t legacy_display_base_addr; |
189 | 205 | uint32_t legacy_cursor_offset; | |
190 | #define RADEON_USE_RMX 1 | 206 | enum radeon_rmx_type rmx_type; |
191 | 207 | uint32_t devices; | |
192 | struct radeon_native_mode { | 208 | fixed20_12 vsc; |
193 | /* preferred mode */ | 209 | fixed20_12 hsc; |
194 | uint32_t panel_xres, panel_yres; | 210 | struct radeon_native_mode native_mode; |
195 | uint32_t hoverplus, hsync_width; | ||
196 | uint32_t hblank; | ||
197 | uint32_t voverplus, vsync_width; | ||
198 | uint32_t vblank; | ||
199 | uint32_t dotclock; | ||
200 | uint32_t flags; | ||
201 | }; | 211 | }; |
202 | 212 | ||
203 | struct radeon_encoder_primary_dac { | 213 | struct radeon_encoder_primary_dac { |
@@ -383,16 +393,9 @@ void radeon_enc_destroy(struct drm_encoder *encoder); | |||
383 | void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); | 393 | void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); |
384 | void radeon_combios_asic_init(struct drm_device *dev); | 394 | void radeon_combios_asic_init(struct drm_device *dev); |
385 | extern int radeon_static_clocks_init(struct drm_device *dev); | 395 | extern int radeon_static_clocks_init(struct drm_device *dev); |
386 | void radeon_init_disp_bw_legacy(struct drm_device *dev, | 396 | bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, |
387 | struct drm_display_mode *mode1, | 397 | struct drm_display_mode *mode, |
388 | uint32_t pixel_bytes1, | 398 | struct drm_display_mode *adjusted_mode); |
389 | struct drm_display_mode *mode2, | 399 | void atom_rv515_force_tv_scaler(struct radeon_device *rdev); |
390 | uint32_t pixel_bytes2); | ||
391 | void radeon_init_disp_bw_avivo(struct drm_device *dev, | ||
392 | struct drm_display_mode *mode1, | ||
393 | uint32_t pixel_bytes1, | ||
394 | struct drm_display_mode *mode2, | ||
395 | uint32_t pixel_bytes2); | ||
396 | void radeon_init_disp_bandwidth(struct drm_device *dev); | ||
397 | 400 | ||
398 | #endif | 401 | #endif |
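The to_radeon_crtc()/to_radeon_connector() macros kept in radeon_mode.h are plain container_of() wrappers, which is how the fields moved into struct radeon_crtc above (rmx_type, native_mode, vsc/hsc) are reached from a generic struct drm_crtc pointer. A minimal user-space illustration of the pattern; the struct names here are illustrative stand-ins, not the driver's:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_crtc_like { int id; };		/* stands in for struct drm_crtc */
struct radeon_crtc_like {
	struct drm_crtc_like base;		/* embedded base object */
	int rmx_type;				/* driver-private state */
};

#define to_radeon_crtc_like(x) container_of(x, struct radeon_crtc_like, base)

int main(void)
{
	struct radeon_crtc_like rcrtc = { .base = { .id = 0 }, .rmx_type = 1 };
	struct drm_crtc_like *crtc = &rcrtc.base;	/* what DRM hands around */

	/* Recover the driver wrapper from the generic pointer. */
	printf("rmx_type = %d\n", to_radeon_crtc_like(crtc)->rmx_type);
	return 0;
}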
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index bac0d06c52ac..dd9ac2fed6d6 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -44,6 +44,9 @@ struct radeon_object { | |||
44 | uint64_t gpu_addr; | 44 | uint64_t gpu_addr; |
45 | void *kptr; | 45 | void *kptr; |
46 | bool is_iomem; | 46 | bool is_iomem; |
47 | uint32_t tiling_flags; | ||
48 | uint32_t pitch; | ||
49 | int surface_reg; | ||
47 | }; | 50 | }; |
48 | 51 | ||
49 | int radeon_ttm_init(struct radeon_device *rdev); | 52 | int radeon_ttm_init(struct radeon_device *rdev); |
@@ -70,6 +73,7 @@ static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj) | |||
70 | 73 | ||
71 | robj = container_of(tobj, struct radeon_object, tobj); | 74 | robj = container_of(tobj, struct radeon_object, tobj); |
72 | list_del_init(&robj->list); | 75 | list_del_init(&robj->list); |
76 | radeon_object_clear_surface_reg(robj); | ||
73 | kfree(robj); | 77 | kfree(robj); |
74 | } | 78 | } |
75 | 79 | ||
@@ -99,16 +103,16 @@ static inline uint32_t radeon_object_flags_from_domain(uint32_t domain) | |||
99 | { | 103 | { |
100 | uint32_t flags = 0; | 104 | uint32_t flags = 0; |
101 | if (domain & RADEON_GEM_DOMAIN_VRAM) { | 105 | if (domain & RADEON_GEM_DOMAIN_VRAM) { |
102 | flags |= TTM_PL_FLAG_VRAM; | 106 | flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; |
103 | } | 107 | } |
104 | if (domain & RADEON_GEM_DOMAIN_GTT) { | 108 | if (domain & RADEON_GEM_DOMAIN_GTT) { |
105 | flags |= TTM_PL_FLAG_TT; | 109 | flags |= TTM_PL_FLAG_TT | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; |
106 | } | 110 | } |
107 | if (domain & RADEON_GEM_DOMAIN_CPU) { | 111 | if (domain & RADEON_GEM_DOMAIN_CPU) { |
108 | flags |= TTM_PL_FLAG_SYSTEM; | 112 | flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; |
109 | } | 113 | } |
110 | if (!flags) { | 114 | if (!flags) { |
111 | flags |= TTM_PL_FLAG_SYSTEM; | 115 | flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; |
112 | } | 116 | } |
113 | return flags; | 117 | return flags; |
114 | } | 118 | } |
@@ -141,6 +145,7 @@ int radeon_object_create(struct radeon_device *rdev, | |||
141 | } | 145 | } |
142 | robj->rdev = rdev; | 146 | robj->rdev = rdev; |
143 | robj->gobj = gobj; | 147 | robj->gobj = gobj; |
148 | robj->surface_reg = -1; | ||
144 | INIT_LIST_HEAD(&robj->list); | 149 | INIT_LIST_HEAD(&robj->list); |
145 | 150 | ||
146 | flags = radeon_object_flags_from_domain(domain); | 151 | flags = radeon_object_flags_from_domain(domain); |
@@ -304,7 +309,7 @@ int radeon_object_wait(struct radeon_object *robj) | |||
304 | } | 309 | } |
305 | spin_lock(&robj->tobj.lock); | 310 | spin_lock(&robj->tobj.lock); |
306 | if (robj->tobj.sync_obj) { | 311 | if (robj->tobj.sync_obj) { |
307 | r = ttm_bo_wait(&robj->tobj, true, false, false); | 312 | r = ttm_bo_wait(&robj->tobj, true, true, false); |
308 | } | 313 | } |
309 | spin_unlock(&robj->tobj.lock); | 314 | spin_unlock(&robj->tobj.lock); |
310 | radeon_object_unreserve(robj); | 315 | radeon_object_unreserve(robj); |
@@ -403,7 +408,6 @@ int radeon_object_list_validate(struct list_head *head, void *fence) | |||
403 | struct radeon_object *robj; | 408 | struct radeon_object *robj; |
404 | struct radeon_fence *old_fence = NULL; | 409 | struct radeon_fence *old_fence = NULL; |
405 | struct list_head *i; | 410 | struct list_head *i; |
406 | uint32_t flags; | ||
407 | int r; | 411 | int r; |
408 | 412 | ||
409 | r = radeon_object_list_reserve(head); | 413 | r = radeon_object_list_reserve(head); |
@@ -414,27 +418,25 @@ int radeon_object_list_validate(struct list_head *head, void *fence) | |||
414 | list_for_each(i, head) { | 418 | list_for_each(i, head) { |
415 | lobj = list_entry(i, struct radeon_object_list, list); | 419 | lobj = list_entry(i, struct radeon_object_list, list); |
416 | robj = lobj->robj; | 420 | robj = lobj->robj; |
417 | if (lobj->wdomain) { | ||
418 | flags = radeon_object_flags_from_domain(lobj->wdomain); | ||
419 | flags |= TTM_PL_FLAG_TT; | ||
420 | } else { | ||
421 | flags = radeon_object_flags_from_domain(lobj->rdomain); | ||
422 | flags |= TTM_PL_FLAG_TT; | ||
423 | flags |= TTM_PL_FLAG_VRAM; | ||
424 | } | ||
425 | if (!robj->pin_count) { | 421 | if (!robj->pin_count) { |
426 | robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING; | 422 | if (lobj->wdomain) { |
423 | robj->tobj.proposed_placement = | ||
424 | radeon_object_flags_from_domain(lobj->wdomain); | ||
425 | } else { | ||
426 | robj->tobj.proposed_placement = | ||
427 | radeon_object_flags_from_domain(lobj->rdomain); | ||
428 | } | ||
427 | r = ttm_buffer_object_validate(&robj->tobj, | 429 | r = ttm_buffer_object_validate(&robj->tobj, |
428 | robj->tobj.proposed_placement, | 430 | robj->tobj.proposed_placement, |
429 | true, false); | 431 | true, false); |
430 | if (unlikely(r)) { | 432 | if (unlikely(r)) { |
431 | radeon_object_list_unreserve(head); | ||
432 | DRM_ERROR("radeon: failed to validate.\n"); | 433 | DRM_ERROR("radeon: failed to validate.\n"); |
433 | return r; | 434 | return r; |
434 | } | 435 | } |
435 | radeon_object_gpu_addr(robj); | 436 | radeon_object_gpu_addr(robj); |
436 | } | 437 | } |
437 | lobj->gpu_offset = robj->gpu_addr; | 438 | lobj->gpu_offset = robj->gpu_addr; |
439 | lobj->tiling_flags = robj->tiling_flags; | ||
438 | if (fence) { | 440 | if (fence) { |
439 | old_fence = (struct radeon_fence *)robj->tobj.sync_obj; | 441 | old_fence = (struct radeon_fence *)robj->tobj.sync_obj; |
440 | robj->tobj.sync_obj = radeon_fence_ref(fence); | 442 | robj->tobj.sync_obj = radeon_fence_ref(fence); |
@@ -479,3 +481,127 @@ unsigned long radeon_object_size(struct radeon_object *robj) | |||
479 | { | 481 | { |
480 | return robj->tobj.num_pages << PAGE_SHIFT; | 482 | return robj->tobj.num_pages << PAGE_SHIFT; |
481 | } | 483 | } |
484 | |||
485 | int radeon_object_get_surface_reg(struct radeon_object *robj) | ||
486 | { | ||
487 | struct radeon_device *rdev = robj->rdev; | ||
488 | struct radeon_surface_reg *reg; | ||
489 | struct radeon_object *old_object; | ||
490 | int steal; | ||
491 | int i; | ||
492 | |||
493 | if (!robj->tiling_flags) | ||
494 | return 0; | ||
495 | |||
496 | if (robj->surface_reg >= 0) { | ||
497 | reg = &rdev->surface_regs[robj->surface_reg]; | ||
498 | i = robj->surface_reg; | ||
499 | goto out; | ||
500 | } | ||
501 | |||
502 | steal = -1; | ||
503 | for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { | ||
504 | |||
505 | reg = &rdev->surface_regs[i]; | ||
506 | if (!reg->robj) | ||
507 | break; | ||
508 | |||
509 | old_object = reg->robj; | ||
510 | if (old_object->pin_count == 0) | ||
511 | steal = i; | ||
512 | } | ||
513 | |||
514 | /* if we are all out */ | ||
515 | if (i == RADEON_GEM_MAX_SURFACES) { | ||
516 | if (steal == -1) | ||
517 | return -ENOMEM; | ||
518 | /* find someone with a surface reg and nuke their BO */ | ||
519 | reg = &rdev->surface_regs[steal]; | ||
520 | old_object = reg->robj; | ||
521 | /* blow away the mapping */ | ||
522 | DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object); | ||
523 | ttm_bo_unmap_virtual(&old_object->tobj); | ||
524 | old_object->surface_reg = -1; | ||
525 | i = steal; | ||
526 | } | ||
527 | |||
528 | robj->surface_reg = i; | ||
529 | reg->robj = robj; | ||
530 | |||
531 | out: | ||
532 | radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch, | ||
533 | robj->tobj.mem.mm_node->start << PAGE_SHIFT, | ||
534 | robj->tobj.num_pages << PAGE_SHIFT); | ||
535 | return 0; | ||
536 | } | ||
537 | |||
538 | void radeon_object_clear_surface_reg(struct radeon_object *robj) | ||
539 | { | ||
540 | struct radeon_device *rdev = robj->rdev; | ||
541 | struct radeon_surface_reg *reg; | ||
542 | |||
543 | if (robj->surface_reg == -1) | ||
544 | return; | ||
545 | |||
546 | reg = &rdev->surface_regs[robj->surface_reg]; | ||
547 | radeon_clear_surface_reg(rdev, robj->surface_reg); | ||
548 | |||
549 | reg->robj = NULL; | ||
550 | robj->surface_reg = -1; | ||
551 | } | ||
552 | |||
553 | void radeon_object_set_tiling_flags(struct radeon_object *robj, | ||
554 | uint32_t tiling_flags, uint32_t pitch) | ||
555 | { | ||
556 | robj->tiling_flags = tiling_flags; | ||
557 | robj->pitch = pitch; | ||
558 | } | ||
559 | |||
560 | void radeon_object_get_tiling_flags(struct radeon_object *robj, | ||
561 | uint32_t *tiling_flags, | ||
562 | uint32_t *pitch) | ||
563 | { | ||
564 | if (tiling_flags) | ||
565 | *tiling_flags = robj->tiling_flags; | ||
566 | if (pitch) | ||
567 | *pitch = robj->pitch; | ||
568 | } | ||
569 | |||
570 | int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, | ||
571 | bool force_drop) | ||
572 | { | ||
573 | if (!(robj->tiling_flags & RADEON_TILING_SURFACE)) | ||
574 | return 0; | ||
575 | |||
576 | if (force_drop) { | ||
577 | radeon_object_clear_surface_reg(robj); | ||
578 | return 0; | ||
579 | } | ||
580 | |||
581 | if (robj->tobj.mem.mem_type != TTM_PL_VRAM) { | ||
582 | if (!has_moved) | ||
583 | return 0; | ||
584 | |||
585 | if (robj->surface_reg >= 0) | ||
586 | radeon_object_clear_surface_reg(robj); | ||
587 | return 0; | ||
588 | } | ||
589 | |||
590 | if ((robj->surface_reg >= 0) && !has_moved) | ||
591 | return 0; | ||
592 | |||
593 | return radeon_object_get_surface_reg(robj); | ||
594 | } | ||
595 | |||
596 | void radeon_bo_move_notify(struct ttm_buffer_object *bo, | ||
597 | struct ttm_mem_reg *mem) | ||
598 | { | ||
599 | struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); | ||
600 | radeon_object_check_tiling(robj, 0, 1); | ||
601 | } | ||
602 | |||
603 | void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | ||
604 | { | ||
605 | struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); | ||
606 | radeon_object_check_tiling(robj, 0, 0); | ||
607 | } | ||
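The surface-register code added above is in effect a small fixed-size allocator: reuse the BO's existing slot, otherwise take the first free one, otherwise steal a slot whose owner is not pinned. A simplified user-space model of that policy; the eight-slot count stands in for RADEON_GEM_MAX_SURFACES and is an assumption here:

#include <stdio.h>

#define MAX_SURFACES 8	/* assumed stand-in for RADEON_GEM_MAX_SURFACES */

struct bo { int pin_count; int surface_reg; };
struct surface_slot { struct bo *owner; };

static struct surface_slot regs[MAX_SURFACES];

/* Mirrors the allocation policy of radeon_object_get_surface_reg(). */
static int get_surface_reg(struct bo *bo)
{
	int i, steal = -1;

	if (bo->surface_reg >= 0)
		return bo->surface_reg;		/* already owns a slot */

	for (i = 0; i < MAX_SURFACES; i++) {
		if (!regs[i].owner)
			break;			/* free slot found */
		if (regs[i].owner->pin_count == 0)
			steal = i;		/* candidate to steal */
	}
	if (i == MAX_SURFACES) {
		if (steal == -1)
			return -1;		/* every slot owned by a pinned BO */
		regs[steal].owner->surface_reg = -1;	/* evict the old owner */
		i = steal;
	}
	regs[i].owner = bo;
	bo->surface_reg = i;
	return i;
}

int main(void)
{
	struct bo a = { .pin_count = 0, .surface_reg = -1 };
	printf("allocated surface reg %d\n", get_surface_reg(&a));
	return 0;
}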
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index a853261d1881..60d159308b88 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
@@ -126,32 +126,19 @@ static void radeon_ib_align(struct radeon_device *rdev, struct radeon_ib *ib) | |||
126 | } | 126 | } |
127 | } | 127 | } |
128 | 128 | ||
129 | static void radeon_ib_cpu_flush(struct radeon_device *rdev, | ||
130 | struct radeon_ib *ib) | ||
131 | { | ||
132 | unsigned long tmp; | ||
133 | unsigned i; | ||
134 | |||
135 | /* To force CPU cache flush ugly but seems reliable */ | ||
136 | for (i = 0; i < ib->length_dw; i += (rdev->cp.align_mask + 1)) { | ||
137 | tmp = readl(&ib->ptr[i]); | ||
138 | } | ||
139 | } | ||
140 | |||
141 | int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) | 129 | int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) |
142 | { | 130 | { |
143 | int r = 0; | 131 | int r = 0; |
144 | 132 | ||
145 | mutex_lock(&rdev->ib_pool.mutex); | 133 | mutex_lock(&rdev->ib_pool.mutex); |
146 | radeon_ib_align(rdev, ib); | 134 | radeon_ib_align(rdev, ib); |
147 | radeon_ib_cpu_flush(rdev, ib); | ||
148 | if (!ib->length_dw || !rdev->cp.ready) { | 135 | if (!ib->length_dw || !rdev->cp.ready) { |
149 | /* TODO: Nothing in the IB to report. */ | 136 | /* TODO: Nothing in the IB to report. */ |
150 | mutex_unlock(&rdev->ib_pool.mutex); | 137 | mutex_unlock(&rdev->ib_pool.mutex); |
151 | DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); | 138 | DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); |
152 | return -EINVAL; | 139 | return -EINVAL; |
153 | } | 140 | } |
154 | /* 64 dwords should be enought for fence too */ | 141 | /* 64 dwords should be enough for fence too */ |
155 | r = radeon_ring_lock(rdev, 64); | 142 | r = radeon_ring_lock(rdev, 64); |
156 | if (r) { | 143 | if (r) { |
157 | DRM_ERROR("radeon: scheduling IB failed (%d).\n", r); | 144 | DRM_ERROR("radeon: scheduling IB failed (%d).\n", r); |
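The removed radeon_ib_cpu_flush() read back one dword per alignment-sized chunk of the IB; its own comment called this "ugly but seems reliable". With buffer objects now placed write-combined or uncached (see the TTM placement flags change in radeon_object.c above), the loop is presumably redundant, which would explain its removal. A user-space model of what the dropped code did; the volatile read stands in for readl() and the 16-dword alignment is a sample value:

#include <stdint.h>
#include <stddef.h>

/* Touch one dword per alignment-sized chunk so the CPU cache lines covering
 * the IB are (hopefully) pushed out before the GPU fetches it. */
static void ib_cpu_flush(volatile uint32_t *ib, size_t length_dw,
			 unsigned align_mask /* e.g. 15 for 16-dword alignment */)
{
	volatile uint32_t tmp;
	size_t i;

	for (i = 0; i < length_dw; i += align_mask + 1)
		tmp = ib[i];	/* readl() in the original code */
	(void)tmp;
}

int main(void)
{
	uint32_t ib[64] = { 0 };

	ib_cpu_flush(ib, 64, 15);	/* sample 64-dword IB */
	return 0;
}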
diff --git a/drivers/gpu/drm/radeon/radeon_share.h b/drivers/gpu/drm/radeon/radeon_share.h new file mode 100644 index 000000000000..63a773578f17 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_share.h | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | #ifndef __RADEON_SHARE_H__ | ||
29 | #define __RADEON_SHARE_H__ | ||
30 | |||
31 | void r100_vram_init_sizes(struct radeon_device *rdev); | ||
32 | |||
33 | void rs690_line_buffer_adjust(struct radeon_device *rdev, | ||
34 | struct drm_display_mode *mode1, | ||
35 | struct drm_display_mode *mode2); | ||
36 | |||
37 | void rv515_bandwidth_avivo_update(struct radeon_device *rdev); | ||
38 | |||
39 | #endif | ||
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c new file mode 100644 index 000000000000..03c33cf4e14c --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_test.c | |||
@@ -0,0 +1,209 @@ | |||
1 | /* | ||
2 | * Copyright 2009 VMware, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Michel Dänzer | ||
23 | */ | ||
24 | #include <drm/drmP.h> | ||
25 | #include <drm/radeon_drm.h> | ||
26 | #include "radeon_reg.h" | ||
27 | #include "radeon.h" | ||
28 | |||
29 | |||
30 | /* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */ | ||
31 | void radeon_test_moves(struct radeon_device *rdev) | ||
32 | { | ||
33 | struct radeon_object *vram_obj = NULL; | ||
34 | struct radeon_object **gtt_obj = NULL; | ||
35 | struct radeon_fence *fence = NULL; | ||
36 | uint64_t gtt_addr, vram_addr; | ||
37 | unsigned i, n, size; | ||
38 | int r; | ||
39 | |||
40 | size = 1024 * 1024; | ||
41 | |||
42 | /* Number of tests = | ||
43 | * (Total GTT - IB pool - writeback page - ring buffer) / test size | ||
44 | */ | ||
45 | n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - 4096 - | ||
46 | rdev->cp.ring_size) / size; | ||
47 | |||
48 | gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); | ||
49 | if (!gtt_obj) { | ||
50 | DRM_ERROR("Failed to allocate %d pointers\n", n); | ||
51 | r = 1; | ||
52 | goto out_cleanup; | ||
53 | } | ||
54 | |||
55 | r = radeon_object_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM, | ||
56 | false, &vram_obj); | ||
57 | if (r) { | ||
58 | DRM_ERROR("Failed to create VRAM object\n"); | ||
59 | goto out_cleanup; | ||
60 | } | ||
61 | |||
62 | r = radeon_object_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr); | ||
63 | if (r) { | ||
64 | DRM_ERROR("Failed to pin VRAM object\n"); | ||
65 | goto out_cleanup; | ||
66 | } | ||
67 | |||
68 | for (i = 0; i < n; i++) { | ||
69 | void *gtt_map, *vram_map; | ||
70 | void **gtt_start, **gtt_end; | ||
71 | void **vram_start, **vram_end; | ||
72 | |||
73 | r = radeon_object_create(rdev, NULL, size, true, | ||
74 | RADEON_GEM_DOMAIN_GTT, false, gtt_obj + i); | ||
75 | if (r) { | ||
76 | DRM_ERROR("Failed to create GTT object %d\n", i); | ||
77 | goto out_cleanup; | ||
78 | } | ||
79 | |||
80 | r = radeon_object_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr); | ||
81 | if (r) { | ||
82 | DRM_ERROR("Failed to pin GTT object %d\n", i); | ||
83 | goto out_cleanup; | ||
84 | } | ||
85 | |||
86 | r = radeon_object_kmap(gtt_obj[i], &gtt_map); | ||
87 | if (r) { | ||
88 | DRM_ERROR("Failed to map GTT object %d\n", i); | ||
89 | goto out_cleanup; | ||
90 | } | ||
91 | |||
92 | for (gtt_start = gtt_map, gtt_end = gtt_map + size; | ||
93 | gtt_start < gtt_end; | ||
94 | gtt_start++) | ||
95 | *gtt_start = gtt_start; | ||
96 | |||
97 | radeon_object_kunmap(gtt_obj[i]); | ||
98 | |||
99 | r = radeon_fence_create(rdev, &fence); | ||
100 | if (r) { | ||
101 | DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i); | ||
102 | goto out_cleanup; | ||
103 | } | ||
104 | |||
105 | r = radeon_copy(rdev, gtt_addr, vram_addr, size / 4096, fence); | ||
106 | if (r) { | ||
107 | DRM_ERROR("Failed GTT->VRAM copy %d\n", i); | ||
108 | goto out_cleanup; | ||
109 | } | ||
110 | |||
111 | r = radeon_fence_wait(fence, false); | ||
112 | if (r) { | ||
113 | DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i); | ||
114 | goto out_cleanup; | ||
115 | } | ||
116 | |||
117 | radeon_fence_unref(&fence); | ||
118 | |||
119 | r = radeon_object_kmap(vram_obj, &vram_map); | ||
120 | if (r) { | ||
121 | DRM_ERROR("Failed to map VRAM object after copy %d\n", i); | ||
122 | goto out_cleanup; | ||
123 | } | ||
124 | |||
125 | for (gtt_start = gtt_map, gtt_end = gtt_map + size, | ||
126 | vram_start = vram_map, vram_end = vram_map + size; | ||
127 | vram_start < vram_end; | ||
128 | gtt_start++, vram_start++) { | ||
129 | if (*vram_start != gtt_start) { | ||
130 | DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, " | ||
131 | "expected 0x%p (GTT map 0x%p-0x%p)\n", | ||
132 | i, *vram_start, gtt_start, gtt_map, | ||
133 | gtt_end); | ||
134 | radeon_object_kunmap(vram_obj); | ||
135 | goto out_cleanup; | ||
136 | } | ||
137 | *vram_start = vram_start; | ||
138 | } | ||
139 | |||
140 | radeon_object_kunmap(vram_obj); | ||
141 | |||
142 | r = radeon_fence_create(rdev, &fence); | ||
143 | if (r) { | ||
144 | DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i); | ||
145 | goto out_cleanup; | ||
146 | } | ||
147 | |||
148 | r = radeon_copy(rdev, vram_addr, gtt_addr, size / 4096, fence); | ||
149 | if (r) { | ||
150 | DRM_ERROR("Failed VRAM->GTT copy %d\n", i); | ||
151 | goto out_cleanup; | ||
152 | } | ||
153 | |||
154 | r = radeon_fence_wait(fence, false); | ||
155 | if (r) { | ||
156 | DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i); | ||
157 | goto out_cleanup; | ||
158 | } | ||
159 | |||
160 | radeon_fence_unref(&fence); | ||
161 | |||
162 | r = radeon_object_kmap(gtt_obj[i], &gtt_map); | ||
163 | if (r) { | ||
164 | DRM_ERROR("Failed to map GTT object after copy %d\n", i); | ||
165 | goto out_cleanup; | ||
166 | } | ||
167 | |||
168 | for (gtt_start = gtt_map, gtt_end = gtt_map + size, | ||
169 | vram_start = vram_map, vram_end = vram_map + size; | ||
170 | gtt_start < gtt_end; | ||
171 | gtt_start++, vram_start++) { | ||
172 | if (*gtt_start != vram_start) { | ||
173 | DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, " | ||
174 | "expected 0x%p (VRAM map 0x%p-0x%p)\n", | ||
175 | i, *gtt_start, vram_start, vram_map, | ||
176 | vram_end); | ||
177 | radeon_object_kunmap(gtt_obj[i]); | ||
178 | goto out_cleanup; | ||
179 | } | ||
180 | } | ||
181 | |||
182 | radeon_object_kunmap(gtt_obj[i]); | ||
183 | |||
184 | DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", | ||
185 | gtt_addr - rdev->mc.gtt_location); | ||
186 | } | ||
187 | |||
188 | out_cleanup: | ||
189 | if (vram_obj) { | ||
190 | radeon_object_unpin(vram_obj); | ||
191 | radeon_object_unref(&vram_obj); | ||
192 | } | ||
193 | if (gtt_obj) { | ||
194 | for (i = 0; i < n; i++) { | ||
195 | if (gtt_obj[i]) { | ||
196 | radeon_object_unpin(gtt_obj[i]); | ||
197 | radeon_object_unref(&gtt_obj[i]); | ||
198 | } | ||
199 | } | ||
200 | kfree(gtt_obj); | ||
201 | } | ||
202 | if (fence) { | ||
203 | radeon_fence_unref(&fence); | ||
204 | } | ||
205 | if (r) { | ||
206 | printk(KERN_WARNING "Error while testing BO move.\n"); | ||
207 | } | ||
208 | } | ||
209 | |||
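The BO-move test above relies on a simple self-referential fill-and-verify pattern: every pointer-sized slot of the GTT object is written with its own address, the object is blitted to VRAM, and each slot of the VRAM copy must then hold the address of the corresponding GTT slot. A minimal userspace sketch of that pattern follows, with memcpy standing in for radeon_copy() and all names purely illustrative, not driver API:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        size_t size = 1024 * 1024;          /* one test object, like 'size' above */
        void **gtt = malloc(size);
        void **vram = malloc(size);
        if (!gtt || !vram)
            return 1;

        /* fill: *gtt_start = gtt_start */
        for (void **p = gtt; p < (void **)((char *)gtt + size); p++)
            *p = p;

        memcpy(vram, gtt, size);            /* stands in for the GTT->VRAM blit */

        /* verify: slot k of the copy must hold the address of GTT slot k */
        for (size_t k = 0; k < size / sizeof(void *); k++) {
            if (vram[k] != (void *)&gtt[k]) {
                fprintf(stderr, "mismatch at slot %zu\n", k);
                return 1;
            }
        }
        puts("copy verified");
        free(gtt);
        free(vram);
        return 0;
    }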
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 1227a97f5169..15c3531377ed 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -355,23 +355,26 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, | |||
355 | if (!rdev->cp.ready) { | 355 | if (!rdev->cp.ready) { |
356 | /* use memcpy */ | 356 | /* use memcpy */ |
357 | DRM_ERROR("CP is not ready use memcpy.\n"); | 357 | DRM_ERROR("CP is not ready use memcpy.\n"); |
358 | return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); | 358 | goto memcpy; |
359 | } | 359 | } |
360 | 360 | ||
361 | if (old_mem->mem_type == TTM_PL_VRAM && | 361 | if (old_mem->mem_type == TTM_PL_VRAM && |
362 | new_mem->mem_type == TTM_PL_SYSTEM) { | 362 | new_mem->mem_type == TTM_PL_SYSTEM) { |
363 | return radeon_move_vram_ram(bo, evict, interruptible, | 363 | r = radeon_move_vram_ram(bo, evict, interruptible, |
364 | no_wait, new_mem); | 364 | no_wait, new_mem); |
365 | } else if (old_mem->mem_type == TTM_PL_SYSTEM && | 365 | } else if (old_mem->mem_type == TTM_PL_SYSTEM && |
366 | new_mem->mem_type == TTM_PL_VRAM) { | 366 | new_mem->mem_type == TTM_PL_VRAM) { |
367 | return radeon_move_ram_vram(bo, evict, interruptible, | 367 | r = radeon_move_ram_vram(bo, evict, interruptible, |
368 | no_wait, new_mem); | 368 | no_wait, new_mem); |
369 | } else { | 369 | } else { |
370 | r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem); | 370 | r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem); |
371 | if (unlikely(r)) { | ||
372 | return r; | ||
373 | } | ||
374 | } | 371 | } |
372 | |||
373 | if (r) { | ||
374 | memcpy: | ||
375 | r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); | ||
376 | } | ||
377 | |||
375 | return r; | 378 | return r; |
376 | } | 379 | } |
377 | 380 | ||
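The hunk above restructures radeon_bo_move() so that every failure of the accelerated path funnels into one memcpy fallback instead of returning early. A hedged sketch of that control flow, with stubbed functions that are placeholders rather than driver API:

    #include <stdbool.h>
    #include <stdio.h>

    static int move_blit(void)   { return -1; }  /* pretend the blit path failed */
    static int move_memcpy(void) { return 0;  }

    static int bo_move(bool cp_ready)
    {
        int r;

        if (!cp_ready)
            goto memcpy_path;                /* mirrors 'goto memcpy' above */

        r = move_blit();
        if (r == 0)
            return 0;                        /* accelerated move succeeded */

    memcpy_path:
        r = move_memcpy();                   /* last-resort CPU copy */
        return r;
    }

    int main(void)
    {
        printf("move result: %d\n", bo_move(false));
        return 0;
    }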
@@ -429,6 +432,8 @@ static struct ttm_bo_driver radeon_bo_driver = { | |||
429 | .sync_obj_flush = &radeon_sync_obj_flush, | 432 | .sync_obj_flush = &radeon_sync_obj_flush, |
430 | .sync_obj_unref = &radeon_sync_obj_unref, | 433 | .sync_obj_unref = &radeon_sync_obj_unref, |
431 | .sync_obj_ref = &radeon_sync_obj_ref, | 434 | .sync_obj_ref = &radeon_sync_obj_ref, |
435 | .move_notify = &radeon_bo_move_notify, | ||
436 | .fault_reserve_notify = &radeon_bo_fault_reserve_notify, | ||
432 | }; | 437 | }; |
433 | 438 | ||
434 | int radeon_ttm_init(struct radeon_device *rdev) | 439 | int radeon_ttm_init(struct radeon_device *rdev) |
@@ -442,13 +447,14 @@ int radeon_ttm_init(struct radeon_device *rdev) | |||
442 | /* No others user of address space so set it to 0 */ | 447 | /* No others user of address space so set it to 0 */ |
443 | r = ttm_bo_device_init(&rdev->mman.bdev, | 448 | r = ttm_bo_device_init(&rdev->mman.bdev, |
444 | rdev->mman.mem_global_ref.object, | 449 | rdev->mman.mem_global_ref.object, |
445 | &radeon_bo_driver, DRM_FILE_PAGE_OFFSET); | 450 | &radeon_bo_driver, DRM_FILE_PAGE_OFFSET, |
451 | rdev->need_dma32); | ||
446 | if (r) { | 452 | if (r) { |
447 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); | 453 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); |
448 | return r; | 454 | return r; |
449 | } | 455 | } |
450 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0, | 456 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0, |
451 | ((rdev->mc.aper_size) >> PAGE_SHIFT)); | 457 | ((rdev->mc.real_vram_size) >> PAGE_SHIFT)); |
452 | if (r) { | 458 | if (r) { |
453 | DRM_ERROR("Failed initializing VRAM heap.\n"); | 459 | DRM_ERROR("Failed initializing VRAM heap.\n"); |
454 | return r; | 460 | return r; |
@@ -465,7 +471,7 @@ int radeon_ttm_init(struct radeon_device *rdev) | |||
465 | return r; | 471 | return r; |
466 | } | 472 | } |
467 | DRM_INFO("radeon: %uM of VRAM memory ready\n", | 473 | DRM_INFO("radeon: %uM of VRAM memory ready\n", |
468 | rdev->mc.vram_size / (1024 * 1024)); | 474 | rdev->mc.real_vram_size / (1024 * 1024)); |
469 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0, | 475 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0, |
470 | ((rdev->mc.gtt_size) >> PAGE_SHIFT)); | 476 | ((rdev->mc.gtt_size) >> PAGE_SHIFT)); |
471 | if (r) { | 477 | if (r) { |
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index cc074b5a8f74..b29affd9c5d8 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <drm/drmP.h> | 29 | #include <drm/drmP.h> |
30 | #include "radeon_reg.h" | 30 | #include "radeon_reg.h" |
31 | #include "radeon.h" | 31 | #include "radeon.h" |
32 | #include "radeon_share.h" | ||
32 | 33 | ||
33 | /* rs400,rs480 depends on : */ | 34 | /* rs400,rs480 depends on : */ |
34 | void r100_hdp_reset(struct radeon_device *rdev); | 35 | void r100_hdp_reset(struct radeon_device *rdev); |
@@ -164,7 +165,9 @@ int rs400_gart_enable(struct radeon_device *rdev) | |||
164 | WREG32(RADEON_BUS_CNTL, tmp); | 165 | WREG32(RADEON_BUS_CNTL, tmp); |
165 | } | 166 | } |
166 | /* Table should be in 32bits address space so ignore bits above. */ | 167 | /* Table should be in 32bits address space so ignore bits above. */ |
167 | tmp = rdev->gart.table_addr & 0xfffff000; | 168 | tmp = (u32)rdev->gart.table_addr & 0xfffff000; |
169 | tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4; | ||
170 | |||
168 | WREG32_MC(RS480_GART_BASE, tmp); | 171 | WREG32_MC(RS480_GART_BASE, tmp); |
169 | /* TODO: more tweaking here */ | 172 | /* TODO: more tweaking here */ |
170 | WREG32_MC(RS480_GART_FEATURE_ID, | 173 | WREG32_MC(RS480_GART_FEATURE_ID, |
@@ -201,10 +204,17 @@ void rs400_gart_disable(struct radeon_device *rdev) | |||
201 | 204 | ||
202 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | 205 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
203 | { | 206 | { |
207 | uint32_t entry; | ||
208 | |||
204 | if (i < 0 || i > rdev->gart.num_gpu_pages) { | 209 | if (i < 0 || i > rdev->gart.num_gpu_pages) { |
205 | return -EINVAL; | 210 | return -EINVAL; |
206 | } | 211 | } |
207 | rdev->gart.table.ram.ptr[i] = cpu_to_le32(((uint32_t)addr) | 0xC); | 212 | |
213 | entry = (lower_32_bits(addr) & PAGE_MASK) | | ||
214 | ((upper_32_bits(addr) & 0xff) << 4) | | ||
215 | 0xc; | ||
216 | entry = cpu_to_le32(entry); | ||
217 | rdev->gart.table.ram.ptr[i] = entry; | ||
208 | return 0; | 218 | return 0; |
209 | } | 219 | } |
210 | 220 | ||
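The rs400_gart_set_page() change above packs a 40-bit bus address into a 32-bit GART entry: page-address bits [31:12] stay in place, address bits [39:32] land in entry bits [11:4], and the low nibble carries the flag bits 0xc. A hedged sketch of that encoding, assuming 4 KiB pages; the helper name and PAGE_MASK definition are local stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    static uint32_t rs400_style_gart_entry(uint64_t addr)
    {
        uint32_t lo = (uint32_t)addr;            /* lower_32_bits(addr) */
        uint32_t hi = (uint32_t)(addr >> 32);    /* upper_32_bits(addr) */

        return (lo & PAGE_MASK) | ((hi & 0xff) << 4) | 0xc;
    }

    int main(void)
    {
        uint64_t addr = 0x1234567000ULL;         /* page-aligned 40-bit example */
        printf("entry = 0x%08x\n", rs400_style_gart_entry(addr));  /* 0x3456712c */
        return 0;
    }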
@@ -223,10 +233,9 @@ int rs400_mc_init(struct radeon_device *rdev) | |||
223 | 233 | ||
224 | rs400_gpu_init(rdev); | 234 | rs400_gpu_init(rdev); |
225 | rs400_gart_disable(rdev); | 235 | rs400_gart_disable(rdev); |
226 | rdev->mc.gtt_location = rdev->mc.vram_size; | 236 | rdev->mc.gtt_location = rdev->mc.mc_vram_size; |
227 | rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); | 237 | rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); |
228 | rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); | 238 | rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); |
229 | rdev->mc.vram_location = 0xFFFFFFFFUL; | ||
230 | r = radeon_mc_setup(rdev); | 239 | r = radeon_mc_setup(rdev); |
231 | if (r) { | 240 | if (r) { |
232 | return r; | 241 | return r; |
@@ -238,7 +247,7 @@ int rs400_mc_init(struct radeon_device *rdev) | |||
238 | "programming pipes. Bad things might happen.\n"); | 247 | "programming pipes. Bad things might happen.\n"); |
239 | } | 248 | } |
240 | 249 | ||
241 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 250 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
242 | tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); | 251 | tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); |
243 | tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); | 252 | tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); |
244 | WREG32(RADEON_MC_FB_LOCATION, tmp); | 253 | WREG32(RADEON_MC_FB_LOCATION, tmp); |
@@ -284,21 +293,12 @@ void rs400_gpu_init(struct radeon_device *rdev) | |||
284 | */ | 293 | */ |
285 | void rs400_vram_info(struct radeon_device *rdev) | 294 | void rs400_vram_info(struct radeon_device *rdev) |
286 | { | 295 | { |
287 | uint32_t tom; | ||
288 | |||
289 | rs400_gart_adjust_size(rdev); | 296 | rs400_gart_adjust_size(rdev); |
290 | /* DDR for all card after R300 & IGP */ | 297 | /* DDR for all card after R300 & IGP */ |
291 | rdev->mc.vram_is_ddr = true; | 298 | rdev->mc.vram_is_ddr = true; |
292 | rdev->mc.vram_width = 128; | 299 | rdev->mc.vram_width = 128; |
293 | 300 | ||
294 | /* read NB_TOM to get the amount of ram stolen for the GPU */ | 301 | r100_vram_init_sizes(rdev); |
295 | tom = RREG32(RADEON_NB_TOM); | ||
296 | rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); | ||
297 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); | ||
298 | |||
299 | /* Could aper size report 0 ? */ | ||
300 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | ||
301 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | ||
302 | } | 302 | } |
303 | 303 | ||
304 | 304 | ||
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index ab0c967553e6..bbea6dee4a94 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -223,7 +223,7 @@ int rs600_mc_init(struct radeon_device *rdev) | |||
223 | printk(KERN_WARNING "Failed to wait MC idle while " | 223 | printk(KERN_WARNING "Failed to wait MC idle while " |
224 | "programming pipes. Bad things might happen.\n"); | 224 | "programming pipes. Bad things might happen.\n"); |
225 | } | 225 | } |
226 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 226 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
227 | tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16); | 227 | tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16); |
228 | tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16); | 228 | tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16); |
229 | WREG32_MC(RS600_MC_FB_LOCATION, tmp); | 229 | WREG32_MC(RS600_MC_FB_LOCATION, tmp); |
@@ -301,6 +301,11 @@ void rs600_vram_info(struct radeon_device *rdev) | |||
301 | rdev->mc.vram_width = 128; | 301 | rdev->mc.vram_width = 128; |
302 | } | 302 | } |
303 | 303 | ||
304 | void rs600_bandwidth_update(struct radeon_device *rdev) | ||
305 | { | ||
306 | /* FIXME: implement, should this be like rs690 ? */ | ||
307 | } | ||
308 | |||
304 | 309 | ||
305 | /* | 310 | /* |
306 | * Indirect registers accessor | 311 | * Indirect registers accessor |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 79ba85042b5f..839595b00728 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -28,6 +28,9 @@ | |||
28 | #include "drmP.h" | 28 | #include "drmP.h" |
29 | #include "radeon_reg.h" | 29 | #include "radeon_reg.h" |
30 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | #include "rs690r.h" | ||
32 | #include "atom.h" | ||
33 | #include "atom-bits.h" | ||
31 | 34 | ||
32 | /* rs690,rs740 depends on : */ | 35 | /* rs690,rs740 depends on : */ |
33 | void r100_hdp_reset(struct radeon_device *rdev); | 36 | void r100_hdp_reset(struct radeon_device *rdev); |
@@ -64,7 +67,7 @@ int rs690_mc_init(struct radeon_device *rdev) | |||
64 | rs400_gart_disable(rdev); | 67 | rs400_gart_disable(rdev); |
65 | 68 | ||
66 | /* Setup GPU memory space */ | 69 | /* Setup GPU memory space */ |
67 | rdev->mc.gtt_location = rdev->mc.vram_size; | 70 | rdev->mc.gtt_location = rdev->mc.mc_vram_size; |
68 | rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); | 71 | rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); |
69 | rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); | 72 | rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); |
70 | rdev->mc.vram_location = 0xFFFFFFFFUL; | 73 | rdev->mc.vram_location = 0xFFFFFFFFUL; |
@@ -79,7 +82,7 @@ int rs690_mc_init(struct radeon_device *rdev) | |||
79 | printk(KERN_WARNING "Failed to wait MC idle while " | 82 | printk(KERN_WARNING "Failed to wait MC idle while " |
80 | "programming pipes. Bad things might happen.\n"); | 83 | "programming pipes. Bad things might happen.\n"); |
81 | } | 84 | } |
82 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 85 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
83 | tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16); | 86 | tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16); |
84 | tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16); | 87 | tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16); |
85 | WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp); | 88 | WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp); |
@@ -138,9 +141,82 @@ void rs690_gpu_init(struct radeon_device *rdev) | |||
138 | /* | 141 | /* |
139 | * VRAM info. | 142 | * VRAM info. |
140 | */ | 143 | */ |
144 | void rs690_pm_info(struct radeon_device *rdev) | ||
145 | { | ||
146 | int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); | ||
147 | struct _ATOM_INTEGRATED_SYSTEM_INFO *info; | ||
148 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2; | ||
149 | void *ptr; | ||
150 | uint16_t data_offset; | ||
151 | uint8_t frev, crev; | ||
152 | fixed20_12 tmp; | ||
153 | |||
154 | atom_parse_data_header(rdev->mode_info.atom_context, index, NULL, | ||
155 | &frev, &crev, &data_offset); | ||
156 | ptr = rdev->mode_info.atom_context->bios + data_offset; | ||
157 | info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr; | ||
158 | info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr; | ||
159 | /* Get various system informations from bios */ | ||
160 | switch (crev) { | ||
161 | case 1: | ||
162 | tmp.full = rfixed_const(100); | ||
163 | rdev->pm.igp_sideport_mclk.full = rfixed_const(info->ulBootUpMemoryClock); | ||
164 | rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); | ||
165 | rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock)); | ||
166 | rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock)); | ||
167 | rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth); | ||
168 | break; | ||
169 | case 2: | ||
170 | tmp.full = rfixed_const(100); | ||
171 | rdev->pm.igp_sideport_mclk.full = rfixed_const(info_v2->ulBootUpSidePortClock); | ||
172 | rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); | ||
173 | rdev->pm.igp_system_mclk.full = rfixed_const(info_v2->ulBootUpUMAClock); | ||
174 | rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); | ||
175 | rdev->pm.igp_ht_link_clk.full = rfixed_const(info_v2->ulHTLinkFreq); | ||
176 | rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp); | ||
177 | rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth)); | ||
178 | break; | ||
179 | default: | ||
180 | tmp.full = rfixed_const(100); | ||
181 | /* We assume the slower possible clock ie worst case */ | ||
182 | /* DDR 333Mhz */ | ||
183 | rdev->pm.igp_sideport_mclk.full = rfixed_const(333); | ||
184 | /* FIXME: system clock ? */ | ||
185 | rdev->pm.igp_system_mclk.full = rfixed_const(100); | ||
186 | rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); | ||
187 | rdev->pm.igp_ht_link_clk.full = rfixed_const(200); | ||
188 | rdev->pm.igp_ht_link_width.full = rfixed_const(8); | ||
189 | DRM_ERROR("No integrated system info for your GPU, using safe default\n"); | ||
190 | break; | ||
191 | } | ||
192 | /* Compute various bandwidth */ | ||
193 | /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */ | ||
194 | tmp.full = rfixed_const(4); | ||
195 | rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp); | ||
196 | /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8 | ||
197 | * = ht_clk * ht_width / 5 | ||
198 | */ | ||
199 | tmp.full = rfixed_const(5); | ||
200 | rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk, | ||
201 | rdev->pm.igp_ht_link_width); | ||
202 | rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp); | ||
203 | if (tmp.full < rdev->pm.max_bandwidth.full) { | ||
204 | /* HT link is a limiting factor */ | ||
205 | rdev->pm.max_bandwidth.full = tmp.full; | ||
206 | } | ||
207 | /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7 | ||
208 | * = (sideport_clk * 14) / 10 | ||
209 | */ | ||
210 | tmp.full = rfixed_const(14); | ||
211 | rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp); | ||
212 | tmp.full = rfixed_const(10); | ||
213 | rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp); | ||
214 | } | ||
215 | |||
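rs690_pm_info() above derives three link bandwidths from the BIOS clocks using the simplified formulas in its comments. A hedged floating-point restatement of that arithmetic (the driver does the same work in 20.12 fixed point); the clock values are illustrative MHz figures, not read from any real board:

    #include <stdio.h>

    int main(void)
    {
        double system_mclk = 400.0;    /* UMA/K8 memory clock */
        double ht_clk = 1000.0, ht_width = 16.0;
        double sideport_mclk = 333.0;

        double k8_bw = system_mclk * 4.0;            /* (clk/2) * 2 * 8 * 0.5 */
        double ht_bw = ht_clk * ht_width / 5.0;      /* clk * 2 * width / 8 * 0.8 */
        double sp_bw = sideport_mclk * 14.0 / 10.0;  /* (clk/2) * 2 * 2 * 0.7 */

        printf("k8=%.1f ht=%.1f sideport=%.1f\n", k8_bw, ht_bw, sp_bw);
        return 0;
    }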
141 | void rs690_vram_info(struct radeon_device *rdev) | 216 | void rs690_vram_info(struct radeon_device *rdev) |
142 | { | 217 | { |
143 | uint32_t tmp; | 218 | uint32_t tmp; |
219 | fixed20_12 a; | ||
144 | 220 | ||
145 | rs400_gart_adjust_size(rdev); | 221 | rs400_gart_adjust_size(rdev); |
146 | /* DDR for all card after R300 & IGP */ | 222 | /* DDR for all card after R300 & IGP */ |
@@ -152,12 +228,409 @@ void rs690_vram_info(struct radeon_device *rdev) | |||
152 | } else { | 228 | } else { |
153 | rdev->mc.vram_width = 64; | 229 | rdev->mc.vram_width = 64; |
154 | } | 230 | } |
155 | rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | 231 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
232 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | ||
156 | 233 | ||
157 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | 234 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
158 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | 235 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); |
236 | rs690_pm_info(rdev); | ||
237 | /* FIXME: we should enforce default clock in case GPU is not in | ||
238 | * default setup | ||
239 | */ | ||
240 | a.full = rfixed_const(100); | ||
241 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); | ||
242 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
243 | a.full = rfixed_const(16); | ||
244 | /* core_bandwidth = sclk(Mhz) * 16 */ | ||
245 | rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); | ||
246 | } | ||
247 | |||
248 | void rs690_line_buffer_adjust(struct radeon_device *rdev, | ||
249 | struct drm_display_mode *mode1, | ||
250 | struct drm_display_mode *mode2) | ||
251 | { | ||
252 | u32 tmp; | ||
253 | |||
254 | /* | ||
255 | * Line Buffer Setup | ||
256 | * There is a single line buffer shared by both display controllers. | ||
257 | * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between | ||
258 | * the display controllers. The paritioning can either be done | ||
259 | * manually or via one of four preset allocations specified in bits 1:0: | ||
260 | * 0 - line buffer is divided in half and shared between crtc | ||
261 | * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 | ||
262 | * 2 - D1 gets the whole buffer | ||
263 | * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 | ||
264 | * Setting bit 2 of DC_LB_MEMORY_SPLIT controls switches to manual | ||
265 | * allocation mode. In manual allocation mode, D1 always starts at 0, | ||
266 | * D1 end/2 is specified in bits 14:4; D2 allocation follows D1. | ||
267 | */ | ||
268 | tmp = RREG32(DC_LB_MEMORY_SPLIT) & ~DC_LB_MEMORY_SPLIT_MASK; | ||
269 | tmp &= ~DC_LB_MEMORY_SPLIT_SHIFT_MODE; | ||
270 | /* auto */ | ||
271 | if (mode1 && mode2) { | ||
272 | if (mode1->hdisplay > mode2->hdisplay) { | ||
273 | if (mode1->hdisplay > 2560) | ||
274 | tmp |= DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; | ||
275 | else | ||
276 | tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | ||
277 | } else if (mode2->hdisplay > mode1->hdisplay) { | ||
278 | if (mode2->hdisplay > 2560) | ||
279 | tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; | ||
280 | else | ||
281 | tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | ||
282 | } else | ||
283 | tmp |= AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; | ||
284 | } else if (mode1) { | ||
285 | tmp |= DC_LB_MEMORY_SPLIT_D1_ONLY; | ||
286 | } else if (mode2) { | ||
287 | tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; | ||
288 | } | ||
289 | WREG32(DC_LB_MEMORY_SPLIT, tmp); | ||
159 | } | 290 | } |
160 | 291 | ||
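The line-buffer comment in rs690_line_buffer_adjust() above describes four preset splits of the shared buffer between the two display controllers, with the wider display taking 3/4 once it exceeds 2560 pixels. A hedged sketch of that selection rule; the enum values mirror the DC_LB_MEMORY_SPLIT_* encodings in the header added below, while the function itself is illustrative:

    #include <stdio.h>

    enum lb_split { D1HALF_D2HALF = 0, D1_3Q_D2_1Q = 1, D1_ONLY = 2, D1_1Q_D2_3Q = 3 };

    static enum lb_split pick_split(int hdisplay1, int hdisplay2)
    {
        if (hdisplay1 && hdisplay2) {
            if (hdisplay1 > hdisplay2)
                return hdisplay1 > 2560 ? D1_3Q_D2_1Q : D1HALF_D2HALF;
            if (hdisplay2 > hdisplay1)
                return hdisplay2 > 2560 ? D1_1Q_D2_3Q : D1HALF_D2HALF;
            return D1HALF_D2HALF;            /* equal widths: split evenly */
        }
        if (hdisplay1)
            return D1_ONLY;                  /* only D1 active */
        if (hdisplay2)
            return D1_1Q_D2_3Q;              /* only D2 active */
        return D1HALF_D2HALF;                /* nothing enabled */
    }

    int main(void)
    {
        printf("split = %d\n", pick_split(2560, 3840));   /* prints 3 */
        return 0;
    }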
292 | struct rs690_watermark { | ||
293 | u32 lb_request_fifo_depth; | ||
294 | fixed20_12 num_line_pair; | ||
295 | fixed20_12 estimated_width; | ||
296 | fixed20_12 worst_case_latency; | ||
297 | fixed20_12 consumption_rate; | ||
298 | fixed20_12 active_time; | ||
299 | fixed20_12 dbpp; | ||
300 | fixed20_12 priority_mark_max; | ||
301 | fixed20_12 priority_mark; | ||
302 | fixed20_12 sclk; | ||
303 | }; | ||
304 | |||
305 | void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, | ||
306 | struct radeon_crtc *crtc, | ||
307 | struct rs690_watermark *wm) | ||
308 | { | ||
309 | struct drm_display_mode *mode = &crtc->base.mode; | ||
310 | fixed20_12 a, b, c; | ||
311 | fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; | ||
312 | fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; | ||
313 | /* FIXME: detect IGP with sideport memory, i don't think there is any | ||
314 | * such product available | ||
315 | */ | ||
316 | bool sideport = false; | ||
317 | |||
318 | if (!crtc->base.enabled) { | ||
319 | /* FIXME: wouldn't it better to set priority mark to maximum */ | ||
320 | wm->lb_request_fifo_depth = 4; | ||
321 | return; | ||
322 | } | ||
323 | |||
324 | if (crtc->vsc.full > rfixed_const(2)) | ||
325 | wm->num_line_pair.full = rfixed_const(2); | ||
326 | else | ||
327 | wm->num_line_pair.full = rfixed_const(1); | ||
328 | |||
329 | b.full = rfixed_const(mode->crtc_hdisplay); | ||
330 | c.full = rfixed_const(256); | ||
331 | a.full = rfixed_mul(wm->num_line_pair, b); | ||
332 | request_fifo_depth.full = rfixed_div(a, c); | ||
333 | if (a.full < rfixed_const(4)) { | ||
334 | wm->lb_request_fifo_depth = 4; | ||
335 | } else { | ||
336 | wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); | ||
337 | } | ||
338 | |||
339 | /* Determine consumption rate | ||
340 | * pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000) | ||
341 | * vtaps = number of vertical taps, | ||
342 | * vsc = vertical scaling ratio, defined as source/destination | ||
343 | * hsc = horizontal scaling ration, defined as source/destination | ||
344 | */ | ||
345 | a.full = rfixed_const(mode->clock); | ||
346 | b.full = rfixed_const(1000); | ||
347 | a.full = rfixed_div(a, b); | ||
348 | pclk.full = rfixed_div(b, a); | ||
349 | if (crtc->rmx_type != RMX_OFF) { | ||
350 | b.full = rfixed_const(2); | ||
351 | if (crtc->vsc.full > b.full) | ||
352 | b.full = crtc->vsc.full; | ||
353 | b.full = rfixed_mul(b, crtc->hsc); | ||
354 | c.full = rfixed_const(2); | ||
355 | b.full = rfixed_div(b, c); | ||
356 | consumption_time.full = rfixed_div(pclk, b); | ||
357 | } else { | ||
358 | consumption_time.full = pclk.full; | ||
359 | } | ||
360 | a.full = rfixed_const(1); | ||
361 | wm->consumption_rate.full = rfixed_div(a, consumption_time); | ||
362 | |||
363 | |||
364 | /* Determine line time | ||
365 | * LineTime = total time for one line of displayhtotal | ||
366 | * LineTime = total number of horizontal pixels | ||
367 | * pclk = pixel clock period(ns) | ||
368 | */ | ||
369 | a.full = rfixed_const(crtc->base.mode.crtc_htotal); | ||
370 | line_time.full = rfixed_mul(a, pclk); | ||
371 | |||
372 | /* Determine active time | ||
373 | * ActiveTime = time of active region of display within one line, | ||
374 | * hactive = total number of horizontal active pixels | ||
375 | * htotal = total number of horizontal pixels | ||
376 | */ | ||
377 | a.full = rfixed_const(crtc->base.mode.crtc_htotal); | ||
378 | b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); | ||
379 | wm->active_time.full = rfixed_mul(line_time, b); | ||
380 | wm->active_time.full = rfixed_div(wm->active_time, a); | ||
381 | |||
382 | /* Maximun bandwidth is the minimun bandwidth of all component */ | ||
383 | rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; | ||
384 | if (sideport) { | ||
385 | if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && | ||
386 | rdev->pm.sideport_bandwidth.full) | ||
387 | rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; | ||
388 | read_delay_latency.full = rfixed_const(370 * 800 * 1000); | ||
389 | read_delay_latency.full = rfixed_div(read_delay_latency, | ||
390 | rdev->pm.igp_sideport_mclk); | ||
391 | } else { | ||
392 | if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full && | ||
393 | rdev->pm.k8_bandwidth.full) | ||
394 | rdev->pm.max_bandwidth = rdev->pm.k8_bandwidth; | ||
395 | if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full && | ||
396 | rdev->pm.ht_bandwidth.full) | ||
397 | rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth; | ||
398 | read_delay_latency.full = rfixed_const(5000); | ||
399 | } | ||
400 | |||
401 | /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */ | ||
402 | a.full = rfixed_const(16); | ||
403 | rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a); | ||
404 | a.full = rfixed_const(1000); | ||
405 | rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk); | ||
406 | /* Determine chunk time | ||
407 | * ChunkTime = the time it takes the DCP to send one chunk of data | ||
408 | * to the LB which consists of pipeline delay and inter chunk gap | ||
409 | * sclk = system clock(ns) | ||
410 | */ | ||
411 | a.full = rfixed_const(256 * 13); | ||
412 | chunk_time.full = rfixed_mul(rdev->pm.sclk, a); | ||
413 | a.full = rfixed_const(10); | ||
414 | chunk_time.full = rfixed_div(chunk_time, a); | ||
415 | |||
416 | /* Determine the worst case latency | ||
417 | * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) | ||
418 | * WorstCaseLatency = worst case time from urgent to when the MC starts | ||
419 | * to return data | ||
420 | * READ_DELAY_IDLE_MAX = constant of 1us | ||
421 | * ChunkTime = time it takes the DCP to send one chunk of data to the LB | ||
422 | * which consists of pipeline delay and inter chunk gap | ||
423 | */ | ||
424 | if (rfixed_trunc(wm->num_line_pair) > 1) { | ||
425 | a.full = rfixed_const(3); | ||
426 | wm->worst_case_latency.full = rfixed_mul(a, chunk_time); | ||
427 | wm->worst_case_latency.full += read_delay_latency.full; | ||
428 | } else { | ||
429 | a.full = rfixed_const(2); | ||
430 | wm->worst_case_latency.full = rfixed_mul(a, chunk_time); | ||
431 | wm->worst_case_latency.full += read_delay_latency.full; | ||
432 | } | ||
433 | |||
434 | /* Determine the tolerable latency | ||
435 | * TolerableLatency = Any given request has only 1 line time | ||
436 | * for the data to be returned | ||
437 | * LBRequestFifoDepth = Number of chunk requests the LB can | ||
438 | * put into the request FIFO for a display | ||
439 | * LineTime = total time for one line of display | ||
440 | * ChunkTime = the time it takes the DCP to send one chunk | ||
441 | * of data to the LB which consists of | ||
442 | * pipeline delay and inter chunk gap | ||
443 | */ | ||
444 | if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) { | ||
445 | tolerable_latency.full = line_time.full; | ||
446 | } else { | ||
447 | tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2); | ||
448 | tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; | ||
449 | tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time); | ||
450 | tolerable_latency.full = line_time.full - tolerable_latency.full; | ||
451 | } | ||
452 | /* We assume worst case 32bits (4 bytes) */ | ||
453 | wm->dbpp.full = rfixed_const(4 * 8); | ||
454 | |||
455 | /* Determine the maximum priority mark | ||
456 | * width = viewport width in pixels | ||
457 | */ | ||
458 | a.full = rfixed_const(16); | ||
459 | wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); | ||
460 | wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); | ||
461 | |||
462 | /* Determine estimated width */ | ||
463 | estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; | ||
464 | estimated_width.full = rfixed_div(estimated_width, consumption_time); | ||
465 | if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { | ||
466 | wm->priority_mark.full = rfixed_const(10); | ||
467 | } else { | ||
468 | a.full = rfixed_const(16); | ||
469 | wm->priority_mark.full = rfixed_div(estimated_width, a); | ||
470 | wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; | ||
471 | } | ||
472 | } | ||
473 | |||
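rs690_crtc_bandwidth_compute() above derives the per-CRTC timing quantities (pixel period, line time, active time, consumption rate) in 20.12 fixed point. A hedged floating-point restatement of those formulas, using illustrative 1080p-style mode values; mode_clock is in kHz as in struct drm_display_mode, and scaling is ignored:

    #include <stdio.h>

    int main(void)
    {
        double mode_clock = 148500.0;   /* kHz */
        double htotal = 2200.0, hdisplay = 1920.0;

        double pclk = 1000.0 / (mode_clock / 1000.0);   /* pixel period, ns */
        double line_time = htotal * pclk;               /* ns per scanline */
        double active_time = line_time * hdisplay / htotal;
        double consumption_rate = 1.0 / pclk;           /* pixels per ns, no scaling */

        printf("pclk=%.3f ns line=%.1f ns active=%.1f ns rate=%.4f px/ns\n",
               pclk, line_time, active_time, consumption_rate);
        return 0;
    }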
474 | void rs690_bandwidth_update(struct radeon_device *rdev) | ||
475 | { | ||
476 | struct drm_display_mode *mode0 = NULL; | ||
477 | struct drm_display_mode *mode1 = NULL; | ||
478 | struct rs690_watermark wm0; | ||
479 | struct rs690_watermark wm1; | ||
480 | u32 tmp; | ||
481 | fixed20_12 priority_mark02, priority_mark12, fill_rate; | ||
482 | fixed20_12 a, b; | ||
483 | |||
484 | if (rdev->mode_info.crtcs[0]->base.enabled) | ||
485 | mode0 = &rdev->mode_info.crtcs[0]->base.mode; | ||
486 | if (rdev->mode_info.crtcs[1]->base.enabled) | ||
487 | mode1 = &rdev->mode_info.crtcs[1]->base.mode; | ||
488 | /* | ||
489 | * Set display0/1 priority up in the memory controller for | ||
490 | * modes if the user specifies HIGH for displaypriority | ||
491 | * option. | ||
492 | */ | ||
493 | if (rdev->disp_priority == 2) { | ||
494 | tmp = RREG32_MC(MC_INIT_MISC_LAT_TIMER); | ||
495 | tmp &= ~MC_DISP1R_INIT_LAT_MASK; | ||
496 | tmp &= ~MC_DISP0R_INIT_LAT_MASK; | ||
497 | if (mode1) | ||
498 | tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT); | ||
499 | if (mode0) | ||
500 | tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT); | ||
501 | WREG32_MC(MC_INIT_MISC_LAT_TIMER, tmp); | ||
502 | } | ||
503 | rs690_line_buffer_adjust(rdev, mode0, mode1); | ||
504 | |||
505 | if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) | ||
506 | WREG32(DCP_CONTROL, 0); | ||
507 | if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) | ||
508 | WREG32(DCP_CONTROL, 2); | ||
509 | |||
510 | rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); | ||
511 | rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); | ||
512 | |||
513 | tmp = (wm0.lb_request_fifo_depth - 1); | ||
514 | tmp |= (wm1.lb_request_fifo_depth - 1) << 16; | ||
515 | WREG32(LB_MAX_REQ_OUTSTANDING, tmp); | ||
516 | |||
517 | if (mode0 && mode1) { | ||
518 | if (rfixed_trunc(wm0.dbpp) > 64) | ||
519 | a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); | ||
520 | else | ||
521 | a.full = wm0.num_line_pair.full; | ||
522 | if (rfixed_trunc(wm1.dbpp) > 64) | ||
523 | b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); | ||
524 | else | ||
525 | b.full = wm1.num_line_pair.full; | ||
526 | a.full += b.full; | ||
527 | fill_rate.full = rfixed_div(wm0.sclk, a); | ||
528 | if (wm0.consumption_rate.full > fill_rate.full) { | ||
529 | b.full = wm0.consumption_rate.full - fill_rate.full; | ||
530 | b.full = rfixed_mul(b, wm0.active_time); | ||
531 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
532 | wm0.consumption_rate); | ||
533 | a.full = a.full + b.full; | ||
534 | b.full = rfixed_const(16 * 1000); | ||
535 | priority_mark02.full = rfixed_div(a, b); | ||
536 | } else { | ||
537 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
538 | wm0.consumption_rate); | ||
539 | b.full = rfixed_const(16 * 1000); | ||
540 | priority_mark02.full = rfixed_div(a, b); | ||
541 | } | ||
542 | if (wm1.consumption_rate.full > fill_rate.full) { | ||
543 | b.full = wm1.consumption_rate.full - fill_rate.full; | ||
544 | b.full = rfixed_mul(b, wm1.active_time); | ||
545 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
546 | wm1.consumption_rate); | ||
547 | a.full = a.full + b.full; | ||
548 | b.full = rfixed_const(16 * 1000); | ||
549 | priority_mark12.full = rfixed_div(a, b); | ||
550 | } else { | ||
551 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
552 | wm1.consumption_rate); | ||
553 | b.full = rfixed_const(16 * 1000); | ||
554 | priority_mark12.full = rfixed_div(a, b); | ||
555 | } | ||
556 | if (wm0.priority_mark.full > priority_mark02.full) | ||
557 | priority_mark02.full = wm0.priority_mark.full; | ||
558 | if (rfixed_trunc(priority_mark02) < 0) | ||
559 | priority_mark02.full = 0; | ||
560 | if (wm0.priority_mark_max.full > priority_mark02.full) | ||
561 | priority_mark02.full = wm0.priority_mark_max.full; | ||
562 | if (wm1.priority_mark.full > priority_mark12.full) | ||
563 | priority_mark12.full = wm1.priority_mark.full; | ||
564 | if (rfixed_trunc(priority_mark12) < 0) | ||
565 | priority_mark12.full = 0; | ||
566 | if (wm1.priority_mark_max.full > priority_mark12.full) | ||
567 | priority_mark12.full = wm1.priority_mark_max.full; | ||
568 | WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | ||
569 | WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | ||
570 | WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | ||
571 | WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | ||
572 | } else if (mode0) { | ||
573 | if (rfixed_trunc(wm0.dbpp) > 64) | ||
574 | a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); | ||
575 | else | ||
576 | a.full = wm0.num_line_pair.full; | ||
577 | fill_rate.full = rfixed_div(wm0.sclk, a); | ||
578 | if (wm0.consumption_rate.full > fill_rate.full) { | ||
579 | b.full = wm0.consumption_rate.full - fill_rate.full; | ||
580 | b.full = rfixed_mul(b, wm0.active_time); | ||
581 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
582 | wm0.consumption_rate); | ||
583 | a.full = a.full + b.full; | ||
584 | b.full = rfixed_const(16 * 1000); | ||
585 | priority_mark02.full = rfixed_div(a, b); | ||
586 | } else { | ||
587 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
588 | wm0.consumption_rate); | ||
589 | b.full = rfixed_const(16 * 1000); | ||
590 | priority_mark02.full = rfixed_div(a, b); | ||
591 | } | ||
592 | if (wm0.priority_mark.full > priority_mark02.full) | ||
593 | priority_mark02.full = wm0.priority_mark.full; | ||
594 | if (rfixed_trunc(priority_mark02) < 0) | ||
595 | priority_mark02.full = 0; | ||
596 | if (wm0.priority_mark_max.full > priority_mark02.full) | ||
597 | priority_mark02.full = wm0.priority_mark_max.full; | ||
598 | WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | ||
599 | WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | ||
600 | WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | ||
601 | WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | ||
602 | } else { | ||
603 | if (rfixed_trunc(wm1.dbpp) > 64) | ||
604 | a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); | ||
605 | else | ||
606 | a.full = wm1.num_line_pair.full; | ||
607 | fill_rate.full = rfixed_div(wm1.sclk, a); | ||
608 | if (wm1.consumption_rate.full > fill_rate.full) { | ||
609 | b.full = wm1.consumption_rate.full - fill_rate.full; | ||
610 | b.full = rfixed_mul(b, wm1.active_time); | ||
611 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
612 | wm1.consumption_rate); | ||
613 | a.full = a.full + b.full; | ||
614 | b.full = rfixed_const(16 * 1000); | ||
615 | priority_mark12.full = rfixed_div(a, b); | ||
616 | } else { | ||
617 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
618 | wm1.consumption_rate); | ||
619 | b.full = rfixed_const(16 * 1000); | ||
620 | priority_mark12.full = rfixed_div(a, b); | ||
621 | } | ||
622 | if (wm1.priority_mark.full > priority_mark12.full) | ||
623 | priority_mark12.full = wm1.priority_mark.full; | ||
624 | if (rfixed_trunc(priority_mark12) < 0) | ||
625 | priority_mark12.full = 0; | ||
626 | if (wm1.priority_mark_max.full > priority_mark12.full) | ||
627 | priority_mark12.full = wm1.priority_mark_max.full; | ||
628 | WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | ||
629 | WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | ||
630 | WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | ||
631 | WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | ||
632 | } | ||
633 | } | ||
161 | 634 | ||
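The bandwidth code above leans on the fixed20_12 helpers (rfixed_const, rfixed_mul, rfixed_div, rfixed_trunc). A hedged sketch of how 20.12 fixed-point arithmetic of that kind behaves, assuming values stored scaled by 2^12; the types and helper names below are local stand-ins, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint32_t full; } fx20_12;

    static fx20_12 fx_const(uint32_t v)  { return (fx20_12){ v << 12 }; }
    static fx20_12 fx_mul(fx20_12 a, fx20_12 b)
    {
        /* product carries 2^24 scale, shift back down to 2^12 */
        return (fx20_12){ (uint32_t)(((uint64_t)a.full * b.full) >> 12) };
    }
    static fx20_12 fx_div(fx20_12 a, fx20_12 b)
    {
        /* pre-scale the dividend so the quotient keeps the 2^12 scale */
        return (fx20_12){ (uint32_t)(((uint64_t)a.full << 12) / b.full) };
    }
    static uint32_t fx_trunc(fx20_12 a)  { return a.full >> 12; }

    int main(void)
    {
        /* e.g. sclk(MHz) = default_sclk / 100, as in rs690_vram_info() above */
        fx20_12 sclk = fx_div(fx_const(40000), fx_const(100));
        printf("sclk = %u MHz\n", fx_trunc(sclk));   /* prints 400 */
        (void)fx_mul;                                /* shown for completeness */
        return 0;
    }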
162 | /* | 635 | /* |
163 | * Indirect registers accessor | 636 | * Indirect registers accessor |
diff --git a/drivers/gpu/drm/radeon/rs690r.h b/drivers/gpu/drm/radeon/rs690r.h new file mode 100644 index 000000000000..c0d9faa2175b --- /dev/null +++ b/drivers/gpu/drm/radeon/rs690r.h | |||
@@ -0,0 +1,99 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | #ifndef RS690R_H | ||
29 | #define RS690R_H | ||
30 | |||
31 | /* RS690/RS740 registers */ | ||
32 | #define MC_INDEX 0x0078 | ||
33 | # define MC_INDEX_MASK 0x1FF | ||
34 | # define MC_INDEX_WR_EN (1 << 9) | ||
35 | # define MC_INDEX_WR_ACK 0x7F | ||
36 | #define MC_DATA 0x007C | ||
37 | #define HDP_FB_LOCATION 0x0134 | ||
38 | #define DC_LB_MEMORY_SPLIT 0x6520 | ||
39 | #define DC_LB_MEMORY_SPLIT_MASK 0x00000003 | ||
40 | #define DC_LB_MEMORY_SPLIT_SHIFT 0 | ||
41 | #define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0 | ||
42 | #define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1 | ||
43 | #define DC_LB_MEMORY_SPLIT_D1_ONLY 2 | ||
44 | #define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3 | ||
45 | #define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2) | ||
46 | #define DC_LB_DISP1_END_ADR_SHIFT 4 | ||
47 | #define DC_LB_DISP1_END_ADR_MASK 0x00007FF0 | ||
48 | #define D1MODE_PRIORITY_A_CNT 0x6548 | ||
49 | #define MODE_PRIORITY_MARK_MASK 0x00007FFF | ||
50 | #define MODE_PRIORITY_OFF (1 << 16) | ||
51 | #define MODE_PRIORITY_ALWAYS_ON (1 << 20) | ||
52 | #define MODE_PRIORITY_FORCE_MASK (1 << 24) | ||
53 | #define D1MODE_PRIORITY_B_CNT 0x654C | ||
54 | #define LB_MAX_REQ_OUTSTANDING 0x6D58 | ||
55 | #define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F | ||
56 | #define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0 | ||
57 | #define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000 | ||
58 | #define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16 | ||
59 | #define DCP_CONTROL 0x6C9C | ||
60 | #define D2MODE_PRIORITY_A_CNT 0x6D48 | ||
61 | #define D2MODE_PRIORITY_B_CNT 0x6D4C | ||
62 | |||
63 | /* MC indirect registers */ | ||
64 | #define MC_STATUS_IDLE (1 << 0) | ||
65 | #define MC_MISC_CNTL 0x18 | ||
66 | #define DISABLE_GTW (1 << 1) | ||
67 | #define GART_INDEX_REG_EN (1 << 12) | ||
68 | #define BLOCK_GFX_D3_EN (1 << 14) | ||
69 | #define GART_FEATURE_ID 0x2B | ||
70 | #define HANG_EN (1 << 11) | ||
71 | #define TLB_ENABLE (1 << 18) | ||
72 | #define P2P_ENABLE (1 << 19) | ||
73 | #define GTW_LAC_EN (1 << 25) | ||
74 | #define LEVEL2_GART (0 << 30) | ||
75 | #define LEVEL1_GART (1 << 30) | ||
76 | #define PDC_EN (1 << 31) | ||
77 | #define GART_BASE 0x2C | ||
78 | #define GART_CACHE_CNTRL 0x2E | ||
79 | # define GART_CACHE_INVALIDATE (1 << 0) | ||
80 | #define MC_STATUS 0x90 | ||
81 | #define MCCFG_FB_LOCATION 0x100 | ||
82 | #define MC_FB_START_MASK 0x0000FFFF | ||
83 | #define MC_FB_START_SHIFT 0 | ||
84 | #define MC_FB_TOP_MASK 0xFFFF0000 | ||
85 | #define MC_FB_TOP_SHIFT 16 | ||
86 | #define MCCFG_AGP_LOCATION 0x101 | ||
87 | #define MC_AGP_START_MASK 0x0000FFFF | ||
88 | #define MC_AGP_START_SHIFT 0 | ||
89 | #define MC_AGP_TOP_MASK 0xFFFF0000 | ||
90 | #define MC_AGP_TOP_SHIFT 16 | ||
91 | #define MCCFG_AGP_BASE 0x102 | ||
92 | #define MCCFG_AGP_BASE_2 0x103 | ||
93 | #define MC_INIT_MISC_LAT_TIMER 0x104 | ||
94 | #define MC_DISP0R_INIT_LAT_SHIFT 8 | ||
95 | #define MC_DISP0R_INIT_LAT_MASK 0x00000F00 | ||
96 | #define MC_DISP1R_INIT_LAT_SHIFT 12 | ||
97 | #define MC_DISP1R_INIT_LAT_MASK 0x0000F000 | ||
98 | |||
99 | #endif | ||
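The MC registers declared in this header are reached through an index/data pair rather than direct MMIO: the register number goes into the index port (with a write-enable bit for stores), the payload moves through the data port, and the index is cleared afterwards, as the rv515_mc_rreg/rv515_mc_wreg accessors further down illustrate for their own index layout. A hedged sketch of that pattern using the rs690-style MC_INDEX/MC_DATA/MC_INDEX_WR_EN values from above; the tiny mmio[] array and helper names are stand-ins for real MMIO, not driver API:

    #include <stdint.h>
    #include <stdio.h>

    #define MC_INDEX        0x0078
    #define MC_INDEX_MASK   0x1FF
    #define MC_INDEX_WR_EN  (1u << 9)
    #define MC_DATA         0x007C

    static uint32_t mmio[0x100];                 /* fake register file */
    static void wreg32(uint32_t r, uint32_t v)   { mmio[r >> 2] = v; }
    static uint32_t rreg32(uint32_t r)           { return mmio[r >> 2]; }

    static void mc_wreg(uint32_t reg, uint32_t v)
    {
        wreg32(MC_INDEX, MC_INDEX_WR_EN | (reg & MC_INDEX_MASK));
        wreg32(MC_DATA, v);
        wreg32(MC_INDEX, 0);
    }

    static uint32_t mc_rreg(uint32_t reg)
    {
        uint32_t v;

        wreg32(MC_INDEX, reg & MC_INDEX_MASK);
        v = rreg32(MC_DATA);
        wreg32(MC_INDEX, 0);
        return v;
    }

    int main(void)
    {
        mc_wreg(0x104, 0xf0f0);                  /* MC_INIT_MISC_LAT_TIMER */
        printf("0x%x\n", mc_rreg(0x104));        /* 0xf0f0: fake file latches MC_DATA */
        return 0;
    }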
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index ffea37b1b3e2..551e608702e4 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -27,8 +27,9 @@ | |||
27 | */ | 27 | */ |
28 | #include <linux/seq_file.h> | 28 | #include <linux/seq_file.h> |
29 | #include "drmP.h" | 29 | #include "drmP.h" |
30 | #include "radeon_reg.h" | 30 | #include "rv515r.h" |
31 | #include "radeon.h" | 31 | #include "radeon.h" |
32 | #include "radeon_share.h" | ||
32 | 33 | ||
33 | /* rv515 depends on : */ | 34 | /* rv515 depends on : */ |
34 | void r100_hdp_reset(struct radeon_device *rdev); | 35 | void r100_hdp_reset(struct radeon_device *rdev); |
@@ -99,26 +100,26 @@ int rv515_mc_init(struct radeon_device *rdev) | |||
99 | "programming pipes. Bad things might happen.\n"); | 100 | "programming pipes. Bad things might happen.\n"); |
100 | } | 101 | } |
101 | /* Write VRAM size in case we are limiting it */ | 102 | /* Write VRAM size in case we are limiting it */ |
102 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); | 103 | WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); |
103 | tmp = REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16); | 104 | tmp = REG_SET(MC_FB_START, rdev->mc.vram_location >> 16); |
104 | WREG32(0x134, tmp); | 105 | WREG32(0x134, tmp); |
105 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 106 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
106 | tmp = REG_SET(RV515_MC_FB_TOP, tmp >> 16); | 107 | tmp = REG_SET(MC_FB_TOP, tmp >> 16); |
107 | tmp |= REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16); | 108 | tmp |= REG_SET(MC_FB_START, rdev->mc.vram_location >> 16); |
108 | WREG32_MC(RV515_MC_FB_LOCATION, tmp); | 109 | WREG32_MC(MC_FB_LOCATION, tmp); |
109 | WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16); | 110 | WREG32(HDP_FB_LOCATION, rdev->mc.vram_location >> 16); |
110 | WREG32(0x310, rdev->mc.vram_location); | 111 | WREG32(0x310, rdev->mc.vram_location); |
111 | if (rdev->flags & RADEON_IS_AGP) { | 112 | if (rdev->flags & RADEON_IS_AGP) { |
112 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; | 113 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; |
113 | tmp = REG_SET(RV515_MC_AGP_TOP, tmp >> 16); | 114 | tmp = REG_SET(MC_AGP_TOP, tmp >> 16); |
114 | tmp |= REG_SET(RV515_MC_AGP_START, rdev->mc.gtt_location >> 16); | 115 | tmp |= REG_SET(MC_AGP_START, rdev->mc.gtt_location >> 16); |
115 | WREG32_MC(RV515_MC_AGP_LOCATION, tmp); | 116 | WREG32_MC(MC_AGP_LOCATION, tmp); |
116 | WREG32_MC(RV515_MC_AGP_BASE, rdev->mc.agp_base); | 117 | WREG32_MC(MC_AGP_BASE, rdev->mc.agp_base); |
117 | WREG32_MC(RV515_MC_AGP_BASE_2, 0); | 118 | WREG32_MC(MC_AGP_BASE_2, 0); |
118 | } else { | 119 | } else { |
119 | WREG32_MC(RV515_MC_AGP_LOCATION, 0x0FFFFFFF); | 120 | WREG32_MC(MC_AGP_LOCATION, 0x0FFFFFFF); |
120 | WREG32_MC(RV515_MC_AGP_BASE, 0); | 121 | WREG32_MC(MC_AGP_BASE, 0); |
121 | WREG32_MC(RV515_MC_AGP_BASE_2, 0); | 122 | WREG32_MC(MC_AGP_BASE_2, 0); |
122 | } | 123 | } |
123 | return 0; | 124 | return 0; |
124 | } | 125 | } |
@@ -136,95 +137,67 @@ void rv515_mc_fini(struct radeon_device *rdev) | |||
136 | */ | 137 | */ |
137 | void rv515_ring_start(struct radeon_device *rdev) | 138 | void rv515_ring_start(struct radeon_device *rdev) |
138 | { | 139 | { |
139 | unsigned gb_tile_config; | ||
140 | int r; | 140 | int r; |
141 | 141 | ||
142 | /* Sub pixel 1/12 so we can have 4K rendering according to doc */ | ||
143 | gb_tile_config = R300_ENABLE_TILING | R300_TILE_SIZE_16; | ||
144 | switch (rdev->num_gb_pipes) { | ||
145 | case 2: | ||
146 | gb_tile_config |= R300_PIPE_COUNT_R300; | ||
147 | break; | ||
148 | case 3: | ||
149 | gb_tile_config |= R300_PIPE_COUNT_R420_3P; | ||
150 | break; | ||
151 | case 4: | ||
152 | gb_tile_config |= R300_PIPE_COUNT_R420; | ||
153 | break; | ||
154 | case 1: | ||
155 | default: | ||
156 | gb_tile_config |= R300_PIPE_COUNT_RV350; | ||
157 | break; | ||
158 | } | ||
159 | |||
160 | r = radeon_ring_lock(rdev, 64); | 142 | r = radeon_ring_lock(rdev, 64); |
161 | if (r) { | 143 | if (r) { |
162 | return; | 144 | return; |
163 | } | 145 | } |
164 | radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0)); | 146 | radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0)); |
165 | radeon_ring_write(rdev, | ||
166 | RADEON_ISYNC_ANY2D_IDLE3D | | ||
167 | RADEON_ISYNC_ANY3D_IDLE2D | | ||
168 | RADEON_ISYNC_WAIT_IDLEGUI | | ||
169 | RADEON_ISYNC_CPSCRATCH_IDLEGUI); | ||
170 | radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0)); | ||
171 | radeon_ring_write(rdev, gb_tile_config); | ||
172 | radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); | ||
173 | radeon_ring_write(rdev, | 147 | radeon_ring_write(rdev, |
174 | RADEON_WAIT_2D_IDLECLEAN | | 148 | ISYNC_ANY2D_IDLE3D | |
175 | RADEON_WAIT_3D_IDLECLEAN); | 149 | ISYNC_ANY3D_IDLE2D | |
150 | ISYNC_WAIT_IDLEGUI | | ||
151 | ISYNC_CPSCRATCH_IDLEGUI); | ||
152 | radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0)); | ||
153 | radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); | ||
176 | radeon_ring_write(rdev, PACKET0(0x170C, 0)); | 154 | radeon_ring_write(rdev, PACKET0(0x170C, 0)); |
177 | radeon_ring_write(rdev, 1 << 31); | 155 | radeon_ring_write(rdev, 1 << 31); |
178 | radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0)); | 156 | radeon_ring_write(rdev, PACKET0(GB_SELECT, 0)); |
179 | radeon_ring_write(rdev, 0); | 157 | radeon_ring_write(rdev, 0); |
180 | radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0)); | 158 | radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0)); |
181 | radeon_ring_write(rdev, 0); | 159 | radeon_ring_write(rdev, 0); |
182 | radeon_ring_write(rdev, PACKET0(0x42C8, 0)); | 160 | radeon_ring_write(rdev, PACKET0(0x42C8, 0)); |
183 | radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1); | 161 | radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1); |
184 | radeon_ring_write(rdev, PACKET0(R500_VAP_INDEX_OFFSET, 0)); | 162 | radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0)); |
185 | radeon_ring_write(rdev, 0); | 163 | radeon_ring_write(rdev, 0); |
186 | radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); | 164 | radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0)); |
187 | radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); | 165 | radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE); |
188 | radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); | 166 | radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0)); |
189 | radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE); | 167 | radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE); |
190 | radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); | 168 | radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0)); |
191 | radeon_ring_write(rdev, | 169 | radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); |
192 | RADEON_WAIT_2D_IDLECLEAN | | 170 | radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0)); |
193 | RADEON_WAIT_3D_IDLECLEAN); | ||
194 | radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0)); | ||
195 | radeon_ring_write(rdev, 0); | 171 | radeon_ring_write(rdev, 0); |
196 | radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); | 172 | radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0)); |
197 | radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); | 173 | radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE); |
198 | radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); | 174 | radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0)); |
199 | radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE); | 175 | radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE); |
200 | radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0)); | 176 | radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0)); |
201 | radeon_ring_write(rdev, | ||
202 | ((6 << R300_MS_X0_SHIFT) | | ||
203 | (6 << R300_MS_Y0_SHIFT) | | ||
204 | (6 << R300_MS_X1_SHIFT) | | ||
205 | (6 << R300_MS_Y1_SHIFT) | | ||
206 | (6 << R300_MS_X2_SHIFT) | | ||
207 | (6 << R300_MS_Y2_SHIFT) | | ||
208 | (6 << R300_MSBD0_Y_SHIFT) | | ||
209 | (6 << R300_MSBD0_X_SHIFT))); | ||
210 | radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0)); | ||
211 | radeon_ring_write(rdev, | 177 | radeon_ring_write(rdev, |
212 | ((6 << R300_MS_X3_SHIFT) | | 178 | ((6 << MS_X0_SHIFT) | |
213 | (6 << R300_MS_Y3_SHIFT) | | 179 | (6 << MS_Y0_SHIFT) | |
214 | (6 << R300_MS_X4_SHIFT) | | 180 | (6 << MS_X1_SHIFT) | |
215 | (6 << R300_MS_Y4_SHIFT) | | 181 | (6 << MS_Y1_SHIFT) | |
216 | (6 << R300_MS_X5_SHIFT) | | 182 | (6 << MS_X2_SHIFT) | |
217 | (6 << R300_MS_Y5_SHIFT) | | 183 | (6 << MS_Y2_SHIFT) | |
218 | (6 << R300_MSBD1_SHIFT))); | 184 | (6 << MSBD0_Y_SHIFT) | |
219 | radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0)); | 185 | (6 << MSBD0_X_SHIFT))); |
220 | radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL); | 186 | radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0)); |
221 | radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0)); | ||
222 | radeon_ring_write(rdev, | 187 | radeon_ring_write(rdev, |
223 | R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE); | 188 | ((6 << MS_X3_SHIFT) | |
224 | radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0)); | 189 | (6 << MS_Y3_SHIFT) | |
225 | radeon_ring_write(rdev, | 190 | (6 << MS_X4_SHIFT) | |
226 | R300_GEOMETRY_ROUND_NEAREST | | 191 | (6 << MS_Y4_SHIFT) | |
227 | R300_COLOR_ROUND_NEAREST); | 192 | (6 << MS_X5_SHIFT) | |
193 | (6 << MS_Y5_SHIFT) | | ||
194 | (6 << MSBD1_SHIFT))); | ||
195 | radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0)); | ||
196 | radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL); | ||
197 | radeon_ring_write(rdev, PACKET0(GA_POLY_MODE, 0)); | ||
198 | radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE); | ||
199 | radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0)); | ||
200 | radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST); | ||
228 | radeon_ring_write(rdev, PACKET0(0x20C8, 0)); | 201 | radeon_ring_write(rdev, PACKET0(0x20C8, 0)); |
229 | radeon_ring_write(rdev, 0); | 202 | radeon_ring_write(rdev, 0); |
230 | radeon_ring_unlock_commit(rdev); | 203 | radeon_ring_unlock_commit(rdev); |
@@ -242,8 +215,8 @@ int rv515_mc_wait_for_idle(struct radeon_device *rdev) | |||
242 | 215 | ||
243 | for (i = 0; i < rdev->usec_timeout; i++) { | 216 | for (i = 0; i < rdev->usec_timeout; i++) { |
244 | /* read MC_STATUS */ | 217 | /* read MC_STATUS */ |
245 | tmp = RREG32_MC(RV515_MC_STATUS); | 218 | tmp = RREG32_MC(MC_STATUS); |
246 | if (tmp & RV515_MC_STATUS_IDLE) { | 219 | if (tmp & MC_STATUS_IDLE) { |
247 | return 0; | 220 | return 0; |
248 | } | 221 | } |
249 | DRM_UDELAY(1); | 222 | DRM_UDELAY(1); |
@@ -291,33 +264,33 @@ int rv515_ga_reset(struct radeon_device *rdev) | |||
291 | reinit_cp = rdev->cp.ready; | 264 | reinit_cp = rdev->cp.ready; |
292 | rdev->cp.ready = false; | 265 | rdev->cp.ready = false; |
293 | for (i = 0; i < rdev->usec_timeout; i++) { | 266 | for (i = 0; i < rdev->usec_timeout; i++) { |
294 | WREG32(RADEON_CP_CSQ_MODE, 0); | 267 | WREG32(CP_CSQ_MODE, 0); |
295 | WREG32(RADEON_CP_CSQ_CNTL, 0); | 268 | WREG32(CP_CSQ_CNTL, 0); |
296 | WREG32(RADEON_RBBM_SOFT_RESET, 0x32005); | 269 | WREG32(RBBM_SOFT_RESET, 0x32005); |
297 | (void)RREG32(RADEON_RBBM_SOFT_RESET); | 270 | (void)RREG32(RBBM_SOFT_RESET); |
298 | udelay(200); | 271 | udelay(200); |
299 | WREG32(RADEON_RBBM_SOFT_RESET, 0); | 272 | WREG32(RBBM_SOFT_RESET, 0); |
300 | /* Wait to prevent race in RBBM_STATUS */ | 273 | /* Wait to prevent race in RBBM_STATUS */ |
301 | mdelay(1); | 274 | mdelay(1); |
302 | tmp = RREG32(RADEON_RBBM_STATUS); | 275 | tmp = RREG32(RBBM_STATUS); |
303 | if (tmp & ((1 << 20) | (1 << 26))) { | 276 | if (tmp & ((1 << 20) | (1 << 26))) { |
304 | DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp); | 277 | DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp); |
305 | /* GA still busy soft reset it */ | 278 | /* GA still busy soft reset it */ |
306 | WREG32(0x429C, 0x200); | 279 | WREG32(0x429C, 0x200); |
307 | WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0); | 280 | WREG32(VAP_PVS_STATE_FLUSH_REG, 0); |
308 | WREG32(0x43E0, 0); | 281 | WREG32(0x43E0, 0); |
309 | WREG32(0x43E4, 0); | 282 | WREG32(0x43E4, 0); |
310 | WREG32(0x24AC, 0); | 283 | WREG32(0x24AC, 0); |
311 | } | 284 | } |
312 | /* Wait to prevent race in RBBM_STATUS */ | 285 | /* Wait to prevent race in RBBM_STATUS */ |
313 | mdelay(1); | 286 | mdelay(1); |
314 | tmp = RREG32(RADEON_RBBM_STATUS); | 287 | tmp = RREG32(RBBM_STATUS); |
315 | if (!(tmp & ((1 << 20) | (1 << 26)))) { | 288 | if (!(tmp & ((1 << 20) | (1 << 26)))) { |
316 | break; | 289 | break; |
317 | } | 290 | } |
318 | } | 291 | } |
319 | for (i = 0; i < rdev->usec_timeout; i++) { | 292 | for (i = 0; i < rdev->usec_timeout; i++) { |
320 | tmp = RREG32(RADEON_RBBM_STATUS); | 293 | tmp = RREG32(RBBM_STATUS); |
321 | if (!(tmp & ((1 << 20) | (1 << 26)))) { | 294 | if (!(tmp & ((1 << 20) | (1 << 26)))) { |
322 | DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n", | 295 | DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n", |
323 | tmp); | 296 | tmp); |
@@ -331,7 +304,7 @@ int rv515_ga_reset(struct radeon_device *rdev) | |||
331 | } | 304 | } |
332 | DRM_UDELAY(1); | 305 | DRM_UDELAY(1); |
333 | } | 306 | } |
334 | tmp = RREG32(RADEON_RBBM_STATUS); | 307 | tmp = RREG32(RBBM_STATUS); |
335 | DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp); | 308 | DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp); |
336 | return -1; | 309 | return -1; |
337 | } | 310 | } |
@@ -341,7 +314,7 @@ int rv515_gpu_reset(struct radeon_device *rdev) | |||
341 | uint32_t status; | 314 | uint32_t status; |
342 | 315 | ||
343 | /* reset order likely matter */ | 316 | /* reset order likely matter */ |
344 | status = RREG32(RADEON_RBBM_STATUS); | 317 | status = RREG32(RBBM_STATUS); |
345 | /* reset HDP */ | 318 | /* reset HDP */ |
346 | r100_hdp_reset(rdev); | 319 | r100_hdp_reset(rdev); |
347 | /* reset rb2d */ | 320 | /* reset rb2d */ |
@@ -353,12 +326,12 @@ int rv515_gpu_reset(struct radeon_device *rdev) | |||
353 | rv515_ga_reset(rdev); | 326 | rv515_ga_reset(rdev); |
354 | } | 327 | } |
355 | /* reset CP */ | 328 | /* reset CP */ |
356 | status = RREG32(RADEON_RBBM_STATUS); | 329 | status = RREG32(RBBM_STATUS); |
357 | if (status & (1 << 16)) { | 330 | if (status & (1 << 16)) { |
358 | r100_cp_reset(rdev); | 331 | r100_cp_reset(rdev); |
359 | } | 332 | } |
360 | /* Check if GPU is idle */ | 333 | /* Check if GPU is idle */ |
361 | status = RREG32(RADEON_RBBM_STATUS); | 334 | status = RREG32(RBBM_STATUS); |
362 | if (status & (1 << 31)) { | 335 | if (status & (1 << 31)) { |
363 | DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); | 336 | DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); |
364 | return -1; | 337 | return -1; |
@@ -377,8 +350,7 @@ static void rv515_vram_get_type(struct radeon_device *rdev) | |||
377 | 350 | ||
378 | rdev->mc.vram_width = 128; | 351 | rdev->mc.vram_width = 128; |
379 | rdev->mc.vram_is_ddr = true; | 352 | rdev->mc.vram_is_ddr = true; |
380 | tmp = RREG32_MC(RV515_MC_CNTL); | 353 | tmp = RREG32_MC(RV515_MC_CNTL) & MEM_NUM_CHANNELS_MASK; |
381 | tmp &= RV515_MEM_NUM_CHANNELS_MASK; | ||
382 | switch (tmp) { | 354 | switch (tmp) { |
383 | case 0: | 355 | case 0: |
384 | rdev->mc.vram_width = 64; | 356 | rdev->mc.vram_width = 64; |
@@ -394,11 +366,16 @@ static void rv515_vram_get_type(struct radeon_device *rdev) | |||
394 | 366 | ||
395 | void rv515_vram_info(struct radeon_device *rdev) | 367 | void rv515_vram_info(struct radeon_device *rdev) |
396 | { | 368 | { |
369 | fixed20_12 a; | ||
370 | |||
397 | rv515_vram_get_type(rdev); | 371 | rv515_vram_get_type(rdev); |
398 | rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | ||
399 | 372 | ||
400 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | 373 | /* FIXME: we should enforce default clock in case GPU is not in |
401 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | 374 | * default setup |
375 | */ | ||
376 | a.full = rfixed_const(100); | ||
377 | rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); | ||
378 | rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); | ||
402 | } | 379 | } |
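The rewritten rv515_vram_info above seeds rdev->pm.sclk for the watermark code by dividing the default engine clock by 100 in radeon's 20.12 fixed-point format. A minimal standalone sketch of that arithmetic, re-implementing just enough of the rfixed_* helpers (the real ones live in radeon_fixed.h) and assuming default_sclk is reported in 10 kHz units, so 40000 stands for a 400 MHz engine clock:

    #include <stdint.h>
    #include <stdio.h>

    /* 20.12 fixed point: upper 20 bits integer part, lower 12 bits fraction. */
    typedef struct { uint32_t full; } fixed20_12;

    static fixed20_12 rfixed_const(uint32_t v)
    {
        fixed20_12 t = { .full = v << 12 };
        return t;
    }

    static fixed20_12 rfixed_div(fixed20_12 a, fixed20_12 b)
    {
        /* widen to 64 bits so the pre-shift cannot overflow */
        fixed20_12 t = { .full = (uint32_t)(((uint64_t)a.full << 12) / b.full) };
        return t;
    }

    static uint32_t rfixed_trunc(fixed20_12 a)
    {
        return a.full >> 12;
    }

    int main(void)
    {
        uint32_t default_sclk = 40000;      /* assumed: 400 MHz in 10 kHz units */
        fixed20_12 a = rfixed_const(100);
        fixed20_12 sclk = rfixed_const(default_sclk);

        sclk = rfixed_div(sclk, a);         /* 40000 / 100 = 400 */
        printf("sclk = %u MHz\n", rfixed_trunc(sclk));
        return 0;
    }

The 64-bit widening before the divide matters: shifting a 20.12 value by another 12 bits overflows 32 bits for clocks of this magnitude.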
403 | 380 | ||
404 | 381 | ||
@@ -409,35 +386,35 @@ uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) | |||
409 | { | 386 | { |
410 | uint32_t r; | 387 | uint32_t r; |
411 | 388 | ||
412 | WREG32(R520_MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); | 389 | WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); |
413 | r = RREG32(R520_MC_IND_DATA); | 390 | r = RREG32(MC_IND_DATA); |
414 | WREG32(R520_MC_IND_INDEX, 0); | 391 | WREG32(MC_IND_INDEX, 0); |
415 | return r; | 392 | return r; |
416 | } | 393 | } |
417 | 394 | ||
418 | void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | 395 | void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
419 | { | 396 | { |
420 | WREG32(R520_MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); | 397 | WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); |
421 | WREG32(R520_MC_IND_DATA, (v)); | 398 | WREG32(MC_IND_DATA, (v)); |
422 | WREG32(R520_MC_IND_INDEX, 0); | 399 | WREG32(MC_IND_INDEX, 0); |
423 | } | 400 | } |
424 | 401 | ||
425 | uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg) | 402 | uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg) |
426 | { | 403 | { |
427 | uint32_t r; | 404 | uint32_t r; |
428 | 405 | ||
429 | WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff)); | 406 | WREG32(PCIE_INDEX, ((reg) & 0x7ff)); |
430 | (void)RREG32(RADEON_PCIE_INDEX); | 407 | (void)RREG32(PCIE_INDEX); |
431 | r = RREG32(RADEON_PCIE_DATA); | 408 | r = RREG32(PCIE_DATA); |
432 | return r; | 409 | return r; |
433 | } | 410 | } |
434 | 411 | ||
435 | void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) | 412 | void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
436 | { | 413 | { |
437 | WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff)); | 414 | WREG32(PCIE_INDEX, ((reg) & 0x7ff)); |
438 | (void)RREG32(RADEON_PCIE_INDEX); | 415 | (void)RREG32(PCIE_INDEX); |
439 | WREG32(RADEON_PCIE_DATA, (v)); | 416 | WREG32(PCIE_DATA, (v)); |
440 | (void)RREG32(RADEON_PCIE_DATA); | 417 | (void)RREG32(PCIE_DATA); |
441 | } | 418 | } |
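The MC and PCIE accessors above use the same index/data idiom: write the register offset into an INDEX register (the MC write path adds a control prefix in the upper bits, presumably a write enable), touch the DATA register, then clear the index. A self-contained mock, purely illustrative, that routes DATA accesses through the last index written so the ordering of the three MMIO operations is visible:

    #include <stdint.h>
    #include <stdio.h>

    #define MC_IND_INDEX 0x0070
    #define MC_IND_DATA  0x0074

    /* Mock MMIO that emulates only the indirection, nothing else. */
    static uint32_t mc_index;            /* last value written to MC_IND_INDEX */
    static uint32_t mc_regs[0x100];      /* the indirect ix[MC] register file */

    static void WREG32(uint32_t reg, uint32_t v)
    {
        if (reg == MC_IND_INDEX)
            mc_index = v;
        else if (reg == MC_IND_DATA)
            mc_regs[mc_index & 0xff] = v;
    }

    static uint32_t RREG32(uint32_t reg)
    {
        if (reg == MC_IND_DATA)
            return mc_regs[mc_index & 0xff];
        return 0;
    }

    static void mc_wreg(uint32_t reg, uint32_t v)
    {
        WREG32(MC_IND_INDEX, 0xff0000 | (reg & 0xffff)); /* select, upper bits carry control flags */
        WREG32(MC_IND_DATA, v);
        WREG32(MC_IND_INDEX, 0);                         /* close the window again */
    }

    static uint32_t mc_rreg(uint32_t reg)
    {
        uint32_t r;

        WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
        r = RREG32(MC_IND_DATA);
        WREG32(MC_IND_INDEX, 0);
        return r;
    }

    int main(void)
    {
        mc_wreg(0x05, 0x2);                    /* e.g. ix[MC]:MC_CNTL */
        printf("MC[0x05] = 0x%08x\n", mc_rreg(0x05));
        return 0;
    }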
442 | 419 | ||
443 | 420 | ||
@@ -452,13 +429,13 @@ static int rv515_debugfs_pipes_info(struct seq_file *m, void *data) | |||
452 | struct radeon_device *rdev = dev->dev_private; | 429 | struct radeon_device *rdev = dev->dev_private; |
453 | uint32_t tmp; | 430 | uint32_t tmp; |
454 | 431 | ||
455 | tmp = RREG32(R400_GB_PIPE_SELECT); | 432 | tmp = RREG32(GB_PIPE_SELECT); |
456 | seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp); | 433 | seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp); |
457 | tmp = RREG32(R500_SU_REG_DEST); | 434 | tmp = RREG32(SU_REG_DEST); |
458 | seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp); | 435 | seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp); |
459 | tmp = RREG32(R300_GB_TILE_CONFIG); | 436 | tmp = RREG32(GB_TILE_CONFIG); |
460 | seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp); | 437 | seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp); |
461 | tmp = RREG32(R300_DST_PIPE_CONFIG); | 438 | tmp = RREG32(DST_PIPE_CONFIG); |
462 | seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp); | 439 | seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp); |
463 | return 0; | 440 | return 0; |
464 | } | 441 | } |
@@ -509,9 +486,9 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev) | |||
509 | /* | 486 | /* |
510 | * Asic initialization | 487 | * Asic initialization |
511 | */ | 488 | */ |
512 | static const unsigned r500_reg_safe_bm[159] = { | 489 | static const unsigned r500_reg_safe_bm[219] = { |
490 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
513 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 491 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
514 | 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF, | ||
515 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 492 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
516 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 493 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
517 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 494 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
@@ -549,14 +526,575 @@ static const unsigned r500_reg_safe_bm[159] = { | |||
549 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | 526 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
550 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF, | 527 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF, |
551 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | 528 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, |
552 | 0x0003FC01, 0x3FFFFCF8, 0xFE800B19, | 529 | 0x0003FC01, 0x3FFFFCF8, 0xFE800B19, 0xFFFFFFFF, |
530 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
531 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
532 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
533 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
534 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
535 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
536 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
537 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
538 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
539 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
540 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
541 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
542 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
543 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
544 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, | ||
553 | }; | 545 | }; |
554 | 546 | ||
555 | |||
556 | |||
557 | int rv515_init(struct radeon_device *rdev) | 547 | int rv515_init(struct radeon_device *rdev) |
558 | { | 548 | { |
559 | rdev->config.r300.reg_safe_bm = r500_reg_safe_bm; | 549 | rdev->config.r300.reg_safe_bm = r500_reg_safe_bm; |
560 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm); | 550 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm); |
561 | return 0; | 551 | return 0; |
562 | } | 552 | } |
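r500_reg_safe_bm grows from 159 to 219 dwords so the bitmap now reaches the AVIVO display registers around 0x6D00 used later in this file (219 entries of 128 bytes each cover offsets up to 0x6D80). The command-stream checker consults it roughly as in the following sketch, one bit per dword register, a set bit meaning userspace may write the register directly; the actual test lives in the r300 CS checker, not in this hunk, so treat the indexing here as an assumption:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Each 32-bit bitmap entry covers a 128-byte register window, one bit
     * per dword register.  A set bit marks the register as safe to write
     * from a userspace command stream without relocation checks.
     */
    static bool reg_is_safe(const unsigned *bm, unsigned bm_size, uint32_t reg)
    {
        unsigned i = reg >> 7;                 /* 128-byte window index */
        unsigned m = 1u << ((reg >> 2) & 31);  /* register within the window */

        if (i >= bm_size)
            return false;                      /* beyond the table: reject */
        return (bm[i] & m) != 0;
    }

Under that assumption, a register such as D1MODE_PRIORITY_A_CNT (0x6548, window 202) fell outside the old 159-entry table and could only be rejected; the enlarged table lets the checker make a per-bit decision for those offsets, which is presumably why it grows alongside the new display code below.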
553 | |||
554 | void atom_rv515_force_tv_scaler(struct radeon_device *rdev) | ||
555 | { | ||
556 | |||
557 | WREG32(0x659C, 0x0); | ||
558 | WREG32(0x6594, 0x705); | ||
559 | WREG32(0x65A4, 0x10001); | ||
560 | WREG32(0x65D8, 0x0); | ||
561 | WREG32(0x65B0, 0x0); | ||
562 | WREG32(0x65C0, 0x0); | ||
563 | WREG32(0x65D4, 0x0); | ||
564 | WREG32(0x6578, 0x0); | ||
565 | WREG32(0x657C, 0x841880A8); | ||
566 | WREG32(0x6578, 0x1); | ||
567 | WREG32(0x657C, 0x84208680); | ||
568 | WREG32(0x6578, 0x2); | ||
569 | WREG32(0x657C, 0xBFF880B0); | ||
570 | WREG32(0x6578, 0x100); | ||
571 | WREG32(0x657C, 0x83D88088); | ||
572 | WREG32(0x6578, 0x101); | ||
573 | WREG32(0x657C, 0x84608680); | ||
574 | WREG32(0x6578, 0x102); | ||
575 | WREG32(0x657C, 0xBFF080D0); | ||
576 | WREG32(0x6578, 0x200); | ||
577 | WREG32(0x657C, 0x83988068); | ||
578 | WREG32(0x6578, 0x201); | ||
579 | WREG32(0x657C, 0x84A08680); | ||
580 | WREG32(0x6578, 0x202); | ||
581 | WREG32(0x657C, 0xBFF080F8); | ||
582 | WREG32(0x6578, 0x300); | ||
583 | WREG32(0x657C, 0x83588058); | ||
584 | WREG32(0x6578, 0x301); | ||
585 | WREG32(0x657C, 0x84E08660); | ||
586 | WREG32(0x6578, 0x302); | ||
587 | WREG32(0x657C, 0xBFF88120); | ||
588 | WREG32(0x6578, 0x400); | ||
589 | WREG32(0x657C, 0x83188040); | ||
590 | WREG32(0x6578, 0x401); | ||
591 | WREG32(0x657C, 0x85008660); | ||
592 | WREG32(0x6578, 0x402); | ||
593 | WREG32(0x657C, 0xBFF88150); | ||
594 | WREG32(0x6578, 0x500); | ||
595 | WREG32(0x657C, 0x82D88030); | ||
596 | WREG32(0x6578, 0x501); | ||
597 | WREG32(0x657C, 0x85408640); | ||
598 | WREG32(0x6578, 0x502); | ||
599 | WREG32(0x657C, 0xBFF88180); | ||
600 | WREG32(0x6578, 0x600); | ||
601 | WREG32(0x657C, 0x82A08018); | ||
602 | WREG32(0x6578, 0x601); | ||
603 | WREG32(0x657C, 0x85808620); | ||
604 | WREG32(0x6578, 0x602); | ||
605 | WREG32(0x657C, 0xBFF081B8); | ||
606 | WREG32(0x6578, 0x700); | ||
607 | WREG32(0x657C, 0x82608010); | ||
608 | WREG32(0x6578, 0x701); | ||
609 | WREG32(0x657C, 0x85A08600); | ||
610 | WREG32(0x6578, 0x702); | ||
611 | WREG32(0x657C, 0x800081F0); | ||
612 | WREG32(0x6578, 0x800); | ||
613 | WREG32(0x657C, 0x8228BFF8); | ||
614 | WREG32(0x6578, 0x801); | ||
615 | WREG32(0x657C, 0x85E085E0); | ||
616 | WREG32(0x6578, 0x802); | ||
617 | WREG32(0x657C, 0xBFF88228); | ||
618 | WREG32(0x6578, 0x10000); | ||
619 | WREG32(0x657C, 0x82A8BF00); | ||
620 | WREG32(0x6578, 0x10001); | ||
621 | WREG32(0x657C, 0x82A08CC0); | ||
622 | WREG32(0x6578, 0x10002); | ||
623 | WREG32(0x657C, 0x8008BEF8); | ||
624 | WREG32(0x6578, 0x10100); | ||
625 | WREG32(0x657C, 0x81F0BF28); | ||
626 | WREG32(0x6578, 0x10101); | ||
627 | WREG32(0x657C, 0x83608CA0); | ||
628 | WREG32(0x6578, 0x10102); | ||
629 | WREG32(0x657C, 0x8018BED0); | ||
630 | WREG32(0x6578, 0x10200); | ||
631 | WREG32(0x657C, 0x8148BF38); | ||
632 | WREG32(0x6578, 0x10201); | ||
633 | WREG32(0x657C, 0x84408C80); | ||
634 | WREG32(0x6578, 0x10202); | ||
635 | WREG32(0x657C, 0x8008BEB8); | ||
636 | WREG32(0x6578, 0x10300); | ||
637 | WREG32(0x657C, 0x80B0BF78); | ||
638 | WREG32(0x6578, 0x10301); | ||
639 | WREG32(0x657C, 0x85008C20); | ||
640 | WREG32(0x6578, 0x10302); | ||
641 | WREG32(0x657C, 0x8020BEA0); | ||
642 | WREG32(0x6578, 0x10400); | ||
643 | WREG32(0x657C, 0x8028BF90); | ||
644 | WREG32(0x6578, 0x10401); | ||
645 | WREG32(0x657C, 0x85E08BC0); | ||
646 | WREG32(0x6578, 0x10402); | ||
647 | WREG32(0x657C, 0x8018BE90); | ||
648 | WREG32(0x6578, 0x10500); | ||
649 | WREG32(0x657C, 0xBFB8BFB0); | ||
650 | WREG32(0x6578, 0x10501); | ||
651 | WREG32(0x657C, 0x86C08B40); | ||
652 | WREG32(0x6578, 0x10502); | ||
653 | WREG32(0x657C, 0x8010BE90); | ||
654 | WREG32(0x6578, 0x10600); | ||
655 | WREG32(0x657C, 0xBF58BFC8); | ||
656 | WREG32(0x6578, 0x10601); | ||
657 | WREG32(0x657C, 0x87A08AA0); | ||
658 | WREG32(0x6578, 0x10602); | ||
659 | WREG32(0x657C, 0x8010BE98); | ||
660 | WREG32(0x6578, 0x10700); | ||
661 | WREG32(0x657C, 0xBF10BFF0); | ||
662 | WREG32(0x6578, 0x10701); | ||
663 | WREG32(0x657C, 0x886089E0); | ||
664 | WREG32(0x6578, 0x10702); | ||
665 | WREG32(0x657C, 0x8018BEB0); | ||
666 | WREG32(0x6578, 0x10800); | ||
667 | WREG32(0x657C, 0xBED8BFE8); | ||
668 | WREG32(0x6578, 0x10801); | ||
669 | WREG32(0x657C, 0x89408940); | ||
670 | WREG32(0x6578, 0x10802); | ||
671 | WREG32(0x657C, 0xBFE8BED8); | ||
672 | WREG32(0x6578, 0x20000); | ||
673 | WREG32(0x657C, 0x80008000); | ||
674 | WREG32(0x6578, 0x20001); | ||
675 | WREG32(0x657C, 0x90008000); | ||
676 | WREG32(0x6578, 0x20002); | ||
677 | WREG32(0x657C, 0x80008000); | ||
678 | WREG32(0x6578, 0x20003); | ||
679 | WREG32(0x657C, 0x80008000); | ||
680 | WREG32(0x6578, 0x20100); | ||
681 | WREG32(0x657C, 0x80108000); | ||
682 | WREG32(0x6578, 0x20101); | ||
683 | WREG32(0x657C, 0x8FE0BF70); | ||
684 | WREG32(0x6578, 0x20102); | ||
685 | WREG32(0x657C, 0xBFE880C0); | ||
686 | WREG32(0x6578, 0x20103); | ||
687 | WREG32(0x657C, 0x80008000); | ||
688 | WREG32(0x6578, 0x20200); | ||
689 | WREG32(0x657C, 0x8018BFF8); | ||
690 | WREG32(0x6578, 0x20201); | ||
691 | WREG32(0x657C, 0x8F80BF08); | ||
692 | WREG32(0x6578, 0x20202); | ||
693 | WREG32(0x657C, 0xBFD081A0); | ||
694 | WREG32(0x6578, 0x20203); | ||
695 | WREG32(0x657C, 0xBFF88000); | ||
696 | WREG32(0x6578, 0x20300); | ||
697 | WREG32(0x657C, 0x80188000); | ||
698 | WREG32(0x6578, 0x20301); | ||
699 | WREG32(0x657C, 0x8EE0BEC0); | ||
700 | WREG32(0x6578, 0x20302); | ||
701 | WREG32(0x657C, 0xBFB082A0); | ||
702 | WREG32(0x6578, 0x20303); | ||
703 | WREG32(0x657C, 0x80008000); | ||
704 | WREG32(0x6578, 0x20400); | ||
705 | WREG32(0x657C, 0x80188000); | ||
706 | WREG32(0x6578, 0x20401); | ||
707 | WREG32(0x657C, 0x8E00BEA0); | ||
708 | WREG32(0x6578, 0x20402); | ||
709 | WREG32(0x657C, 0xBF8883C0); | ||
710 | WREG32(0x6578, 0x20403); | ||
711 | WREG32(0x657C, 0x80008000); | ||
712 | WREG32(0x6578, 0x20500); | ||
713 | WREG32(0x657C, 0x80188000); | ||
714 | WREG32(0x6578, 0x20501); | ||
715 | WREG32(0x657C, 0x8D00BE90); | ||
716 | WREG32(0x6578, 0x20502); | ||
717 | WREG32(0x657C, 0xBF588500); | ||
718 | WREG32(0x6578, 0x20503); | ||
719 | WREG32(0x657C, 0x80008008); | ||
720 | WREG32(0x6578, 0x20600); | ||
721 | WREG32(0x657C, 0x80188000); | ||
722 | WREG32(0x6578, 0x20601); | ||
723 | WREG32(0x657C, 0x8BC0BE98); | ||
724 | WREG32(0x6578, 0x20602); | ||
725 | WREG32(0x657C, 0xBF308660); | ||
726 | WREG32(0x6578, 0x20603); | ||
727 | WREG32(0x657C, 0x80008008); | ||
728 | WREG32(0x6578, 0x20700); | ||
729 | WREG32(0x657C, 0x80108000); | ||
730 | WREG32(0x6578, 0x20701); | ||
731 | WREG32(0x657C, 0x8A80BEB0); | ||
732 | WREG32(0x6578, 0x20702); | ||
733 | WREG32(0x657C, 0xBF0087C0); | ||
734 | WREG32(0x6578, 0x20703); | ||
735 | WREG32(0x657C, 0x80008008); | ||
736 | WREG32(0x6578, 0x20800); | ||
737 | WREG32(0x657C, 0x80108000); | ||
738 | WREG32(0x6578, 0x20801); | ||
739 | WREG32(0x657C, 0x8920BED0); | ||
740 | WREG32(0x6578, 0x20802); | ||
741 | WREG32(0x657C, 0xBED08920); | ||
742 | WREG32(0x6578, 0x20803); | ||
743 | WREG32(0x657C, 0x80008010); | ||
744 | WREG32(0x6578, 0x30000); | ||
745 | WREG32(0x657C, 0x90008000); | ||
746 | WREG32(0x6578, 0x30001); | ||
747 | WREG32(0x657C, 0x80008000); | ||
748 | WREG32(0x6578, 0x30100); | ||
749 | WREG32(0x657C, 0x8FE0BF90); | ||
750 | WREG32(0x6578, 0x30101); | ||
751 | WREG32(0x657C, 0xBFF880A0); | ||
752 | WREG32(0x6578, 0x30200); | ||
753 | WREG32(0x657C, 0x8F60BF40); | ||
754 | WREG32(0x6578, 0x30201); | ||
755 | WREG32(0x657C, 0xBFE88180); | ||
756 | WREG32(0x6578, 0x30300); | ||
757 | WREG32(0x657C, 0x8EC0BF00); | ||
758 | WREG32(0x6578, 0x30301); | ||
759 | WREG32(0x657C, 0xBFC88280); | ||
760 | WREG32(0x6578, 0x30400); | ||
761 | WREG32(0x657C, 0x8DE0BEE0); | ||
762 | WREG32(0x6578, 0x30401); | ||
763 | WREG32(0x657C, 0xBFA083A0); | ||
764 | WREG32(0x6578, 0x30500); | ||
765 | WREG32(0x657C, 0x8CE0BED0); | ||
766 | WREG32(0x6578, 0x30501); | ||
767 | WREG32(0x657C, 0xBF7884E0); | ||
768 | WREG32(0x6578, 0x30600); | ||
769 | WREG32(0x657C, 0x8BA0BED8); | ||
770 | WREG32(0x6578, 0x30601); | ||
771 | WREG32(0x657C, 0xBF508640); | ||
772 | WREG32(0x6578, 0x30700); | ||
773 | WREG32(0x657C, 0x8A60BEE8); | ||
774 | WREG32(0x6578, 0x30701); | ||
775 | WREG32(0x657C, 0xBF2087A0); | ||
776 | WREG32(0x6578, 0x30800); | ||
777 | WREG32(0x657C, 0x8900BF00); | ||
778 | WREG32(0x6578, 0x30801); | ||
779 | WREG32(0x657C, 0xBF008900); | ||
780 | } | ||
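atom_rv515_force_tv_scaler above drives a single index/data pair (0x6578/0x657C) with a long fixed coefficient sequence. Purely as an illustration of the pattern, not how the kernel writes it, the same sequence can be carried as a table and a loop; the entries shown are the first few {index, value} pairs copied from the function, with a stub WREG32 so the sketch runs on its own:

    #include <stdint.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static void WREG32(uint32_t reg, uint32_t val)
    {
        printf("WREG32(0x%04X, 0x%08X)\n", reg, val);   /* stub MMIO write */
    }

    struct idx_val {
        uint32_t idx;
        uint32_t val;
    };

    /* First entries of the coefficient sequence above; the rest follow suit. */
    static const struct idx_val tv_scaler_coeffs[] = {
        { 0x0,   0x841880A8 },
        { 0x1,   0x84208680 },
        { 0x2,   0xBFF880B0 },
        { 0x100, 0x83D88088 },
        { 0x101, 0x84608680 },
        { 0x102, 0xBFF080D0 },
    };

    static void force_tv_scaler_tabular(void)
    {
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(tv_scaler_coeffs); i++) {
            WREG32(0x6578, tv_scaler_coeffs[i].idx);    /* coefficient index */
            WREG32(0x657C, tv_scaler_coeffs[i].val);    /* coefficient value */
        }
    }

    int main(void)
    {
        force_tv_scaler_tabular();
        return 0;
    }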
781 | |||
782 | struct rv515_watermark { | ||
783 | u32 lb_request_fifo_depth; | ||
784 | fixed20_12 num_line_pair; | ||
785 | fixed20_12 estimated_width; | ||
786 | fixed20_12 worst_case_latency; | ||
787 | fixed20_12 consumption_rate; | ||
788 | fixed20_12 active_time; | ||
789 | fixed20_12 dbpp; | ||
790 | fixed20_12 priority_mark_max; | ||
791 | fixed20_12 priority_mark; | ||
792 | fixed20_12 sclk; | ||
793 | }; | ||
794 | |||
795 | void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, | ||
796 | struct radeon_crtc *crtc, | ||
797 | struct rv515_watermark *wm) | ||
798 | { | ||
799 | struct drm_display_mode *mode = &crtc->base.mode; | ||
800 | fixed20_12 a, b, c; | ||
801 | fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; | ||
802 | fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; | ||
803 | |||
804 | if (!crtc->base.enabled) { | ||
805 | /* FIXME: wouldn't it be better to set priority mark to maximum */ | ||
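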
806 | wm->lb_request_fifo_depth = 4; | ||
807 | return; | ||
808 | } | ||
809 | |||
810 | if (crtc->vsc.full > rfixed_const(2)) | ||
811 | wm->num_line_pair.full = rfixed_const(2); | ||
812 | else | ||
813 | wm->num_line_pair.full = rfixed_const(1); | ||
814 | |||
815 | b.full = rfixed_const(mode->crtc_hdisplay); | ||
816 | c.full = rfixed_const(256); | ||
817 | a.full = rfixed_mul(wm->num_line_pair, b); | ||
818 | request_fifo_depth.full = rfixed_div(a, c); | ||
819 | if (a.full < rfixed_const(4)) { | ||
820 | wm->lb_request_fifo_depth = 4; | ||
821 | } else { | ||
822 | wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); | ||
823 | } | ||
824 | |||
825 | /* Determine consumption rate | ||
826 | * pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000) | ||
827 | * vtaps = number of vertical taps, | ||
828 | * vsc = vertical scaling ratio, defined as source/destination | ||
829 | * hsc = horizontal scaling ratio, defined as source/destination | ||
830 | */ | ||
831 | a.full = rfixed_const(mode->clock); | ||
832 | b.full = rfixed_const(1000); | ||
833 | a.full = rfixed_div(a, b); | ||
834 | pclk.full = rfixed_div(b, a); | ||
835 | if (crtc->rmx_type != RMX_OFF) { | ||
836 | b.full = rfixed_const(2); | ||
837 | if (crtc->vsc.full > b.full) | ||
838 | b.full = crtc->vsc.full; | ||
839 | b.full = rfixed_mul(b, crtc->hsc); | ||
840 | c.full = rfixed_const(2); | ||
841 | b.full = rfixed_div(b, c); | ||
842 | consumption_time.full = rfixed_div(pclk, b); | ||
843 | } else { | ||
844 | consumption_time.full = pclk.full; | ||
845 | } | ||
846 | a.full = rfixed_const(1); | ||
847 | wm->consumption_rate.full = rfixed_div(a, consumption_time); | ||
848 | |||
849 | |||
850 | /* Determine line time | ||
851 | * LineTime = total time for one line of display | ||
852 | * htotal = total number of horizontal pixels | ||
853 | * pclk = pixel clock period(ns) | ||
854 | */ | ||
855 | a.full = rfixed_const(crtc->base.mode.crtc_htotal); | ||
856 | line_time.full = rfixed_mul(a, pclk); | ||
857 | |||
858 | /* Determine active time | ||
859 | * ActiveTime = time of active region of display within one line, | ||
860 | * hactive = total number of horizontal active pixels | ||
861 | * htotal = total number of horizontal pixels | ||
862 | */ | ||
863 | a.full = rfixed_const(crtc->base.mode.crtc_htotal); | ||
864 | b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); | ||
865 | wm->active_time.full = rfixed_mul(line_time, b); | ||
866 | wm->active_time.full = rfixed_div(wm->active_time, a); | ||
867 | |||
868 | /* Determine chunk time | ||
869 | * ChunkTime = the time it takes the DCP to send one chunk of data | ||
870 | * to the LB which consists of pipeline delay and inter chunk gap | ||
871 | * sclk = system clock (MHz) | ||
872 | */ | ||
873 | a.full = rfixed_const(600 * 1000); | ||
874 | chunk_time.full = rfixed_div(a, rdev->pm.sclk); | ||
875 | read_delay_latency.full = rfixed_const(1000); | ||
876 | |||
877 | /* Determine the worst case latency | ||
878 | * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) | ||
879 | * WorstCaseLatency = worst case time from urgent to when the MC starts | ||
880 | * to return data | ||
881 | * READ_DELAY_IDLE_MAX = constant of 1us | ||
882 | * ChunkTime = time it takes the DCP to send one chunk of data to the LB | ||
883 | * which consists of pipeline delay and inter chunk gap | ||
884 | */ | ||
885 | if (rfixed_trunc(wm->num_line_pair) > 1) { | ||
886 | a.full = rfixed_const(3); | ||
887 | wm->worst_case_latency.full = rfixed_mul(a, chunk_time); | ||
888 | wm->worst_case_latency.full += read_delay_latency.full; | ||
889 | } else { | ||
890 | wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full; | ||
891 | } | ||
892 | |||
893 | /* Determine the tolerable latency | ||
894 | * TolerableLatency = Any given request has only 1 line time | ||
895 | * for the data to be returned | ||
896 | * LBRequestFifoDepth = Number of chunk requests the LB can | ||
897 | * put into the request FIFO for a display | ||
898 | * LineTime = total time for one line of display | ||
899 | * ChunkTime = the time it takes the DCP to send one chunk | ||
900 | * of data to the LB which consists of | ||
901 | * pipeline delay and inter chunk gap | ||
902 | */ | ||
903 | if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) { | ||
904 | tolerable_latency.full = line_time.full; | ||
905 | } else { | ||
906 | tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2); | ||
907 | tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; | ||
908 | tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time); | ||
909 | tolerable_latency.full = line_time.full - tolerable_latency.full; | ||
910 | } | ||
911 | /* We assume worst case 32bits (4 bytes) */ | ||
912 | wm->dbpp.full = rfixed_const(2 * 16); | ||
913 | |||
914 | /* Determine the maximum priority mark | ||
915 | * width = viewport width in pixels | ||
916 | */ | ||
917 | a.full = rfixed_const(16); | ||
918 | wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); | ||
919 | wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); | ||
920 | |||
921 | /* Determine estimated width */ | ||
922 | estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; | ||
923 | estimated_width.full = rfixed_div(estimated_width, consumption_time); | ||
924 | if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { | ||
925 | wm->priority_mark.full = rfixed_const(10); | ||
926 | } else { | ||
927 | a.full = rfixed_const(16); | ||
928 | wm->priority_mark.full = rfixed_div(estimated_width, a); | ||
929 | wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; | ||
930 | } | ||
931 | } | ||
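To put rough numbers on the line-time and active-time steps above: for a hypothetical 1920x1200 mode with mode->clock = 154000 (a 154 MHz pixel clock), crtc_htotal = 2080 and crtc_hdisplay = 1920, pclk works out to 1000/154, about 6.5 ns per pixel; line_time is then 2080 x 6.5, about 13.5 us, and active_time is 13.5 x 1920/2080, about 12.5 us. With no scaling (RMX_OFF) the consumption rate is simply 1/pclk, i.e. the display consumes one pixel's worth of data roughly every 6.5 ns. These figures are illustrative only, not taken from any particular panel.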
932 | |||
933 | void rv515_bandwidth_avivo_update(struct radeon_device *rdev) | ||
934 | { | ||
935 | struct drm_display_mode *mode0 = NULL; | ||
936 | struct drm_display_mode *mode1 = NULL; | ||
937 | struct rv515_watermark wm0; | ||
938 | struct rv515_watermark wm1; | ||
939 | u32 tmp; | ||
940 | fixed20_12 priority_mark02, priority_mark12, fill_rate; | ||
941 | fixed20_12 a, b; | ||
942 | |||
943 | if (rdev->mode_info.crtcs[0]->base.enabled) | ||
944 | mode0 = &rdev->mode_info.crtcs[0]->base.mode; | ||
945 | if (rdev->mode_info.crtcs[1]->base.enabled) | ||
946 | mode1 = &rdev->mode_info.crtcs[1]->base.mode; | ||
947 | rs690_line_buffer_adjust(rdev, mode0, mode1); | ||
948 | |||
949 | rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); | ||
950 | rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); | ||
951 | |||
952 | tmp = wm0.lb_request_fifo_depth; | ||
953 | tmp |= wm1.lb_request_fifo_depth << 16; | ||
954 | WREG32(LB_MAX_REQ_OUTSTANDING, tmp); | ||
955 | |||
956 | if (mode0 && mode1) { | ||
957 | if (rfixed_trunc(wm0.dbpp) > 64) | ||
958 | a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); | ||
959 | else | ||
960 | a.full = wm0.num_line_pair.full; | ||
961 | if (rfixed_trunc(wm1.dbpp) > 64) | ||
962 | b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair); | ||
963 | else | ||
964 | b.full = wm1.num_line_pair.full; | ||
965 | a.full += b.full; | ||
966 | fill_rate.full = rfixed_div(wm0.sclk, a); | ||
967 | if (wm0.consumption_rate.full > fill_rate.full) { | ||
968 | b.full = wm0.consumption_rate.full - fill_rate.full; | ||
969 | b.full = rfixed_mul(b, wm0.active_time); | ||
970 | a.full = rfixed_const(16); | ||
971 | b.full = rfixed_div(b, a); | ||
972 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
973 | wm0.consumption_rate); | ||
974 | priority_mark02.full = a.full + b.full; | ||
975 | } else { | ||
976 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
977 | wm0.consumption_rate); | ||
978 | b.full = rfixed_const(16 * 1000); | ||
979 | priority_mark02.full = rfixed_div(a, b); | ||
980 | } | ||
981 | if (wm1.consumption_rate.full > fill_rate.full) { | ||
982 | b.full = wm1.consumption_rate.full - fill_rate.full; | ||
983 | b.full = rfixed_mul(b, wm1.active_time); | ||
984 | a.full = rfixed_const(16); | ||
985 | b.full = rfixed_div(b, a); | ||
986 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
987 | wm1.consumption_rate); | ||
988 | priority_mark12.full = a.full + b.full; | ||
989 | } else { | ||
990 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
991 | wm1.consumption_rate); | ||
992 | b.full = rfixed_const(16 * 1000); | ||
993 | priority_mark12.full = rfixed_div(a, b); | ||
994 | } | ||
995 | if (wm0.priority_mark.full > priority_mark02.full) | ||
996 | priority_mark02.full = wm0.priority_mark.full; | ||
997 | if (rfixed_trunc(priority_mark02) < 0) | ||
998 | priority_mark02.full = 0; | ||
999 | if (wm0.priority_mark_max.full > priority_mark02.full) | ||
1000 | priority_mark02.full = wm0.priority_mark_max.full; | ||
1001 | if (wm1.priority_mark.full > priority_mark12.full) | ||
1002 | priority_mark12.full = wm1.priority_mark.full; | ||
1003 | if (rfixed_trunc(priority_mark12) < 0) | ||
1004 | priority_mark12.full = 0; | ||
1005 | if (wm1.priority_mark_max.full > priority_mark12.full) | ||
1006 | priority_mark12.full = wm1.priority_mark_max.full; | ||
1007 | WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | ||
1008 | WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | ||
1009 | WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | ||
1010 | WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | ||
1011 | } else if (mode0) { | ||
1012 | if (rfixed_trunc(wm0.dbpp) > 64) | ||
1013 | a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); | ||
1014 | else | ||
1015 | a.full = wm0.num_line_pair.full; | ||
1016 | fill_rate.full = rfixed_div(wm0.sclk, a); | ||
1017 | if (wm0.consumption_rate.full > fill_rate.full) { | ||
1018 | b.full = wm0.consumption_rate.full - fill_rate.full; | ||
1019 | b.full = rfixed_mul(b, wm0.active_time); | ||
1020 | a.full = rfixed_const(16); | ||
1021 | b.full = rfixed_div(b, a); | ||
1022 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
1023 | wm0.consumption_rate); | ||
1024 | priority_mark02.full = a.full + b.full; | ||
1025 | } else { | ||
1026 | a.full = rfixed_mul(wm0.worst_case_latency, | ||
1027 | wm0.consumption_rate); | ||
1028 | b.full = rfixed_const(16); | ||
1029 | priority_mark02.full = rfixed_div(a, b); | ||
1030 | } | ||
1031 | if (wm0.priority_mark.full > priority_mark02.full) | ||
1032 | priority_mark02.full = wm0.priority_mark.full; | ||
1033 | if (rfixed_trunc(priority_mark02) < 0) | ||
1034 | priority_mark02.full = 0; | ||
1035 | if (wm0.priority_mark_max.full > priority_mark02.full) | ||
1036 | priority_mark02.full = wm0.priority_mark_max.full; | ||
1037 | WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); | ||
1038 | WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); | ||
1039 | WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | ||
1040 | WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | ||
1041 | } else { | ||
1042 | if (rfixed_trunc(wm1.dbpp) > 64) | ||
1043 | a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair); | ||
1044 | else | ||
1045 | a.full = wm1.num_line_pair.full; | ||
1046 | fill_rate.full = rfixed_div(wm1.sclk, a); | ||
1047 | if (wm1.consumption_rate.full > fill_rate.full) { | ||
1048 | b.full = wm1.consumption_rate.full - fill_rate.full; | ||
1049 | b.full = rfixed_mul(b, wm1.active_time); | ||
1050 | a.full = rfixed_const(16); | ||
1051 | b.full = rfixed_div(b, a); | ||
1052 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
1053 | wm1.consumption_rate); | ||
1054 | priority_mark12.full = a.full + b.full; | ||
1055 | } else { | ||
1056 | a.full = rfixed_mul(wm1.worst_case_latency, | ||
1057 | wm1.consumption_rate); | ||
1058 | b.full = rfixed_const(16 * 1000); | ||
1059 | priority_mark12.full = rfixed_div(a, b); | ||
1060 | } | ||
1061 | if (wm1.priority_mark.full > priority_mark12.full) | ||
1062 | priority_mark12.full = wm1.priority_mark.full; | ||
1063 | if (rfixed_trunc(priority_mark12) < 0) | ||
1064 | priority_mark12.full = 0; | ||
1065 | if (wm1.priority_mark_max.full > priority_mark12.full) | ||
1066 | priority_mark12.full = wm1.priority_mark_max.full; | ||
1067 | WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); | ||
1068 | WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); | ||
1069 | WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); | ||
1070 | WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); | ||
1071 | } | ||
1072 | } | ||
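Each of the three branches above ends with the same per-display post-processing: raise the computed mark to at least the watermark's own priority_mark, clamp a negative truncation to zero, then raise it again to at least priority_mark_max before programming DxMODE_PRIORITY_*_CNT. A hedged sketch of that step factored into a helper (illustrative only, this patch keeps it open-coded), reusing the structures and fixed-point helpers from the code above:

    /* Illustrative helper, kernel-style; not part of this patch. */
    static fixed20_12 rv515_clamp_priority_mark(const struct rv515_watermark *wm,
                                                fixed20_12 mark)
    {
        if (wm->priority_mark.full > mark.full)
            mark.full = wm->priority_mark.full;
        if (rfixed_trunc(mark) < 0)
            mark.full = 0;
        if (wm->priority_mark_max.full > mark.full)
            mark.full = wm->priority_mark_max.full;
        return mark;
    }

With such a helper, the mode0-only branch for instance would reduce to priority_mark02 = rv515_clamp_priority_mark(&wm0, priority_mark02); just before the two WREG32 calls.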
1073 | |||
1074 | void rv515_bandwidth_update(struct radeon_device *rdev) | ||
1075 | { | ||
1076 | uint32_t tmp; | ||
1077 | struct drm_display_mode *mode0 = NULL; | ||
1078 | struct drm_display_mode *mode1 = NULL; | ||
1079 | |||
1080 | if (rdev->mode_info.crtcs[0]->base.enabled) | ||
1081 | mode0 = &rdev->mode_info.crtcs[0]->base.mode; | ||
1082 | if (rdev->mode_info.crtcs[1]->base.enabled) | ||
1083 | mode1 = &rdev->mode_info.crtcs[1]->base.mode; | ||
1084 | /* | ||
1085 | * Raise the memory controller priority of display0/1 requests |||
1086 | * for the active modes when the user sets the displaypriority |||
1087 | * option to HIGH. |||
1088 | */ | ||
1089 | if (rdev->disp_priority == 2) { | ||
1090 | tmp = RREG32_MC(MC_MISC_LAT_TIMER); | ||
1091 | tmp &= ~MC_DISP1R_INIT_LAT_MASK; | ||
1092 | tmp &= ~MC_DISP0R_INIT_LAT_MASK; | ||
1093 | if (mode1) | ||
1094 | tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT); | ||
1095 | if (mode0) | ||
1096 | tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT); | ||
1097 | WREG32_MC(MC_MISC_LAT_TIMER, tmp); | ||
1098 | } | ||
1099 | rv515_bandwidth_avivo_update(rdev); | ||
1100 | } | ||
diff --git a/drivers/gpu/drm/radeon/rv515r.h b/drivers/gpu/drm/radeon/rv515r.h new file mode 100644 index 000000000000..f3cf84039906 --- /dev/null +++ b/drivers/gpu/drm/radeon/rv515r.h | |||
@@ -0,0 +1,170 @@ | |||
1 | /* | ||
2 | * Copyright 2008 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: Dave Airlie | ||
25 | * Alex Deucher | ||
26 | * Jerome Glisse | ||
27 | */ | ||
28 | #ifndef RV515R_H | ||
29 | #define RV515R_H | ||
30 | |||
31 | /* RV515 registers */ | ||
32 | #define PCIE_INDEX 0x0030 | ||
33 | #define PCIE_DATA 0x0034 | ||
34 | #define MC_IND_INDEX 0x0070 | ||
35 | #define MC_IND_WR_EN (1 << 24) | ||
36 | #define MC_IND_DATA 0x0074 | ||
37 | #define RBBM_SOFT_RESET 0x00F0 | ||
38 | #define CONFIG_MEMSIZE 0x00F8 | ||
39 | #define HDP_FB_LOCATION 0x0134 | ||
40 | #define CP_CSQ_CNTL 0x0740 | ||
41 | #define CP_CSQ_MODE 0x0744 | ||
42 | #define CP_CSQ_ADDR 0x07F0 | ||
43 | #define CP_CSQ_DATA 0x07F4 | ||
44 | #define CP_CSQ_STAT 0x07F8 | ||
45 | #define CP_CSQ2_STAT 0x07FC | ||
46 | #define RBBM_STATUS 0x0E40 | ||
47 | #define DST_PIPE_CONFIG 0x170C | ||
48 | #define WAIT_UNTIL 0x1720 | ||
49 | #define WAIT_2D_IDLE (1 << 14) | ||
50 | #define WAIT_3D_IDLE (1 << 15) | ||
51 | #define WAIT_2D_IDLECLEAN (1 << 16) | ||
52 | #define WAIT_3D_IDLECLEAN (1 << 17) | ||
53 | #define ISYNC_CNTL 0x1724 | ||
54 | #define ISYNC_ANY2D_IDLE3D (1 << 0) | ||
55 | #define ISYNC_ANY3D_IDLE2D (1 << 1) | ||
56 | #define ISYNC_TRIG2D_IDLE3D (1 << 2) | ||
57 | #define ISYNC_TRIG3D_IDLE2D (1 << 3) | ||
58 | #define ISYNC_WAIT_IDLEGUI (1 << 4) | ||
59 | #define ISYNC_CPSCRATCH_IDLEGUI (1 << 5) | ||
60 | #define VAP_INDEX_OFFSET 0x208C | ||
61 | #define VAP_PVS_STATE_FLUSH_REG 0x2284 | ||
62 | #define GB_ENABLE 0x4008 | ||
63 | #define GB_MSPOS0 0x4010 | ||
64 | #define MS_X0_SHIFT 0 | ||
65 | #define MS_Y0_SHIFT 4 | ||
66 | #define MS_X1_SHIFT 8 | ||
67 | #define MS_Y1_SHIFT 12 | ||
68 | #define MS_X2_SHIFT 16 | ||
69 | #define MS_Y2_SHIFT 20 | ||
70 | #define MSBD0_Y_SHIFT 24 | ||
71 | #define MSBD0_X_SHIFT 28 | ||
72 | #define GB_MSPOS1 0x4014 | ||
73 | #define MS_X3_SHIFT 0 | ||
74 | #define MS_Y3_SHIFT 4 | ||
75 | #define MS_X4_SHIFT 8 | ||
76 | #define MS_Y4_SHIFT 12 | ||
77 | #define MS_X5_SHIFT 16 | ||
78 | #define MS_Y5_SHIFT 20 | ||
79 | #define MSBD1_SHIFT 24 | ||
80 | #define GB_TILE_CONFIG 0x4018 | ||
81 | #define ENABLE_TILING (1 << 0) | ||
82 | #define PIPE_COUNT_MASK 0x0000000E | ||
83 | #define PIPE_COUNT_SHIFT 1 | ||
84 | #define TILE_SIZE_8 (0 << 4) | ||
85 | #define TILE_SIZE_16 (1 << 4) | ||
86 | #define TILE_SIZE_32 (2 << 4) | ||
87 | #define SUBPIXEL_1_12 (0 << 16) | ||
88 | #define SUBPIXEL_1_16 (1 << 16) | ||
89 | #define GB_SELECT 0x401C | ||
90 | #define GB_AA_CONFIG 0x4020 | ||
91 | #define GB_PIPE_SELECT 0x402C | ||
92 | #define GA_ENHANCE 0x4274 | ||
93 | #define GA_DEADLOCK_CNTL (1 << 0) | ||
94 | #define GA_FASTSYNC_CNTL (1 << 1) | ||
95 | #define GA_POLY_MODE 0x4288 | ||
96 | #define FRONT_PTYPE_POINT (0 << 4) | ||
97 | #define FRONT_PTYPE_LINE (1 << 4) | ||
98 | #define FRONT_PTYPE_TRIANGE (2 << 4) | ||
99 | #define BACK_PTYPE_POINT (0 << 7) | ||
100 | #define BACK_PTYPE_LINE (1 << 7) | ||
101 | #define BACK_PTYPE_TRIANGE (2 << 7) | ||
102 | #define GA_ROUND_MODE 0x428C | ||
103 | #define GEOMETRY_ROUND_TRUNC (0 << 0) | ||
104 | #define GEOMETRY_ROUND_NEAREST (1 << 0) | ||
105 | #define COLOR_ROUND_TRUNC (0 << 2) | ||
106 | #define COLOR_ROUND_NEAREST (1 << 2) | ||
107 | #define SU_REG_DEST 0x42C8 | ||
108 | #define RB3D_DSTCACHE_CTLSTAT 0x4E4C | ||
109 | #define RB3D_DC_FLUSH (2 << 0) | ||
110 | #define RB3D_DC_FREE (2 << 2) | ||
111 | #define RB3D_DC_FINISH (1 << 4) | ||
112 | #define ZB_ZCACHE_CTLSTAT 0x4F18 | ||
113 | #define ZC_FLUSH (1 << 0) | ||
114 | #define ZC_FREE (1 << 1) | ||
115 | #define DC_LB_MEMORY_SPLIT 0x6520 | ||
116 | #define DC_LB_MEMORY_SPLIT_MASK 0x00000003 | ||
117 | #define DC_LB_MEMORY_SPLIT_SHIFT 0 | ||
118 | #define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0 | ||
119 | #define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1 | ||
120 | #define DC_LB_MEMORY_SPLIT_D1_ONLY 2 | ||
121 | #define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3 | ||
122 | #define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2) | ||
123 | #define DC_LB_DISP1_END_ADR_SHIFT 4 | ||
124 | #define DC_LB_DISP1_END_ADR_MASK 0x00007FF0 | ||
125 | #define D1MODE_PRIORITY_A_CNT 0x6548 | ||
126 | #define MODE_PRIORITY_MARK_MASK 0x00007FFF | ||
127 | #define MODE_PRIORITY_OFF (1 << 16) | ||
128 | #define MODE_PRIORITY_ALWAYS_ON (1 << 20) | ||
129 | #define MODE_PRIORITY_FORCE_MASK (1 << 24) | ||
130 | #define D1MODE_PRIORITY_B_CNT 0x654C | ||
131 | #define LB_MAX_REQ_OUTSTANDING 0x6D58 | ||
132 | #define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F | ||
133 | #define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0 | ||
134 | #define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000 | ||
135 | #define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16 | ||
136 | #define D2MODE_PRIORITY_A_CNT 0x6D48 | ||
137 | #define D2MODE_PRIORITY_B_CNT 0x6D4C | ||
138 | |||
139 | /* ix[MC] registers */ | ||
140 | #define MC_FB_LOCATION 0x01 | ||
141 | #define MC_FB_START_MASK 0x0000FFFF | ||
142 | #define MC_FB_START_SHIFT 0 | ||
143 | #define MC_FB_TOP_MASK 0xFFFF0000 | ||
144 | #define MC_FB_TOP_SHIFT 16 | ||
145 | #define MC_AGP_LOCATION 0x02 | ||
146 | #define MC_AGP_START_MASK 0x0000FFFF | ||
147 | #define MC_AGP_START_SHIFT 0 | ||
148 | #define MC_AGP_TOP_MASK 0xFFFF0000 | ||
149 | #define MC_AGP_TOP_SHIFT 16 | ||
150 | #define MC_AGP_BASE 0x03 | ||
151 | #define MC_AGP_BASE_2 0x04 | ||
152 | #define MC_CNTL 0x5 | ||
153 | #define MEM_NUM_CHANNELS_MASK 0x00000003 | ||
154 | #define MC_STATUS 0x08 | ||
155 | #define MC_STATUS_IDLE (1 << 4) | ||
156 | #define MC_MISC_LAT_TIMER 0x09 | ||
157 | #define MC_CPR_INIT_LAT_MASK 0x0000000F | ||
158 | #define MC_VF_INIT_LAT_MASK 0x000000F0 | ||
159 | #define MC_DISP0R_INIT_LAT_MASK 0x00000F00 | ||
160 | #define MC_DISP0R_INIT_LAT_SHIFT 8 | ||
161 | #define MC_DISP1R_INIT_LAT_MASK 0x0000F000 | ||
162 | #define MC_DISP1R_INIT_LAT_SHIFT 12 | ||
163 | #define MC_FIXED_INIT_LAT_MASK 0x000F0000 | ||
164 | #define MC_E2R_INIT_LAT_MASK 0x00F00000 | ||
165 | #define SAME_PAGE_PRIO_MASK 0x0F000000 | ||
166 | #define MC_GLOBW_INIT_LAT_MASK 0xF0000000 | ||
167 | |||
168 | |||
169 | #endif | ||
170 | |||
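The new ix[MC] definitions at the end of rv515r.h encode multi-bit fields as mask/shift pairs. A small standalone sketch of how a field such as MC_FB_START/MC_FB_TOP would be pulled out of, and written back into, an MC_FB_LOCATION value using those masks; the sample value is made up, and the "address shifted right by 16" interpretation is an assumption based on how the radeon MC code of this era programs these fields:

    #include <stdint.h>
    #include <stdio.h>

    #define MC_FB_START_MASK   0x0000FFFF
    #define MC_FB_START_SHIFT  0
    #define MC_FB_TOP_MASK     0xFFFF0000
    #define MC_FB_TOP_SHIFT    16

    int main(void)
    {
        /* Made-up MC_FB_LOCATION value: VRAM from 0x00000000 to 0x0FFFFFFF. */
        uint32_t fb_loc = 0x0FFF0000;
        uint32_t start = (fb_loc & MC_FB_START_MASK) >> MC_FB_START_SHIFT;
        uint32_t top = (fb_loc & MC_FB_TOP_MASK) >> MC_FB_TOP_SHIFT;

        /* Fields are presumably byte addresses shifted right by 16 bits. */
        printf("fb start 0x%08x, fb end 0x%08x\n",
               start << 16, (top << 16) | 0xFFFF);

        /* Writing a field back: clear it with the mask, then OR the new value. */
        fb_loc &= ~MC_FB_TOP_MASK;
        fb_loc |= (0x1FFF << MC_FB_TOP_SHIFT) & MC_FB_TOP_MASK;  /* grow to 512 MB */
        printf("new MC_FB_LOCATION 0x%08x\n", fb_loc);
        return 0;
    }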
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index da50cc51ede3..21d8ffd57308 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -67,7 +67,7 @@ int rv770_mc_init(struct radeon_device *rdev) | |||
67 | "programming pipes. Bad things might happen.\n"); | 67 | "programming pipes. Bad things might happen.\n"); |
68 | } | 68 | } |
69 | 69 | ||
70 | tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; | 70 | tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
71 | tmp = REG_SET(R700_MC_FB_TOP, tmp >> 24); | 71 | tmp = REG_SET(R700_MC_FB_TOP, tmp >> 24); |
72 | tmp |= REG_SET(R700_MC_FB_BASE, rdev->mc.vram_location >> 24); | 72 | tmp |= REG_SET(R700_MC_FB_BASE, rdev->mc.vram_location >> 24); |
73 | WREG32(R700_MC_VM_FB_LOCATION, tmp); | 73 | WREG32(R700_MC_VM_FB_LOCATION, tmp); |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index c1c407f7cca3..6538d4236989 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -43,7 +43,6 @@ | |||
43 | #define TTM_BO_HASH_ORDER 13 | 43 | #define TTM_BO_HASH_ORDER 13 |
44 | 44 | ||
45 | static int ttm_bo_setup_vm(struct ttm_buffer_object *bo); | 45 | static int ttm_bo_setup_vm(struct ttm_buffer_object *bo); |
46 | static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); | ||
47 | static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); | 46 | static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); |
48 | 47 | ||
49 | static inline uint32_t ttm_bo_type_flags(unsigned type) | 48 | static inline uint32_t ttm_bo_type_flags(unsigned type) |
@@ -224,6 +223,9 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) | |||
224 | TTM_ASSERT_LOCKED(&bo->mutex); | 223 | TTM_ASSERT_LOCKED(&bo->mutex); |
225 | bo->ttm = NULL; | 224 | bo->ttm = NULL; |
226 | 225 | ||
226 | if (bdev->need_dma32) | ||
227 | page_flags |= TTM_PAGE_FLAG_DMA32; | ||
228 | |||
227 | switch (bo->type) { | 229 | switch (bo->type) { |
228 | case ttm_bo_type_device: | 230 | case ttm_bo_type_device: |
229 | if (zero_alloc) | 231 | if (zero_alloc) |
@@ -304,6 +306,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, | |||
304 | 306 | ||
305 | } | 307 | } |
306 | 308 | ||
309 | if (bdev->driver->move_notify) | ||
310 | bdev->driver->move_notify(bo, mem); | ||
311 | |||
307 | if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && | 312 | if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && |
308 | !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) | 313 | !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) |
309 | ret = ttm_bo_move_ttm(bo, evict, no_wait, mem); | 314 | ret = ttm_bo_move_ttm(bo, evict, no_wait, mem); |
@@ -655,31 +660,52 @@ retry_pre_get: | |||
655 | return 0; | 660 | return 0; |
656 | } | 661 | } |
657 | 662 | ||
663 | static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, | ||
664 | uint32_t cur_placement, | ||
665 | uint32_t proposed_placement) | ||
666 | { | ||
667 | uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING; | ||
668 | uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING; | ||
669 | |||
670 | /** | ||
671 | * Keep current caching if possible. | ||
672 | */ | ||
673 | |||
674 | if ((cur_placement & caching) != 0) | ||
675 | result |= (cur_placement & caching); | ||
676 | else if ((man->default_caching & caching) != 0) | ||
677 | result |= man->default_caching; | ||
678 | else if ((TTM_PL_FLAG_CACHED & caching) != 0) | ||
679 | result |= TTM_PL_FLAG_CACHED; | ||
680 | else if ((TTM_PL_FLAG_WC & caching) != 0) | ||
681 | result |= TTM_PL_FLAG_WC; | ||
682 | else if ((TTM_PL_FLAG_UNCACHED & caching) != 0) | ||
683 | result |= TTM_PL_FLAG_UNCACHED; | ||
684 | |||
685 | return result; | ||
686 | } | ||
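ttm_bo_select_caching above prefers, in order: the buffer's current caching if the proposed placement allows it, then the memory type's default caching, then CACHED, WC, and finally UNCACHED. A standalone sketch of that fallback with placeholder flag values (the real TTM_PL_FLAG_* constants live in ttm/ttm_placement.h and differ from these):

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder caching flags; the real values come from ttm_placement.h. */
    #define PL_FLAG_CACHED   (1u << 0)
    #define PL_FLAG_WC       (1u << 1)
    #define PL_FLAG_UNCACHED (1u << 2)
    #define PL_MASK_CACHING  (PL_FLAG_CACHED | PL_FLAG_WC | PL_FLAG_UNCACHED)

    static uint32_t select_caching(uint32_t default_caching,
                                   uint32_t cur_placement,
                                   uint32_t proposed_placement)
    {
        uint32_t caching = proposed_placement & PL_MASK_CACHING;
        uint32_t result = proposed_placement & ~PL_MASK_CACHING;

        if (cur_placement & caching)           /* keep current caching if allowed */
            result |= cur_placement & caching;
        else if (default_caching & caching)    /* else the memory type's default */
            result |= default_caching;
        else if (caching & PL_FLAG_CACHED)     /* else fall back in a fixed order */
            result |= PL_FLAG_CACHED;
        else if (caching & PL_FLAG_WC)
            result |= PL_FLAG_WC;
        else if (caching & PL_FLAG_UNCACHED)
            result |= PL_FLAG_UNCACHED;

        return result;
    }

    int main(void)
    {
        /* Buffer currently WC, proposal allows WC or UNCACHED: WC is kept. */
        uint32_t r = select_caching(PL_FLAG_CACHED, PL_FLAG_WC,
                                    PL_FLAG_WC | PL_FLAG_UNCACHED);
        printf("selected caching 0x%x (expect 0x%x)\n", r, PL_FLAG_WC);
        return 0;
    }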
687 | |||
688 | |||
658 | static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, | 689 | static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, |
659 | bool disallow_fixed, | 690 | bool disallow_fixed, |
660 | uint32_t mem_type, | 691 | uint32_t mem_type, |
661 | uint32_t mask, uint32_t *res_mask) | 692 | uint32_t proposed_placement, |
693 | uint32_t *masked_placement) | ||
662 | { | 694 | { |
663 | uint32_t cur_flags = ttm_bo_type_flags(mem_type); | 695 | uint32_t cur_flags = ttm_bo_type_flags(mem_type); |
664 | 696 | ||
665 | if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed) | 697 | if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed) |
666 | return false; | 698 | return false; |
667 | 699 | ||
668 | if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0) | 700 | if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0) |
669 | return false; | 701 | return false; |
670 | 702 | ||
671 | if ((mask & man->available_caching) == 0) | 703 | if ((proposed_placement & man->available_caching) == 0) |
672 | return false; | 704 | return false; |
673 | if (mask & man->default_caching) | ||
674 | cur_flags |= man->default_caching; | ||
675 | else if (mask & TTM_PL_FLAG_CACHED) | ||
676 | cur_flags |= TTM_PL_FLAG_CACHED; | ||
677 | else if (mask & TTM_PL_FLAG_WC) | ||
678 | cur_flags |= TTM_PL_FLAG_WC; | ||
679 | else | ||
680 | cur_flags |= TTM_PL_FLAG_UNCACHED; | ||
681 | 705 | ||
682 | *res_mask = cur_flags; | 706 | cur_flags |= (proposed_placement & man->available_caching); |
707 | |||
708 | *masked_placement = cur_flags; | ||
683 | return true; | 709 | return true; |
684 | } | 710 | } |
685 | 711 | ||
@@ -723,6 +749,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
723 | if (!type_ok) | 749 | if (!type_ok) |
724 | continue; | 750 | continue; |
725 | 751 | ||
752 | cur_flags = ttm_bo_select_caching(man, bo->mem.placement, | ||
753 | cur_flags); | ||
754 | |||
726 | if (mem_type == TTM_PL_SYSTEM) | 755 | if (mem_type == TTM_PL_SYSTEM) |
727 | break; | 756 | break; |
728 | 757 | ||
@@ -779,6 +808,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
779 | proposed_placement, &cur_flags)) | 808 | proposed_placement, &cur_flags)) |
780 | continue; | 809 | continue; |
781 | 810 | ||
811 | cur_flags = ttm_bo_select_caching(man, bo->mem.placement, | ||
812 | cur_flags); | ||
813 | |||
782 | ret = ttm_bo_mem_force_space(bdev, mem, mem_type, | 814 | ret = ttm_bo_mem_force_space(bdev, mem, mem_type, |
783 | interruptible, no_wait); | 815 | interruptible, no_wait); |
784 | 816 | ||
@@ -1305,7 +1337,8 @@ EXPORT_SYMBOL(ttm_bo_device_release); | |||
1305 | 1337 | ||
1306 | int ttm_bo_device_init(struct ttm_bo_device *bdev, | 1338 | int ttm_bo_device_init(struct ttm_bo_device *bdev, |
1307 | struct ttm_mem_global *mem_glob, | 1339 | struct ttm_mem_global *mem_glob, |
1308 | struct ttm_bo_driver *driver, uint64_t file_page_offset) | 1340 | struct ttm_bo_driver *driver, uint64_t file_page_offset, |
1341 | bool need_dma32) | ||
1309 | { | 1342 | { |
1310 | int ret = -EINVAL; | 1343 | int ret = -EINVAL; |
1311 | 1344 | ||
@@ -1342,6 +1375,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, | |||
1342 | INIT_LIST_HEAD(&bdev->ddestroy); | 1375 | INIT_LIST_HEAD(&bdev->ddestroy); |
1343 | INIT_LIST_HEAD(&bdev->swap_lru); | 1376 | INIT_LIST_HEAD(&bdev->swap_lru); |
1344 | bdev->dev_mapping = NULL; | 1377 | bdev->dev_mapping = NULL; |
1378 | bdev->need_dma32 = need_dma32; | ||
1345 | ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout); | 1379 | ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout); |
1346 | ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink); | 1380 | ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink); |
1347 | if (unlikely(ret != 0)) { | 1381 | if (unlikely(ret != 0)) { |
@@ -1419,6 +1453,7 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) | |||
1419 | 1453 | ||
1420 | unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); | 1454 | unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); |
1421 | } | 1455 | } |
1456 | EXPORT_SYMBOL(ttm_bo_unmap_virtual); | ||
1422 | 1457 | ||
1423 | static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) | 1458 | static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) |
1424 | { | 1459 | { |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index bdec583901eb..ce2e6f38ea01 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -136,7 +136,8 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page) | |||
136 | } | 136 | } |
137 | 137 | ||
138 | static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, | 138 | static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, |
139 | unsigned long page) | 139 | unsigned long page, |
140 | pgprot_t prot) | ||
140 | { | 141 | { |
141 | struct page *d = ttm_tt_get_page(ttm, page); | 142 | struct page *d = ttm_tt_get_page(ttm, page); |
142 | void *dst; | 143 | void *dst; |
@@ -145,17 +146,35 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, | |||
145 | return -ENOMEM; | 146 | return -ENOMEM; |
146 | 147 | ||
147 | src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); | 148 | src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); |
148 | dst = kmap(d); | 149 | |
150 | #ifdef CONFIG_X86 | ||
151 | dst = kmap_atomic_prot(d, KM_USER0, prot); | ||
152 | #else | ||
153 | if (prot != PAGE_KERNEL) | ||
154 | dst = vmap(&d, 1, 0, prot); | ||
155 | else | ||
156 | dst = kmap(d); | ||
157 | #endif | ||
149 | if (!dst) | 158 | if (!dst) |
150 | return -ENOMEM; | 159 | return -ENOMEM; |
151 | 160 | ||
152 | memcpy_fromio(dst, src, PAGE_SIZE); | 161 | memcpy_fromio(dst, src, PAGE_SIZE); |
153 | kunmap(d); | 162 | |
163 | #ifdef CONFIG_X86 | ||
164 | kunmap_atomic(dst, KM_USER0); | ||
165 | #else | ||
166 | if (prot != PAGE_KERNEL) | ||
167 | vunmap(dst); | ||
168 | else | ||
169 | kunmap(d); | ||
170 | #endif | ||
171 | |||
154 | return 0; | 172 | return 0; |
155 | } | 173 | } |
156 | 174 | ||
157 | static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, | 175 | static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, |
158 | unsigned long page) | 176 | unsigned long page, |
177 | pgprot_t prot) | ||
159 | { | 178 | { |
160 | struct page *s = ttm_tt_get_page(ttm, page); | 179 | struct page *s = ttm_tt_get_page(ttm, page); |
161 | void *src; | 180 | void *src; |
@@ -164,12 +183,28 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, | |||
164 | return -ENOMEM; | 183 | return -ENOMEM; |
165 | 184 | ||
166 | dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); | 185 | dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); |
167 | src = kmap(s); | 186 | #ifdef CONFIG_X86 |
187 | src = kmap_atomic_prot(s, KM_USER0, prot); | ||
188 | #else | ||
189 | if (prot != PAGE_KERNEL) | ||
190 | src = vmap(&s, 1, 0, prot); | ||
191 | else | ||
192 | src = kmap(s); | ||
193 | #endif | ||
168 | if (!src) | 194 | if (!src) |
169 | return -ENOMEM; | 195 | return -ENOMEM; |
170 | 196 | ||
171 | memcpy_toio(dst, src, PAGE_SIZE); | 197 | memcpy_toio(dst, src, PAGE_SIZE); |
172 | kunmap(s); | 198 | |
199 | #ifdef CONFIG_X86 | ||
200 | kunmap_atomic(src, KM_USER0); | ||
201 | #else | ||
202 | if (prot != PAGE_KERNEL) | ||
203 | vunmap(src); | ||
204 | else | ||
205 | kunmap(s); | ||
206 | #endif | ||
207 | |||
173 | return 0; | 208 | return 0; |
174 | } | 209 | } |
175 | 210 | ||
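The two copy helpers above now take the target pgprot so the bounce pages are mapped with the same caching attributes the buffer will use. The #ifdef dance (kmap_atomic_prot() on x86, vmap() with an explicit prot elsewhere, plain kmap() when PAGE_KERNEL is fine) repeats in both directions; a hedged sketch of how it could be factored into a map/unmap pair, illustrative only, using exactly the calls already present in the hunks above:

    static void *ttm_kmap_prot(struct page *p, pgprot_t prot)
    {
    #ifdef CONFIG_X86
        return kmap_atomic_prot(p, KM_USER0, prot);
    #else
        if (prot != PAGE_KERNEL)
            return vmap(&p, 1, 0, prot);    /* single-page mapping with prot */
        return kmap(p);
    #endif
    }

    static void ttm_kunmap_prot(struct page *p, pgprot_t prot, void *virt)
    {
    #ifdef CONFIG_X86
        kunmap_atomic(virt, KM_USER0);
    #else
        if (prot != PAGE_KERNEL)
            vunmap(virt);
        else
            kunmap(p);
    #endif
    }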
@@ -214,11 +249,17 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, | |||
214 | 249 | ||
215 | for (i = 0; i < new_mem->num_pages; ++i) { | 250 | for (i = 0; i < new_mem->num_pages; ++i) { |
216 | page = i * dir + add; | 251 | page = i * dir + add; |
217 | if (old_iomap == NULL) | 252 | if (old_iomap == NULL) { |
218 | ret = ttm_copy_ttm_io_page(ttm, new_iomap, page); | 253 | pgprot_t prot = ttm_io_prot(old_mem->placement, |
219 | else if (new_iomap == NULL) | 254 | PAGE_KERNEL); |
220 | ret = ttm_copy_io_ttm_page(ttm, old_iomap, page); | 255 | ret = ttm_copy_ttm_io_page(ttm, new_iomap, page, |
221 | else | 256 | prot); |
257 | } else if (new_iomap == NULL) { | ||
258 | pgprot_t prot = ttm_io_prot(new_mem->placement, | ||
259 | PAGE_KERNEL); | ||
260 | ret = ttm_copy_io_ttm_page(ttm, old_iomap, page, | ||
261 | prot); | ||
262 | } else | ||
222 | ret = ttm_copy_io_page(new_iomap, old_iomap, page); | 263 | ret = ttm_copy_io_page(new_iomap, old_iomap, page); |
223 | if (ret) | 264 | if (ret) |
224 | goto out1; | 265 | goto out1; |
@@ -509,8 +550,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
509 | if (evict) { | 550 | if (evict) { |
510 | ret = ttm_bo_wait(bo, false, false, false); | 551 | ret = ttm_bo_wait(bo, false, false, false); |
511 | spin_unlock(&bo->lock); | 552 | spin_unlock(&bo->lock); |
512 | driver->sync_obj_unref(&bo->sync_obj); | 553 | if (tmp_obj) |
513 | 554 | driver->sync_obj_unref(&tmp_obj); | |
514 | if (ret) | 555 | if (ret) |
515 | return ret; | 556 | return ret; |
516 | 557 | ||
@@ -532,6 +573,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
532 | 573 | ||
533 | set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); | 574 | set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); |
534 | spin_unlock(&bo->lock); | 575 | spin_unlock(&bo->lock); |
576 | if (tmp_obj) | ||
577 | driver->sync_obj_unref(&tmp_obj); | ||
535 | 578 | ||
536 | ret = ttm_buffer_object_transfer(bo, &ghost_obj); | 579 | ret = ttm_buffer_object_transfer(bo, &ghost_obj); |
537 | if (ret) | 580 | if (ret) |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index fe949a12fe40..33de7637c0c6 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c | |||
@@ -101,6 +101,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
101 | return VM_FAULT_NOPAGE; | 101 | return VM_FAULT_NOPAGE; |
102 | } | 102 | } |
103 | 103 | ||
104 | if (bdev->driver->fault_reserve_notify) | ||
105 | bdev->driver->fault_reserve_notify(bo); | ||
106 | |||
104 | /* | 107 | /* |
105 | * Wait for buffer data in transit, due to a pipelined | 108 | * Wait for buffer data in transit, due to a pipelined |
106 | * move. | 109 | * move. |
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 75dc8bd24592..b8b6c4a5f983 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -86,10 +86,16 @@ void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages) | |||
86 | unsigned long i; | 86 | unsigned long i; |
87 | 87 | ||
88 | for (i = 0; i < num_pages; ++i) { | 88 | for (i = 0; i < num_pages; ++i) { |
89 | if (pages[i]) { | 89 | struct page *page = pages[i]; |
90 | unsigned long start = (unsigned long)page_address(pages[i]); | 90 | void *page_virtual; |
91 | flush_dcache_range(start, start + PAGE_SIZE); | 91 | |
92 | } | 92 | if (unlikely(page == NULL)) |
93 | continue; | ||
94 | |||
95 | page_virtual = kmap_atomic(page, KM_USER0); | ||
96 | flush_dcache_range((unsigned long) page_virtual, | ||
97 | (unsigned long) page_virtual + PAGE_SIZE); | ||
98 | kunmap_atomic(page_virtual, KM_USER0); | ||
93 | } | 99 | } |
94 | #else | 100 | #else |
95 | if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0) | 101 | if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0) |
@@ -131,10 +137,17 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm) | |||
131 | 137 | ||
132 | static struct page *ttm_tt_alloc_page(unsigned page_flags) | 138 | static struct page *ttm_tt_alloc_page(unsigned page_flags) |
133 | { | 139 | { |
140 | gfp_t gfp_flags = GFP_USER; | ||
141 | |||
134 | if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) | 142 | if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) |
135 | return alloc_page(GFP_HIGHUSER | __GFP_ZERO); | 143 | gfp_flags |= __GFP_ZERO; |
144 | |||
145 | if (page_flags & TTM_PAGE_FLAG_DMA32) | ||
146 | gfp_flags |= __GFP_DMA32; | ||
147 | else | ||
148 | gfp_flags |= __GFP_HIGHMEM; | ||
136 | 149 | ||
137 | return alloc_page(GFP_HIGHUSER); | 150 | return alloc_page(gfp_flags); |
138 | } | 151 | } |
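ttm_tt_alloc_page now builds its gfp mask from the page flags instead of hard-coding GFP_HIGHUSER: zeroed allocation when TTM_PAGE_FLAG_ZERO_ALLOC is set, and __GFP_DMA32 instead of __GFP_HIGHMEM when the device needs 32-bit DMA addresses (the need_dma32 flag plumbed through ttm_bo_device_init earlier in this diff). A standalone sketch of the same decision with placeholder flag values, just to make the mapping explicit:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder flag values; the real ones come from ttm_bo_driver.h / gfp.h. */
    #define PAGE_FLAG_ZERO_ALLOC (1u << 0)
    #define PAGE_FLAG_DMA32      (1u << 1)

    #define GFP_USER_BIT     (1u << 8)
    #define GFP_ZERO_BIT     (1u << 9)
    #define GFP_DMA32_BIT    (1u << 10)
    #define GFP_HIGHMEM_BIT  (1u << 11)

    static uint32_t tt_alloc_gfp(uint32_t page_flags)
    {
        uint32_t gfp = GFP_USER_BIT;

        if (page_flags & PAGE_FLAG_ZERO_ALLOC)
            gfp |= GFP_ZERO_BIT;             /* caller wants zeroed pages */

        if (page_flags & PAGE_FLAG_DMA32)
            gfp |= GFP_DMA32_BIT;            /* device can only address 4 GB */
        else
            gfp |= GFP_HIGHMEM_BIT;          /* otherwise highmem is fine */

        return gfp;
    }

    int main(void)
    {
        printf("dma32+zero -> 0x%x\n",
               tt_alloc_gfp(PAGE_FLAG_ZERO_ALLOC | PAGE_FLAG_DMA32));
        return 0;
    }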
139 | 152 | ||
140 | static void ttm_tt_free_user_pages(struct ttm_tt *ttm) | 153 | static void ttm_tt_free_user_pages(struct ttm_tt *ttm) |
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index bff0103610c1..fe4fa29c9219 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c | |||
@@ -593,7 +593,11 @@ static int atk_add_sensor(struct atk_data *data, union acpi_object *obj) | |||
593 | sensor->data = data; | 593 | sensor->data = data; |
594 | sensor->id = flags->integer.value; | 594 | sensor->id = flags->integer.value; |
595 | sensor->limit1 = limit1->integer.value; | 595 | sensor->limit1 = limit1->integer.value; |
596 | sensor->limit2 = limit2->integer.value; | 596 | if (data->old_interface) |
597 | sensor->limit2 = limit2->integer.value; | ||
598 | else | ||
599 | /* The upper limit is expressed as delta from lower limit */ | ||
600 | sensor->limit2 = sensor->limit1 + limit2->integer.value; | ||
597 | 601 | ||
598 | snprintf(sensor->input_attr_name, ATTR_NAME_SIZE, | 602 | snprintf(sensor->input_attr_name, ATTR_NAME_SIZE, |
599 | "%s%d_input", base_name, start + *num); | 603 | "%s%d_input", base_name, start + *num); |
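A concrete illustration of the change, with made-up numbers: if the ACPI tables report limit1 = 3000 and a second value of 600 for a sensor, the old interface treats 600 as the absolute upper limit, while on the new interface the driver now computes 3000 + 600 = 3600, since the firmware expresses the upper limit as a delta from the lower one.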
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c index a92dbb97ee99..ba75bfcf14ce 100644 --- a/drivers/hwmon/smsc47m1.c +++ b/drivers/hwmon/smsc47m1.c | |||
@@ -86,6 +86,7 @@ superio_exit(void) | |||
86 | #define SUPERIO_REG_ACT 0x30 | 86 | #define SUPERIO_REG_ACT 0x30 |
87 | #define SUPERIO_REG_BASE 0x60 | 87 | #define SUPERIO_REG_BASE 0x60 |
88 | #define SUPERIO_REG_DEVID 0x20 | 88 | #define SUPERIO_REG_DEVID 0x20 |
89 | #define SUPERIO_REG_DEVREV 0x21 | ||
89 | 90 | ||
90 | /* Logical device registers */ | 91 | /* Logical device registers */ |
91 | 92 | ||
@@ -429,6 +430,9 @@ static int __init smsc47m1_find(unsigned short *addr, | |||
429 | * The LPC47M292 (device id 0x6B) is somewhat compatible, but it | 430 | * The LPC47M292 (device id 0x6B) is somewhat compatible, but it |
430 | * supports a 3rd fan, and the pin configuration registers are | 431 | * supports a 3rd fan, and the pin configuration registers are |
431 | * unfortunately different. | 432 | * unfortunately different. |
433 | * The LPC47M233 has the same device id (0x6B) but is not compatible. | ||
434 | * We check the high bit of the device revision register to | ||
435 | * differentiate them. | ||
432 | */ | 436 | */ |
433 | switch (val) { | 437 | switch (val) { |
434 | case 0x51: | 438 | case 0x51: |
@@ -448,6 +452,13 @@ static int __init smsc47m1_find(unsigned short *addr, | |||
448 | sio_data->type = smsc47m1; | 452 | sio_data->type = smsc47m1; |
449 | break; | 453 | break; |
450 | case 0x6B: | 454 | case 0x6B: |
455 | if (superio_inb(SUPERIO_REG_DEVREV) & 0x80) { | ||
456 | pr_debug(DRVNAME ": " | ||
457 | "Found SMSC LPC47M233, unsupported\n"); | ||
458 | superio_exit(); | ||
459 | return -ENODEV; | ||
460 | } | ||
461 | |||
451 | pr_info(DRVNAME ": Found SMSC LPC47M292\n"); | 462 | pr_info(DRVNAME ": Found SMSC LPC47M292\n"); |
452 | sio_data->type = smsc47m2; | 463 | sio_data->type = smsc47m2; |
453 | break; | 464 | break; |
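The detection logic above in isolation, with a stubbed register read; the register offset, device id and revision bit come from the hunk, the stub itself is invented:

    #include <stdio.h>

    #define SUPERIO_REG_DEVREV 0x21

    /* Stand-in for superio_inb(); returns an invented revision value. */
    static unsigned char stub_superio_inb(int reg)
    {
            return reg == SUPERIO_REG_DEVREV ? 0x80 : 0x00;
    }

    int main(void)
    {
            unsigned char devid = 0x6B;     /* shared by LPC47M292 and LPC47M233 */

            if (devid == 0x6B) {
                    if (stub_superio_inb(SUPERIO_REG_DEVREV) & 0x80)
                            printf("LPC47M233 detected: not compatible, bail out\n");
                    else
                            printf("LPC47M292 detected: supported\n");
            }
            return 0;
    }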
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index fdd83277c8a8..d258b02aef44 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c | |||
@@ -672,9 +672,10 @@ omap_i2c_isr(int this_irq, void *dev_id) | |||
672 | break; | 672 | break; |
673 | } | 673 | } |
674 | 674 | ||
675 | err = 0; | ||
676 | complete: | ||
675 | omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat); | 677 | omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat); |
676 | 678 | ||
677 | err = 0; | ||
678 | if (stat & OMAP_I2C_STAT_NACK) { | 679 | if (stat & OMAP_I2C_STAT_NACK) { |
679 | err |= OMAP_I2C_STAT_NACK; | 680 | err |= OMAP_I2C_STAT_NACK; |
680 | omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, | 681 | omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, |
@@ -685,16 +686,19 @@ omap_i2c_isr(int this_irq, void *dev_id) | |||
685 | err |= OMAP_I2C_STAT_AL; | 686 | err |= OMAP_I2C_STAT_AL; |
686 | } | 687 | } |
687 | if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | | 688 | if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | |
688 | OMAP_I2C_STAT_AL)) | 689 | OMAP_I2C_STAT_AL)) { |
689 | omap_i2c_complete_cmd(dev, err); | 690 | omap_i2c_complete_cmd(dev, err); |
691 | return IRQ_HANDLED; | ||
692 | } | ||
690 | if (stat & (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR)) { | 693 | if (stat & (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR)) { |
691 | u8 num_bytes = 1; | 694 | u8 num_bytes = 1; |
692 | if (dev->fifo_size) { | 695 | if (dev->fifo_size) { |
693 | if (stat & OMAP_I2C_STAT_RRDY) | 696 | if (stat & OMAP_I2C_STAT_RRDY) |
694 | num_bytes = dev->fifo_size; | 697 | num_bytes = dev->fifo_size; |
695 | else | 698 | else /* read RXSTAT on RDR interrupt */ |
696 | num_bytes = omap_i2c_read_reg(dev, | 699 | num_bytes = (omap_i2c_read_reg(dev, |
697 | OMAP_I2C_BUFSTAT_REG); | 700 | OMAP_I2C_BUFSTAT_REG) |
701 | >> 8) & 0x3F; | ||
698 | } | 702 | } |
699 | while (num_bytes) { | 703 | while (num_bytes) { |
700 | num_bytes--; | 704 | num_bytes--; |
@@ -731,9 +735,10 @@ omap_i2c_isr(int this_irq, void *dev_id) | |||
731 | if (dev->fifo_size) { | 735 | if (dev->fifo_size) { |
732 | if (stat & OMAP_I2C_STAT_XRDY) | 736 | if (stat & OMAP_I2C_STAT_XRDY) |
733 | num_bytes = dev->fifo_size; | 737 | num_bytes = dev->fifo_size; |
734 | else | 738 | else /* read TXSTAT on XDR interrupt */ |
735 | num_bytes = omap_i2c_read_reg(dev, | 739 | num_bytes = omap_i2c_read_reg(dev, |
736 | OMAP_I2C_BUFSTAT_REG); | 740 | OMAP_I2C_BUFSTAT_REG) |
741 | & 0x3F; | ||
737 | } | 742 | } |
738 | while (num_bytes) { | 743 | while (num_bytes) { |
739 | num_bytes--; | 744 | num_bytes--; |
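Both BUFSTAT changes above boil down to masking out the relevant field instead of using the whole register: per the hunks, RXSTAT sits in bits 13:8 and TXSTAT in bits 5:0. A standalone illustration with an invented register value:

    #include <stdio.h>

    int main(void)
    {
            unsigned int bufstat = 0x0504;  /* invented OMAP_I2C_BUFSTAT_REG value */

            unsigned int txstat = bufstat & 0x3F;           /* bits 5:0  */
            unsigned int rxstat = (bufstat >> 8) & 0x3F;    /* bits 13:8 */

            printf("TXSTAT field: %u\n", txstat);   /* 4 */
            printf("RXSTAT field: %u\n", rxstat);   /* 5 */
            return 0;
    }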
@@ -760,6 +765,27 @@ omap_i2c_isr(int this_irq, void *dev_id) | |||
760 | "data to send\n"); | 765 | "data to send\n"); |
761 | break; | 766 | break; |
762 | } | 767 | } |
768 | |||
769 | /* | ||
770 | * OMAP3430 Errata 1.153: When an XRDY/XDR | ||
771 | * is hit, wait for XUDF before writing data | ||
772 | * to DATA_REG. Otherwise some data bytes can | ||
773 | * be lost while transferring them from the | ||
774 | * memory to the I2C interface. | ||
775 | */ | ||
776 | |||
777 | if (cpu_is_omap34xx()) { | ||
778 | while (!(stat & OMAP_I2C_STAT_XUDF)) { | ||
779 | if (stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) { | ||
780 | omap_i2c_ack_stat(dev, stat & (OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)); | ||
781 | err |= OMAP_I2C_STAT_XUDF; | ||
782 | goto complete; | ||
783 | } | ||
784 | cpu_relax(); | ||
785 | stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG); | ||
786 | } | ||
787 | } | ||
788 | |||
763 | omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w); | 789 | omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w); |
764 | } | 790 | } |
765 | omap_i2c_ack_stat(dev, | 791 | omap_i2c_ack_stat(dev, |
@@ -879,7 +905,7 @@ omap_i2c_probe(struct platform_device *pdev) | |||
879 | i2c_set_adapdata(adap, dev); | 905 | i2c_set_adapdata(adap, dev); |
880 | adap->owner = THIS_MODULE; | 906 | adap->owner = THIS_MODULE; |
881 | adap->class = I2C_CLASS_HWMON; | 907 | adap->class = I2C_CLASS_HWMON; |
882 | strncpy(adap->name, "OMAP I2C adapter", sizeof(adap->name)); | 908 | strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name)); |
883 | adap->algo = &omap_i2c_algo; | 909 | adap->algo = &omap_i2c_algo; |
884 | adap->dev.parent = &pdev->dev; | 910 | adap->dev.parent = &pdev->dev; |
885 | 911 | ||
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 8f42a4536cdf..20bb0ceb027b 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c | |||
@@ -763,11 +763,6 @@ static int s3c24xx_i2c_init(struct s3c24xx_i2c *i2c) | |||
763 | dev_info(i2c->dev, "bus frequency set to %d KHz\n", freq); | 763 | dev_info(i2c->dev, "bus frequency set to %d KHz\n", freq); |
764 | dev_dbg(i2c->dev, "S3C2410_IICCON=0x%02lx\n", iicon); | 764 | dev_dbg(i2c->dev, "S3C2410_IICCON=0x%02lx\n", iicon); |
765 | 765 | ||
766 | /* check for s3c2440 i2c controller */ | ||
767 | |||
768 | if (s3c24xx_i2c_is2440(i2c)) | ||
769 | writel(0x0, i2c->regs + S3C2440_IICLC); | ||
770 | |||
771 | return 0; | 766 | return 0; |
772 | } | 767 | } |
773 | 768 | ||
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 4f3d99cd1692..820487d0d5c7 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c | |||
@@ -637,7 +637,7 @@ static void __exit sh_mobile_i2c_adap_exit(void) | |||
637 | platform_driver_unregister(&sh_mobile_i2c_driver); | 637 | platform_driver_unregister(&sh_mobile_i2c_driver); |
638 | } | 638 | } |
639 | 639 | ||
640 | module_init(sh_mobile_i2c_adap_init); | 640 | subsys_initcall(sh_mobile_i2c_adap_init); |
641 | module_exit(sh_mobile_i2c_adap_exit); | 641 | module_exit(sh_mobile_i2c_adap_exit); |
642 | 642 | ||
643 | MODULE_DESCRIPTION("SuperH Mobile I2C Bus Controller driver"); | 643 | MODULE_DESCRIPTION("SuperH Mobile I2C Bus Controller driver"); |
diff --git a/drivers/i2c/chips/tsl2550.c b/drivers/i2c/chips/tsl2550.c index 1a9cc135219f..b96f3025e588 100644 --- a/drivers/i2c/chips/tsl2550.c +++ b/drivers/i2c/chips/tsl2550.c | |||
@@ -27,7 +27,7 @@ | |||
27 | #include <linux/delay.h> | 27 | #include <linux/delay.h> |
28 | 28 | ||
29 | #define TSL2550_DRV_NAME "tsl2550" | 29 | #define TSL2550_DRV_NAME "tsl2550" |
30 | #define DRIVER_VERSION "1.1.1" | 30 | #define DRIVER_VERSION "1.1.2" |
31 | 31 | ||
32 | /* | 32 | /* |
33 | * Defines | 33 | * Defines |
@@ -189,13 +189,16 @@ static int tsl2550_calculate_lux(u8 ch0, u8 ch1) | |||
189 | u8 r = 128; | 189 | u8 r = 128; |
190 | 190 | ||
191 | /* Avoid division by 0 and count 1 cannot be greater than count 0 */ | 191 | /* Avoid division by 0 and count 1 cannot be greater than count 0 */ |
192 | if (c0 && (c1 <= c0)) | 192 | if (c1 <= c0) |
193 | r = c1 * 128 / c0; | 193 | if (c0) { |
194 | r = c1 * 128 / c0; | ||
195 | |||
196 | /* Calculate LUX */ | ||
197 | lux = ((c0 - c1) * ratio_lut[r]) / 256; | ||
198 | } else | ||
199 | lux = 0; | ||
194 | else | 200 | else |
195 | return -1; | 201 | return -EAGAIN; |
196 | |||
197 | /* Calculate LUX */ | ||
198 | lux = ((c0 - c1) * ratio_lut[r]) / 256; | ||
199 | 202 | ||
200 | /* LUX range check */ | 203 | /* LUX range check */ |
201 | return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux; | 204 | return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux; |
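The reworked calculation above, reduced to a standalone sketch. The structure (bogus sample when c1 > c0, darkness when c0 == 0, lookup-scaled difference otherwise) follows the hunk; the flat ratio table and the clamp value are placeholders, not the driver's real ones:

    #include <stdio.h>

    #define EAGAIN  11              /* stand-in for the kernel's -EAGAIN */
    #define MAX_LUX 1000            /* placeholder clamp, not the driver's */

    /* Placeholder for the driver's ratio_lut[]: a flat table. */
    static unsigned int ratio_lut(unsigned int r)
    {
            (void)r;
            return 100;
    }

    static int calculate_lux(unsigned int c0, unsigned int c1)
    {
            unsigned int r = 128;
            int lux;

            if (c1 > c0)
                    return -EAGAIN;         /* bogus reading: ask caller to retry */

            if (c0) {
                    r = c1 * 128 / c0;
                    lux = (int)(((c0 - c1) * ratio_lut(r)) / 256);
            } else {
                    lux = 0;                /* both channels zero: darkness */
            }

            return lux > MAX_LUX ? MAX_LUX : lux;
    }

    int main(void)
    {
            printf("%d\n", calculate_lux(0, 0));    /*   0 */
            printf("%d\n", calculate_lux(10, 20));  /* -11, i.e. -EAGAIN */
            printf("%d\n", calculate_lux(200, 50)); /*  58 with the flat table */
            return 0;
    }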
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index 990e6a7e6674..c3b661a666cb 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c | |||
@@ -731,10 +731,10 @@ l1oip_socket_thread(void *data) | |||
731 | while (!signal_pending(current)) { | 731 | while (!signal_pending(current)) { |
732 | struct kvec iov = { | 732 | struct kvec iov = { |
733 | .iov_base = recvbuf, | 733 | .iov_base = recvbuf, |
734 | .iov_len = sizeof(recvbuf), | 734 | .iov_len = recvbuf_size, |
735 | }; | 735 | }; |
736 | recvlen = kernel_recvmsg(socket, &msg, &iov, 1, | 736 | recvlen = kernel_recvmsg(socket, &msg, &iov, 1, |
737 | sizeof(recvbuf), 0); | 737 | recvbuf_size, 0); |
738 | if (recvlen > 0) { | 738 | if (recvlen > 0) { |
739 | l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen); | 739 | l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen); |
740 | } else { | 740 | } else { |
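The sizeof(recvbuf) to recvbuf_size change reads like the classic sizeof-on-a-pointer fix: if recvbuf is (or becomes) a heap allocation rather than an on-stack array, sizeof() yields the pointer size, not the buffer size. A userspace illustration of the pitfall (buffer size invented):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            size_t recvbuf_size = 1500;                     /* invented size */
            unsigned char *recvbuf = malloc(recvbuf_size);

            if (!recvbuf)
                    return 1;

            /* sizeof(recvbuf) is the size of the pointer (4 or 8), not 1500. */
            printf("sizeof(recvbuf) = %zu, recvbuf_size = %zu\n",
                   sizeof(recvbuf), recvbuf_size);

            free(recvbuf);
            return 0;
    }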
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index a6974e9b8ebf..1e2cb846b3c9 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c | |||
@@ -1,6 +1,8 @@ | |||
1 | /*P:400 This contains run_guest() which actually calls into the Host<->Guest | 1 | /*P:400 |
2 | * This contains run_guest() which actually calls into the Host<->Guest | ||
2 | * Switcher and analyzes the return, such as determining if the Guest wants the | 3 | * Switcher and analyzes the return, such as determining if the Guest wants the |
3 | * Host to do something. This file also contains useful helper routines. :*/ | 4 | * Host to do something. This file also contains useful helper routines. |
5 | :*/ | ||
4 | #include <linux/module.h> | 6 | #include <linux/module.h> |
5 | #include <linux/stringify.h> | 7 | #include <linux/stringify.h> |
6 | #include <linux/stddef.h> | 8 | #include <linux/stddef.h> |
@@ -24,7 +26,8 @@ static struct page **switcher_page; | |||
24 | /* This One Big lock protects all inter-guest data structures. */ | 26 | /* This One Big lock protects all inter-guest data structures. */ |
25 | DEFINE_MUTEX(lguest_lock); | 27 | DEFINE_MUTEX(lguest_lock); |
26 | 28 | ||
27 | /*H:010 We need to set up the Switcher at a high virtual address. Remember the | 29 | /*H:010 |
30 | * We need to set up the Switcher at a high virtual address. Remember the | ||
28 | * Switcher is a few hundred bytes of assembler code which actually changes the | 31 | * Switcher is a few hundred bytes of assembler code which actually changes the |
29 | * CPU to run the Guest, and then changes back to the Host when a trap or | 32 | * CPU to run the Guest, and then changes back to the Host when a trap or |
30 | * interrupt happens. | 33 | * interrupt happens. |
@@ -33,7 +36,8 @@ DEFINE_MUTEX(lguest_lock); | |||
33 | * Host since it will be running as the switchover occurs. | 36 | * Host since it will be running as the switchover occurs. |
34 | * | 37 | * |
35 | * Trying to map memory at a particular address is an unusual thing to do, so | 38 | * Trying to map memory at a particular address is an unusual thing to do, so |
36 | * it's not a simple one-liner. */ | 39 | * it's not a simple one-liner. |
40 | */ | ||
37 | static __init int map_switcher(void) | 41 | static __init int map_switcher(void) |
38 | { | 42 | { |
39 | int i, err; | 43 | int i, err; |
@@ -47,8 +51,10 @@ static __init int map_switcher(void) | |||
47 | * easy. | 51 | * easy. |
48 | */ | 52 | */ |
49 | 53 | ||
50 | /* We allocate an array of struct page pointers. map_vm_area() wants | 54 | /* |
51 | * this, rather than just an array of pages. */ | 55 | * We allocate an array of struct page pointers. map_vm_area() wants |
56 | * this, rather than just an array of pages. | ||
57 | */ | ||
52 | switcher_page = kmalloc(sizeof(switcher_page[0])*TOTAL_SWITCHER_PAGES, | 58 | switcher_page = kmalloc(sizeof(switcher_page[0])*TOTAL_SWITCHER_PAGES, |
53 | GFP_KERNEL); | 59 | GFP_KERNEL); |
54 | if (!switcher_page) { | 60 | if (!switcher_page) { |
@@ -56,8 +62,10 @@ static __init int map_switcher(void) | |||
56 | goto out; | 62 | goto out; |
57 | } | 63 | } |
58 | 64 | ||
59 | /* Now we actually allocate the pages. The Guest will see these pages, | 65 | /* |
60 | * so we make sure they're zeroed. */ | 66 | * Now we actually allocate the pages. The Guest will see these pages, |
67 | * so we make sure they're zeroed. | ||
68 | */ | ||
61 | for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) { | 69 | for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) { |
62 | unsigned long addr = get_zeroed_page(GFP_KERNEL); | 70 | unsigned long addr = get_zeroed_page(GFP_KERNEL); |
63 | if (!addr) { | 71 | if (!addr) { |
@@ -67,19 +75,23 @@ static __init int map_switcher(void) | |||
67 | switcher_page[i] = virt_to_page(addr); | 75 | switcher_page[i] = virt_to_page(addr); |
68 | } | 76 | } |
69 | 77 | ||
70 | /* First we check that the Switcher won't overlap the fixmap area at | 78 | /* |
79 | * First we check that the Switcher won't overlap the fixmap area at | ||
71 | * the top of memory. It's currently nowhere near, but it could have | 80 | * the top of memory. It's currently nowhere near, but it could have |
72 | * very strange effects if it ever happened. */ | 81 | * very strange effects if it ever happened. |
82 | */ | ||
73 | if (SWITCHER_ADDR + (TOTAL_SWITCHER_PAGES+1)*PAGE_SIZE > FIXADDR_START){ | 83 | if (SWITCHER_ADDR + (TOTAL_SWITCHER_PAGES+1)*PAGE_SIZE > FIXADDR_START){ |
74 | err = -ENOMEM; | 84 | err = -ENOMEM; |
75 | printk("lguest: mapping switcher would thwack fixmap\n"); | 85 | printk("lguest: mapping switcher would thwack fixmap\n"); |
76 | goto free_pages; | 86 | goto free_pages; |
77 | } | 87 | } |
78 | 88 | ||
79 | /* Now we reserve the "virtual memory area" we want: 0xFFC00000 | 89 | /* |
90 | * Now we reserve the "virtual memory area" we want: 0xFFC00000 | ||
80 | * (SWITCHER_ADDR). We might not get it in theory, but in practice | 91 | * (SWITCHER_ADDR). We might not get it in theory, but in practice |
81 | * it's worked so far. The end address needs +1 because __get_vm_area | 92 | * it's worked so far. The end address needs +1 because __get_vm_area |
82 | * allocates an extra guard page, so we need space for that. */ | 93 | * allocates an extra guard page, so we need space for that. |
94 | */ | ||
83 | switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, | 95 | switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, |
84 | VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR | 96 | VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR |
85 | + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE); | 97 | + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE); |
@@ -89,11 +101,13 @@ static __init int map_switcher(void) | |||
89 | goto free_pages; | 101 | goto free_pages; |
90 | } | 102 | } |
91 | 103 | ||
92 | /* This code actually sets up the pages we've allocated to appear at | 104 | /* |
105 | * This code actually sets up the pages we've allocated to appear at | ||
93 | * SWITCHER_ADDR. map_vm_area() takes the vma we allocated above, the | 106 | * SWITCHER_ADDR. map_vm_area() takes the vma we allocated above, the |
94 | * kind of pages we're mapping (kernel pages), and a pointer to our | 107 | * kind of pages we're mapping (kernel pages), and a pointer to our |
95 | * array of struct pages. It increments that pointer, but we don't | 108 | * array of struct pages. It increments that pointer, but we don't |
96 | * care. */ | 109 | * care. |
110 | */ | ||
97 | pagep = switcher_page; | 111 | pagep = switcher_page; |
98 | err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep); | 112 | err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep); |
99 | if (err) { | 113 | if (err) { |
@@ -101,8 +115,10 @@ static __init int map_switcher(void) | |||
101 | goto free_vma; | 115 | goto free_vma; |
102 | } | 116 | } |
103 | 117 | ||
104 | /* Now the Switcher is mapped at the right address, we can't fail! | 118 | /* |
105 | * Copy in the compiled-in Switcher code (from <arch>_switcher.S). */ | 119 | * Now the Switcher is mapped at the right address, we can't fail! |
120 | * Copy in the compiled-in Switcher code (from <arch>_switcher.S). | ||
121 | */ | ||
106 | memcpy(switcher_vma->addr, start_switcher_text, | 122 | memcpy(switcher_vma->addr, start_switcher_text, |
107 | end_switcher_text - start_switcher_text); | 123 | end_switcher_text - start_switcher_text); |
108 | 124 | ||
@@ -124,8 +140,7 @@ out: | |||
124 | } | 140 | } |
125 | /*:*/ | 141 | /*:*/ |
126 | 142 | ||
127 | /* Cleaning up the mapping when the module is unloaded is almost... | 143 | /* Cleaning up the mapping when the module is unloaded is almost... too easy. */ |
128 | * too easy. */ | ||
129 | static void unmap_switcher(void) | 144 | static void unmap_switcher(void) |
130 | { | 145 | { |
131 | unsigned int i; | 146 | unsigned int i; |
@@ -151,16 +166,19 @@ static void unmap_switcher(void) | |||
151 | * But we can't trust the Guest: it might be trying to access the Launcher | 166 | * But we can't trust the Guest: it might be trying to access the Launcher |
152 | * code. We have to check that the range is below the pfn_limit the Launcher | 167 | * code. We have to check that the range is below the pfn_limit the Launcher |
153 | * gave us. We have to make sure that addr + len doesn't give us a false | 168 | * gave us. We have to make sure that addr + len doesn't give us a false |
154 | * positive by overflowing, too. */ | 169 | * positive by overflowing, too. |
170 | */ | ||
155 | bool lguest_address_ok(const struct lguest *lg, | 171 | bool lguest_address_ok(const struct lguest *lg, |
156 | unsigned long addr, unsigned long len) | 172 | unsigned long addr, unsigned long len) |
157 | { | 173 | { |
158 | return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr); | 174 | return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr); |
159 | } | 175 | } |
160 | 176 | ||
161 | /* This routine copies memory from the Guest. Here we can see how useful the | 177 | /* |
178 | * This routine copies memory from the Guest. Here we can see how useful the | ||
162 | * kill_lguest() routine we met in the Launcher can be: we return a random | 179 | * kill_lguest() routine we met in the Launcher can be: we return a random |
163 | * value (all zeroes) instead of needing to return an error. */ | 180 | * value (all zeroes) instead of needing to return an error. |
181 | */ | ||
164 | void __lgread(struct lg_cpu *cpu, void *b, unsigned long addr, unsigned bytes) | 182 | void __lgread(struct lg_cpu *cpu, void *b, unsigned long addr, unsigned bytes) |
165 | { | 183 | { |
166 | if (!lguest_address_ok(cpu->lg, addr, bytes) | 184 | if (!lguest_address_ok(cpu->lg, addr, bytes) |
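A concrete case for the overflow clause in lguest_address_ok() above: with an address near the top of the address space, addr + len wraps, the divided result looks tiny, and only the addr + len >= addr test rejects it (the pfn_limit below is invented):

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static bool address_ok(unsigned long pfn_limit,
                           unsigned long addr, unsigned long len)
    {
            /* Same shape as lguest_address_ok(): range check plus wrap check. */
            return (addr + len) / PAGE_SIZE < pfn_limit && (addr + len >= addr);
    }

    int main(void)
    {
            unsigned long pfn_limit = 0x10000;      /* invented guest size */

            printf("%d\n", address_ok(pfn_limit, 0x1000, 64));        /* 1 */
            printf("%d\n", address_ok(pfn_limit, ULONG_MAX - 8, 64)); /* 0: wrapped */
            return 0;
    }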
@@ -181,9 +199,11 @@ void __lgwrite(struct lg_cpu *cpu, unsigned long addr, const void *b, | |||
181 | } | 199 | } |
182 | /*:*/ | 200 | /*:*/ |
183 | 201 | ||
184 | /*H:030 Let's jump straight to the main loop which runs the Guest. | 202 | /*H:030 |
203 | * Let's jump straight to the main loop which runs the Guest. | ||

185 | * Remember, this is called by the Launcher reading /dev/lguest, and we keep | 204 | * Remember, this is called by the Launcher reading /dev/lguest, and we keep |
186 | * going around and around until something interesting happens. */ | 205 | * going around and around until something interesting happens. |
206 | */ | ||
187 | int run_guest(struct lg_cpu *cpu, unsigned long __user *user) | 207 | int run_guest(struct lg_cpu *cpu, unsigned long __user *user) |
188 | { | 208 | { |
189 | /* We stop running once the Guest is dead. */ | 209 | /* We stop running once the Guest is dead. */ |
@@ -195,10 +215,17 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) | |||
195 | if (cpu->hcall) | 215 | if (cpu->hcall) |
196 | do_hypercalls(cpu); | 216 | do_hypercalls(cpu); |
197 | 217 | ||
198 | /* It's possible the Guest did a NOTIFY hypercall to the | 218 | /* |
199 | * Launcher, in which case we return from the read() now. */ | 219 | * It's possible the Guest did a NOTIFY hypercall to the |
220 | * Launcher. | ||
221 | */ | ||
200 | if (cpu->pending_notify) { | 222 | if (cpu->pending_notify) { |
223 | /* | ||
224 | * Does it just need to write to a registered | ||
225 | * eventfd (ie. the appropriate virtqueue thread)? | ||
226 | */ | ||
201 | if (!send_notify_to_eventfd(cpu)) { | 227 | if (!send_notify_to_eventfd(cpu)) { |
228 | /* OK, we tell the main Launcher. */ | ||
202 | if (put_user(cpu->pending_notify, user)) | 229 | if (put_user(cpu->pending_notify, user)) |
203 | return -EFAULT; | 230 | return -EFAULT; |
204 | return sizeof(cpu->pending_notify); | 231 | return sizeof(cpu->pending_notify); |
@@ -209,29 +236,39 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) | |||
209 | if (signal_pending(current)) | 236 | if (signal_pending(current)) |
210 | return -ERESTARTSYS; | 237 | return -ERESTARTSYS; |
211 | 238 | ||
212 | /* Check if there are any interrupts which can be delivered now: | 239 | /* |
240 | * Check if there are any interrupts which can be delivered now: | ||
213 | * if so, this sets up the handler to be executed when we next | 241 | * if so, this sets up the handler to be executed when we next |
214 | * run the Guest. */ | 242 | * run the Guest. |
243 | */ | ||
215 | irq = interrupt_pending(cpu, &more); | 244 | irq = interrupt_pending(cpu, &more); |
216 | if (irq < LGUEST_IRQS) | 245 | if (irq < LGUEST_IRQS) |
217 | try_deliver_interrupt(cpu, irq, more); | 246 | try_deliver_interrupt(cpu, irq, more); |
218 | 247 | ||
219 | /* All long-lived kernel loops need to check with this horrible | 248 | /* |
249 | * All long-lived kernel loops need to check with this horrible | ||
220 | * thing called the freezer. If the Host is trying to suspend, | 250 | * thing called the freezer. If the Host is trying to suspend, |
221 | * it stops us. */ | 251 | * it stops us. |
252 | */ | ||
222 | try_to_freeze(); | 253 | try_to_freeze(); |
223 | 254 | ||
224 | /* Just make absolutely sure the Guest is still alive. One of | 255 | /* |
225 | * those hypercalls could have been fatal, for example. */ | 256 | * Just make absolutely sure the Guest is still alive. One of |
257 | * those hypercalls could have been fatal, for example. | ||
258 | */ | ||
226 | if (cpu->lg->dead) | 259 | if (cpu->lg->dead) |
227 | break; | 260 | break; |
228 | 261 | ||
229 | /* If the Guest asked to be stopped, we sleep. The Guest's | 262 | /* |
230 | * clock timer will wake us. */ | 263 | * If the Guest asked to be stopped, we sleep. The Guest's |
264 | * clock timer will wake us. | ||
265 | */ | ||
231 | if (cpu->halted) { | 266 | if (cpu->halted) { |
232 | set_current_state(TASK_INTERRUPTIBLE); | 267 | set_current_state(TASK_INTERRUPTIBLE); |
233 | /* Just before we sleep, make sure no interrupt snuck in | 268 | /* |
234 | * which we should be doing. */ | 269 | * Just before we sleep, make sure no interrupt snuck in |
270 | * which we should be doing. | ||
271 | */ | ||
235 | if (interrupt_pending(cpu, &more) < LGUEST_IRQS) | 272 | if (interrupt_pending(cpu, &more) < LGUEST_IRQS) |
236 | set_current_state(TASK_RUNNING); | 273 | set_current_state(TASK_RUNNING); |
237 | else | 274 | else |
@@ -239,8 +276,10 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) | |||
239 | continue; | 276 | continue; |
240 | } | 277 | } |
241 | 278 | ||
242 | /* OK, now we're ready to jump into the Guest. First we put up | 279 | /* |
243 | * the "Do Not Disturb" sign: */ | 280 | * OK, now we're ready to jump into the Guest. First we put up |
281 | * the "Do Not Disturb" sign: | ||
282 | */ | ||
244 | local_irq_disable(); | 283 | local_irq_disable(); |
245 | 284 | ||
246 | /* Actually run the Guest until something happens. */ | 285 | /* Actually run the Guest until something happens. */ |
@@ -327,8 +366,10 @@ static void __exit fini(void) | |||
327 | } | 366 | } |
328 | /*:*/ | 367 | /*:*/ |
329 | 368 | ||
330 | /* The Host side of lguest can be a module. This is a nice way for people to | 369 | /* |
331 | * play with it. */ | 370 | * The Host side of lguest can be a module. This is a nice way for people to |
371 | * play with it. | ||
372 | */ | ||
332 | module_init(init); | 373 | module_init(init); |
333 | module_exit(fini); | 374 | module_exit(fini); |
334 | MODULE_LICENSE("GPL"); | 375 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c index c29ffa19cb74..83511eb0923d 100644 --- a/drivers/lguest/hypercalls.c +++ b/drivers/lguest/hypercalls.c | |||
@@ -1,8 +1,10 @@ | |||
1 | /*P:500 Just as userspace programs request kernel operations through a system | 1 | /*P:500 |
2 | * Just as userspace programs request kernel operations through a system | ||
2 | * call, the Guest requests Host operations through a "hypercall". You might | 3 | * call, the Guest requests Host operations through a "hypercall". You might |
3 | * notice this nomenclature doesn't really follow any logic, but the name has | 4 | * notice this nomenclature doesn't really follow any logic, but the name has |
4 | * been around for long enough that we're stuck with it. As you'd expect, this | 5 | * been around for long enough that we're stuck with it. As you'd expect, this |
5 | * code is basically a one big switch statement. :*/ | 6 | * code is basically a one big switch statement. |
7 | :*/ | ||
6 | 8 | ||
7 | /* Copyright (C) 2006 Rusty Russell IBM Corporation | 9 | /* Copyright (C) 2006 Rusty Russell IBM Corporation |
8 | 10 | ||
@@ -28,30 +30,41 @@ | |||
28 | #include <asm/pgtable.h> | 30 | #include <asm/pgtable.h> |
29 | #include "lg.h" | 31 | #include "lg.h" |
30 | 32 | ||
31 | /*H:120 This is the core hypercall routine: where the Guest gets what it wants. | 33 | /*H:120 |
32 | * Or gets killed. Or, in the case of LHCALL_SHUTDOWN, both. */ | 34 | * This is the core hypercall routine: where the Guest gets what it wants. |
35 | * Or gets killed. Or, in the case of LHCALL_SHUTDOWN, both. | ||
36 | */ | ||
33 | static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) | 37 | static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) |
34 | { | 38 | { |
35 | switch (args->arg0) { | 39 | switch (args->arg0) { |
36 | case LHCALL_FLUSH_ASYNC: | 40 | case LHCALL_FLUSH_ASYNC: |
37 | /* This call does nothing, except by breaking out of the Guest | 41 | /* |
38 | * it makes us process all the asynchronous hypercalls. */ | 42 | * This call does nothing, except by breaking out of the Guest |
43 | * it makes us process all the asynchronous hypercalls. | ||
44 | */ | ||
39 | break; | 45 | break; |
40 | case LHCALL_SEND_INTERRUPTS: | 46 | case LHCALL_SEND_INTERRUPTS: |
41 | /* This call does nothing too, but by breaking out of the Guest | 47 | /* |
42 | * it makes us process any pending interrupts. */ | 48 | * This call does nothing too, but by breaking out of the Guest |
49 | * it makes us process any pending interrupts. | ||
50 | */ | ||
43 | break; | 51 | break; |
44 | case LHCALL_LGUEST_INIT: | 52 | case LHCALL_LGUEST_INIT: |
45 | /* You can't get here unless you're already initialized. Don't | 53 | /* |
46 | * do that. */ | 54 | * You can't get here unless you're already initialized. Don't |
55 | * do that. | ||
56 | */ | ||
47 | kill_guest(cpu, "already have lguest_data"); | 57 | kill_guest(cpu, "already have lguest_data"); |
48 | break; | 58 | break; |
49 | case LHCALL_SHUTDOWN: { | 59 | case LHCALL_SHUTDOWN: { |
50 | /* Shutdown is such a trivial hypercall that we do it in four | ||
51 | * lines right here. */ | ||
52 | char msg[128]; | 60 | char msg[128]; |
53 | /* If the lgread fails, it will call kill_guest() itself; the | 61 | /* |
54 | * kill_guest() with the message will be ignored. */ | 62 | * Shutdown is such a trivial hypercall that we do it in five |
63 | * lines right here. | ||
64 | * | ||
65 | * If the lgread fails, it will call kill_guest() itself; the | ||
66 | * kill_guest() with the message will be ignored. | ||
67 | */ | ||
55 | __lgread(cpu, msg, args->arg1, sizeof(msg)); | 68 | __lgread(cpu, msg, args->arg1, sizeof(msg)); |
56 | msg[sizeof(msg)-1] = '\0'; | 69 | msg[sizeof(msg)-1] = '\0'; |
57 | kill_guest(cpu, "CRASH: %s", msg); | 70 | kill_guest(cpu, "CRASH: %s", msg); |
@@ -60,16 +73,17 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) | |||
60 | break; | 73 | break; |
61 | } | 74 | } |
62 | case LHCALL_FLUSH_TLB: | 75 | case LHCALL_FLUSH_TLB: |
63 | /* FLUSH_TLB comes in two flavors, depending on the | 76 | /* FLUSH_TLB comes in two flavors, depending on the argument: */ |
64 | * argument: */ | ||
65 | if (args->arg1) | 77 | if (args->arg1) |
66 | guest_pagetable_clear_all(cpu); | 78 | guest_pagetable_clear_all(cpu); |
67 | else | 79 | else |
68 | guest_pagetable_flush_user(cpu); | 80 | guest_pagetable_flush_user(cpu); |
69 | break; | 81 | break; |
70 | 82 | ||
71 | /* All these calls simply pass the arguments through to the right | 83 | /* |
72 | * routines. */ | 84 | * All these calls simply pass the arguments through to the right |
85 | * routines. | ||
86 | */ | ||
73 | case LHCALL_NEW_PGTABLE: | 87 | case LHCALL_NEW_PGTABLE: |
74 | guest_new_pagetable(cpu, args->arg1); | 88 | guest_new_pagetable(cpu, args->arg1); |
75 | break; | 89 | break; |
@@ -112,15 +126,16 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) | |||
112 | kill_guest(cpu, "Bad hypercall %li\n", args->arg0); | 126 | kill_guest(cpu, "Bad hypercall %li\n", args->arg0); |
113 | } | 127 | } |
114 | } | 128 | } |
115 | /*:*/ | ||
116 | 129 | ||
117 | /*H:124 Asynchronous hypercalls are easy: we just look in the array in the | 130 | /*H:124 |
131 | * Asynchronous hypercalls are easy: we just look in the array in the | ||
118 | * Guest's "struct lguest_data" to see if any new ones are marked "ready". | 132 | * Guest's "struct lguest_data" to see if any new ones are marked "ready". |
119 | * | 133 | * |
120 | * We are careful to do these in order: obviously we respect the order the | 134 | * We are careful to do these in order: obviously we respect the order the |
121 | * Guest put them in the ring, but we also promise the Guest that they will | 135 | * Guest put them in the ring, but we also promise the Guest that they will |
122 | * happen before any normal hypercall (which is why we check this before | 136 | * happen before any normal hypercall (which is why we check this before |
123 | * checking for a normal hcall). */ | 137 | * checking for a normal hcall). |
138 | */ | ||
124 | static void do_async_hcalls(struct lg_cpu *cpu) | 139 | static void do_async_hcalls(struct lg_cpu *cpu) |
125 | { | 140 | { |
126 | unsigned int i; | 141 | unsigned int i; |
@@ -133,22 +148,28 @@ static void do_async_hcalls(struct lg_cpu *cpu) | |||
133 | /* We process "struct lguest_data"s hcalls[] ring once. */ | 148 | /* We process "struct lguest_data"s hcalls[] ring once. */ |
134 | for (i = 0; i < ARRAY_SIZE(st); i++) { | 149 | for (i = 0; i < ARRAY_SIZE(st); i++) { |
135 | struct hcall_args args; | 150 | struct hcall_args args; |
136 | /* We remember where we were up to from last time. This makes | 151 | /* |
152 | * We remember where we were up to from last time. This makes | ||
137 | * sure that the hypercalls are done in the order the Guest | 153 | * sure that the hypercalls are done in the order the Guest |
138 | * places them in the ring. */ | 154 | * places them in the ring. |
155 | */ | ||
139 | unsigned int n = cpu->next_hcall; | 156 | unsigned int n = cpu->next_hcall; |
140 | 157 | ||
141 | /* 0xFF means there's no call here (yet). */ | 158 | /* 0xFF means there's no call here (yet). */ |
142 | if (st[n] == 0xFF) | 159 | if (st[n] == 0xFF) |
143 | break; | 160 | break; |
144 | 161 | ||
145 | /* OK, we have a hypercall. Increment the "next_hcall" cursor, | 162 | /* |
163 | * OK, we have a hypercall. Increment the "next_hcall" cursor, | ||
164 | * and wrap back to 0 if we reach the end. | ||
165 | */ | ||
147 | if (++cpu->next_hcall == LHCALL_RING_SIZE) | 166 | if (++cpu->next_hcall == LHCALL_RING_SIZE) |
148 | cpu->next_hcall = 0; | 167 | cpu->next_hcall = 0; |
149 | 168 | ||
150 | /* Copy the hypercall arguments into a local copy of | 169 | /* |
151 | * the hcall_args struct. */ | 170 | * Copy the hypercall arguments into a local copy of the |
171 | * hcall_args struct. | ||
172 | */ | ||
152 | if (copy_from_user(&args, &cpu->lg->lguest_data->hcalls[n], | 173 | if (copy_from_user(&args, &cpu->lg->lguest_data->hcalls[n], |
153 | sizeof(struct hcall_args))) { | 174 | sizeof(struct hcall_args))) { |
154 | kill_guest(cpu, "Fetching async hypercalls"); | 175 | kill_guest(cpu, "Fetching async hypercalls"); |
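A toy model of the ring walk described in the comments above: 0xFF marks an empty slot and the next_hcall cursor wraps at the ring size. The ring size and entries here are invented; only the walking pattern matches the driver:

    #include <stdio.h>

    #define RING_SIZE 4             /* toy size, not the real LHCALL_RING_SIZE */

    int main(void)
    {
            unsigned char st[RING_SIZE] = { 7, 9, 0xFF, 0xFF }; /* two calls ready */
            unsigned int next_hcall = 0;    /* persists between passes in the driver */
            unsigned int i;

            for (i = 0; i < RING_SIZE; i++) {
                    unsigned int n = next_hcall;

                    if (st[n] == 0xFF)      /* no call here (yet) */
                            break;

                    if (++next_hcall == RING_SIZE)
                            next_hcall = 0; /* wrap the cursor */

                    printf("servicing async hypercall %u from slot %u\n", st[n], n);
                    st[n] = 0xFF;           /* mark the slot free again */
            }
            return 0;
    }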
@@ -164,19 +185,25 @@ static void do_async_hcalls(struct lg_cpu *cpu) | |||
164 | break; | 185 | break; |
165 | } | 186 | } |
166 | 187 | ||
167 | /* Stop doing hypercalls if they want to notify the Launcher: | 188 | /* |
168 | * it needs to service this first. */ | 189 | * Stop doing hypercalls if they want to notify the Launcher: |
190 | * it needs to service this first. | ||
191 | */ | ||
169 | if (cpu->pending_notify) | 192 | if (cpu->pending_notify) |
170 | break; | 193 | break; |
171 | } | 194 | } |
172 | } | 195 | } |
173 | 196 | ||
174 | /* Last of all, we look at what happens first of all. The very first time the | 197 | /* |
175 | * Guest makes a hypercall, we end up here to set things up: */ | 198 | * Last of all, we look at what happens first of all. The very first time the |
199 | * Guest makes a hypercall, we end up here to set things up: | ||
200 | */ | ||
176 | static void initialize(struct lg_cpu *cpu) | 201 | static void initialize(struct lg_cpu *cpu) |
177 | { | 202 | { |
178 | /* You can't do anything until you're initialized. The Guest knows the | 203 | /* |
179 | * rules, so we're unforgiving here. */ | 204 | * You can't do anything until you're initialized. The Guest knows the |
205 | * rules, so we're unforgiving here. | ||
206 | */ | ||
180 | if (cpu->hcall->arg0 != LHCALL_LGUEST_INIT) { | 207 | if (cpu->hcall->arg0 != LHCALL_LGUEST_INIT) { |
181 | kill_guest(cpu, "hypercall %li before INIT", cpu->hcall->arg0); | 208 | kill_guest(cpu, "hypercall %li before INIT", cpu->hcall->arg0); |
182 | return; | 209 | return; |
@@ -185,32 +212,44 @@ static void initialize(struct lg_cpu *cpu) | |||
185 | if (lguest_arch_init_hypercalls(cpu)) | 212 | if (lguest_arch_init_hypercalls(cpu)) |
186 | kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); | 213 | kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); |
187 | 214 | ||
188 | /* The Guest tells us where we're not to deliver interrupts by putting | 215 | /* |
189 | * the range of addresses into "struct lguest_data". */ | 216 | * The Guest tells us where we're not to deliver interrupts by putting |
217 | * the range of addresses into "struct lguest_data". | ||
218 | */ | ||
190 | if (get_user(cpu->lg->noirq_start, &cpu->lg->lguest_data->noirq_start) | 219 | if (get_user(cpu->lg->noirq_start, &cpu->lg->lguest_data->noirq_start) |
191 | || get_user(cpu->lg->noirq_end, &cpu->lg->lguest_data->noirq_end)) | 220 | || get_user(cpu->lg->noirq_end, &cpu->lg->lguest_data->noirq_end)) |
192 | kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); | 221 | kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); |
193 | 222 | ||
194 | /* We write the current time into the Guest's data page once so it can | 223 | /* |
195 | * set its clock. */ | 224 | * We write the current time into the Guest's data page once so it can |
225 | * set its clock. | ||
226 | */ | ||
196 | write_timestamp(cpu); | 227 | write_timestamp(cpu); |
197 | 228 | ||
198 | /* page_tables.c will also do some setup. */ | 229 | /* page_tables.c will also do some setup. */ |
199 | page_table_guest_data_init(cpu); | 230 | page_table_guest_data_init(cpu); |
200 | 231 | ||
201 | /* This is the one case where the above accesses might have been the | 232 | /* |
233 | * This is the one case where the above accesses might have been the | ||
202 | * first write to a Guest page. This may have caused a copy-on-write | 234 | * first write to a Guest page. This may have caused a copy-on-write |
203 | * fault, but the old page might be (read-only) in the Guest | 235 | * fault, but the old page might be (read-only) in the Guest |
204 | * pagetable. */ | 236 | * pagetable. |
237 | */ | ||
205 | guest_pagetable_clear_all(cpu); | 238 | guest_pagetable_clear_all(cpu); |
206 | } | 239 | } |
207 | /*:*/ | 240 | /*:*/ |
208 | 241 | ||
209 | /*M:013 If a Guest reads from a page (so creates a mapping) that it has never | 242 | /*M:013 |
243 | * If a Guest reads from a page (so creates a mapping) that it has never | ||
210 | * written to, and then the Launcher writes to it (ie. the output of a virtual | 244 | * written to, and then the Launcher writes to it (ie. the output of a virtual |
211 | * device), the Guest will still see the old page. In practice, this never | 245 | * device), the Guest will still see the old page. In practice, this never |
212 | * happens: why would the Guest read a page which it has never written to? But | 246 | * happens: why would the Guest read a page which it has never written to? But |
213 | * a similar scenario might one day bite us, so it's worth mentioning. :*/ | 247 | * a similar scenario might one day bite us, so it's worth mentioning. |
248 | * | ||
249 | * Note that if we used a shared anonymous mapping in the Launcher instead of | ||
250 | * mapping /dev/zero private, we wouldn't worry about copy-on-write. And we | ||
251 | * need that to switch the Launcher to processes (away from threads) anyway. | ||
252 | :*/ | ||
214 | 253 | ||
215 | /*H:100 | 254 | /*H:100 |
216 | * Hypercalls | 255 | * Hypercalls |
@@ -229,17 +268,22 @@ void do_hypercalls(struct lg_cpu *cpu) | |||
229 | return; | 268 | return; |
230 | } | 269 | } |
231 | 270 | ||
232 | /* The Guest has initialized. | 271 | /* |
272 | * The Guest has initialized. | ||
233 | * | 273 | * |
234 | * Look in the hypercall ring for the async hypercalls: */ | 274 | * Look in the hypercall ring for the async hypercalls: |
275 | */ | ||
235 | do_async_hcalls(cpu); | 276 | do_async_hcalls(cpu); |
236 | 277 | ||
237 | /* If we stopped reading the hypercall ring because the Guest did a | 278 | /* |
279 | * If we stopped reading the hypercall ring because the Guest did a | ||
238 | * NOTIFY to the Launcher, we want to return now. Otherwise we do | 280 | * NOTIFY to the Launcher, we want to return now. Otherwise we do |
239 | * the hypercall. */ | 281 | * the hypercall. |
282 | */ | ||
240 | if (!cpu->pending_notify) { | 283 | if (!cpu->pending_notify) { |
241 | do_hcall(cpu, cpu->hcall); | 284 | do_hcall(cpu, cpu->hcall); |
242 | /* Tricky point: we reset the hcall pointer to mark the | 285 | /* |
286 | * Tricky point: we reset the hcall pointer to mark the | ||
243 | * hypercall as "done". We use the hcall pointer rather than | 287 | * hypercall as "done". We use the hcall pointer rather than |
244 | * the trap number to indicate a hypercall is pending. | 288 | * the trap number to indicate a hypercall is pending. |
245 | * Normally it doesn't matter: the Guest will run again and | 289 | * Normally it doesn't matter: the Guest will run again and |
@@ -248,13 +292,16 @@ void do_hypercalls(struct lg_cpu *cpu) | |||
248 | * However, if we are signalled or the Guest sends I/O to the | 292 | * However, if we are signalled or the Guest sends I/O to the |
249 | * Launcher, the run_guest() loop will exit without running the | 293 | * Launcher, the run_guest() loop will exit without running the |
250 | * Guest. When it comes back it would try to re-run the | 294 | * Guest. When it comes back it would try to re-run the |
251 | * hypercall. Finding that bug sucked. */ | 295 | * hypercall. Finding that bug sucked. |
296 | */ | ||
252 | cpu->hcall = NULL; | 297 | cpu->hcall = NULL; |
253 | } | 298 | } |
254 | } | 299 | } |
255 | 300 | ||
256 | /* This routine supplies the Guest with time: it's used for wallclock time at | 301 | /* |
257 | * initial boot and as a rough time source if the TSC isn't available. */ | 302 | * This routine supplies the Guest with time: it's used for wallclock time at |
303 | * initial boot and as a rough time source if the TSC isn't available. | ||
304 | */ | ||
258 | void write_timestamp(struct lg_cpu *cpu) | 305 | void write_timestamp(struct lg_cpu *cpu) |
259 | { | 306 | { |
260 | struct timespec now; | 307 | struct timespec now; |
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c index 0e9067b0d507..18648180db02 100644 --- a/drivers/lguest/interrupts_and_traps.c +++ b/drivers/lguest/interrupts_and_traps.c | |||
@@ -1,4 +1,5 @@ | |||
1 | /*P:800 Interrupts (traps) are complicated enough to earn their own file. | 1 | /*P:800 |
2 | * Interrupts (traps) are complicated enough to earn their own file. | ||
2 | * There are three classes of interrupts: | 3 | * There are three classes of interrupts: |
3 | * | 4 | * |
4 | * 1) Real hardware interrupts which occur while we're running the Guest, | 5 | * 1) Real hardware interrupts which occur while we're running the Guest, |
@@ -10,7 +11,8 @@ | |||
10 | * just like real hardware would deliver them. Traps from the Guest can be set | 11 | * just like real hardware would deliver them. Traps from the Guest can be set |
11 | * up to go directly back into the Guest, but sometimes the Host wants to see | 12 | * up to go directly back into the Guest, but sometimes the Host wants to see |
12 | * them first, so we also have a way of "reflecting" them into the Guest as if | 13 | * them first, so we also have a way of "reflecting" them into the Guest as if |
13 | * they had been delivered to it directly. :*/ | 14 | * they had been delivered to it directly. |
15 | :*/ | ||
14 | #include <linux/uaccess.h> | 16 | #include <linux/uaccess.h> |
15 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
16 | #include <linux/module.h> | 18 | #include <linux/module.h> |
@@ -26,8 +28,10 @@ static unsigned long idt_address(u32 lo, u32 hi) | |||
26 | return (lo & 0x0000FFFF) | (hi & 0xFFFF0000); | 28 | return (lo & 0x0000FFFF) | (hi & 0xFFFF0000); |
27 | } | 29 | } |
28 | 30 | ||
29 | /* The "type" of the interrupt handler is a 4 bit field: we only support a | 31 | /* |
30 | * couple of types. */ | 32 | * The "type" of the interrupt handler is a 4 bit field: we only support a |
33 | * couple of types. | ||
34 | */ | ||
31 | static int idt_type(u32 lo, u32 hi) | 35 | static int idt_type(u32 lo, u32 hi) |
32 | { | 36 | { |
33 | return (hi >> 8) & 0xF; | 37 | return (hi >> 8) & 0xF; |
@@ -39,8 +43,10 @@ static bool idt_present(u32 lo, u32 hi) | |||
39 | return (hi & 0x8000); | 43 | return (hi & 0x8000); |
40 | } | 44 | } |
41 | 45 | ||
42 | /* We need a helper to "push" a value onto the Guest's stack, since that's a | 46 | /* |
43 | * big part of what delivering an interrupt does. */ | 47 | * We need a helper to "push" a value onto the Guest's stack, since that's a |
48 | * big part of what delivering an interrupt does. | ||
49 | */ | ||
44 | static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val) | 50 | static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val) |
45 | { | 51 | { |
46 | /* Stack grows upwards: move stack then write value. */ | 52 | /* Stack grows upwards: move stack then write value. */ |
@@ -48,7 +54,8 @@ static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val) | |||
48 | lgwrite(cpu, *gstack, u32, val); | 54 | lgwrite(cpu, *gstack, u32, val); |
49 | } | 55 | } |
50 | 56 | ||
51 | /*H:210 The set_guest_interrupt() routine actually delivers the interrupt or | 57 | /*H:210 |
58 | * The set_guest_interrupt() routine actually delivers the interrupt or | ||
52 | * trap. The mechanics of delivering traps and interrupts to the Guest are the | 59 | * trap. The mechanics of delivering traps and interrupts to the Guest are the |
53 | * same, except some traps have an "error code" which gets pushed onto the | 60 | * same, except some traps have an "error code" which gets pushed onto the |
54 | * stack as well: the caller tells us if this is one. | 61 | * stack as well: the caller tells us if this is one. |
@@ -59,7 +66,8 @@ static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val) | |||
59 | * | 66 | * |
60 | * We set up the stack just like the CPU does for a real interrupt, so it's | 67 | * We set up the stack just like the CPU does for a real interrupt, so it's |
61 | * identical for the Guest (and the standard "iret" instruction will undo | 68 | * identical for the Guest (and the standard "iret" instruction will undo |
62 | * it). */ | 69 | * it). |
70 | */ | ||
63 | static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, | 71 | static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, |
64 | bool has_err) | 72 | bool has_err) |
65 | { | 73 | { |
@@ -67,20 +75,26 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, | |||
67 | u32 eflags, ss, irq_enable; | 75 | u32 eflags, ss, irq_enable; |
68 | unsigned long virtstack; | 76 | unsigned long virtstack; |
69 | 77 | ||
70 | /* There are two cases for interrupts: one where the Guest is already | 78 | /* |
79 | * There are two cases for interrupts: one where the Guest is already | ||
71 | * in the kernel, and a more complex one where the Guest is in | 80 | * in the kernel, and a more complex one where the Guest is in |
72 | * userspace. We check the privilege level to find out. */ | 81 | * userspace. We check the privilege level to find out. |
82 | */ | ||
73 | if ((cpu->regs->ss&0x3) != GUEST_PL) { | 83 | if ((cpu->regs->ss&0x3) != GUEST_PL) { |
74 | /* The Guest told us their kernel stack with the SET_STACK | 84 | /* |
75 | * hypercall: both the virtual address and the segment */ | 85 | * The Guest told us their kernel stack with the SET_STACK |
86 | * hypercall: both the virtual address and the segment. | ||
87 | */ | ||
76 | virtstack = cpu->esp1; | 88 | virtstack = cpu->esp1; |
77 | ss = cpu->ss1; | 89 | ss = cpu->ss1; |
78 | 90 | ||
79 | origstack = gstack = guest_pa(cpu, virtstack); | 91 | origstack = gstack = guest_pa(cpu, virtstack); |
80 | /* We push the old stack segment and pointer onto the new | 92 | /* |
93 | * We push the old stack segment and pointer onto the new | ||
81 | * stack: when the Guest does an "iret" back from the interrupt | 94 | * stack: when the Guest does an "iret" back from the interrupt |
82 | * handler the CPU will notice they're dropping privilege | 95 | * handler the CPU will notice they're dropping privilege |
83 | * levels and expect these here. */ | 96 | * levels and expect these here. |
97 | */ | ||
84 | push_guest_stack(cpu, &gstack, cpu->regs->ss); | 98 | push_guest_stack(cpu, &gstack, cpu->regs->ss); |
85 | push_guest_stack(cpu, &gstack, cpu->regs->esp); | 99 | push_guest_stack(cpu, &gstack, cpu->regs->esp); |
86 | } else { | 100 | } else { |
@@ -91,18 +105,22 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, | |||
91 | origstack = gstack = guest_pa(cpu, virtstack); | 105 | origstack = gstack = guest_pa(cpu, virtstack); |
92 | } | 106 | } |
93 | 107 | ||
94 | /* Remember that we never let the Guest actually disable interrupts, so | 108 | /* |
109 | * Remember that we never let the Guest actually disable interrupts, so | ||
95 | * the "Interrupt Flag" bit is always set. We copy that bit from the | 110 | * the "Interrupt Flag" bit is always set. We copy that bit from the |
96 | * Guest's "irq_enabled" field into the eflags word: we saw the Guest | 111 | * Guest's "irq_enabled" field into the eflags word: we saw the Guest |
97 | * copy it back in "lguest_iret". */ | 112 | * copy it back in "lguest_iret". |
113 | */ | ||
98 | eflags = cpu->regs->eflags; | 114 | eflags = cpu->regs->eflags; |
99 | if (get_user(irq_enable, &cpu->lg->lguest_data->irq_enabled) == 0 | 115 | if (get_user(irq_enable, &cpu->lg->lguest_data->irq_enabled) == 0 |
100 | && !(irq_enable & X86_EFLAGS_IF)) | 116 | && !(irq_enable & X86_EFLAGS_IF)) |
101 | eflags &= ~X86_EFLAGS_IF; | 117 | eflags &= ~X86_EFLAGS_IF; |
102 | 118 | ||
103 | /* An interrupt is expected to push three things on the stack: the old | 119 | /* |
120 | * An interrupt is expected to push three things on the stack: the old | ||
104 | * "eflags" word, the old code segment, and the old instruction | 121 | * "eflags" word, the old code segment, and the old instruction |
105 | * pointer. */ | 122 | * pointer. |
123 | */ | ||
106 | push_guest_stack(cpu, &gstack, eflags); | 124 | push_guest_stack(cpu, &gstack, eflags); |
107 | push_guest_stack(cpu, &gstack, cpu->regs->cs); | 125 | push_guest_stack(cpu, &gstack, cpu->regs->cs); |
108 | push_guest_stack(cpu, &gstack, cpu->regs->eip); | 126 | push_guest_stack(cpu, &gstack, cpu->regs->eip); |
@@ -111,15 +129,19 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, | |||
111 | if (has_err) | 129 | if (has_err) |
112 | push_guest_stack(cpu, &gstack, cpu->regs->errcode); | 130 | push_guest_stack(cpu, &gstack, cpu->regs->errcode); |
113 | 131 | ||
114 | /* Now we've pushed all the old state, we change the stack, the code | 132 | /* |
115 | * segment and the address to execute. */ | 133 | * Now we've pushed all the old state, we change the stack, the code |
134 | * segment and the address to execute. | ||
135 | */ | ||
116 | cpu->regs->ss = ss; | 136 | cpu->regs->ss = ss; |
117 | cpu->regs->esp = virtstack + (gstack - origstack); | 137 | cpu->regs->esp = virtstack + (gstack - origstack); |
118 | cpu->regs->cs = (__KERNEL_CS|GUEST_PL); | 138 | cpu->regs->cs = (__KERNEL_CS|GUEST_PL); |
119 | cpu->regs->eip = idt_address(lo, hi); | 139 | cpu->regs->eip = idt_address(lo, hi); |
120 | 140 | ||
121 | /* There are two kinds of interrupt handlers: 0xE is an "interrupt | 141 | /* |
122 | * gate" which expects interrupts to be disabled on entry. */ | 142 | * There are two kinds of interrupt handlers: 0xE is an "interrupt |
143 | * gate" which expects interrupts to be disabled on entry. | ||
144 | */ | ||
123 | if (idt_type(lo, hi) == 0xE) | 145 | if (idt_type(lo, hi) == 0xE) |
124 | if (put_user(0, &cpu->lg->lguest_data->irq_enabled)) | 146 | if (put_user(0, &cpu->lg->lguest_data->irq_enabled)) |
125 | kill_guest(cpu, "Disabling interrupts"); | 147 | kill_guest(cpu, "Disabling interrupts"); |
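All values below are invented; the sketch only shows the order in which set_guest_interrupt(), per the comments above, pushes state onto the Guest stack: old ss and esp when coming from userspace, then eflags, cs and eip, then an error code only for the traps that have one:

    #include <stdio.h>

    #define FRAME_MAX 8

    static void push(unsigned int *stack, unsigned int *top, unsigned int val)
    {
            /* Mirror push_guest_stack(): move the stack pointer, then write. */
            *top -= 1;
            stack[*top] = val;
    }

    int main(void)
    {
            unsigned int stack[FRAME_MAX];
            unsigned int top = FRAME_MAX;
            unsigned int i;
            int from_userspace = 1, has_err = 0;

            if (from_userspace) {                   /* privilege change */
                    push(stack, &top, 0x2b);        /* old ss  (invented) */
                    push(stack, &top, 0xbfff0000);  /* old esp (invented) */
            }
            push(stack, &top, 0x246);               /* eflags, IF as the Guest sees it */
            push(stack, &top, 0x61);                /* old cs  (invented) */
            push(stack, &top, 0xc0101234);          /* old eip (invented) */
            if (has_err)
                    push(stack, &top, 0);           /* error code, some traps only */

            for (i = top; i < FRAME_MAX; i++)
                    printf("frame[%u] = %#x\n", i, stack[i]);
            return 0;
    }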
@@ -130,7 +152,8 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, | |||
130 | * | 152 | * |
131 | * interrupt_pending() returns the first pending interrupt which isn't blocked | 153 | * interrupt_pending() returns the first pending interrupt which isn't blocked |
132 | * by the Guest. It is called before every entry to the Guest, and just before | 154 | * by the Guest. It is called before every entry to the Guest, and just before |
133 | * we go to sleep when the Guest has halted itself. */ | 155 | * we go to sleep when the Guest has halted itself. |
156 | */ | ||
134 | unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more) | 157 | unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more) |
135 | { | 158 | { |
136 | unsigned int irq; | 159 | unsigned int irq; |
@@ -140,8 +163,10 @@ unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more) | |||
140 | if (!cpu->lg->lguest_data) | 163 | if (!cpu->lg->lguest_data) |
141 | return LGUEST_IRQS; | 164 | return LGUEST_IRQS; |
142 | 165 | ||
143 | /* Take our "irqs_pending" array and remove any interrupts the Guest | 166 | /* |
144 | * wants blocked: the result ends up in "blk". */ | 167 | * Take our "irqs_pending" array and remove any interrupts the Guest |
168 | * wants blocked: the result ends up in "blk". | ||
169 | */ | ||
145 | if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts, | 170 | if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts, |
146 | sizeof(blk))) | 171 | sizeof(blk))) |
147 | return LGUEST_IRQS; | 172 | return LGUEST_IRQS; |
@@ -154,16 +179,20 @@ unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more) | |||
154 | return irq; | 179 | return irq; |
155 | } | 180 | } |
156 | 181 | ||
157 | /* This actually diverts the Guest to running an interrupt handler, once an | 182 | /* |
158 | * interrupt has been identified by interrupt_pending(). */ | 183 | * This actually diverts the Guest to running an interrupt handler, once an |
184 | * interrupt has been identified by interrupt_pending(). | ||
185 | */ | ||
159 | void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more) | 186 | void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more) |
160 | { | 187 | { |
161 | struct desc_struct *idt; | 188 | struct desc_struct *idt; |
162 | 189 | ||
163 | BUG_ON(irq >= LGUEST_IRQS); | 190 | BUG_ON(irq >= LGUEST_IRQS); |
164 | 191 | ||
165 | /* They may be in the middle of an iret, where they asked us never to | 192 | /* |
166 | * deliver interrupts. */ | 193 | * They may be in the middle of an iret, where they asked us never to |
194 | * deliver interrupts. | ||
195 | */ | ||
167 | if (cpu->regs->eip >= cpu->lg->noirq_start && | 196 | if (cpu->regs->eip >= cpu->lg->noirq_start && |
168 | (cpu->regs->eip < cpu->lg->noirq_end)) | 197 | (cpu->regs->eip < cpu->lg->noirq_end)) |
169 | return; | 198 | return; |
@@ -187,29 +216,37 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more) | |||
187 | } | 216 | } |
188 | } | 217 | } |
189 | 218 | ||
190 | /* Look at the IDT entry the Guest gave us for this interrupt. The | 219 | /* |
220 | * Look at the IDT entry the Guest gave us for this interrupt. The | ||
191 | * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip | 221 | * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip |
192 | * over them. */ | 222 | * over them. |
223 | */ | ||
193 | idt = &cpu->arch.idt[FIRST_EXTERNAL_VECTOR+irq]; | 224 | idt = &cpu->arch.idt[FIRST_EXTERNAL_VECTOR+irq]; |
194 | /* If they don't have a handler (yet?), we just ignore it */ | 225 | /* If they don't have a handler (yet?), we just ignore it */ |
195 | if (idt_present(idt->a, idt->b)) { | 226 | if (idt_present(idt->a, idt->b)) { |
196 | /* OK, mark it no longer pending and deliver it. */ | 227 | /* OK, mark it no longer pending and deliver it. */ |
197 | clear_bit(irq, cpu->irqs_pending); | 228 | clear_bit(irq, cpu->irqs_pending); |
198 | /* set_guest_interrupt() takes the interrupt descriptor and a | 229 | /* |
230 | * set_guest_interrupt() takes the interrupt descriptor and a | ||
199 | * flag to say whether this interrupt pushes an error code onto | 231 | * flag to say whether this interrupt pushes an error code onto |
200 | * the stack as well: virtual interrupts never do. */ | 232 | * the stack as well: virtual interrupts never do. |
233 | */ | ||
201 | set_guest_interrupt(cpu, idt->a, idt->b, false); | 234 | set_guest_interrupt(cpu, idt->a, idt->b, false); |
202 | } | 235 | } |
203 | 236 | ||
204 | /* Every time we deliver an interrupt, we update the timestamp in the | 237 | /* |
238 | * Every time we deliver an interrupt, we update the timestamp in the | ||
205 | * Guest's lguest_data struct. It would be better for the Guest if we | 239 | * Guest's lguest_data struct. It would be better for the Guest if we |
206 | * did this more often, but it can actually be quite slow: doing it | 240 | * did this more often, but it can actually be quite slow: doing it |
207 | * here is a compromise which means at least it gets updated every | 241 | * here is a compromise which means at least it gets updated every |
208 | * timer interrupt. */ | 242 | * timer interrupt. |
243 | */ | ||
209 | write_timestamp(cpu); | 244 | write_timestamp(cpu); |
210 | 245 | ||
211 | /* If there are no other interrupts we want to deliver, clear | 246 | /* |
212 | * the pending flag. */ | 247 | * If there are no other interrupts we want to deliver, clear |
248 | * the pending flag. | ||
249 | */ | ||
213 | if (!more) | 250 | if (!more) |
214 | put_user(0, &cpu->lg->lguest_data->irq_pending); | 251 | put_user(0, &cpu->lg->lguest_data->irq_pending); |
215 | } | 252 | } |
@@ -217,24 +254,29 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more) | |||
217 | /* And this is the routine when we want to set an interrupt for the Guest. */ | 254 | /* And this is the routine when we want to set an interrupt for the Guest. */ |
218 | void set_interrupt(struct lg_cpu *cpu, unsigned int irq) | 255 | void set_interrupt(struct lg_cpu *cpu, unsigned int irq) |
219 | { | 256 | { |
220 | /* Next time the Guest runs, the core code will see if it can deliver | 257 | /* |
221 | * this interrupt. */ | 258 | * Next time the Guest runs, the core code will see if it can deliver |
259 | * this interrupt. | ||
260 | */ | ||
222 | set_bit(irq, cpu->irqs_pending); | 261 | set_bit(irq, cpu->irqs_pending); |
223 | 262 | ||
224 | /* Make sure it sees it; it might be asleep (eg. halted), or | 263 | /* |
225 | * running the Guest right now, in which case kick_process() | 264 | * Make sure it sees it; it might be asleep (eg. halted), or running |
226 | * will knock it out. */ | 265 | * the Guest right now, in which case kick_process() will knock it out. |
266 | */ | ||
227 | if (!wake_up_process(cpu->tsk)) | 267 | if (!wake_up_process(cpu->tsk)) |
228 | kick_process(cpu->tsk); | 268 | kick_process(cpu->tsk); |
229 | } | 269 | } |
230 | /*:*/ | 270 | /*:*/ |
231 | 271 | ||
232 | /* Linux uses trap 128 for system calls. Plan9 uses 64, and Ron Minnich sent | 272 | /* |
273 | * Linux uses trap 128 for system calls. Plan9 uses 64, and Ron Minnich sent | ||
233 | * me a patch, so we support that too. It'd be a big step for lguest if half | 274 | * me a patch, so we support that too. It'd be a big step for lguest if half |
234 | * the Plan 9 user base were to start using it. | 275 | * the Plan 9 user base were to start using it. |
235 | * | 276 | * |
236 | * Actually now I think of it, it's possible that Ron *is* half the Plan 9 | 277 | * Actually now I think of it, it's possible that Ron *is* half the Plan 9 |
237 | * userbase. Oh well. */ | 278 | * userbase. Oh well. |
279 | */ | ||
238 | static bool could_be_syscall(unsigned int num) | 280 | static bool could_be_syscall(unsigned int num) |
239 | { | 281 | { |
240 | /* Normal Linux SYSCALL_VECTOR or reserved vector? */ | 282 | /* Normal Linux SYSCALL_VECTOR or reserved vector? */ |
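set_interrupt() only marks the IRQ in cpu->irqs_pending and wakes the Guest's thread; the bit is consumed later by the delivery path that calls clear_bit(). The pending-bitmap idea itself is easy to show in plain C. This sketch uses ordinary, non-atomic bit operations rather than the kernel's set_bit()/clear_bit(), and the 32-IRQ sizing is invented:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define NIRQS 32
#define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)
#define WORDS ((NIRQS + BITS_PER_WORD - 1) / BITS_PER_WORD)

static unsigned long pending[WORDS];

static void mark_pending(unsigned irq)
{
        pending[irq / BITS_PER_WORD] |= 1UL << (irq % BITS_PER_WORD);
}

/* Returns true and clears the bit if the irq was pending: the same
 * "deliver it once, then forget it" shape as the driver's clear_bit(). */
static bool take_pending(unsigned irq)
{
        unsigned long mask = 1UL << (irq % BITS_PER_WORD);
        bool was = pending[irq / BITS_PER_WORD] & mask;

        pending[irq / BITS_PER_WORD] &= ~mask;
        return was;
}

int main(void)
{
        mark_pending(9);
        printf("irq 9 pending? %d\n", take_pending(9));  /* 1 */
        printf("irq 9 pending? %d\n", take_pending(9));  /* 0: already taken */
        return 0;
}

In the driver the atomic helpers matter because the Launcher can inject interrupts from another thread (see user_send_irq() later in this diff), which this single-threaded sketch deliberately ignores.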
@@ -274,9 +316,11 @@ void free_interrupts(void) | |||
274 | clear_bit(syscall_vector, used_vectors); | 316 | clear_bit(syscall_vector, used_vectors); |
275 | } | 317 | } |
276 | 318 | ||
277 | /*H:220 Now we've got the routines to deliver interrupts, delivering traps like | 319 | /*H:220 |
320 | * Now we've got the routines to deliver interrupts, delivering traps like | ||
278 | * page fault is easy. The only trick is that Intel decided that some traps | 321 | * page fault is easy. The only trick is that Intel decided that some traps |
279 | * should have error codes: */ | 322 | * should have error codes: |
323 | */ | ||
280 | static bool has_err(unsigned int trap) | 324 | static bool has_err(unsigned int trap) |
281 | { | 325 | { |
282 | return (trap == 8 || (trap >= 10 && trap <= 14) || trap == 17); | 326 | return (trap == 8 || (trap >= 10 && trap <= 14) || trap == 17); |
@@ -285,13 +329,17 @@ static bool has_err(unsigned int trap) | |||
285 | /* deliver_trap() returns true if it could deliver the trap. */ | 329 | /* deliver_trap() returns true if it could deliver the trap. */ |
286 | bool deliver_trap(struct lg_cpu *cpu, unsigned int num) | 330 | bool deliver_trap(struct lg_cpu *cpu, unsigned int num) |
287 | { | 331 | { |
288 | /* Trap numbers are always 8 bit, but we set an impossible trap number | 332 | /* |
289 | * for traps inside the Switcher, so check that here. */ | 333 | * Trap numbers are always 8 bit, but we set an impossible trap number |
334 | * for traps inside the Switcher, so check that here. | ||
335 | */ | ||
290 | if (num >= ARRAY_SIZE(cpu->arch.idt)) | 336 | if (num >= ARRAY_SIZE(cpu->arch.idt)) |
291 | return false; | 337 | return false; |
292 | 338 | ||
293 | /* Early on the Guest hasn't set the IDT entries (or maybe it put a | 339 | /* |
294 | * bogus one in): if we fail here, the Guest will be killed. */ | 340 | * Early on the Guest hasn't set the IDT entries (or maybe it put a |
341 | * bogus one in): if we fail here, the Guest will be killed. | ||
342 | */ | ||
295 | if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b)) | 343 | if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b)) |
296 | return false; | 344 | return false; |
297 | set_guest_interrupt(cpu, cpu->arch.idt[num].a, | 345 | set_guest_interrupt(cpu, cpu->arch.idt[num].a, |
@@ -299,7 +347,8 @@ bool deliver_trap(struct lg_cpu *cpu, unsigned int num) | |||
299 | return true; | 347 | return true; |
300 | } | 348 | } |
301 | 349 | ||
302 | /*H:250 Here's the hard part: returning to the Host every time a trap happens | 350 | /*H:250 |
351 | * Here's the hard part: returning to the Host every time a trap happens | ||
303 | * and then calling deliver_trap() and re-entering the Guest is slow. | 352 | * and then calling deliver_trap() and re-entering the Guest is slow. |
304 | * Particularly because Guest userspace system calls are traps (usually trap | 353 | * Particularly because Guest userspace system calls are traps (usually trap |
305 | * 128). | 354 | * 128). |
@@ -311,69 +360,87 @@ bool deliver_trap(struct lg_cpu *cpu, unsigned int num) | |||
311 | * the other hypervisors would beat it up at lunchtime. | 360 | * the other hypervisors would beat it up at lunchtime. |
312 | * | 361 | * |
313 | * This routine indicates if a particular trap number could be delivered | 362 | * This routine indicates if a particular trap number could be delivered |
314 | * directly. */ | 363 | * directly. |
364 | */ | ||
315 | static bool direct_trap(unsigned int num) | 365 | static bool direct_trap(unsigned int num) |
316 | { | 366 | { |
317 | /* Hardware interrupts don't go to the Guest at all (except system | 367 | /* |
318 | * call). */ | 368 | * Hardware interrupts don't go to the Guest at all (except system |
369 | * call). | ||
370 | */ | ||
319 | if (num >= FIRST_EXTERNAL_VECTOR && !could_be_syscall(num)) | 371 | if (num >= FIRST_EXTERNAL_VECTOR && !could_be_syscall(num)) |
320 | return false; | 372 | return false; |
321 | 373 | ||
322 | /* The Host needs to see page faults (for shadow paging and to save the | 374 | /* |
375 | * The Host needs to see page faults (for shadow paging and to save the | ||
323 | * fault address), general protection faults (in/out emulation) and | 376 | * fault address), general protection faults (in/out emulation) and |
324 | * device not available (TS handling), invalid opcode fault (kvm hcall), | 377 | * device not available (TS handling), invalid opcode fault (kvm hcall), |
325 | * and of course, the hypercall trap. */ | 378 | * and of course, the hypercall trap. |
379 | */ | ||
326 | return num != 14 && num != 13 && num != 7 && | 380 | return num != 14 && num != 13 && num != 7 && |
327 | num != 6 && num != LGUEST_TRAP_ENTRY; | 381 | num != 6 && num != LGUEST_TRAP_ENTRY; |
328 | } | 382 | } |
329 | /*:*/ | 383 | /*:*/ |
330 | 384 | ||
331 | /*M:005 The Guest has the ability to turn its interrupt gates into trap gates, | 385 | /*M:005 |
386 | * The Guest has the ability to turn its interrupt gates into trap gates, | ||
332 | * if it is careful. The Host will let trap gates can go directly to the | 387 | * if it is careful. The Host will let trap gates can go directly to the |
333 | * Guest, but the Guest needs the interrupts atomically disabled for an | 388 | * Guest, but the Guest needs the interrupts atomically disabled for an |
334 | * interrupt gate. It can do this by pointing the trap gate at instructions | 389 | * interrupt gate. It can do this by pointing the trap gate at instructions |
335 | * within noirq_start and noirq_end, where it can safely disable interrupts. */ | 390 | * within noirq_start and noirq_end, where it can safely disable interrupts. |
391 | */ | ||
336 | 392 | ||
337 | /*M:006 The Guests do not use the sysenter (fast system call) instruction, | 393 | /*M:006 |
394 | * The Guests do not use the sysenter (fast system call) instruction, | ||
338 | * because it's hardcoded to enter privilege level 0 and so can't go direct. | 395 | * because it's hardcoded to enter privilege level 0 and so can't go direct. |
339 | * It's about twice as fast as the older "int 0x80" system call, so it might | 396 | * It's about twice as fast as the older "int 0x80" system call, so it might |
340 | * still be worthwhile to handle it in the Switcher and lcall down to the | 397 | * still be worthwhile to handle it in the Switcher and lcall down to the |
341 | * Guest. The sysenter semantics are hairy tho: search for that keyword in | 398 | * Guest. The sysenter semantics are hairy tho: search for that keyword in |
342 | * entry.S :*/ | 399 | * entry.S |
400 | :*/ | ||
343 | 401 | ||
344 | /*H:260 When we make traps go directly into the Guest, we need to make sure | 402 | /*H:260 |
403 | * When we make traps go directly into the Guest, we need to make sure | ||
345 | * the kernel stack is valid (ie. mapped in the page tables). Otherwise, the | 404 | * the kernel stack is valid (ie. mapped in the page tables). Otherwise, the |
346 | * CPU trying to deliver the trap will fault while trying to push the interrupt | 405 | * CPU trying to deliver the trap will fault while trying to push the interrupt |
347 | * words on the stack: this is called a double fault, and it forces us to kill | 406 | * words on the stack: this is called a double fault, and it forces us to kill |
348 | * the Guest. | 407 | * the Guest. |
349 | * | 408 | * |
350 | * Which is deeply unfair, because (literally!) it wasn't the Guests' fault. */ | 409 | * Which is deeply unfair, because (literally!) it wasn't the Guests' fault. |
410 | */ | ||
351 | void pin_stack_pages(struct lg_cpu *cpu) | 411 | void pin_stack_pages(struct lg_cpu *cpu) |
352 | { | 412 | { |
353 | unsigned int i; | 413 | unsigned int i; |
354 | 414 | ||
355 | /* Depending on the CONFIG_4KSTACKS option, the Guest can have one or | 415 | /* |
356 | * two pages of stack space. */ | 416 | * Depending on the CONFIG_4KSTACKS option, the Guest can have one or |
417 | * two pages of stack space. | ||
418 | */ | ||
357 | for (i = 0; i < cpu->lg->stack_pages; i++) | 419 | for (i = 0; i < cpu->lg->stack_pages; i++) |
358 | /* The stack grows *upwards*, so the address we're given is the | 420 | /* |
421 | * The stack grows *upwards*, so the address we're given is the | ||
359 | * start of the page after the kernel stack. Subtract one to | 422 | * start of the page after the kernel stack. Subtract one to |
360 | * get back onto the first stack page, and keep subtracting to | 423 | * get back onto the first stack page, and keep subtracting to |
361 | * get to the rest of the stack pages. */ | 424 | * get to the rest of the stack pages. |
425 | */ | ||
362 | pin_page(cpu, cpu->esp1 - 1 - i * PAGE_SIZE); | 426 | pin_page(cpu, cpu->esp1 - 1 - i * PAGE_SIZE); |
363 | } | 427 | } |
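pin_stack_pages() only has to compute one address inside each stack page: cpu->esp1 is the address just past the stack, so esp1 - 1 lands in the top stack page and each further PAGE_SIZE step moves down one page. A small standalone sketch of just that arithmetic, with pin_page() stubbed out as a printf and the esp1 value invented:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Stand-in for the real pin_page(): just show which page would be pinned. */
static void pin_page(unsigned long vaddr)
{
        printf("pin page containing %#lx (page base %#lx)\n",
               vaddr, vaddr & ~(PAGE_SIZE - 1));
}

int main(void)
{
        unsigned long esp1 = 0xc0402000;   /* example: address just past the stack */
        unsigned int stack_pages = 2;      /* one or two, as the comment says */

        for (unsigned int i = 0; i < stack_pages; i++)
                pin_page(esp1 - 1 - i * PAGE_SIZE);
        return 0;
}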
364 | 428 | ||
365 | /* Direct traps also mean that we need to know whenever the Guest wants to use | 429 | /* |
430 | * Direct traps also mean that we need to know whenever the Guest wants to use | ||
366 | * a different kernel stack, so we can change the IDT entries to use that | 431 | * a different kernel stack, so we can change the IDT entries to use that |
367 | * stack. The IDT entries expect a virtual address, so unlike most addresses | 432 | * stack. The IDT entries expect a virtual address, so unlike most addresses |
368 | * the Guest gives us, the "esp" (stack pointer) value here is virtual, not | 433 | * the Guest gives us, the "esp" (stack pointer) value here is virtual, not |
369 | * physical. | 434 | * physical. |
370 | * | 435 | * |
371 | * In Linux each process has its own kernel stack, so this happens a lot: we | 436 | * In Linux each process has its own kernel stack, so this happens a lot: we |
372 | * change stacks on each context switch. */ | 437 | * change stacks on each context switch. |
438 | */ | ||
373 | void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages) | 439 | void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages) |
374 | { | 440 | { |
375 | /* You are not allowed have a stack segment with privilege level 0: bad | 441 | /* |
376 | * Guest! */ | 442 | * You're not allowed a stack segment with privilege level 0: bad Guest! |
443 | */ | ||
377 | if ((seg & 0x3) != GUEST_PL) | 444 | if ((seg & 0x3) != GUEST_PL) |
378 | kill_guest(cpu, "bad stack segment %i", seg); | 445 | kill_guest(cpu, "bad stack segment %i", seg); |
379 | /* We only expect one or two stack pages. */ | 446 | /* We only expect one or two stack pages. */ |
@@ -387,11 +454,15 @@ void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages) | |||
387 | pin_stack_pages(cpu); | 454 | pin_stack_pages(cpu); |
388 | } | 455 | } |
389 | 456 | ||
390 | /* All this reference to mapping stacks leads us neatly into the other complex | 457 | /* |
391 | * part of the Host: page table handling. */ | 458 | * All this reference to mapping stacks leads us neatly into the other complex |
459 | * part of the Host: page table handling. | ||
460 | */ | ||
392 | 461 | ||
393 | /*H:235 This is the routine which actually checks the Guest's IDT entry and | 462 | /*H:235 |
394 | * transfers it into the entry in "struct lguest": */ | 463 | * This is the routine which actually checks the Guest's IDT entry and |
464 | * transfers it into the entry in "struct lguest": | ||
465 | */ | ||
395 | static void set_trap(struct lg_cpu *cpu, struct desc_struct *trap, | 466 | static void set_trap(struct lg_cpu *cpu, struct desc_struct *trap, |
396 | unsigned int num, u32 lo, u32 hi) | 467 | unsigned int num, u32 lo, u32 hi) |
397 | { | 468 | { |
@@ -407,30 +478,38 @@ static void set_trap(struct lg_cpu *cpu, struct desc_struct *trap, | |||
407 | if (type != 0xE && type != 0xF) | 478 | if (type != 0xE && type != 0xF) |
408 | kill_guest(cpu, "bad IDT type %i", type); | 479 | kill_guest(cpu, "bad IDT type %i", type); |
409 | 480 | ||
410 | /* We only copy the handler address, present bit, privilege level and | 481 | /* |
482 | * We only copy the handler address, present bit, privilege level and | ||
411 | * type. The privilege level controls where the trap can be triggered | 483 | * type. The privilege level controls where the trap can be triggered |
412 | * manually with an "int" instruction. This is usually GUEST_PL, | 484 | * manually with an "int" instruction. This is usually GUEST_PL, |
413 | * except for system calls which userspace can use. */ | 485 | * except for system calls which userspace can use. |
486 | */ | ||
414 | trap->a = ((__KERNEL_CS|GUEST_PL)<<16) | (lo&0x0000FFFF); | 487 | trap->a = ((__KERNEL_CS|GUEST_PL)<<16) | (lo&0x0000FFFF); |
415 | trap->b = (hi&0xFFFFEF00); | 488 | trap->b = (hi&0xFFFFEF00); |
416 | } | 489 | } |
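The two stores at the end of set_trap() are the whole trick: the low descriptor word is rebuilt with a forced code segment (OR'd with the Guest privilege level) in its top half plus the handler offset's low 16 bits, and masking the high word with 0xFFFFEF00 keeps the offset's top 16 bits, the present bit, DPL and 4-bit type while zeroing everything lguest refuses to take from the Guest. A hedged illustration of that packing with an invented selector value (the real __KERNEL_CS differs):

#include <stdio.h>

#define KERNEL_CS 0x60   /* illustrative selector, not the real __KERNEL_CS */
#define GUEST_PL  1

int main(void)
{
        unsigned guest_sel = 0x007b;       /* whatever selector the Guest used */
        unsigned offset    = 0xc01234ab;   /* the Guest's handler address */
        unsigned lo = (guest_sel << 16) | (offset & 0xFFFF);
        unsigned hi = (offset & 0xFFFF0000) | 0xAF00;  /* present, DPL 1, trap gate */

        /* The same masks as the driver: force the selector, keep the rest. */
        unsigned a = ((KERNEL_CS | GUEST_PL) << 16) | (lo & 0x0000FFFF);
        unsigned b = hi & 0xFFFFEF00;

        printf("a=%#010x  selector=%#x offset15..0=%#x\n", a, a >> 16, a & 0xFFFF);
        printf("b=%#010x  offset31..16=%#x flags=%#x\n",
               b, (b >> 16) & 0xFFFF, b & 0xFFFF);
        return 0;
}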
417 | 490 | ||
418 | /*H:230 While we're here, dealing with delivering traps and interrupts to the | 491 | /*H:230 |
492 | * While we're here, dealing with delivering traps and interrupts to the | ||
419 | * Guest, we might as well complete the picture: how the Guest tells us where | 493 | * Guest, we might as well complete the picture: how the Guest tells us where |
420 | * it wants them to go. This would be simple, except making traps fast | 494 | * it wants them to go. This would be simple, except making traps fast |
421 | * requires some tricks. | 495 | * requires some tricks. |
422 | * | 496 | * |
423 | * We saw the Guest setting Interrupt Descriptor Table (IDT) entries with the | 497 | * We saw the Guest setting Interrupt Descriptor Table (IDT) entries with the |
424 | * LHCALL_LOAD_IDT_ENTRY hypercall before: that comes here. */ | 498 | * LHCALL_LOAD_IDT_ENTRY hypercall before: that comes here. |
499 | */ | ||
425 | void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi) | 500 | void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi) |
426 | { | 501 | { |
427 | /* Guest never handles: NMI, doublefault, spurious interrupt or | 502 | /* |
428 | * hypercall. We ignore when it tries to set them. */ | 503 | * Guest never handles: NMI, doublefault, spurious interrupt or |
504 | * hypercall. We ignore when it tries to set them. | ||
505 | */ | ||
429 | if (num == 2 || num == 8 || num == 15 || num == LGUEST_TRAP_ENTRY) | 506 | if (num == 2 || num == 8 || num == 15 || num == LGUEST_TRAP_ENTRY) |
430 | return; | 507 | return; |
431 | 508 | ||
432 | /* Mark the IDT as changed: next time the Guest runs we'll know we have | 509 | /* |
433 | * to copy this again. */ | 510 | * Mark the IDT as changed: next time the Guest runs we'll know we have |
511 | * to copy this again. | ||
512 | */ | ||
434 | cpu->changed |= CHANGED_IDT; | 513 | cpu->changed |= CHANGED_IDT; |
435 | 514 | ||
436 | /* Check that the Guest doesn't try to step outside the bounds. */ | 515 | /* Check that the Guest doesn't try to step outside the bounds. */ |
@@ -440,9 +519,11 @@ void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi) | |||
440 | set_trap(cpu, &cpu->arch.idt[num], num, lo, hi); | 519 | set_trap(cpu, &cpu->arch.idt[num], num, lo, hi); |
441 | } | 520 | } |
442 | 521 | ||
443 | /* The default entry for each interrupt points into the Switcher routines which | 522 | /* |
523 | * The default entry for each interrupt points into the Switcher routines which | ||
444 | * simply return to the Host. The run_guest() loop will then call | 524 | * simply return to the Host. The run_guest() loop will then call |
445 | * deliver_trap() to bounce it back into the Guest. */ | 525 | * deliver_trap() to bounce it back into the Guest. |
526 | */ | ||
446 | static void default_idt_entry(struct desc_struct *idt, | 527 | static void default_idt_entry(struct desc_struct *idt, |
447 | int trap, | 528 | int trap, |
448 | const unsigned long handler, | 529 | const unsigned long handler, |
@@ -451,13 +532,17 @@ static void default_idt_entry(struct desc_struct *idt, | |||
451 | /* A present interrupt gate. */ | 532 | /* A present interrupt gate. */ |
452 | u32 flags = 0x8e00; | 533 | u32 flags = 0x8e00; |
453 | 534 | ||
454 | /* Set the privilege level on the entry for the hypercall: this allows | 535 | /* |
455 | * the Guest to use the "int" instruction to trigger it. */ | 536 | * Set the privilege level on the entry for the hypercall: this allows |
537 | * the Guest to use the "int" instruction to trigger it. | ||
538 | */ | ||
456 | if (trap == LGUEST_TRAP_ENTRY) | 539 | if (trap == LGUEST_TRAP_ENTRY) |
457 | flags |= (GUEST_PL << 13); | 540 | flags |= (GUEST_PL << 13); |
458 | else if (base) | 541 | else if (base) |
459 | /* Copy priv. level from what Guest asked for. This allows | 542 | /* |
460 | * debug (int 3) traps from Guest userspace, for example. */ | 543 | * Copy privilege level from what Guest asked for. This allows |
544 | * debug (int 3) traps from Guest userspace, for example. | ||
545 | */ | ||
461 | flags |= (base->b & 0x6000); | 546 | flags |= (base->b & 0x6000); |
462 | 547 | ||
463 | /* Now pack it into the IDT entry in its weird format. */ | 548 | /* Now pack it into the IDT entry in its weird format. */ |
@@ -475,16 +560,20 @@ void setup_default_idt_entries(struct lguest_ro_state *state, | |||
475 | default_idt_entry(&state->guest_idt[i], i, def[i], NULL); | 560 | default_idt_entry(&state->guest_idt[i], i, def[i], NULL); |
476 | } | 561 | } |
477 | 562 | ||
478 | /*H:240 We don't use the IDT entries in the "struct lguest" directly, instead | 563 | /*H:240 |
564 | * We don't use the IDT entries in the "struct lguest" directly, instead | ||
479 | * we copy them into the IDT which we've set up for Guests on this CPU, just | 565 | * we copy them into the IDT which we've set up for Guests on this CPU, just |
480 | * before we run the Guest. This routine does that copy. */ | 566 | * before we run the Guest. This routine does that copy. |
567 | */ | ||
481 | void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt, | 568 | void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt, |
482 | const unsigned long *def) | 569 | const unsigned long *def) |
483 | { | 570 | { |
484 | unsigned int i; | 571 | unsigned int i; |
485 | 572 | ||
486 | /* We can simply copy the direct traps, otherwise we use the default | 573 | /* |
487 | * ones in the Switcher: they will return to the Host. */ | 574 | * We can simply copy the direct traps, otherwise we use the default |
575 | * ones in the Switcher: they will return to the Host. | ||
576 | */ | ||
488 | for (i = 0; i < ARRAY_SIZE(cpu->arch.idt); i++) { | 577 | for (i = 0; i < ARRAY_SIZE(cpu->arch.idt); i++) { |
489 | const struct desc_struct *gidt = &cpu->arch.idt[i]; | 578 | const struct desc_struct *gidt = &cpu->arch.idt[i]; |
490 | 579 | ||
@@ -492,14 +581,16 @@ void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt, | |||
492 | if (!direct_trap(i)) | 581 | if (!direct_trap(i)) |
493 | continue; | 582 | continue; |
494 | 583 | ||
495 | /* Only trap gates (type 15) can go direct to the Guest. | 584 | /* |
585 | * Only trap gates (type 15) can go direct to the Guest. | ||
496 | * Interrupt gates (type 14) disable interrupts as they are | 586 | * Interrupt gates (type 14) disable interrupts as they are |
497 | * entered, which we never let the Guest do. Not present | 587 | * entered, which we never let the Guest do. Not present |
498 | * entries (type 0x0) also can't go direct, of course. | 588 | * entries (type 0x0) also can't go direct, of course. |
499 | * | 589 | * |
500 | * If it can't go direct, we still need to copy the priv. level: | 590 | * If it can't go direct, we still need to copy the priv. level: |
501 | * they might want to give userspace access to a software | 591 | * they might want to give userspace access to a software |
502 | * interrupt. */ | 592 | * interrupt. |
593 | */ | ||
503 | if (idt_type(gidt->a, gidt->b) == 0xF) | 594 | if (idt_type(gidt->a, gidt->b) == 0xF) |
504 | idt[i] = *gidt; | 595 | idt[i] = *gidt; |
505 | else | 596 | else |
@@ -518,7 +609,8 @@ void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt, | |||
518 | * the next timer interrupt (in nanoseconds). We use the high-resolution timer | 609 | * the next timer interrupt (in nanoseconds). We use the high-resolution timer |
519 | * infrastructure to set a callback at that time. | 610 | * infrastructure to set a callback at that time. |
520 | * | 611 | * |
521 | * 0 means "turn off the clock". */ | 612 | * 0 means "turn off the clock". |
613 | */ | ||
522 | void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta) | 614 | void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta) |
523 | { | 615 | { |
524 | ktime_t expires; | 616 | ktime_t expires; |
@@ -529,9 +621,11 @@ void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta) | |||
529 | return; | 621 | return; |
530 | } | 622 | } |
531 | 623 | ||
532 | /* We use wallclock time here, so the Guest might not be running for | 624 | /* |
625 | * We use wallclock time here, so the Guest might not be running for | ||
533 | * all the time between now and the timer interrupt it asked for. This | 626 | * all the time between now and the timer interrupt it asked for. This |
534 | * is almost always the right thing to do. */ | 627 | * is almost always the right thing to do. |
628 | */ | ||
535 | expires = ktime_add_ns(ktime_get_real(), delta); | 629 | expires = ktime_add_ns(ktime_get_real(), delta); |
536 | hrtimer_start(&cpu->hrt, expires, HRTIMER_MODE_ABS); | 630 | hrtimer_start(&cpu->hrt, expires, HRTIMER_MODE_ABS); |
537 | } | 631 | } |
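guest_set_clockevent() arms a one-shot timer at an absolute wallclock instant, "now + delta nanoseconds", via ktime_add_ns() and hrtimer_start(..., HRTIMER_MODE_ABS). The nearest everyday userspace analogue is a timerfd armed with TFD_TIMER_ABSTIME on CLOCK_REALTIME; a minimal sketch, with error checking trimmed and an arbitrary 250 ms delta:

#include <stdint.h>
#include <stdio.h>
#include <sys/timerfd.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
        long delta_ns = 250 * 1000 * 1000;          /* "the Guest asked for 250ms" */
        int fd = timerfd_create(CLOCK_REALTIME, 0);

        struct timespec now;
        clock_gettime(CLOCK_REALTIME, &now);

        struct itimerspec when = { 0 };             /* zero interval: one-shot */
        when.it_value.tv_sec  = now.tv_sec + (now.tv_nsec + delta_ns) / 1000000000L;
        when.it_value.tv_nsec = (now.tv_nsec + delta_ns) % 1000000000L;

        /* Absolute expiry, same shape as hrtimer_start(..., HRTIMER_MODE_ABS). */
        timerfd_settime(fd, TFD_TIMER_ABSTIME, &when, NULL);

        uint64_t expirations;
        read(fd, &expirations, sizeof(expirations));   /* blocks until it fires */
        printf("timer fired %llu time(s)\n", (unsigned long long)expirations);
        close(fd);
        return 0;
}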
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h index 01c591923793..bc28745d05af 100644 --- a/drivers/lguest/lg.h +++ b/drivers/lguest/lg.h | |||
@@ -16,15 +16,13 @@ | |||
16 | void free_pagetables(void); | 16 | void free_pagetables(void); |
17 | int init_pagetables(struct page **switcher_page, unsigned int pages); | 17 | int init_pagetables(struct page **switcher_page, unsigned int pages); |
18 | 18 | ||
19 | struct pgdir | 19 | struct pgdir { |
20 | { | ||
21 | unsigned long gpgdir; | 20 | unsigned long gpgdir; |
22 | pgd_t *pgdir; | 21 | pgd_t *pgdir; |
23 | }; | 22 | }; |
24 | 23 | ||
25 | /* We have two pages shared with guests, per cpu. */ | 24 | /* We have two pages shared with guests, per cpu. */ |
26 | struct lguest_pages | 25 | struct lguest_pages { |
27 | { | ||
28 | /* This is the stack page mapped rw in guest */ | 26 | /* This is the stack page mapped rw in guest */ |
29 | char spare[PAGE_SIZE - sizeof(struct lguest_regs)]; | 27 | char spare[PAGE_SIZE - sizeof(struct lguest_regs)]; |
30 | struct lguest_regs regs; | 28 | struct lguest_regs regs; |
@@ -54,13 +52,13 @@ struct lg_cpu { | |||
54 | 52 | ||
55 | unsigned long pending_notify; /* pfn from LHCALL_NOTIFY */ | 53 | unsigned long pending_notify; /* pfn from LHCALL_NOTIFY */ |
56 | 54 | ||
57 | /* At end of a page shared mapped over lguest_pages in guest. */ | 55 | /* At end of a page shared mapped over lguest_pages in guest. */ |
58 | unsigned long regs_page; | 56 | unsigned long regs_page; |
59 | struct lguest_regs *regs; | 57 | struct lguest_regs *regs; |
60 | 58 | ||
61 | struct lguest_pages *last_pages; | 59 | struct lguest_pages *last_pages; |
62 | 60 | ||
63 | int cpu_pgd; /* which pgd this cpu is currently using */ | 61 | int cpu_pgd; /* Which pgd this cpu is currently using */ |
64 | 62 | ||
65 | /* If a hypercall was asked for, this points to the arguments. */ | 63 | /* If a hypercall was asked for, this points to the arguments. */ |
66 | struct hcall_args *hcall; | 64 | struct hcall_args *hcall; |
@@ -89,15 +87,17 @@ struct lg_eventfd_map { | |||
89 | }; | 87 | }; |
90 | 88 | ||
91 | /* The private info the thread maintains about the guest. */ | 89 | /* The private info the thread maintains about the guest. */ |
92 | struct lguest | 90 | struct lguest { |
93 | { | ||
94 | struct lguest_data __user *lguest_data; | 91 | struct lguest_data __user *lguest_data; |
95 | struct lg_cpu cpus[NR_CPUS]; | 92 | struct lg_cpu cpus[NR_CPUS]; |
96 | unsigned int nr_cpus; | 93 | unsigned int nr_cpus; |
97 | 94 | ||
98 | u32 pfn_limit; | 95 | u32 pfn_limit; |
99 | /* This provides the offset to the base of guest-physical | 96 | |
100 | * memory in the Launcher. */ | 97 | /* |
98 | * This provides the offset to the base of guest-physical memory in the | ||
99 | * Launcher. | ||
100 | */ | ||
101 | void __user *mem_base; | 101 | void __user *mem_base; |
102 | unsigned long kernel_address; | 102 | unsigned long kernel_address; |
103 | 103 | ||
@@ -122,11 +122,13 @@ bool lguest_address_ok(const struct lguest *lg, | |||
122 | void __lgread(struct lg_cpu *, void *, unsigned long, unsigned); | 122 | void __lgread(struct lg_cpu *, void *, unsigned long, unsigned); |
123 | void __lgwrite(struct lg_cpu *, unsigned long, const void *, unsigned); | 123 | void __lgwrite(struct lg_cpu *, unsigned long, const void *, unsigned); |
124 | 124 | ||
125 | /*H:035 Using memory-copy operations like that is usually inconvenient, so we | 125 | /*H:035 |
126 | * Using memory-copy operations like that is usually inconvenient, so we | ||
126 | * have the following helper macros which read and write a specific type (often | 127 | * have the following helper macros which read and write a specific type (often |
127 | * an unsigned long). | 128 | * an unsigned long). |
128 | * | 129 | * |
129 | * This reads into a variable of the given type then returns that. */ | 130 | * This reads into a variable of the given type then returns that. |
131 | */ | ||
130 | #define lgread(cpu, addr, type) \ | 132 | #define lgread(cpu, addr, type) \ |
131 | ({ type _v; __lgread((cpu), &_v, (addr), sizeof(_v)); _v; }) | 133 | ({ type _v; __lgread((cpu), &_v, (addr), sizeof(_v)); _v; }) |
132 | 134 | ||
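lgread() is a GNU C statement-expression macro: it declares a temporary of the requested type, passes its address and sizeof to __lgread(), and the whole expression evaluates to the value that was read. The same shape can be tried in userspace (gcc or clang only, since statement expressions are an extension); here __lgread() is replaced by a memcpy from a fake guest-memory buffer, and all names are made up:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static unsigned char guest_mem[64];   /* stand-in for Guest memory */

static void fake_lgread(void *b, unsigned long addr, unsigned bytes)
{
        memcpy(b, guest_mem + addr, bytes);
}

/* Same shape as lgread(cpu, addr, type): a statement expression that
 * reads sizeof(type) bytes and evaluates to the typed value. */
#define fake_read(addr, type) \
        ({ type _v; fake_lgread(&_v, (addr), sizeof(_v)); _v; })

int main(void)
{
        uint32_t x = 0xdeadbeef;

        memcpy(guest_mem + 8, &x, sizeof(x));
        printf("read back: %#x\n", fake_read(8, uint32_t));
        return 0;
}

The point of the macro is that callers get a typed value back without a cast or a temporary at every call site.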
@@ -140,9 +142,11 @@ void __lgwrite(struct lg_cpu *, unsigned long, const void *, unsigned); | |||
140 | 142 | ||
141 | int run_guest(struct lg_cpu *cpu, unsigned long __user *user); | 143 | int run_guest(struct lg_cpu *cpu, unsigned long __user *user); |
142 | 144 | ||
143 | /* Helper macros to obtain the first 12 or the last 20 bits, this is only the | 145 | /* |
146 | * Helper macros to obtain the first 12 or the last 20 bits, this is only the | ||
144 | * first step in the migration to the kernel types. pte_pfn is already defined | 147 | * first step in the migration to the kernel types. pte_pfn is already defined |
145 | * in the kernel. */ | 148 | * in the kernel. |
149 | */ | ||
146 | #define pgd_flags(x) (pgd_val(x) & ~PAGE_MASK) | 150 | #define pgd_flags(x) (pgd_val(x) & ~PAGE_MASK) |
147 | #define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT) | 151 | #define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT) |
148 | #define pmd_flags(x) (pmd_val(x) & ~PAGE_MASK) | 152 | #define pmd_flags(x) (pmd_val(x) & ~PAGE_MASK) |
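The pgd_flags()/pgd_pfn() helpers just split a page-table entry into its low 12 flag bits and the page-frame number above them (4 KB pages, so PAGE_SHIFT is 12). A tiny illustration of that split with an invented entry value:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

int main(void)
{
        unsigned long pfn   = 0x1234;
        unsigned long flags = 0x067;                /* a typical present/writable entry */
        unsigned long entry = (pfn << PAGE_SHIFT) | flags;

        printf("entry=%#lx flags=%#lx pfn=%#lx\n",
               entry, entry & ~PAGE_MASK, entry >> PAGE_SHIFT);
        return 0;
}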
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c index e082cdac88b4..b6200bc39b58 100644 --- a/drivers/lguest/lguest_device.c +++ b/drivers/lguest/lguest_device.c | |||
@@ -1,10 +1,12 @@ | |||
1 | /*P:050 Lguest guests use a very simple method to describe devices. It's a | 1 | /*P:050 |
2 | * Lguest guests use a very simple method to describe devices. It's a | ||
2 | * series of device descriptors contained just above the top of normal Guest | 3 | * series of device descriptors contained just above the top of normal Guest |
3 | * memory. | 4 | * memory. |
4 | * | 5 | * |
5 | * We use the standard "virtio" device infrastructure, which provides us with a | 6 | * We use the standard "virtio" device infrastructure, which provides us with a |
6 | * console, a network and a block driver. Each one expects some configuration | 7 | * console, a network and a block driver. Each one expects some configuration |
7 | * information and a "virtqueue" or two to send and receive data. :*/ | 8 | * information and a "virtqueue" or two to send and receive data. |
9 | :*/ | ||
8 | #include <linux/init.h> | 10 | #include <linux/init.h> |
9 | #include <linux/bootmem.h> | 11 | #include <linux/bootmem.h> |
10 | #include <linux/lguest_launcher.h> | 12 | #include <linux/lguest_launcher.h> |
@@ -20,8 +22,10 @@ | |||
20 | /* The pointer to our (page) of device descriptions. */ | 22 | /* The pointer to our (page) of device descriptions. */ |
21 | static void *lguest_devices; | 23 | static void *lguest_devices; |
22 | 24 | ||
23 | /* For Guests, device memory can be used as normal memory, so we cast away the | 25 | /* |
24 | * __iomem to quieten sparse. */ | 26 | * For Guests, device memory can be used as normal memory, so we cast away the |
27 | * __iomem to quieten sparse. | ||
28 | */ | ||
25 | static inline void *lguest_map(unsigned long phys_addr, unsigned long pages) | 29 | static inline void *lguest_map(unsigned long phys_addr, unsigned long pages) |
26 | { | 30 | { |
27 | return (__force void *)ioremap_cache(phys_addr, PAGE_SIZE*pages); | 31 | return (__force void *)ioremap_cache(phys_addr, PAGE_SIZE*pages); |
@@ -32,8 +36,10 @@ static inline void lguest_unmap(void *addr) | |||
32 | iounmap((__force void __iomem *)addr); | 36 | iounmap((__force void __iomem *)addr); |
33 | } | 37 | } |
34 | 38 | ||
35 | /*D:100 Each lguest device is just a virtio device plus a pointer to its entry | 39 | /*D:100 |
36 | * in the lguest_devices page. */ | 40 | * Each lguest device is just a virtio device plus a pointer to its entry |
41 | * in the lguest_devices page. | ||
42 | */ | ||
37 | struct lguest_device { | 43 | struct lguest_device { |
38 | struct virtio_device vdev; | 44 | struct virtio_device vdev; |
39 | 45 | ||
@@ -41,9 +47,11 @@ struct lguest_device { | |||
41 | struct lguest_device_desc *desc; | 47 | struct lguest_device_desc *desc; |
42 | }; | 48 | }; |
43 | 49 | ||
44 | /* Since the virtio infrastructure hands us a pointer to the virtio_device all | 50 | /* |
51 | * Since the virtio infrastructure hands us a pointer to the virtio_device all | ||
45 | * the time, it helps to have a curt macro to get a pointer to the struct | 52 | * the time, it helps to have a curt macro to get a pointer to the struct |
46 | * lguest_device it's enclosed in. */ | 53 | * lguest_device it's enclosed in. |
54 | */ | ||
47 | #define to_lgdev(vd) container_of(vd, struct lguest_device, vdev) | 55 | #define to_lgdev(vd) container_of(vd, struct lguest_device, vdev) |
48 | 56 | ||
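to_lgdev() is the standard container_of() idiom: given a pointer to the struct virtio_device embedded inside a struct lguest_device, subtract the member's offset to get back to the enclosing structure. A self-contained userspace version of the same idiom, with made-up structure names:

#include <stddef.h>
#include <stdio.h>

#define my_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int id; };
struct outer {
        const char *name;
        struct inner vdev;      /* embedded member, like vdev in lguest_device */
};

int main(void)
{
        struct outer o = { .name = "demo", .vdev = { .id = 7 } };
        struct inner *ip = &o.vdev;            /* all a callback would be handed */
        struct outer *op = my_container_of(ip, struct outer, vdev);

        printf("recovered %s (id %d)\n", op->name, op->vdev.id);
        return 0;
}

This is why the virtio core can hand drivers only the inner vdev pointer and still let them reach their own bookkeeping around it.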
49 | /*D:130 | 57 | /*D:130 |
@@ -55,7 +63,8 @@ struct lguest_device { | |||
55 | * the driver will look at them during setup. | 63 | * the driver will look at them during setup. |
56 | * | 64 | * |
57 | * A convenient routine to return the device's virtqueue config array: | 65 | * A convenient routine to return the device's virtqueue config array: |
58 | * immediately after the descriptor. */ | 66 | * immediately after the descriptor. |
67 | */ | ||
59 | static struct lguest_vqconfig *lg_vq(const struct lguest_device_desc *desc) | 68 | static struct lguest_vqconfig *lg_vq(const struct lguest_device_desc *desc) |
60 | { | 69 | { |
61 | return (void *)(desc + 1); | 70 | return (void *)(desc + 1); |
@@ -98,10 +107,12 @@ static u32 lg_get_features(struct virtio_device *vdev) | |||
98 | return features; | 107 | return features; |
99 | } | 108 | } |
100 | 109 | ||
101 | /* The virtio core takes the features the Host offers, and copies the | 110 | /* |
102 | * ones supported by the driver into the vdev->features array. Once | 111 | * The virtio core takes the features the Host offers, and copies the ones |
103 | * that's all sorted out, this routine is called so we can tell the | 112 | * supported by the driver into the vdev->features array. Once that's all |
104 | * Host which features we understand and accept. */ | 113 | * sorted out, this routine is called so we can tell the Host which features we |
114 | * understand and accept. | ||
115 | */ | ||
105 | static void lg_finalize_features(struct virtio_device *vdev) | 116 | static void lg_finalize_features(struct virtio_device *vdev) |
106 | { | 117 | { |
107 | unsigned int i, bits; | 118 | unsigned int i, bits; |
@@ -112,10 +123,11 @@ static void lg_finalize_features(struct virtio_device *vdev) | |||
112 | /* Give virtio_ring a chance to accept features. */ | 123 | /* Give virtio_ring a chance to accept features. */ |
113 | vring_transport_features(vdev); | 124 | vring_transport_features(vdev); |
114 | 125 | ||
115 | /* The vdev->feature array is a Linux bitmask: this isn't the | 126 | /* |
116 | * same as a the simple array of bits used by lguest devices | 127 | * The vdev->feature array is a Linux bitmask: this isn't the same as a |
117 | * for features. So we do this slow, manual conversion which is | 128 | * the simple array of bits used by lguest devices for features. So we |
118 | * completely general. */ | 129 | * do this slow, manual conversion which is completely general. |
130 | */ | ||
119 | memset(out_features, 0, desc->feature_len); | 131 | memset(out_features, 0, desc->feature_len); |
120 | bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8; | 132 | bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8; |
121 | for (i = 0; i < bits; i++) { | 133 | for (i = 0; i < bits; i++) { |
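The loop that starts at the end of this hunk walks every feature bit so it can translate between the two encodings: vdev->features is a Linux bitmask stored in unsigned longs, while the descriptor's out_features is a flat array of bytes with one feature per bit. The rest of the loop body sits outside this hunk, so the sketch below is an assumption about the shape of that conversion rather than a copy of it:

#include <limits.h>
#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

int main(void)
{
        unsigned long features[1] = { (1UL << 0) | (1UL << 5) | (1UL << 9) };
        unsigned char out[4];                    /* plays the role of out_features */
        unsigned int nbits = 32;

        memset(out, 0, sizeof(out));
        for (unsigned int i = 0; i < nbits; i++)
                if (features[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
                        out[i / 8] |= 1 << (i % 8);   /* one bit per feature byte-array slot */

        for (unsigned int i = 0; i < sizeof(out); i++)
                printf("out[%u] = %#04x\n", i, out[i]);
        return 0;
}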
@@ -146,15 +158,19 @@ static void lg_set(struct virtio_device *vdev, unsigned int offset, | |||
146 | memcpy(lg_config(desc) + offset, buf, len); | 158 | memcpy(lg_config(desc) + offset, buf, len); |
147 | } | 159 | } |
148 | 160 | ||
149 | /* The operations to get and set the status word just access the status field | 161 | /* |
150 | * of the device descriptor. */ | 162 | * The operations to get and set the status word just access the status field |
163 | * of the device descriptor. | ||
164 | */ | ||
151 | static u8 lg_get_status(struct virtio_device *vdev) | 165 | static u8 lg_get_status(struct virtio_device *vdev) |
152 | { | 166 | { |
153 | return to_lgdev(vdev)->desc->status; | 167 | return to_lgdev(vdev)->desc->status; |
154 | } | 168 | } |
155 | 169 | ||
156 | /* To notify on status updates, we (ab)use the NOTIFY hypercall, with the | 170 | /* |
157 | * descriptor address of the device. A zero status means "reset". */ | 171 | * To notify on status updates, we (ab)use the NOTIFY hypercall, with the |
172 | * descriptor address of the device. A zero status means "reset". | ||
173 | */ | ||
158 | static void set_status(struct virtio_device *vdev, u8 status) | 174 | static void set_status(struct virtio_device *vdev, u8 status) |
159 | { | 175 | { |
160 | unsigned long offset = (void *)to_lgdev(vdev)->desc - lguest_devices; | 176 | unsigned long offset = (void *)to_lgdev(vdev)->desc - lguest_devices; |
@@ -191,8 +207,7 @@ static void lg_reset(struct virtio_device *vdev) | |||
191 | */ | 207 | */ |
192 | 208 | ||
193 | /*D:140 This is the information we remember about each virtqueue. */ | 209 | /*D:140 This is the information we remember about each virtqueue. */ |
194 | struct lguest_vq_info | 210 | struct lguest_vq_info { |
195 | { | ||
196 | /* A copy of the information contained in the device config. */ | 211 | /* A copy of the information contained in the device config. */ |
197 | struct lguest_vqconfig config; | 212 | struct lguest_vqconfig config; |
198 | 213 | ||
@@ -200,13 +215,17 @@ struct lguest_vq_info | |||
200 | void *pages; | 215 | void *pages; |
201 | }; | 216 | }; |
202 | 217 | ||
203 | /* When the virtio_ring code wants to prod the Host, it calls us here and we | 218 | /* |
219 | * When the virtio_ring code wants to prod the Host, it calls us here and we | ||
204 | * make a hypercall. We hand the physical address of the virtqueue so the Host | 220 | * make a hypercall. We hand the physical address of the virtqueue so the Host |
205 | * knows which virtqueue we're talking about. */ | 221 | * knows which virtqueue we're talking about. |
222 | */ | ||
206 | static void lg_notify(struct virtqueue *vq) | 223 | static void lg_notify(struct virtqueue *vq) |
207 | { | 224 | { |
208 | /* We store our virtqueue information in the "priv" pointer of the | 225 | /* |
209 | * virtqueue structure. */ | 226 | * We store our virtqueue information in the "priv" pointer of the |
227 | * virtqueue structure. | ||
228 | */ | ||
210 | struct lguest_vq_info *lvq = vq->priv; | 229 | struct lguest_vq_info *lvq = vq->priv; |
211 | 230 | ||
212 | kvm_hypercall1(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT); | 231 | kvm_hypercall1(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT); |
@@ -215,7 +234,8 @@ static void lg_notify(struct virtqueue *vq) | |||
215 | /* An extern declaration inside a C file is bad form. Don't do it. */ | 234 | /* An extern declaration inside a C file is bad form. Don't do it. */ |
216 | extern void lguest_setup_irq(unsigned int irq); | 235 | extern void lguest_setup_irq(unsigned int irq); |
217 | 236 | ||
218 | /* This routine finds the first virtqueue described in the configuration of | 237 | /* |
238 | * This routine finds the Nth virtqueue described in the configuration of | ||
219 | * this device and sets it up. | 239 | * this device and sets it up. |
220 | * | 240 | * |
221 | * This is kind of an ugly duckling. It'd be nicer to have a standard | 241 | * This is kind of an ugly duckling. It'd be nicer to have a standard |
@@ -223,9 +243,7 @@ extern void lguest_setup_irq(unsigned int irq); | |||
223 | * everyone wants to do it differently. The KVM coders want the Guest to | 243 | * everyone wants to do it differently. The KVM coders want the Guest to |
224 | * allocate its own pages and tell the Host where they are, but for lguest it's | 244 | * allocate its own pages and tell the Host where they are, but for lguest it's |
225 | * simpler for the Host to simply tell us where the pages are. | 245 | * simpler for the Host to simply tell us where the pages are. |
226 | * | 246 | */ |
227 | * So we provide drivers with a "find the Nth virtqueue and set it up" | ||
228 | * function. */ | ||
229 | static struct virtqueue *lg_find_vq(struct virtio_device *vdev, | 247 | static struct virtqueue *lg_find_vq(struct virtio_device *vdev, |
230 | unsigned index, | 248 | unsigned index, |
231 | void (*callback)(struct virtqueue *vq), | 249 | void (*callback)(struct virtqueue *vq), |
@@ -244,9 +262,11 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, | |||
244 | if (!lvq) | 262 | if (!lvq) |
245 | return ERR_PTR(-ENOMEM); | 263 | return ERR_PTR(-ENOMEM); |
246 | 264 | ||
247 | /* Make a copy of the "struct lguest_vqconfig" entry, which sits after | 265 | /* |
266 | * Make a copy of the "struct lguest_vqconfig" entry, which sits after | ||
248 | * the descriptor. We need a copy because the config space might not | 267 | * the descriptor. We need a copy because the config space might not |
249 | * be aligned correctly. */ | 268 | * be aligned correctly. |
269 | */ | ||
250 | memcpy(&lvq->config, lg_vq(ldev->desc)+index, sizeof(lvq->config)); | 270 | memcpy(&lvq->config, lg_vq(ldev->desc)+index, sizeof(lvq->config)); |
251 | 271 | ||
252 | printk("Mapping virtqueue %i addr %lx\n", index, | 272 | printk("Mapping virtqueue %i addr %lx\n", index, |
@@ -261,8 +281,10 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, | |||
261 | goto free_lvq; | 281 | goto free_lvq; |
262 | } | 282 | } |
263 | 283 | ||
264 | /* OK, tell virtio_ring.c to set up a virtqueue now we know its size | 284 | /* |
265 | * and we've got a pointer to its pages. */ | 285 | * OK, tell virtio_ring.c to set up a virtqueue now we know its size |
286 | * and we've got a pointer to its pages. | ||
287 | */ | ||
266 | vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN, | 288 | vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN, |
267 | vdev, lvq->pages, lg_notify, callback, name); | 289 | vdev, lvq->pages, lg_notify, callback, name); |
268 | if (!vq) { | 290 | if (!vq) { |
@@ -273,18 +295,23 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, | |||
273 | /* Make sure the interrupt is allocated. */ | 295 | /* Make sure the interrupt is allocated. */ |
274 | lguest_setup_irq(lvq->config.irq); | 296 | lguest_setup_irq(lvq->config.irq); |
275 | 297 | ||
276 | /* Tell the interrupt for this virtqueue to go to the virtio_ring | 298 | /* |
277 | * interrupt handler. */ | 299 | * Tell the interrupt for this virtqueue to go to the virtio_ring |
278 | /* FIXME: We used to have a flag for the Host to tell us we could use | 300 | * interrupt handler. |
301 | * | ||
302 | * FIXME: We used to have a flag for the Host to tell us we could use | ||
279 | * the interrupt as a source of randomness: it'd be nice to have that | 303 | * the interrupt as a source of randomness: it'd be nice to have that |
280 | * back.. */ | 304 | * back. |
305 | */ | ||
281 | err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED, | 306 | err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED, |
282 | dev_name(&vdev->dev), vq); | 307 | dev_name(&vdev->dev), vq); |
283 | if (err) | 308 | if (err) |
284 | goto destroy_vring; | 309 | goto destroy_vring; |
285 | 310 | ||
286 | /* Last of all we hook up our 'struct lguest_vq_info" to the | 311 | /* |
287 | * virtqueue's priv pointer. */ | 312 | * Last of all we hook up our 'struct lguest_vq_info" to the |
313 | * virtqueue's priv pointer. | ||
314 | */ | ||
288 | vq->priv = lvq; | 315 | vq->priv = lvq; |
289 | return vq; | 316 | return vq; |
290 | 317 | ||
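The error paths in lg_find_vq() (goto free_lvq, goto destroy_vring) follow the usual kernel unwind idiom: acquire resources in order, and on failure jump to a label that releases everything acquired so far, most recent first. A compact userspace illustration with invented resources:

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
        int err;
        char *cfg = malloc(32);
        if (!cfg)
                return -1;

        FILE *log = fopen("/tmp/demo.log", "w");
        if (!log) {
                err = -2;
                goto free_cfg;          /* undo the first step only */
        }

        if (fputs("hello\n", log) < 0) {
                err = -3;
                goto close_log;         /* undo both, most recent first */
        }

        fclose(log);
        free(cfg);
        return 0;

close_log:
        fclose(log);
free_cfg:
        free(cfg);
        return err;
}

int main(void)
{
        printf("setup() returned %d\n", setup());
        return 0;
}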
@@ -358,11 +385,14 @@ static struct virtio_config_ops lguest_config_ops = { | |||
358 | .del_vqs = lg_del_vqs, | 385 | .del_vqs = lg_del_vqs, |
359 | }; | 386 | }; |
360 | 387 | ||
361 | /* The root device for the lguest virtio devices. This makes them appear as | 388 | /* |
362 | * /sys/devices/lguest/0,1,2 not /sys/devices/0,1,2. */ | 389 | * The root device for the lguest virtio devices. This makes them appear as |
390 | * /sys/devices/lguest/0,1,2 not /sys/devices/0,1,2. | ||
391 | */ | ||
363 | static struct device *lguest_root; | 392 | static struct device *lguest_root; |
364 | 393 | ||
365 | /*D:120 This is the core of the lguest bus: actually adding a new device. | 394 | /*D:120 |
395 | * This is the core of the lguest bus: actually adding a new device. | ||
366 | * It's a separate function because it's neater that way, and because an | 396 | * It's a separate function because it's neater that way, and because an |
367 | * earlier version of the code supported hotplug and unplug. They were removed | 397 | * earlier version of the code supported hotplug and unplug. They were removed |
368 | * early on because they were never used. | 398 | * early on because they were never used. |
@@ -371,14 +401,14 @@ static struct device *lguest_root; | |||
371 | * | 401 | * |
372 | * It's worth reading this carefully: we start with a pointer to the new device | 402 | * It's worth reading this carefully: we start with a pointer to the new device |
373 | * descriptor in the "lguest_devices" page, and the offset into the device | 403 | * descriptor in the "lguest_devices" page, and the offset into the device |
374 | * descriptor page so we can uniquely identify it if things go badly wrong. */ | 404 | * descriptor page so we can uniquely identify it if things go badly wrong. |
405 | */ | ||
375 | static void add_lguest_device(struct lguest_device_desc *d, | 406 | static void add_lguest_device(struct lguest_device_desc *d, |
376 | unsigned int offset) | 407 | unsigned int offset) |
377 | { | 408 | { |
378 | struct lguest_device *ldev; | 409 | struct lguest_device *ldev; |
379 | 410 | ||
380 | /* Start with zeroed memory; Linux's device layer seems to count on | 411 | /* Start with zeroed memory; Linux's device layer counts on it. */ |
381 | * it. */ | ||
382 | ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); | 412 | ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); |
383 | if (!ldev) { | 413 | if (!ldev) { |
384 | printk(KERN_EMERG "Cannot allocate lguest dev %u type %u\n", | 414 | printk(KERN_EMERG "Cannot allocate lguest dev %u type %u\n", |
@@ -388,17 +418,25 @@ static void add_lguest_device(struct lguest_device_desc *d, | |||
388 | 418 | ||
389 | /* This device's parent is the lguest/ dir. */ | 419 | /* This device's parent is the lguest/ dir. */ |
390 | ldev->vdev.dev.parent = lguest_root; | 420 | ldev->vdev.dev.parent = lguest_root; |
391 | /* We have a unique device index thanks to the dev_index counter. */ | 421 | /* |
422 | * The device type comes straight from the descriptor. There's also a | ||
423 | * device vendor field in the virtio_device struct, which we leave as | ||
424 | * 0. | ||
425 | */ | ||
392 | ldev->vdev.id.device = d->type; | 426 | ldev->vdev.id.device = d->type; |
393 | /* We have a simple set of routines for querying the device's | 427 | /* |
394 | * configuration information and setting its status. */ | 428 | * We have a simple set of routines for querying the device's |
429 | * configuration information and setting its status. | ||
430 | */ | ||
395 | ldev->vdev.config = &lguest_config_ops; | 431 | ldev->vdev.config = &lguest_config_ops; |
396 | /* And we remember the device's descriptor for lguest_config_ops. */ | 432 | /* And we remember the device's descriptor for lguest_config_ops. */ |
397 | ldev->desc = d; | 433 | ldev->desc = d; |
398 | 434 | ||
399 | /* register_virtio_device() sets up the generic fields for the struct | 435 | /* |
436 | * register_virtio_device() sets up the generic fields for the struct | ||
400 | * virtio_device and calls device_register(). This makes the bus | 437 | * virtio_device and calls device_register(). This makes the bus |
401 | * infrastructure look for a matching driver. */ | 438 | * infrastructure look for a matching driver. |
439 | */ | ||
402 | if (register_virtio_device(&ldev->vdev) != 0) { | 440 | if (register_virtio_device(&ldev->vdev) != 0) { |
403 | printk(KERN_ERR "Failed to register lguest dev %u type %u\n", | 441 | printk(KERN_ERR "Failed to register lguest dev %u type %u\n", |
404 | offset, d->type); | 442 | offset, d->type); |
@@ -406,8 +444,10 @@ static void add_lguest_device(struct lguest_device_desc *d, | |||
406 | } | 444 | } |
407 | } | 445 | } |
408 | 446 | ||
409 | /*D:110 scan_devices() simply iterates through the device page. The type 0 is | 447 | /*D:110 |
410 | * reserved to mean "end of devices". */ | 448 | * scan_devices() simply iterates through the device page. The type 0 is |
449 | * reserved to mean "end of devices". | ||
450 | */ | ||
411 | static void scan_devices(void) | 451 | static void scan_devices(void) |
412 | { | 452 | { |
413 | unsigned int i; | 453 | unsigned int i; |
@@ -426,7 +466,8 @@ static void scan_devices(void) | |||
426 | } | 466 | } |
427 | } | 467 | } |
428 | 468 | ||
429 | /*D:105 Fairly early in boot, lguest_devices_init() is called to set up the | 469 | /*D:105 |
470 | * Fairly early in boot, lguest_devices_init() is called to set up the | ||
430 | * lguest device infrastructure. We check that we are a Guest by checking | 471 | * lguest device infrastructure. We check that we are a Guest by checking |
431 | * pv_info.name: there are other ways of checking, but this seems most | 472 | * pv_info.name: there are other ways of checking, but this seems most |
432 | * obvious to me. | 473 | * obvious to me. |
@@ -437,7 +478,8 @@ static void scan_devices(void) | |||
437 | * correct sysfs incantation). | 478 | * correct sysfs incantation). |
438 | * | 479 | * |
439 | * Finally we call scan_devices() which adds all the devices found in the | 480 | * Finally we call scan_devices() which adds all the devices found in the |
440 | * lguest_devices page. */ | 481 | * lguest_devices page. |
482 | */ | ||
441 | static int __init lguest_devices_init(void) | 483 | static int __init lguest_devices_init(void) |
442 | { | 484 | { |
443 | if (strcmp(pv_info.name, "lguest") != 0) | 485 | if (strcmp(pv_info.name, "lguest") != 0) |
@@ -456,11 +498,13 @@ static int __init lguest_devices_init(void) | |||
456 | /* We do this after core stuff, but before the drivers. */ | 498 | /* We do this after core stuff, but before the drivers. */ |
457 | postcore_initcall(lguest_devices_init); | 499 | postcore_initcall(lguest_devices_init); |
458 | 500 | ||
459 | /*D:150 At this point in the journey we used to now wade through the lguest | 501 | /*D:150 |
502 | * At this point in the journey we used to now wade through the lguest | ||
460 | * devices themselves: net, block and console. Since they're all now virtio | 503 | * devices themselves: net, block and console. Since they're all now virtio |
461 | * devices rather than lguest-specific, I've decided to ignore them. Mostly, | 504 | * devices rather than lguest-specific, I've decided to ignore them. Mostly, |
462 | * they're kind of boring. But this does mean you'll never experience the | 505 | * they're kind of boring. But this does mean you'll never experience the |
463 | * thrill of reading the forbidden love scene buried deep in the block driver. | 506 | * thrill of reading the forbidden love scene buried deep in the block driver. |
464 | * | 507 | * |
465 | * "make Launcher" beckons, where we answer questions like "Where do Guests | 508 | * "make Launcher" beckons, where we answer questions like "Where do Guests |
466 | * come from?", and "What do you do when someone asks for optimization?". */ | 509 | * come from?", and "What do you do when someone asks for optimization?". |
510 | */ | ||
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index 9f9a2953b383..b4d3f7ca554f 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c | |||
@@ -1,8 +1,9 @@ | |||
1 | /*P:200 This contains all the /dev/lguest code, whereby the userspace launcher | 1 | /*P:200 This contains all the /dev/lguest code, whereby the userspace launcher |
2 | * controls and communicates with the Guest. For example, the first write will | 2 | * controls and communicates with the Guest. For example, the first write will |
3 | * tell us the Guest's memory layout, pagetable, entry point and kernel address | 3 | * tell us the Guest's memory layout and entry point. A read will run the |
4 | * offset. A read will run the Guest until something happens, such as a signal | 4 | * Guest until something happens, such as a signal or the Guest doing a NOTIFY |
5 | * or the Guest doing a NOTIFY out to the Launcher. :*/ | 5 | * out to the Launcher. |
6 | :*/ | ||
6 | #include <linux/uaccess.h> | 7 | #include <linux/uaccess.h> |
7 | #include <linux/miscdevice.h> | 8 | #include <linux/miscdevice.h> |
8 | #include <linux/fs.h> | 9 | #include <linux/fs.h> |
@@ -11,14 +12,41 @@ | |||
11 | #include <linux/file.h> | 12 | #include <linux/file.h> |
12 | #include "lg.h" | 13 | #include "lg.h" |
13 | 14 | ||
15 | /*L:056 | ||
16 | * Before we move on, let's jump ahead and look at what the kernel does when | ||
17 | * it needs to look up the eventfds. That will complete our picture of how we | ||
18 | * use RCU. | ||
19 | * | ||
20 | * The notification value is in cpu->pending_notify: we return true if it went | ||
21 | * to an eventfd. | ||
22 | */ | ||
14 | bool send_notify_to_eventfd(struct lg_cpu *cpu) | 23 | bool send_notify_to_eventfd(struct lg_cpu *cpu) |
15 | { | 24 | { |
16 | unsigned int i; | 25 | unsigned int i; |
17 | struct lg_eventfd_map *map; | 26 | struct lg_eventfd_map *map; |
18 | 27 | ||
19 | /* lg->eventfds is RCU-protected */ | 28 | /* |
29 | * This "rcu_read_lock()" helps track when someone is still looking at | ||
30 | * the (RCU-using) eventfds array. It's not actually a lock at all; | ||
31 | * indeed it's a noop in many configurations. (You didn't expect me to | ||
32 | * explain all the RCU secrets here, did you?) | ||
33 | */ | ||
20 | rcu_read_lock(); | 34 | rcu_read_lock(); |
35 | /* | ||
36 | * rcu_dereference is the counter-side of rcu_assign_pointer(); it | ||
37 | * makes sure we don't access the memory pointed to by | ||
38 | * cpu->lg->eventfds before cpu->lg->eventfds is set. Sounds crazy, | ||
39 | * but Alpha allows this! Paul McKenney points out that a really | ||
40 | * aggressive compiler could have the same effect: | ||
41 | * http://lists.ozlabs.org/pipermail/lguest/2009-July/001560.html | ||
42 | * | ||
43 | * So play safe, use rcu_dereference to get the rcu-protected pointer: | ||
44 | */ | ||
21 | map = rcu_dereference(cpu->lg->eventfds); | 45 | map = rcu_dereference(cpu->lg->eventfds); |
46 | /* | ||
47 | * Simple array search: even if they add an eventfd while we do this, | ||
48 | * we'll continue to use the old array and just won't see the new one. | ||
49 | */ | ||
22 | for (i = 0; i < map->num; i++) { | 50 | for (i = 0; i < map->num; i++) { |
23 | if (map->map[i].addr == cpu->pending_notify) { | 51 | if (map->map[i].addr == cpu->pending_notify) { |
24 | eventfd_signal(map->map[i].event, 1); | 52 | eventfd_signal(map->map[i].event, 1); |
@@ -26,19 +54,50 @@ bool send_notify_to_eventfd(struct lg_cpu *cpu) | |||
26 | break; | 54 | break; |
27 | } | 55 | } |
28 | } | 56 | } |
57 | /* We're done with the rcu-protected variable cpu->lg->eventfds. */ | ||
29 | rcu_read_unlock(); | 58 | rcu_read_unlock(); |
59 | |||
60 | /* If we cleared the notification, it's because we found a match. */ | ||
30 | return cpu->pending_notify == 0; | 61 | return cpu->pending_notify == 0; |
31 | } | 62 | } |
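eventfd_signal(..., 1) at the heart of the loop above just adds 1 to the eventfd's counter, which makes the file descriptor readable and wakes whichever Launcher thread is blocked on it. The same mechanism is available to ordinary programs; a minimal single-process demonstration, signalling and reading the same eventfd:

#include <inttypes.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
        int efd = eventfd(0, 0);
        if (efd < 0)
                return 1;

        uint64_t one = 1;
        write(efd, &one, sizeof(one));     /* what signalling the eventfd amounts to */

        uint64_t count;
        read(efd, &count, sizeof(count));  /* the waiting thread wakes up here */
        printf("eventfd delivered count %" PRIu64 "\n", count);

        close(efd);
        return 0;
}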
32 | 63 | ||
64 | /*L:055 | ||
65 | * One of the more tricksy tricks in the Linux Kernel is a technique called | ||
66 | * Read Copy Update. Since one point of lguest is to teach lguest journeyers | ||
67 | * about kernel coding, I use it here. (In case you're curious, other purposes | ||
68 | * include learning about virtualization and instilling a deep appreciation for | ||
69 | * simplicity and puppies). | ||
70 | * | ||
71 | * We keep a simple array which maps LHCALL_NOTIFY values to eventfds, but we | ||
72 | * add new eventfds without ever blocking readers from accessing the array. | ||
73 | * The current Launcher only does this during boot, so that never happens. But | ||
74 | * Read Copy Update is cool, and adding a lock risks damaging even more puppies | ||
75 | * than this code does. | ||
76 | * | ||
77 | * We allocate a brand new one-larger array, copy the old one and add our new | ||
78 | * element. Then we make the lg eventfd pointer point to the new array. | ||
79 | * That's the easy part: now we need to free the old one, but we need to make | ||
80 | * sure no slow CPU somewhere is still looking at it. That's what | ||
81 | * synchronize_rcu does for us: waits until every CPU has indicated that it has | ||
82 | * moved on to know it's no longer using the old one. | ||
83 | * | ||
84 | * If that's unclear, see http://en.wikipedia.org/wiki/Read-copy-update. | ||
85 | */ | ||
33 | static int add_eventfd(struct lguest *lg, unsigned long addr, int fd) | 86 | static int add_eventfd(struct lguest *lg, unsigned long addr, int fd) |
34 | { | 87 | { |
35 | struct lg_eventfd_map *new, *old = lg->eventfds; | 88 | struct lg_eventfd_map *new, *old = lg->eventfds; |
36 | 89 | ||
90 | /* | ||
91 | * We don't allow notifications on value 0 anyway (pending_notify of | ||
92 | * 0 means "nothing pending"). | ||
93 | */ | ||
37 | if (!addr) | 94 | if (!addr) |
38 | return -EINVAL; | 95 | return -EINVAL; |
39 | 96 | ||
40 | /* Replace the old array with the new one, carefully: others can | 97 | /* |
41 | * be accessing it at the same time */ | 98 | * Replace the old array with the new one, carefully: others can |
99 | * be accessing it at the same time. | ||
100 | */ | ||
42 | new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1), | 101 | new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1), |
43 | GFP_KERNEL); | 102 | GFP_KERNEL); |
44 | if (!new) | 103 | if (!new) |
@@ -52,22 +111,41 @@ static int add_eventfd(struct lguest *lg, unsigned long addr, int fd) | |||
52 | new->map[new->num].addr = addr; | 111 | new->map[new->num].addr = addr; |
53 | new->map[new->num].event = eventfd_ctx_fdget(fd); | 112 | new->map[new->num].event = eventfd_ctx_fdget(fd); |
54 | if (IS_ERR(new->map[new->num].event)) { | 113 | if (IS_ERR(new->map[new->num].event)) { |
114 | int err = PTR_ERR(new->map[new->num].event); | ||
55 | kfree(new); | 115 | kfree(new); |
56 | return PTR_ERR(new->map[new->num].event); | 116 | return err; |
57 | } | 117 | } |
58 | new->num++; | 118 | new->num++; |
59 | 119 | ||
60 | /* Now put new one in place. */ | 120 | /* |
121 | * Now put new one in place: rcu_assign_pointer() is a fancy way of | ||
122 | * doing "lg->eventfds = new", but it uses memory barriers to make | ||
123 | * absolutely sure that the contents of "new" written above is nailed | ||
124 | * down before we actually do the assignment. | ||
125 | * | ||
126 | * We have to think about these kinds of things when we're operating on | ||
127 | * live data without locks. | ||
128 | */ | ||
61 | rcu_assign_pointer(lg->eventfds, new); | 129 | rcu_assign_pointer(lg->eventfds, new); |
62 | 130 | ||
63 | /* We're not in a big hurry. Wait until noone's looking at old | 131 | /* |
64 | * version, then delete it. */ | 132 | * We're not in a big hurry. Wait until no one's looking at the old |
133 | * version, then free it. | ||
134 | */ | ||
65 | synchronize_rcu(); | 135 | synchronize_rcu(); |
66 | kfree(old); | 136 | kfree(old); |
67 | 137 | ||
68 | return 0; | 138 | return 0; |
69 | } | 139 | } |
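The RCU commentary above (L:055) is easier to internalize by running the same copy, publish, wait, free dance in userspace. The sketch below uses the liburcu library to mirror add_eventfd(): build a one-larger copy of the array, publish it with rcu_assign_pointer(), wait with synchronize_rcu(), then free the old copy. This is an illustrative assumption rather than lguest code: it presumes liburcu is installed (link with -lurcu), and the names int_array, lookup() and append() are made up for the example.

/* Userspace sketch of the RCU "copy, publish, wait, free" pattern.
 * Build (liburcu assumed): cc rcu_array.c -o rcu_array -lurcu */
#include <urcu.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct int_array {
    unsigned int num;
    int vals[];
};

static struct int_array *my_array;      /* the RCU-protected pointer */

/* Reader: never blocked by a writer, just like send_notify_to_eventfd(). */
static int lookup(unsigned int i)
{
    struct int_array *a;
    int val = -1;

    rcu_read_lock();
    a = rcu_dereference(my_array);
    if (a && i < a->num)
        val = a->vals[i];
    rcu_read_unlock();
    return val;
}

/* Writer: build a one-larger copy, publish it, wait out old readers. */
static void append(int val)
{
    struct int_array *old = my_array, *new;
    unsigned int num = old ? old->num : 0;

    new = malloc(sizeof(*new) + (num + 1) * sizeof(int));
    if (!new)
        return;
    if (old)
        memcpy(new->vals, old->vals, num * sizeof(int));
    new->vals[num] = val;
    new->num = num + 1;

    rcu_assign_pointer(my_array, new);  /* memory-barriered "my_array = new" */
    synchronize_rcu();                  /* every reader has moved on... */
    free(old);                          /* ...so the old array can go */
}

int main(void)
{
    rcu_register_thread();
    append(42);
    printf("element 0 is %d\n", lookup(0));
    rcu_unregister_thread();
    return 0;
}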
70 | 140 | ||
141 | /*L:052 | ||
142 | * Receiving notifications from the Guest is usually done by attaching a | ||
142 | * particular LHCALL_NOTIFY value to an event file descriptor. The eventfd will | ||
144 | * become readable when the Guest does an LHCALL_NOTIFY with that value. | ||
145 | * | ||
146 | * This is really convenient for processing each virtqueue in a separate | ||
147 | * thread. | ||
148 | */ | ||
71 | static int attach_eventfd(struct lguest *lg, const unsigned long __user *input) | 149 | static int attach_eventfd(struct lguest *lg, const unsigned long __user *input) |
72 | { | 150 | { |
73 | unsigned long addr, fd; | 151 | unsigned long addr, fd; |
@@ -79,15 +157,22 @@ static int attach_eventfd(struct lguest *lg, const unsigned long __user *input) | |||
79 | if (get_user(fd, input) != 0) | 157 | if (get_user(fd, input) != 0) |
80 | return -EFAULT; | 158 | return -EFAULT; |
81 | 159 | ||
160 | /* | ||
161 | * Just make sure two callers don't add eventfds at once. We really | ||
162 | * only need to lock against callers adding to the same Guest, so using | ||
163 | * the Big Lguest Lock is overkill. But this is setup, not a fast path. | ||
164 | */ | ||
82 | mutex_lock(&lguest_lock); | 165 | mutex_lock(&lguest_lock); |
83 | err = add_eventfd(lg, addr, fd); | 166 | err = add_eventfd(lg, addr, fd); |
84 | mutex_unlock(&lguest_lock); | 167 | mutex_unlock(&lguest_lock); |
85 | 168 | ||
86 | return 0; | 169 | return err; |
87 | } | 170 | } |
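To see why a per-virtqueue service thread falls out of this so naturally, here is a minimal, self-contained userspace sketch of the eventfd mechanics alone: one thread blocks in read() on an eventfd and wakes when the counter is bumped, which is what eventfd_signal() does from the Host side. Nothing below is lguest-specific; it is plain eventfd(2) plus pthreads.

/* Build: cc eventfd_demo.c -o eventfd_demo -pthread */
#include <sys/eventfd.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* One service thread per eventfd, much like one thread per virtqueue. */
static void *service_thread(void *arg)
{
    int fd = *(int *)arg;
    uint64_t count;

    /* Blocks until the eventfd is signalled; the value read is the number
     * of signals accumulated since the last read. */
    if (read(fd, &count, sizeof(count)) == sizeof(count))
        printf("woken by %llu notification(s)\n", (unsigned long long)count);
    return NULL;
}

int main(void)
{
    pthread_t tid;
    uint64_t one = 1;
    int fd = eventfd(0, 0);

    if (fd < 0)
        return 1;
    pthread_create(&tid, NULL, service_thread, &fd);

    /* Stand-in for the Host's eventfd_signal(): bump the counter. */
    write(fd, &one, sizeof(one));

    pthread_join(tid, NULL);
    close(fd);
    return 0;
}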
88 | 171 | ||
89 | /*L:050 Sending an interrupt is done by writing LHREQ_IRQ and an interrupt | 172 | /*L:050 |
90 | * number to /dev/lguest. */ | 173 | * Sending an interrupt is done by writing LHREQ_IRQ and an interrupt |
174 | * number to /dev/lguest. | ||
175 | */ | ||
91 | static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input) | 176 | static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input) |
92 | { | 177 | { |
93 | unsigned long irq; | 178 | unsigned long irq; |
@@ -97,12 +182,18 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input) | |||
97 | if (irq >= LGUEST_IRQS) | 182 | if (irq >= LGUEST_IRQS) |
98 | return -EINVAL; | 183 | return -EINVAL; |
99 | 184 | ||
185 | /* | ||
186 | * Next time the Guest runs, the core code will see if it can deliver | ||
187 | * this interrupt. | ||
188 | */ | ||
100 | set_interrupt(cpu, irq); | 189 | set_interrupt(cpu, irq); |
101 | return 0; | 190 | return 0; |
102 | } | 191 | } |
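The Launcher side of this is a two-word write. A hedged sketch follows; it assumes an already-open lguest_fd and that the lguest launcher header is on the include path, and the helper name is made up:

#include <linux/lguest_launcher.h>  /* for LHREQ_IRQ */
#include <unistd.h>
#include <err.h>

/* Ask the Host to deliver interrupt "irq" to the Guest. */
static void tell_guest_irq(int lguest_fd, unsigned long irq)
{
    /* The protocol is simply "request code, then its arguments". */
    unsigned long buf[] = { LHREQ_IRQ, irq };

    /* lguest's write() returns 0 on success, so anything else is an error. */
    if (write(lguest_fd, buf, sizeof(buf)) != 0)
        err(1, "Triggering irq %lu", irq);
}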
103 | 192 | ||
104 | /*L:040 Once our Guest is initialized, the Launcher makes it run by reading | 193 | /*L:040 |
105 | * from /dev/lguest. */ | 194 | * Once our Guest is initialized, the Launcher makes it run by reading |
195 | * from /dev/lguest. | ||
196 | */ | ||
106 | static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) | 197 | static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) |
107 | { | 198 | { |
108 | struct lguest *lg = file->private_data; | 199 | struct lguest *lg = file->private_data; |
@@ -138,8 +229,10 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) | |||
138 | return len; | 229 | return len; |
139 | } | 230 | } |
140 | 231 | ||
141 | /* If we returned from read() last time because the Guest sent I/O, | 232 | /* |
142 | * clear the flag. */ | 233 | * If we returned from read() last time because the Guest sent I/O, |
234 | * clear the flag. | ||
235 | */ | ||
143 | if (cpu->pending_notify) | 236 | if (cpu->pending_notify) |
144 | cpu->pending_notify = 0; | 237 | cpu->pending_notify = 0; |
145 | 238 | ||
@@ -147,8 +240,10 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) | |||
147 | return run_guest(cpu, (unsigned long __user *)user); | 240 | return run_guest(cpu, (unsigned long __user *)user); |
148 | } | 241 | } |
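From the Launcher's point of view that read() is its main loop: keep reading, and when the read hands back an address the Guest did an LHCALL_NOTIFY there. The loose sketch below is modelled on, not copied from, Documentation/lguest/lguest.c; handle_notify() is a hypothetical stand-in for the Launcher's device handling and lguest_fd is assumed open.

#include <unistd.h>
#include <errno.h>
#include <stdio.h>

/* Hypothetical: whatever the Launcher does with a notified address. */
static void handle_notify(unsigned long addr)
{
    printf("Guest notified on address %#lx\n", addr);
}

static void run_guest(int lguest_fd)
{
    for (;;) {
        unsigned long notify_addr;
        ssize_t readval;

        /* Reading runs the Guest until it needs the Launcher's attention;
         * the offset doubles as the CPU id (always 0 for now). */
        readval = pread(lguest_fd, &notify_addr, sizeof(notify_addr), 0);

        if (readval == sizeof(notify_addr)) {
            /* The Guest did an LHCALL_NOTIFY on this address. */
            handle_notify(notify_addr);
        } else if (readval < 0 && errno != EINTR) {
            /* The Guest died or something else went wrong; give up. */
            break;
        }
    }
}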
149 | 242 | ||
150 | /*L:025 This actually initializes a CPU. For the moment, a Guest is only | 243 | /*L:025 |
151 | * uniprocessor, so "id" is always 0. */ | 244 | * This actually initializes a CPU. For the moment, a Guest is only |
245 | * uniprocessor, so "id" is always 0. | ||
246 | */ | ||
152 | static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) | 247 | static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) |
153 | { | 248 | { |
154 | /* We have a limited number of CPUs in the lguest struct. */ | 249 | /* We have a limited number of CPUs in the lguest struct. */ |
@@ -163,8 +258,10 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) | |||
163 | /* Each CPU has a timer it can set. */ | 258 | /* Each CPU has a timer it can set. */ |
164 | init_clockdev(cpu); | 259 | init_clockdev(cpu); |
165 | 260 | ||
166 | /* We need a complete page for the Guest registers: they are accessible | 261 | /* |
167 | * to the Guest and we can only grant it access to whole pages. */ | 262 | * We need a complete page for the Guest registers: they are accessible |
263 | * to the Guest and we can only grant it access to whole pages. | ||
264 | */ | ||
168 | cpu->regs_page = get_zeroed_page(GFP_KERNEL); | 265 | cpu->regs_page = get_zeroed_page(GFP_KERNEL); |
169 | if (!cpu->regs_page) | 266 | if (!cpu->regs_page) |
170 | return -ENOMEM; | 267 | return -ENOMEM; |
@@ -172,29 +269,38 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) | |||
172 | /* We actually put the registers at the bottom of the page. */ | 269 | /* We actually put the registers at the bottom of the page. */ |
173 | cpu->regs = (void *)cpu->regs_page + PAGE_SIZE - sizeof(*cpu->regs); | 270 | cpu->regs = (void *)cpu->regs_page + PAGE_SIZE - sizeof(*cpu->regs); |
174 | 271 | ||
175 | /* Now we initialize the Guest's registers, handing it the start | 272 | /* |
176 | * address. */ | 273 | * Now we initialize the Guest's registers, handing it the start |
274 | * address. | ||
275 | */ | ||
177 | lguest_arch_setup_regs(cpu, start_ip); | 276 | lguest_arch_setup_regs(cpu, start_ip); |
178 | 277 | ||
179 | /* We keep a pointer to the Launcher task (ie. current task) for when | 278 | /* |
180 | * other Guests want to wake this one (eg. console input). */ | 279 | * We keep a pointer to the Launcher task (ie. current task) for when |
280 | * other Guests want to wake this one (eg. console input). | ||
281 | */ | ||
181 | cpu->tsk = current; | 282 | cpu->tsk = current; |
182 | 283 | ||
183 | /* We need to keep a pointer to the Launcher's memory map, because if | 284 | /* |
285 | * We need to keep a pointer to the Launcher's memory map, because if | ||
184 | * the Launcher dies we need to clean it up. If we don't keep a | 286 | * the Launcher dies we need to clean it up. If we don't keep a |
185 | * reference, it is destroyed before close() is called. */ | 287 | * reference, it is destroyed before close() is called. |
288 | */ | ||
186 | cpu->mm = get_task_mm(cpu->tsk); | 289 | cpu->mm = get_task_mm(cpu->tsk); |
187 | 290 | ||
188 | /* We remember which CPU's pages this Guest used last, for optimization | 291 | /* |
189 | * when the same Guest runs on the same CPU twice. */ | 292 | * We remember which CPU's pages this Guest used last, for optimization |
293 | * when the same Guest runs on the same CPU twice. | ||
294 | */ | ||
190 | cpu->last_pages = NULL; | 295 | cpu->last_pages = NULL; |
191 | 296 | ||
192 | /* No error == success. */ | 297 | /* No error == success. */ |
193 | return 0; | 298 | return 0; |
194 | } | 299 | } |
195 | 300 | ||
196 | /*L:020 The initialization write supplies 3 pointer sized (32 or 64 bit) | 301 | /*L:020 |
197 | * values (in addition to the LHREQ_INITIALIZE value). These are: | 302 | * The initialization write supplies 3 pointer sized (32 or 64 bit) values (in |
303 | * addition to the LHREQ_INITIALIZE value). These are: | ||
198 | * | 304 | * |
199 | * base: The start of the Guest-physical memory inside the Launcher memory. | 305 | * base: The start of the Guest-physical memory inside the Launcher memory. |
200 | * | 306 | * |
@@ -206,14 +312,15 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) | |||
206 | */ | 312 | */ |
207 | static int initialize(struct file *file, const unsigned long __user *input) | 313 | static int initialize(struct file *file, const unsigned long __user *input) |
208 | { | 314 | { |
209 | /* "struct lguest" contains everything we (the Host) know about a | 315 | /* "struct lguest" contains all we (the Host) know about a Guest. */ |
210 | * Guest. */ | ||
211 | struct lguest *lg; | 316 | struct lguest *lg; |
212 | int err; | 317 | int err; |
213 | unsigned long args[3]; | 318 | unsigned long args[3]; |
214 | 319 | ||
215 | /* We grab the Big Lguest lock, which protects against multiple | 320 | /* |
216 | * simultaneous initializations. */ | 321 | * We grab the Big Lguest lock, which protects against multiple |
322 | * simultaneous initializations. | ||
323 | */ | ||
217 | mutex_lock(&lguest_lock); | 324 | mutex_lock(&lguest_lock); |
218 | /* You can't initialize twice! Close the device and start again... */ | 325 | /* You can't initialize twice! Close the device and start again... */ |
219 | if (file->private_data) { | 326 | if (file->private_data) { |
@@ -248,8 +355,10 @@ static int initialize(struct file *file, const unsigned long __user *input) | |||
248 | if (err) | 355 | if (err) |
249 | goto free_eventfds; | 356 | goto free_eventfds; |
250 | 357 | ||
251 | /* Initialize the Guest's shadow page tables, using the toplevel | 358 | /* |
252 | * address the Launcher gave us. This allocates memory, so can fail. */ | 359 | * Initialize the Guest's shadow page tables, using the toplevel |
360 | * address the Launcher gave us. This allocates memory, so can fail. | ||
361 | */ | ||
253 | err = init_guest_pagetable(lg); | 362 | err = init_guest_pagetable(lg); |
254 | if (err) | 363 | if (err) |
255 | goto free_regs; | 364 | goto free_regs; |
@@ -274,20 +383,24 @@ unlock: | |||
274 | return err; | 383 | return err; |
275 | } | 384 | } |
276 | 385 | ||
277 | /*L:010 The first operation the Launcher does must be a write. All writes | 386 | /*L:010 |
387 | * The first operation the Launcher does must be a write. All writes | ||
278 | * start with an unsigned long number: for the first write this must be | 388 | * start with an unsigned long number: for the first write this must be |
279 | * LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use | 389 | * LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use |
280 | * writes of other values to send interrupts. | 390 | * writes of other values to send interrupts or set up receipt of notifications. |
281 | * | 391 | * |
282 | * Note that we overload the "offset" in the /dev/lguest file to indicate what | 392 | * Note that we overload the "offset" in the /dev/lguest file to indicate what |
283 | * CPU number we're dealing with. Currently this is always 0, since we only | 393 | * CPU number we're dealing with. Currently this is always 0 since we only |
284 | * support uniprocessor Guests, but you can see the beginnings of SMP support | 394 | * support uniprocessor Guests, but you can see the beginnings of SMP support |
285 | * here. */ | 395 | * here. |
396 | */ | ||
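Before the write() implementation itself, a hedged Launcher-side sketch of that very first write: LHREQ_INITIALIZE followed by the three pointer-sized values the L:020 comment describes, of which only the Guest memory base is shown in this hunk. The meanings given to the second and third values below are this editor's reading of that description and should be treated as assumptions, as should the function name and error handling.

#include <linux/lguest_launcher.h>  /* for LHREQ_INITIALIZE */
#include <fcntl.h>
#include <unistd.h>
#include <err.h>

/* Illustrative only: see the caveat above about the 2nd and 3rd values. */
static int setup_guest(void *guest_base, unsigned long pfn_limit,
                       unsigned long start_ip)
{
    int lguest_fd = open("/dev/lguest", O_RDWR);
    unsigned long args[] = {
        LHREQ_INITIALIZE,
        (unsigned long)guest_base,  /* base of Guest-physical memory */
        pfn_limit,                  /* assumed: highest allowed page frame */
        start_ip                    /* assumed: where the Guest begins */
    };

    if (lguest_fd < 0)
        err(1, "opening /dev/lguest");
    /* As with the other lguest writes, 0 means success. */
    if (write(lguest_fd, args, sizeof(args)) != 0)
        err(1, "LHREQ_INITIALIZE failed");
    return lguest_fd;
}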
286 | static ssize_t write(struct file *file, const char __user *in, | 397 | static ssize_t write(struct file *file, const char __user *in, |
287 | size_t size, loff_t *off) | 398 | size_t size, loff_t *off) |
288 | { | 399 | { |
289 | /* Once the Guest is initialized, we hold the "struct lguest" in the | 400 | /* |
290 | * file private data. */ | 401 | * Once the Guest is initialized, we hold the "struct lguest" in the |
402 | * file private data. | ||
403 | */ | ||
291 | struct lguest *lg = file->private_data; | 404 | struct lguest *lg = file->private_data; |
292 | const unsigned long __user *input = (const unsigned long __user *)in; | 405 | const unsigned long __user *input = (const unsigned long __user *)in; |
293 | unsigned long req; | 406 | unsigned long req; |
@@ -322,13 +435,15 @@ static ssize_t write(struct file *file, const char __user *in, | |||
322 | } | 435 | } |
323 | } | 436 | } |
324 | 437 | ||
325 | /*L:060 The final piece of interface code is the close() routine. It reverses | 438 | /*L:060 |
439 | * The final piece of interface code is the close() routine. It reverses | ||
326 | * everything done in initialize(). This is usually called because the | 440 | * everything done in initialize(). This is usually called because the |
327 | * Launcher exited. | 441 | * Launcher exited. |
328 | * | 442 | * |
329 | * Note that the close routine returns 0 or a negative error number: it can't | 443 | * Note that the close routine returns 0 or a negative error number: it can't |
330 | * really fail, but it can whine. I blame Sun for this wart, and K&R C for | 444 | * really fail, but it can whine. I blame Sun for this wart, and K&R C for |
331 | * letting them do it. :*/ | 445 | * letting them do it. |
446 | :*/ | ||
332 | static int close(struct inode *inode, struct file *file) | 447 | static int close(struct inode *inode, struct file *file) |
333 | { | 448 | { |
334 | struct lguest *lg = file->private_data; | 449 | struct lguest *lg = file->private_data; |
@@ -338,8 +453,10 @@ static int close(struct inode *inode, struct file *file) | |||
338 | if (!lg) | 453 | if (!lg) |
339 | return 0; | 454 | return 0; |
340 | 455 | ||
341 | /* We need the big lock, to protect from inter-guest I/O and other | 456 | /* |
342 | * Launchers initializing guests. */ | 457 | * We need the big lock, to protect from inter-guest I/O and other |
458 | * Launchers initializing guests. | ||
459 | */ | ||
343 | mutex_lock(&lguest_lock); | 460 | mutex_lock(&lguest_lock); |
344 | 461 | ||
345 | /* Free up the shadow page tables for the Guest. */ | 462 | /* Free up the shadow page tables for the Guest. */ |
@@ -350,8 +467,10 @@ static int close(struct inode *inode, struct file *file) | |||
350 | hrtimer_cancel(&lg->cpus[i].hrt); | 467 | hrtimer_cancel(&lg->cpus[i].hrt); |
351 | /* We can free up the register page we allocated. */ | 468 | /* We can free up the register page we allocated. */ |
352 | free_page(lg->cpus[i].regs_page); | 469 | free_page(lg->cpus[i].regs_page); |
353 | /* Now all the memory cleanups are done, it's safe to release | 470 | /* |
354 | * the Launcher's memory management structure. */ | 471 | * Now all the memory cleanups are done, it's safe to release |
472 | * the Launcher's memory management structure. | ||
473 | */ | ||
355 | mmput(lg->cpus[i].mm); | 474 | mmput(lg->cpus[i].mm); |
356 | } | 475 | } |
357 | 476 | ||
@@ -360,8 +479,10 @@ static int close(struct inode *inode, struct file *file) | |||
360 | eventfd_ctx_put(lg->eventfds->map[i].event); | 479 | eventfd_ctx_put(lg->eventfds->map[i].event); |
361 | kfree(lg->eventfds); | 480 | kfree(lg->eventfds); |
362 | 481 | ||
363 | /* If lg->dead doesn't contain an error code it will be NULL or a | 482 | /* |
364 | * kmalloc()ed string, either of which is ok to hand to kfree(). */ | 483 | * If lg->dead doesn't contain an error code it will be NULL or a |
484 | * kmalloc()ed string, either of which is ok to hand to kfree(). | ||
485 | */ | ||
365 | if (!IS_ERR(lg->dead)) | 486 | if (!IS_ERR(lg->dead)) |
366 | kfree(lg->dead); | 487 | kfree(lg->dead); |
367 | /* Free the memory allocated to the lguest_struct */ | 488 | /* Free the memory allocated to the lguest_struct */ |
@@ -385,7 +506,8 @@ static int close(struct inode *inode, struct file *file) | |||
385 | * | 506 | * |
386 | * We begin our understanding with the Host kernel interface which the Launcher | 507 | * We begin our understanding with the Host kernel interface which the Launcher |
387 | * uses: reading and writing a character device called /dev/lguest. All the | 508 | * uses: reading and writing a character device called /dev/lguest. All the |
388 | * work happens in the read(), write() and close() routines: */ | 509 | * work happens in the read(), write() and close() routines: |
510 | */ | ||
389 | static struct file_operations lguest_fops = { | 511 | static struct file_operations lguest_fops = { |
390 | .owner = THIS_MODULE, | 512 | .owner = THIS_MODULE, |
391 | .release = close, | 513 | .release = close, |
@@ -393,8 +515,10 @@ static struct file_operations lguest_fops = { | |||
393 | .read = read, | 515 | .read = read, |
394 | }; | 516 | }; |
395 | 517 | ||
396 | /* This is a textbook example of a "misc" character device. Populate a "struct | 518 | /* |
397 | * miscdevice" and register it with misc_register(). */ | 519 | * This is a textbook example of a "misc" character device. Populate a "struct |
520 | * miscdevice" and register it with misc_register(). | ||
521 | */ | ||
398 | static struct miscdevice lguest_dev = { | 522 | static struct miscdevice lguest_dev = { |
399 | .minor = MISC_DYNAMIC_MINOR, | 523 | .minor = MISC_DYNAMIC_MINOR, |
400 | .name = "lguest", | 524 | .name = "lguest", |
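Since the comment above calls this a textbook misc device, here is a generic, self-contained sketch of that textbook pattern, using a throwaway "demo" device rather than lguest's actual init path: fill in a struct miscdevice and register it from the module init hook.

#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>

static const struct file_operations demo_fops = {
    .owner = THIS_MODULE,
};

static struct miscdevice demo_dev = {
    .minor = MISC_DYNAMIC_MINOR,    /* let the misc core pick a minor */
    .name  = "demo",                /* shows up as /dev/demo */
    .fops  = &demo_fops,
};

static int __init demo_init(void)
{
    /* Registration is what makes the character device node appear. */
    return misc_register(&demo_dev);
}

static void __exit demo_exit(void)
{
    misc_deregister(&demo_dev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");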
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index a6fe1abda240..a8d0aee3bc0e 100644 --- a/drivers/lguest/page_tables.c +++ b/drivers/lguest/page_tables.c | |||
@@ -1,9 +1,11 @@ | |||
1 | /*P:700 The pagetable code, on the other hand, still shows the scars of | 1 | /*P:700 |
2 | * The pagetable code, on the other hand, still shows the scars of | ||
2 | * previous encounters. It's functional, and as neat as it can be in the | 3 | * previous encounters. It's functional, and as neat as it can be in the |
3 | * circumstances, but be wary, for these things are subtle and break easily. | 4 | * circumstances, but be wary, for these things are subtle and break easily. |
4 | * The Guest provides a virtual to physical mapping, but we can neither trust | 5 | * The Guest provides a virtual to physical mapping, but we can neither trust |
5 | * it nor use it: we verify and convert it here then point the CPU to the | 6 | * it nor use it: we verify and convert it here then point the CPU to the |
6 | * converted Guest pages when running the Guest. :*/ | 7 | * converted Guest pages when running the Guest. |
8 | :*/ | ||
7 | 9 | ||
8 | /* Copyright (C) Rusty Russell IBM Corporation 2006. | 10 | /* Copyright (C) Rusty Russell IBM Corporation 2006. |
9 | * GPL v2 and any later version */ | 11 | * GPL v2 and any later version */ |
@@ -17,18 +19,20 @@ | |||
17 | #include <asm/bootparam.h> | 19 | #include <asm/bootparam.h> |
18 | #include "lg.h" | 20 | #include "lg.h" |
19 | 21 | ||
20 | /*M:008 We hold reference to pages, which prevents them from being swapped. | 22 | /*M:008 |
23 | * We hold a reference to pages, which prevents them from being swapped. | ||
21 | * It'd be nice to have a callback in the "struct mm_struct" when Linux wants | 24 | * It'd be nice to have a callback in the "struct mm_struct" when Linux wants |
22 | * to swap out. If we had this, and a shrinker callback to trim PTE pages, we | 25 | * to swap out. If we had this, and a shrinker callback to trim PTE pages, we |
23 | * could probably consider launching Guests as non-root. :*/ | 26 | * could probably consider launching Guests as non-root. |
27 | :*/ | ||
24 | 28 | ||
25 | /*H:300 | 29 | /*H:300 |
26 | * The Page Table Code | 30 | * The Page Table Code |
27 | * | 31 | * |
28 | * We use two-level page tables for the Guest. If you're not entirely | 32 | * We use two-level page tables for the Guest, or three-level with PAE. If |
29 | * comfortable with virtual addresses, physical addresses and page tables then | 33 | * you're not entirely comfortable with virtual addresses, physical addresses |
30 | * I recommend you review arch/x86/lguest/boot.c's "Page Table Handling" (with | 34 | * and page tables then I recommend you review arch/x86/lguest/boot.c's "Page |
31 | * diagrams!). | 35 | * Table Handling" (with diagrams!). |
32 | * | 36 | * |
33 | * The Guest keeps page tables, but we maintain the actual ones here: these are | 37 | * The Guest keeps page tables, but we maintain the actual ones here: these are |
34 | * called "shadow" page tables. Which is a very Guest-centric name: these are | 38 | * called "shadow" page tables. Which is a very Guest-centric name: these are |
@@ -45,16 +49,18 @@ | |||
45 | * (v) Flushing (throwing away) page tables, | 49 | * (v) Flushing (throwing away) page tables, |
46 | * (vi) Mapping the Switcher when the Guest is about to run, | 50 | * (vi) Mapping the Switcher when the Guest is about to run, |
47 | * (vii) Setting up the page tables initially. | 51 | * (vii) Setting up the page tables initially. |
48 | :*/ | 52 | :*/ |
49 | 53 | ||
50 | 54 | /* | |
51 | /* 1024 entries in a page table page maps 1024 pages: 4MB. The Switcher is | 55 | * The Switcher uses the complete top PTE page. That's 1024 PTE entries (4MB) |
52 | * conveniently placed at the top 4MB, so it uses a separate, complete PTE | 56 | * or 512 PTE entries with PAE (2MB). |
53 | * page. */ | 57 | */ |
54 | #define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1) | 58 | #define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1) |
55 | 59 | ||
56 | /* For PAE we need the PMD index as well. We use the last 2MB, so we | 60 | /* |
57 | * will need the last pmd entry of the last pmd page. */ | 61 | * For PAE we need the PMD index as well. We use the last 2MB, so we |
62 | * will need the last pmd entry of the last pmd page. | ||
63 | */ | ||
58 | #ifdef CONFIG_X86_PAE | 64 | #ifdef CONFIG_X86_PAE |
59 | #define SWITCHER_PMD_INDEX (PTRS_PER_PMD - 1) | 65 | #define SWITCHER_PMD_INDEX (PTRS_PER_PMD - 1) |
60 | #define RESERVE_MEM 2U | 66 | #define RESERVE_MEM 2U |
@@ -64,14 +70,18 @@ | |||
64 | #define CHECK_GPGD_MASK _PAGE_TABLE | 70 | #define CHECK_GPGD_MASK _PAGE_TABLE |
65 | #endif | 71 | #endif |
66 | 72 | ||
67 | /* We actually need a separate PTE page for each CPU. Remember that after the | 73 | /* |
74 | * We actually need a separate PTE page for each CPU. Remember that after the | ||
68 | * Switcher code itself comes two pages for each CPU, and we don't want this | 75 | * Switcher code itself comes two pages for each CPU, and we don't want this |
69 | * CPU's guest to see the pages of any other CPU. */ | 76 | * CPU's guest to see the pages of any other CPU. |
77 | */ | ||
70 | static DEFINE_PER_CPU(pte_t *, switcher_pte_pages); | 78 | static DEFINE_PER_CPU(pte_t *, switcher_pte_pages); |
71 | #define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu) | 79 | #define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu) |
72 | 80 | ||
73 | /*H:320 The page table code is curly enough to need helper functions to keep it | 81 | /*H:320 |
74 | * clear and clean. | 82 | * The page table code is curly enough to need helper functions to keep it |
83 | * clear and clean. The kernel itself provides many of them; that's one | ||
84 | * advantage of insisting the Guest and Host use the same CONFIG_PAE setting. | ||
75 | * | 85 | * |
76 | * There are two functions which return pointers to the shadow (aka "real") | 86 | * There are two functions which return pointers to the shadow (aka "real") |
77 | * page tables. | 87 | * page tables. |
@@ -79,7 +89,8 @@ static DEFINE_PER_CPU(pte_t *, switcher_pte_pages); | |||
79 | * spgd_addr() takes the virtual address and returns a pointer to the top-level | 89 | * spgd_addr() takes the virtual address and returns a pointer to the top-level |
80 | * page directory entry (PGD) for that address. Since we keep track of several | 90 | * page directory entry (PGD) for that address. Since we keep track of several |
81 | * page tables, the "i" argument tells us which one we're interested in (it's | 91 | * page tables, the "i" argument tells us which one we're interested in (it's |
82 | * usually the current one). */ | 92 | * usually the current one). |
93 | */ | ||
83 | static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr) | 94 | static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr) |
84 | { | 95 | { |
85 | unsigned int index = pgd_index(vaddr); | 96 | unsigned int index = pgd_index(vaddr); |
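For readers who want the index arithmetic behind pgd_index() and friends spelled out, here is a standalone sketch of the classic two-level (non-PAE) i386 split: 10 bits of PGD index, 10 bits of PTE index, 12 bits of page offset. The constants are written out rather than pulled from kernel headers, and the example address is arbitrary.

#include <stdio.h>

#define PAGE_SHIFT   12             /* 4096-byte pages */
#define PGDIR_SHIFT  22             /* each PGD entry covers 4MB */
#define PTRS_PER_PTE 1024

int main(void)
{
    unsigned long vaddr = 0xc0123456;   /* arbitrary example address */

    unsigned long pgd = vaddr >> PGDIR_SHIFT;
    unsigned long pte = (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
    unsigned long off = vaddr & ((1UL << PAGE_SHIFT) - 1);

    printf("%#lx -> pgd index %lu, pte index %lu, page offset %#lx\n",
           vaddr, pgd, pte, off);
    return 0;
}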
@@ -96,9 +107,11 @@ static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr) | |||
96 | } | 107 | } |
97 | 108 | ||
98 | #ifdef CONFIG_X86_PAE | 109 | #ifdef CONFIG_X86_PAE |
99 | /* This routine then takes the PGD entry given above, which contains the | 110 | /* |
111 | * This routine then takes the PGD entry given above, which contains the | ||
100 | * address of the PMD page. It then returns a pointer to the PMD entry for the | 112 | * address of the PMD page. It then returns a pointer to the PMD entry for the |
101 | * given address. */ | 113 | * given address. |
114 | */ | ||
102 | static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) | 115 | static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) |
103 | { | 116 | { |
104 | unsigned int index = pmd_index(vaddr); | 117 | unsigned int index = pmd_index(vaddr); |
@@ -119,9 +132,11 @@ static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) | |||
119 | } | 132 | } |
120 | #endif | 133 | #endif |
121 | 134 | ||
122 | /* This routine then takes the page directory entry returned above, which | 135 | /* |
136 | * This routine then takes the page directory entry returned above, which | ||
123 | * contains the address of the page table entry (PTE) page. It then returns a | 137 | * contains the address of the page table entry (PTE) page. It then returns a |
124 | * pointer to the PTE entry for the given address. */ | 138 | * pointer to the PTE entry for the given address. |
139 | */ | ||
125 | static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) | 140 | static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) |
126 | { | 141 | { |
127 | #ifdef CONFIG_X86_PAE | 142 | #ifdef CONFIG_X86_PAE |
@@ -139,8 +154,10 @@ static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) | |||
139 | return &page[pte_index(vaddr)]; | 154 | return &page[pte_index(vaddr)]; |
140 | } | 155 | } |
141 | 156 | ||
142 | /* These two functions just like the above two, except they access the Guest | 157 | /* |
143 | * page tables. Hence they return a Guest address. */ | 158 | * These functions are just like the above two, except they access the Guest |
159 | * page tables. Hence they return a Guest address. | ||
160 | */ | ||
144 | static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr) | 161 | static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr) |
145 | { | 162 | { |
146 | unsigned int index = vaddr >> (PGDIR_SHIFT); | 163 | unsigned int index = vaddr >> (PGDIR_SHIFT); |
@@ -148,6 +165,7 @@ static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr) | |||
148 | } | 165 | } |
149 | 166 | ||
150 | #ifdef CONFIG_X86_PAE | 167 | #ifdef CONFIG_X86_PAE |
168 | /* Follow the PGD to the PMD. */ | ||
151 | static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr) | 169 | static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr) |
152 | { | 170 | { |
153 | unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT; | 171 | unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT; |
@@ -155,6 +173,7 @@ static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr) | |||
155 | return gpage + pmd_index(vaddr) * sizeof(pmd_t); | 173 | return gpage + pmd_index(vaddr) * sizeof(pmd_t); |
156 | } | 174 | } |
157 | 175 | ||
176 | /* Follow the PMD to the PTE. */ | ||
158 | static unsigned long gpte_addr(struct lg_cpu *cpu, | 177 | static unsigned long gpte_addr(struct lg_cpu *cpu, |
159 | pmd_t gpmd, unsigned long vaddr) | 178 | pmd_t gpmd, unsigned long vaddr) |
160 | { | 179 | { |
@@ -164,6 +183,7 @@ static unsigned long gpte_addr(struct lg_cpu *cpu, | |||
164 | return gpage + pte_index(vaddr) * sizeof(pte_t); | 183 | return gpage + pte_index(vaddr) * sizeof(pte_t); |
165 | } | 184 | } |
166 | #else | 185 | #else |
186 | /* Follow the PGD to the PTE (no mid-level for !PAE). */ | ||
167 | static unsigned long gpte_addr(struct lg_cpu *cpu, | 187 | static unsigned long gpte_addr(struct lg_cpu *cpu, |
168 | pgd_t gpgd, unsigned long vaddr) | 188 | pgd_t gpgd, unsigned long vaddr) |
169 | { | 189 | { |
@@ -175,17 +195,21 @@ static unsigned long gpte_addr(struct lg_cpu *cpu, | |||
175 | #endif | 195 | #endif |
176 | /*:*/ | 196 | /*:*/ |
177 | 197 | ||
178 | /*M:014 get_pfn is slow: we could probably try to grab batches of pages here as | 198 | /*M:014 |
179 | * an optimization (ie. pre-faulting). :*/ | 199 | * get_pfn is slow: we could probably try to grab batches of pages here as |
200 | * an optimization (ie. pre-faulting). | ||
201 | :*/ | ||
180 | 202 | ||
181 | /*H:350 This routine takes a page number given by the Guest and converts it to | 203 | /*H:350 |
204 | * This routine takes a page number given by the Guest and converts it to | ||
182 | * an actual, physical page number. It can fail for several reasons: the | 205 | * an actual, physical page number. It can fail for several reasons: the |
183 | * virtual address might not be mapped by the Launcher, the write flag is set | 206 | * virtual address might not be mapped by the Launcher, the write flag is set |
184 | * and the page is read-only, or the write flag was set and the page was | 207 | * and the page is read-only, or the write flag was set and the page was |
185 | * shared so had to be copied, but we ran out of memory. | 208 | * shared so had to be copied, but we ran out of memory. |
186 | * | 209 | * |
187 | * This holds a reference to the page, so release_pte() is careful to put that | 210 | * This holds a reference to the page, so release_pte() is careful to put that |
188 | * back. */ | 211 | * back. |
212 | */ | ||
189 | static unsigned long get_pfn(unsigned long virtpfn, int write) | 213 | static unsigned long get_pfn(unsigned long virtpfn, int write) |
190 | { | 214 | { |
191 | struct page *page; | 215 | struct page *page; |
@@ -198,33 +222,41 @@ static unsigned long get_pfn(unsigned long virtpfn, int write) | |||
198 | return -1UL; | 222 | return -1UL; |
199 | } | 223 | } |
200 | 224 | ||
201 | /*H:340 Converting a Guest page table entry to a shadow (ie. real) page table | 225 | /*H:340 |
226 | * Converting a Guest page table entry to a shadow (ie. real) page table | ||
202 | * entry can be a little tricky. The flags are (almost) the same, but the | 227 | * entry can be a little tricky. The flags are (almost) the same, but the |
203 | * Guest PTE contains a virtual page number: the CPU needs the real page | 228 | * Guest PTE contains a virtual page number: the CPU needs the real page |
204 | * number. */ | 229 | * number. |
230 | */ | ||
205 | static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write) | 231 | static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write) |
206 | { | 232 | { |
207 | unsigned long pfn, base, flags; | 233 | unsigned long pfn, base, flags; |
208 | 234 | ||
209 | /* The Guest sets the global flag, because it thinks that it is using | 235 | /* |
236 | * The Guest sets the global flag, because it thinks that it is using | ||
210 | * PGE. We only told it to use PGE so it would tell us whether it was | 237 | * PGE. We only told it to use PGE so it would tell us whether it was |
211 | * flushing a kernel mapping or a userspace mapping. We don't actually | 238 | * flushing a kernel mapping or a userspace mapping. We don't actually |
212 | * use the global bit, so throw it away. */ | 239 | * use the global bit, so throw it away. |
240 | */ | ||
213 | flags = (pte_flags(gpte) & ~_PAGE_GLOBAL); | 241 | flags = (pte_flags(gpte) & ~_PAGE_GLOBAL); |
214 | 242 | ||
215 | /* The Guest's pages are offset inside the Launcher. */ | 243 | /* The Guest's pages are offset inside the Launcher. */ |
216 | base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE; | 244 | base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE; |
217 | 245 | ||
218 | /* We need a temporary "unsigned long" variable to hold the answer from | 246 | /* |
247 | * We need a temporary "unsigned long" variable to hold the answer from | ||
219 | * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't | 248 | * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't |
220 | * fit in spte.pfn. get_pfn() finds the real physical number of the | 249 | * fit in spte.pfn. get_pfn() finds the real physical number of the |
221 | * page, given the virtual number. */ | 250 | * page, given the virtual number. |
251 | */ | ||
222 | pfn = get_pfn(base + pte_pfn(gpte), write); | 252 | pfn = get_pfn(base + pte_pfn(gpte), write); |
223 | if (pfn == -1UL) { | 253 | if (pfn == -1UL) { |
224 | kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte)); | 254 | kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte)); |
225 | /* When we destroy the Guest, we'll go through the shadow page | 255 | /* |
256 | * When we destroy the Guest, we'll go through the shadow page | ||
226 | * tables and release_pte() them. Make sure we don't think | 257 | * tables and release_pte() them. Make sure we don't think |
227 | * this one is valid! */ | 258 | * this one is valid! |
259 | */ | ||
228 | flags = 0; | 260 | flags = 0; |
229 | } | 261 | } |
230 | /* Now we assemble our shadow PTE from the page number and flags. */ | 262 | /* Now we assemble our shadow PTE from the page number and flags. */ |
@@ -234,8 +266,10 @@ static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write) | |||
234 | /*H:460 And to complete the chain, release_pte() looks like this: */ | 266 | /*H:460 And to complete the chain, release_pte() looks like this: */ |
235 | static void release_pte(pte_t pte) | 267 | static void release_pte(pte_t pte) |
236 | { | 268 | { |
237 | /* Remember that get_user_pages_fast() took a reference to the page, in | 269 | /* |
238 | * get_pfn()? We have to put it back now. */ | 270 | * Remember that get_user_pages_fast() took a reference to the page, in |
271 | * get_pfn()? We have to put it back now. | ||
272 | */ | ||
239 | if (pte_flags(pte) & _PAGE_PRESENT) | 273 | if (pte_flags(pte) & _PAGE_PRESENT) |
240 | put_page(pte_page(pte)); | 274 | put_page(pte_page(pte)); |
241 | } | 275 | } |
@@ -273,7 +307,8 @@ static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd) | |||
273 | * and return to the Guest without it knowing. | 307 | * and return to the Guest without it knowing. |
274 | * | 308 | * |
275 | * If we fixed up the fault (ie. we mapped the address), this routine returns | 309 | * If we fixed up the fault (ie. we mapped the address), this routine returns |
276 | * true. Otherwise, it was a real fault and we need to tell the Guest. */ | 310 | * true. Otherwise, it was a real fault and we need to tell the Guest. |
311 | */ | ||
277 | bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) | 312 | bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) |
278 | { | 313 | { |
279 | pgd_t gpgd; | 314 | pgd_t gpgd; |
@@ -282,6 +317,7 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) | |||
282 | pte_t gpte; | 317 | pte_t gpte; |
283 | pte_t *spte; | 318 | pte_t *spte; |
284 | 319 | ||
320 | /* Mid level for PAE. */ | ||
285 | #ifdef CONFIG_X86_PAE | 321 | #ifdef CONFIG_X86_PAE |
286 | pmd_t *spmd; | 322 | pmd_t *spmd; |
287 | pmd_t gpmd; | 323 | pmd_t gpmd; |
@@ -298,22 +334,26 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) | |||
298 | if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) { | 334 | if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) { |
299 | /* No shadow entry: allocate a new shadow PTE page. */ | 335 | /* No shadow entry: allocate a new shadow PTE page. */ |
300 | unsigned long ptepage = get_zeroed_page(GFP_KERNEL); | 336 | unsigned long ptepage = get_zeroed_page(GFP_KERNEL); |
301 | /* This is not really the Guest's fault, but killing it is | 337 | /* |
302 | * simple for this corner case. */ | 338 | * This is not really the Guest's fault, but killing it is |
339 | * simple for this corner case. | ||
340 | */ | ||
303 | if (!ptepage) { | 341 | if (!ptepage) { |
304 | kill_guest(cpu, "out of memory allocating pte page"); | 342 | kill_guest(cpu, "out of memory allocating pte page"); |
305 | return false; | 343 | return false; |
306 | } | 344 | } |
307 | /* We check that the Guest pgd is OK. */ | 345 | /* We check that the Guest pgd is OK. */ |
308 | check_gpgd(cpu, gpgd); | 346 | check_gpgd(cpu, gpgd); |
309 | /* And we copy the flags to the shadow PGD entry. The page | 347 | /* |
310 | * number in the shadow PGD is the page we just allocated. */ | 348 | * And we copy the flags to the shadow PGD entry. The page |
349 | * number in the shadow PGD is the page we just allocated. | ||
350 | */ | ||
311 | set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd))); | 351 | set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd))); |
312 | } | 352 | } |
313 | 353 | ||
314 | #ifdef CONFIG_X86_PAE | 354 | #ifdef CONFIG_X86_PAE |
315 | gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); | 355 | gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); |
316 | /* middle level not present? We can't map it in. */ | 356 | /* Middle level not present? We can't map it in. */ |
317 | if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) | 357 | if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) |
318 | return false; | 358 | return false; |
319 | 359 | ||
@@ -324,8 +364,10 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) | |||
324 | /* No shadow entry: allocate a new shadow PTE page. */ | 364 | /* No shadow entry: allocate a new shadow PTE page. */ |
325 | unsigned long ptepage = get_zeroed_page(GFP_KERNEL); | 365 | unsigned long ptepage = get_zeroed_page(GFP_KERNEL); |
326 | 366 | ||
327 | /* This is not really the Guest's fault, but killing it is | 367 | /* |
328 | * simple for this corner case. */ | 368 | * This is not really the Guest's fault, but killing it is |
369 | * simple for this corner case. | ||
370 | */ | ||
329 | if (!ptepage) { | 371 | if (!ptepage) { |
330 | kill_guest(cpu, "out of memory allocating pte page"); | 372 | kill_guest(cpu, "out of memory allocating pte page"); |
331 | return false; | 373 | return false; |
@@ -334,27 +376,37 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) | |||
334 | /* We check that the Guest pmd is OK. */ | 376 | /* We check that the Guest pmd is OK. */ |
335 | check_gpmd(cpu, gpmd); | 377 | check_gpmd(cpu, gpmd); |
336 | 378 | ||
337 | /* And we copy the flags to the shadow PMD entry. The page | 379 | /* |
338 | * number in the shadow PMD is the page we just allocated. */ | 380 | * And we copy the flags to the shadow PMD entry. The page |
381 | * number in the shadow PMD is the page we just allocated. | ||
382 | */ | ||
339 | native_set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd))); | 383 | native_set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd))); |
340 | } | 384 | } |
341 | 385 | ||
342 | /* OK, now we look at the lower level in the Guest page table: keep its | 386 | /* |
343 | * address, because we might update it later. */ | 387 | * OK, now we look at the lower level in the Guest page table: keep its |
388 | * address, because we might update it later. | ||
389 | */ | ||
344 | gpte_ptr = gpte_addr(cpu, gpmd, vaddr); | 390 | gpte_ptr = gpte_addr(cpu, gpmd, vaddr); |
345 | #else | 391 | #else |
346 | /* OK, now we look at the lower level in the Guest page table: keep its | 392 | /* |
347 | * address, because we might update it later. */ | 393 | * OK, now we look at the lower level in the Guest page table: keep its |
394 | * address, because we might update it later. | ||
395 | */ | ||
348 | gpte_ptr = gpte_addr(cpu, gpgd, vaddr); | 396 | gpte_ptr = gpte_addr(cpu, gpgd, vaddr); |
349 | #endif | 397 | #endif |
398 | |||
399 | /* Read the actual PTE value. */ | ||
350 | gpte = lgread(cpu, gpte_ptr, pte_t); | 400 | gpte = lgread(cpu, gpte_ptr, pte_t); |
351 | 401 | ||
352 | /* If this page isn't in the Guest page tables, we can't page it in. */ | 402 | /* If this page isn't in the Guest page tables, we can't page it in. */ |
353 | if (!(pte_flags(gpte) & _PAGE_PRESENT)) | 403 | if (!(pte_flags(gpte) & _PAGE_PRESENT)) |
354 | return false; | 404 | return false; |
355 | 405 | ||
356 | /* Check they're not trying to write to a page the Guest wants | 406 | /* |
357 | * read-only (bit 2 of errcode == write). */ | 407 | * Check they're not trying to write to a page the Guest wants |
408 | * read-only (bit 2 of errcode == write). | ||
409 | */ | ||
358 | if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW)) | 410 | if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW)) |
359 | return false; | 411 | return false; |
360 | 412 | ||
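The errcode tests just above and just below follow the x86 page-fault error code layout: value 2 means the faulting access was a write, value 4 means it came from user mode. A tiny standalone decoder follows; the macro names are local to this sketch, not kernel ones.

#include <stdio.h>
#include <stdbool.h>

#define ERRCODE_WRITE 2     /* set when the faulting access was a write */
#define ERRCODE_USER  4     /* set when the fault happened in user mode */

static void explain(unsigned int errcode)
{
    bool write = errcode & ERRCODE_WRITE;
    bool user  = errcode & ERRCODE_USER;

    printf("errcode %u: %s access from %s mode\n", errcode,
           write ? "write" : "read", user ? "user" : "kernel");
}

int main(void)
{
    explain(2);     /* a kernel-mode write, like the one pin_page() forces */
    explain(6);     /* a userspace write */
    return 0;
}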
@@ -362,8 +414,10 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) | |||
362 | if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER)) | 414 | if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER)) |
363 | return false; | 415 | return false; |
364 | 416 | ||
365 | /* Check that the Guest PTE flags are OK, and the page number is below | 417 | /* |
366 | * the pfn_limit (ie. not mapping the Launcher binary). */ | 418 | * Check that the Guest PTE flags are OK, and the page number is below |
419 | * the pfn_limit (ie. not mapping the Launcher binary). | ||
420 | */ | ||
367 | check_gpte(cpu, gpte); | 421 | check_gpte(cpu, gpte); |
368 | 422 | ||
369 | /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */ | 423 | /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */ |
@@ -373,29 +427,40 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) | |||
373 | 427 | ||
374 | /* Get the pointer to the shadow PTE entry we're going to set. */ | 428 | /* Get the pointer to the shadow PTE entry we're going to set. */ |
375 | spte = spte_addr(cpu, *spgd, vaddr); | 429 | spte = spte_addr(cpu, *spgd, vaddr); |
376 | /* If there was a valid shadow PTE entry here before, we release it. | 430 | |
377 | * This can happen with a write to a previously read-only entry. */ | 431 | /* |
432 | * If there was a valid shadow PTE entry here before, we release it. | ||
433 | * This can happen with a write to a previously read-only entry. | ||
434 | */ | ||
378 | release_pte(*spte); | 435 | release_pte(*spte); |
379 | 436 | ||
380 | /* If this is a write, we insist that the Guest page is writable (the | 437 | /* |
381 | * final arg to gpte_to_spte()). */ | 438 | * If this is a write, we insist that the Guest page is writable (the |
439 | * final arg to gpte_to_spte()). | ||
440 | */ | ||
382 | if (pte_dirty(gpte)) | 441 | if (pte_dirty(gpte)) |
383 | *spte = gpte_to_spte(cpu, gpte, 1); | 442 | *spte = gpte_to_spte(cpu, gpte, 1); |
384 | else | 443 | else |
385 | /* If this is a read, don't set the "writable" bit in the page | 444 | /* |
445 | * If this is a read, don't set the "writable" bit in the page | ||
386 | * table entry, even if the Guest says it's writable. That way | 446 | * table entry, even if the Guest says it's writable. That way |
387 | * we will come back here when a write does actually occur, so | 447 | * we will come back here when a write does actually occur, so |
388 | * we can update the Guest's _PAGE_DIRTY flag. */ | 448 | * we can update the Guest's _PAGE_DIRTY flag. |
449 | */ | ||
389 | native_set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0)); | 450 | native_set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0)); |
390 | 451 | ||
391 | /* Finally, we write the Guest PTE entry back: we've set the | 452 | /* |
392 | * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */ | 453 | * Finally, we write the Guest PTE entry back: we've set the |
454 | * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. | ||
455 | */ | ||
393 | lgwrite(cpu, gpte_ptr, pte_t, gpte); | 456 | lgwrite(cpu, gpte_ptr, pte_t, gpte); |
394 | 457 | ||
395 | /* The fault is fixed, the page table is populated, the mapping | 458 | /* |
459 | * The fault is fixed, the page table is populated, the mapping | ||
396 | * manipulated, the result returned and the code complete. A small | 460 | * manipulated, the result returned and the code complete. A small |
397 | * delay and a trace of alliteration are the only indications the Guest | 461 | * delay and a trace of alliteration are the only indications the Guest |
398 | * has that a page fault occurred at all. */ | 462 | * has that a page fault occurred at all. |
463 | */ | ||
399 | return true; | 464 | return true; |
400 | } | 465 | } |
401 | 466 | ||
@@ -408,7 +473,8 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) | |||
408 | * mapped, so it's overkill. | 473 | * mapped, so it's overkill. |
409 | * | 474 | * |
410 | * This is a quick version which answers the question: is this virtual address | 475 | * This is a quick version which answers the question: is this virtual address |
411 | * mapped by the shadow page tables, and is it writable? */ | 476 | * mapped by the shadow page tables, and is it writable? |
477 | */ | ||
412 | static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr) | 478 | static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr) |
413 | { | 479 | { |
414 | pgd_t *spgd; | 480 | pgd_t *spgd; |
@@ -428,21 +494,26 @@ static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr) | |||
428 | return false; | 494 | return false; |
429 | #endif | 495 | #endif |
430 | 496 | ||
431 | /* Check the flags on the pte entry itself: it must be present and | 497 | /* |
432 | * writable. */ | 498 | * Check the flags on the pte entry itself: it must be present and |
499 | * writable. | ||
500 | */ | ||
433 | flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr))); | 501 | flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr))); |
434 | 502 | ||
435 | return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW); | 503 | return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW); |
436 | } | 504 | } |
437 | 505 | ||
438 | /* So, when pin_stack_pages() asks us to pin a page, we check if it's already | 506 | /* |
507 | * So, when pin_stack_pages() asks us to pin a page, we check if it's already | ||
439 | * in the page tables, and if not, we call demand_page() with error code 2 | 508 | * in the page tables, and if not, we call demand_page() with error code 2 |
440 | * (meaning "write"). */ | 509 | * (meaning "write"). |
510 | */ | ||
441 | void pin_page(struct lg_cpu *cpu, unsigned long vaddr) | 511 | void pin_page(struct lg_cpu *cpu, unsigned long vaddr) |
442 | { | 512 | { |
443 | if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2)) | 513 | if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2)) |
444 | kill_guest(cpu, "bad stack page %#lx", vaddr); | 514 | kill_guest(cpu, "bad stack page %#lx", vaddr); |
445 | } | 515 | } |
516 | /*:*/ | ||
446 | 517 | ||
447 | #ifdef CONFIG_X86_PAE | 518 | #ifdef CONFIG_X86_PAE |
448 | static void release_pmd(pmd_t *spmd) | 519 | static void release_pmd(pmd_t *spmd) |
@@ -479,15 +550,21 @@ static void release_pgd(pgd_t *spgd) | |||
479 | } | 550 | } |
480 | 551 | ||
481 | #else /* !CONFIG_X86_PAE */ | 552 | #else /* !CONFIG_X86_PAE */ |
482 | /*H:450 If we chase down the release_pgd() code, it looks like this: */ | 553 | /*H:450 |
554 | * If we chase down the release_pgd() code, the non-PAE version looks like | ||
555 | * this. The PAE version is almost identical, but instead of calling | ||
556 | * release_pte it calls release_pmd(), which looks much like this. | ||
557 | */ | ||
483 | static void release_pgd(pgd_t *spgd) | 558 | static void release_pgd(pgd_t *spgd) |
484 | { | 559 | { |
485 | /* If the entry's not present, there's nothing to release. */ | 560 | /* If the entry's not present, there's nothing to release. */ |
486 | if (pgd_flags(*spgd) & _PAGE_PRESENT) { | 561 | if (pgd_flags(*spgd) & _PAGE_PRESENT) { |
487 | unsigned int i; | 562 | unsigned int i; |
488 | /* Converting the pfn to find the actual PTE page is easy: turn | 563 | /* |
564 | * Converting the pfn to find the actual PTE page is easy: turn | ||
489 | * the page number into a physical address, then convert to a | 565 | * the page number into a physical address, then convert to a |
490 | * virtual address (easy for kernel pages like this one). */ | 566 | * virtual address (easy for kernel pages like this one). |
567 | */ | ||
491 | pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT); | 568 | pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT); |
492 | /* For each entry in the page, we might need to release it. */ | 569 | /* For each entry in the page, we might need to release it. */ |
493 | for (i = 0; i < PTRS_PER_PTE; i++) | 570 | for (i = 0; i < PTRS_PER_PTE; i++) |
@@ -499,9 +576,12 @@ static void release_pgd(pgd_t *spgd) | |||
499 | } | 576 | } |
500 | } | 577 | } |
501 | #endif | 578 | #endif |
502 | /*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings() | 579 | |
580 | /*H:445 | ||
581 | * We saw flush_user_mappings() twice: once from the flush_user_mappings() | ||
503 | * hypercall and once in new_pgdir() when we re-used a top-level pgdir page. | 582 | * hypercall and once in new_pgdir() when we re-used a top-level pgdir page. |
504 | * It simply releases every PTE page from 0 up to the Guest's kernel address. */ | 583 | * It simply releases every PTE page from 0 up to the Guest's kernel address. |
584 | */ | ||
505 | static void flush_user_mappings(struct lguest *lg, int idx) | 585 | static void flush_user_mappings(struct lguest *lg, int idx) |
506 | { | 586 | { |
507 | unsigned int i; | 587 | unsigned int i; |
@@ -510,10 +590,12 @@ static void flush_user_mappings(struct lguest *lg, int idx) | |||
510 | release_pgd(lg->pgdirs[idx].pgdir + i); | 590 | release_pgd(lg->pgdirs[idx].pgdir + i); |
511 | } | 591 | } |
512 | 592 | ||
513 | /*H:440 (v) Flushing (throwing away) page tables, | 593 | /*H:440 |
594 | * (v) Flushing (throwing away) page tables, | ||
514 | * | 595 | * |
515 | * The Guest has a hypercall to throw away the page tables: it's used when a | 596 | * The Guest has a hypercall to throw away the page tables: it's used when a |
516 | * large number of mappings have been changed. */ | 597 | * large number of mappings have been changed. |
598 | */ | ||
517 | void guest_pagetable_flush_user(struct lg_cpu *cpu) | 599 | void guest_pagetable_flush_user(struct lg_cpu *cpu) |
518 | { | 600 | { |
519 | /* Drop the userspace part of the current page table. */ | 601 | /* Drop the userspace part of the current page table. */ |
@@ -551,9 +633,11 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr) | |||
551 | return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK); | 633 | return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK); |
552 | } | 634 | } |
553 | 635 | ||
554 | /* We keep several page tables. This is a simple routine to find the page | 636 | /* |
637 | * We keep several page tables. This is a simple routine to find the page | ||
555 | * table (if any) corresponding to this top-level address the Guest has given | 638 | * table (if any) corresponding to this top-level address the Guest has given |
556 | * us. */ | 639 | * us. |
640 | */ | ||
557 | static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable) | 641 | static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable) |
558 | { | 642 | { |
559 | unsigned int i; | 643 | unsigned int i; |
@@ -563,9 +647,11 @@ static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable) | |||
563 | return i; | 647 | return i; |
564 | } | 648 | } |
565 | 649 | ||
566 | /*H:435 And this is us, creating the new page directory. If we really do | 650 | /*H:435 |
651 | * And this is us, creating the new page directory. If we really do | ||
567 | * allocate a new one (and so the kernel parts are not there), we set | 652 | * allocate a new one (and so the kernel parts are not there), we set |
568 | * blank_pgdir. */ | 653 | * blank_pgdir. |
654 | */ | ||
569 | static unsigned int new_pgdir(struct lg_cpu *cpu, | 655 | static unsigned int new_pgdir(struct lg_cpu *cpu, |
570 | unsigned long gpgdir, | 656 | unsigned long gpgdir, |
571 | int *blank_pgdir) | 657 | int *blank_pgdir) |
@@ -575,8 +661,10 @@ static unsigned int new_pgdir(struct lg_cpu *cpu, | |||
575 | pmd_t *pmd_table; | 661 | pmd_t *pmd_table; |
576 | #endif | 662 | #endif |
577 | 663 | ||
578 | /* We pick one entry at random to throw out. Choosing the Least | 664 | /* |
579 | * Recently Used might be better, but this is easy. */ | 665 | * We pick one entry at random to throw out. Choosing the Least |
666 | * Recently Used might be better, but this is easy. | ||
667 | */ | ||
580 | next = random32() % ARRAY_SIZE(cpu->lg->pgdirs); | 668 | next = random32() % ARRAY_SIZE(cpu->lg->pgdirs); |
581 | /* If it's never been allocated at all before, try now. */ | 669 | /* If it's never been allocated at all before, try now. */ |
582 | if (!cpu->lg->pgdirs[next].pgdir) { | 670 | if (!cpu->lg->pgdirs[next].pgdir) { |
@@ -587,8 +675,10 @@ static unsigned int new_pgdir(struct lg_cpu *cpu, | |||
587 | next = cpu->cpu_pgd; | 675 | next = cpu->cpu_pgd; |
588 | else { | 676 | else { |
589 | #ifdef CONFIG_X86_PAE | 677 | #ifdef CONFIG_X86_PAE |
590 | /* In PAE mode, allocate a pmd page and populate the | 678 | /* |
591 | * last pgd entry. */ | 679 | * In PAE mode, allocate a pmd page and populate the |
680 | * last pgd entry. | ||
681 | */ | ||
592 | pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL); | 682 | pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL); |
593 | if (!pmd_table) { | 683 | if (!pmd_table) { |
594 | free_page((long)cpu->lg->pgdirs[next].pgdir); | 684 | free_page((long)cpu->lg->pgdirs[next].pgdir); |
@@ -598,8 +688,10 @@ static unsigned int new_pgdir(struct lg_cpu *cpu, | |||
598 | set_pgd(cpu->lg->pgdirs[next].pgdir + | 688 | set_pgd(cpu->lg->pgdirs[next].pgdir + |
599 | SWITCHER_PGD_INDEX, | 689 | SWITCHER_PGD_INDEX, |
600 | __pgd(__pa(pmd_table) | _PAGE_PRESENT)); | 690 | __pgd(__pa(pmd_table) | _PAGE_PRESENT)); |
601 | /* This is a blank page, so there are no kernel | 691 | /* |
602 | * mappings: caller must map the stack! */ | 692 | * This is a blank page, so there are no kernel |
693 | * mappings: caller must map the stack! | ||
694 | */ | ||
603 | *blank_pgdir = 1; | 695 | *blank_pgdir = 1; |
604 | } | 696 | } |
605 | #else | 697 | #else |
@@ -615,19 +707,23 @@ static unsigned int new_pgdir(struct lg_cpu *cpu, | |||
615 | return next; | 707 | return next; |
616 | } | 708 | } |
617 | 709 | ||
618 | /*H:430 (iv) Switching page tables | 710 | /*H:430 |
711 | * (iv) Switching page tables | ||
619 | * | 712 | * |
620 | * Now we've seen all the page table setting and manipulation, let's see | 713 | * Now we've seen all the page table setting and manipulation, let's see |
621 | * what happens when the Guest changes page tables (ie. changes the top-level | 714 | * what happens when the Guest changes page tables (ie. changes the top-level |
622 | * pgdir). This occurs on almost every context switch. */ | 715 | * pgdir). This occurs on almost every context switch. |
716 | */ | ||
623 | void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable) | 717 | void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable) |
624 | { | 718 | { |
625 | int newpgdir, repin = 0; | 719 | int newpgdir, repin = 0; |
626 | 720 | ||
627 | /* Look to see if we have this one already. */ | 721 | /* Look to see if we have this one already. */ |
628 | newpgdir = find_pgdir(cpu->lg, pgtable); | 722 | newpgdir = find_pgdir(cpu->lg, pgtable); |
629 | /* If not, we allocate or mug an existing one: if it's a fresh one, | 723 | /* |
630 | * repin gets set to 1. */ | 724 | * If not, we allocate or mug an existing one: if it's a fresh one, |
725 | * repin gets set to 1. | ||
726 | */ | ||
631 | if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs)) | 727 | if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs)) |
632 | newpgdir = new_pgdir(cpu, pgtable, &repin); | 728 | newpgdir = new_pgdir(cpu, pgtable, &repin); |
633 | /* Change the current pgd index to the new one. */ | 729 | /* Change the current pgd index to the new one. */ |
@@ -637,9 +733,11 @@ void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable) | |||
637 | pin_stack_pages(cpu); | 733 | pin_stack_pages(cpu); |
638 | } | 734 | } |
639 | 735 | ||
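The flow just shown is easy to summarise: look the Guest's top level up in a small cache of shadow pgdirs, and if it is not there, recycle a slot chosen at random and tell the caller the slot is blank so it re-pins the stack pages. Here is a standalone userspace sketch of that policy; the structure, names and slot count are invented for illustration and merely stand in for cpu->lg->pgdirs, they are not the kernel code.

#include <stdio.h>
#include <stdlib.h>

#define NSHADOW 4                       /* lguest only caches a handful */

struct shadow { unsigned long gpgdir; int valid; };

/* Return a slot for 'gpgdir'; set *fresh when the caller got a recycled
 * (blank) slot and must re-pin the Guest's stack pages. */
static unsigned int find_or_evict(struct shadow *cache, unsigned long gpgdir,
                                  int *fresh)
{
    unsigned int i;

    *fresh = 0;
    for (i = 0; i < NSHADOW; i++)       /* the find_pgdir() step */
        if (cache[i].valid && cache[i].gpgdir == gpgdir)
            return i;

    i = rand() % NSHADOW;               /* the new_pgdir() step: random victim */
    cache[i].gpgdir = gpgdir;
    cache[i].valid = 1;
    *fresh = 1;
    return i;
}

int main(void)
{
    struct shadow cache[NSHADOW] = { { 0x1000, 1 } };
    int fresh;
    unsigned int slot;

    slot = find_or_evict(cache, 0x2000, &fresh);
    printf("0x2000 -> slot %u, fresh=%d\n", slot, fresh);
    slot = find_or_evict(cache, 0x1000, &fresh);
    printf("0x1000 -> slot %u, fresh=%d\n", slot, fresh);
    return 0;
}

Random replacement is deliberately dumb; as the comment above concedes, Least Recently Used might do better, but with only a handful of slots it rarely matters.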
640 | /*H:470 Finally, a routine which throws away everything: all PGD entries in all | 736 | /*H:470 |
737 | * Finally, a routine which throws away everything: all PGD entries in all | ||
641 | * the shadow page tables, including the Guest's kernel mappings. This is used | 738 | * the shadow page tables, including the Guest's kernel mappings. This is used |
642 | * when we destroy the Guest. */ | 739 | * when we destroy the Guest. |
740 | */ | ||
643 | static void release_all_pagetables(struct lguest *lg) | 741 | static void release_all_pagetables(struct lguest *lg) |
644 | { | 742 | { |
645 | unsigned int i, j; | 743 | unsigned int i, j; |
@@ -656,8 +754,10 @@ static void release_all_pagetables(struct lguest *lg) | |||
656 | spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX; | 754 | spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX; |
657 | pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT); | 755 | pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT); |
658 | 756 | ||
659 | /* And release the pmd entries of that pmd page, | 757 | /* |
660 | * except for the switcher pmd. */ | 758 | * And release the pmd entries of that pmd page, |
759 | * except for the switcher pmd. | ||
760 | */ | ||
661 | for (k = 0; k < SWITCHER_PMD_INDEX; k++) | 761 | for (k = 0; k < SWITCHER_PMD_INDEX; k++) |
662 | release_pmd(&pmdpage[k]); | 762 | release_pmd(&pmdpage[k]); |
663 | #endif | 763 | #endif |
@@ -667,10 +767,12 @@ static void release_all_pagetables(struct lguest *lg) | |||
667 | } | 767 | } |
668 | } | 768 | } |
669 | 769 | ||
670 | /* We also throw away everything when a Guest tells us it's changed a kernel | 770 | /* |
771 | * We also throw away everything when a Guest tells us it's changed a kernel | ||
671 | * mapping. Since kernel mappings are in every page table, it's easiest to | 772 | * mapping. Since kernel mappings are in every page table, it's easiest to |
672 | * throw them all away. This traps the Guest in amber for a while as | 773 | * throw them all away. This traps the Guest in amber for a while as |
673 | * everything faults back in, but it's rare. */ | 774 | * everything faults back in, but it's rare. |
775 | */ | ||
674 | void guest_pagetable_clear_all(struct lg_cpu *cpu) | 776 | void guest_pagetable_clear_all(struct lg_cpu *cpu) |
675 | { | 777 | { |
676 | release_all_pagetables(cpu->lg); | 778 | release_all_pagetables(cpu->lg); |
@@ -678,15 +780,19 @@ void guest_pagetable_clear_all(struct lg_cpu *cpu) | |||
678 | pin_stack_pages(cpu); | 780 | pin_stack_pages(cpu); |
679 | } | 781 | } |
680 | /*:*/ | 782 | /*:*/ |
681 | /*M:009 Since we throw away all mappings when a kernel mapping changes, our | 783 | |
784 | /*M:009 | ||
785 | * Since we throw away all mappings when a kernel mapping changes, our | ||
682 | * performance sucks for guests using highmem. In fact, a guest with | 786 | * performance sucks for guests using highmem. In fact, a guest with |
683 | * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is | 787 | * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is |
684 | * usually slower than a Guest with less memory. | 788 | * usually slower than a Guest with less memory. |
685 | * | 789 | * |
686 | * This, of course, cannot be fixed. It would take some kind of... well, I | 790 | * This, of course, cannot be fixed. It would take some kind of... well, I |
687 | * don't know, but the term "puissant code-fu" comes to mind. :*/ | 791 | * don't know, but the term "puissant code-fu" comes to mind. |
792 | :*/ | ||
688 | 793 | ||
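To put a number on the M:009 complaint: with the default 32-bit PAGE_OFFSET only the first chunk of a big Guest's RAM fits in the kernel's permanent direct map; everything above that is highmem, reached through kmap(), and every kmap()/kunmap() is exactly the kind of kernel-mapping change that costs us all the shadow page tables. A back-of-the-envelope calculation (the 128 MB vmalloc reserve is an assumed round figure, which is why this lands near, not exactly at, the "about 700MB" quoted above):

#include <stdio.h>

int main(void)
{
    unsigned long long page_offset = 0xC0000000ULL;              /* default PAGE_OFFSET   */
    unsigned long long kernel_va   = (1ULL << 32) - page_offset; /* 1 GiB of kernel VA    */
    unsigned long long vmalloc_etc = 128ULL << 20;               /* assumed reserve       */
    unsigned long long lowmem      = kernel_va - vmalloc_etc;    /* direct-mappable RAM   */

    /* RAM above this is highmem: every kmap()/kunmap() of it rewrites a
     * kernel PTE, and each such change costs us all shadow page tables. */
    printf("direct-mapped limit ~ %llu MiB\n", lowmem >> 20);    /* prints 896 */
    return 0;
}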
689 | /*H:420 This is the routine which actually sets the page table entry for the | 794 | /*H:420 |
795 | * This is the routine which actually sets the page table entry for the | ||
690 | * "idx"'th shadow page table. | 796 | * "idx"'th shadow page table. |
691 | * | 797 | * |
692 | * Normally, we can just throw out the old entry and replace it with 0: if they | 798 | * Normally, we can just throw out the old entry and replace it with 0: if they |
@@ -715,31 +821,36 @@ static void do_set_pte(struct lg_cpu *cpu, int idx, | |||
715 | spmd = spmd_addr(cpu, *spgd, vaddr); | 821 | spmd = spmd_addr(cpu, *spgd, vaddr); |
716 | if (pmd_flags(*spmd) & _PAGE_PRESENT) { | 822 | if (pmd_flags(*spmd) & _PAGE_PRESENT) { |
717 | #endif | 823 | #endif |
718 | /* Otherwise, we start by releasing | 824 | /* Otherwise, start by releasing the existing entry. */ |
719 | * the existing entry. */ | ||
720 | pte_t *spte = spte_addr(cpu, *spgd, vaddr); | 825 | pte_t *spte = spte_addr(cpu, *spgd, vaddr); |
721 | release_pte(*spte); | 826 | release_pte(*spte); |
722 | 827 | ||
723 | /* If they're setting this entry as dirty or accessed, | 828 | /* |
724 | * we might as well put that entry they've given us | 829 | * If they're setting this entry as dirty or accessed, |
725 | * in now. This shaves 10% off a | 830 | * we might as well put that entry they've given us in |
726 | * copy-on-write micro-benchmark. */ | 831 | * now. This shaves 10% off a copy-on-write |
832 | * micro-benchmark. | ||
833 | */ | ||
727 | if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) { | 834 | if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) { |
728 | check_gpte(cpu, gpte); | 835 | check_gpte(cpu, gpte); |
729 | native_set_pte(spte, | 836 | native_set_pte(spte, |
730 | gpte_to_spte(cpu, gpte, | 837 | gpte_to_spte(cpu, gpte, |
731 | pte_flags(gpte) & _PAGE_DIRTY)); | 838 | pte_flags(gpte) & _PAGE_DIRTY)); |
732 | } else | 839 | } else { |
733 | /* Otherwise kill it and we can demand_page() | 840 | /* |
734 | * it in later. */ | 841 | * Otherwise kill it and we can demand_page() |
842 | * it in later. | ||
843 | */ | ||
735 | native_set_pte(spte, __pte(0)); | 844 | native_set_pte(spte, __pte(0)); |
845 | } | ||
736 | #ifdef CONFIG_X86_PAE | 846 | #ifdef CONFIG_X86_PAE |
737 | } | 847 | } |
738 | #endif | 848 | #endif |
739 | } | 849 | } |
740 | } | 850 | } |
741 | 851 | ||
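The interesting decision in do_set_pte() above is the middle branch: only install a shadow entry straight away when the Guest has already marked the page accessed or dirty, otherwise leave the slot empty so the first real access faults and demand_page() gets to notice it. A toy model of that choice, using plain integers and the usual x86 low-bit values in place of the kernel's pte_t machinery:

#include <stdio.h>

#define F_PRESENT  0x001u
#define F_ACCESSED 0x020u
#define F_DIRTY    0x040u

/* Given a Guest PTE, what do we put in the shadow slot right now? */
static unsigned int shadow_now(unsigned int gpte)
{
    if (gpte & (F_DIRTY | F_ACCESSED))
        return gpte;    /* already used: worth mapping immediately */
    return 0;           /* empty: first access faults and demand_page()s it in */
}

int main(void)
{
    printf("dirty gpte     %#x -> shadow %#x\n",
           F_PRESENT | F_DIRTY, shadow_now(F_PRESENT | F_DIRTY));
    printf("untouched gpte %#x -> shadow %#x\n",
           F_PRESENT, shadow_now(F_PRESENT));
    return 0;
}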
742 | /*H:410 Updating a PTE entry is a little trickier. | 852 | /*H:410 |
853 | * Updating a PTE entry is a little trickier. | ||
743 | * | 854 | * |
744 | * We keep track of several different page tables (the Guest uses one for each | 855 | * We keep track of several different page tables (the Guest uses one for each |
745 | * process, so it makes sense to cache at least a few). Each of these has | 856 | * process, so it makes sense to cache at least a few). Each of these has |
@@ -748,12 +859,15 @@ static void do_set_pte(struct lg_cpu *cpu, int idx, | |||
748 | * all the page tables, not just the current one. This is rare. | 859 | * all the page tables, not just the current one. This is rare. |
749 | * | 860 | * |
750 | * The benefit is that when we have to track a new page table, we can keep all | 861 | * The benefit is that when we have to track a new page table, we can keep all |
751 | * the kernel mappings. This speeds up context switch immensely. */ | 862 | * the kernel mappings. This speeds up context switch immensely. |
863 | */ | ||
752 | void guest_set_pte(struct lg_cpu *cpu, | 864 | void guest_set_pte(struct lg_cpu *cpu, |
753 | unsigned long gpgdir, unsigned long vaddr, pte_t gpte) | 865 | unsigned long gpgdir, unsigned long vaddr, pte_t gpte) |
754 | { | 866 | { |
755 | /* Kernel mappings must be changed on all top levels. Slow, but doesn't | 867 | /* |
756 | * happen often. */ | 868 | * Kernel mappings must be changed on all top levels. Slow, but doesn't |
869 | * happen often. | ||
870 | */ | ||
757 | if (vaddr >= cpu->lg->kernel_address) { | 871 | if (vaddr >= cpu->lg->kernel_address) { |
758 | unsigned int i; | 872 | unsigned int i; |
759 | for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++) | 873 | for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++) |
@@ -795,19 +909,25 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx) | |||
795 | /* ... throw it away. */ | 909 | /* ... throw it away. */ |
796 | release_pgd(lg->pgdirs[pgdir].pgdir + idx); | 910 | release_pgd(lg->pgdirs[pgdir].pgdir + idx); |
797 | } | 911 | } |
912 | |||
798 | #ifdef CONFIG_X86_PAE | 913 | #ifdef CONFIG_X86_PAE |
914 | /* For setting a mid-level, we just throw everything away. It's easy. */ | ||
799 | void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx) | 915 | void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx) |
800 | { | 916 | { |
801 | guest_pagetable_clear_all(&lg->cpus[0]); | 917 | guest_pagetable_clear_all(&lg->cpus[0]); |
802 | } | 918 | } |
803 | #endif | 919 | #endif |
804 | 920 | ||
805 | /* Once we know how much memory we have we can construct simple identity | 921 | /*H:505 |
806 | * (which set virtual == physical) and linear mappings | 922 | * To get through boot, we construct simple identity page mappings (which |
807 | * which will get the Guest far enough into the boot to create its own. | 923 | * set virtual == physical) and linear mappings which will get the Guest far |
924 | * enough into the boot to create its own. The linear mapping means we | ||
925 | * simplify the Guest boot, but it makes assumptions about their PAGE_OFFSET, | ||
926 | * as you'll see. | ||
808 | * | 927 | * |
809 | * We lay them out of the way, just below the initrd (which is why we need to | 928 | * We lay them out of the way, just below the initrd (which is why we need to |
810 | * know its size here). */ | 929 | * know its size here). |
930 | */ | ||
811 | static unsigned long setup_pagetables(struct lguest *lg, | 931 | static unsigned long setup_pagetables(struct lguest *lg, |
812 | unsigned long mem, | 932 | unsigned long mem, |
813 | unsigned long initrd_size) | 933 | unsigned long initrd_size) |
@@ -825,8 +945,10 @@ static unsigned long setup_pagetables(struct lguest *lg, | |||
825 | unsigned int phys_linear; | 945 | unsigned int phys_linear; |
826 | #endif | 946 | #endif |
827 | 947 | ||
828 | /* We have mapped_pages frames to map, so we need | 948 | /* |
829 | * linear_pages page tables to map them. */ | 949 | * We have mapped_pages frames to map, so we need linear_pages page |
950 | * tables to map them. | ||
951 | */ | ||
830 | mapped_pages = mem / PAGE_SIZE; | 952 | mapped_pages = mem / PAGE_SIZE; |
831 | linear_pages = (mapped_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE; | 953 | linear_pages = (mapped_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE; |
832 | 954 | ||
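A worked example of the two quantities just computed, for a hypothetical 64 MB Guest with 4 KB pages and the non-PAE PTRS_PER_PTE of 1024 (under PAE it is 512, doubling linear_pages):

#include <stdio.h>

int main(void)
{
    unsigned long mem          = 64UL << 20;    /* example: a 64 MB Guest  */
    unsigned long page_size    = 4096;
    unsigned long ptrs_per_pte = 1024;          /* non-PAE; 512 under PAE  */

    unsigned long mapped_pages = mem / page_size;
    unsigned long linear_pages =
        (mapped_pages + ptrs_per_pte - 1) / ptrs_per_pte;   /* round up */

    /* 16384 page frames need 16 PTE pages (64 KB) for the linear map. */
    printf("mapped_pages = %lu, linear_pages = %lu\n",
           mapped_pages, linear_pages);
    return 0;
}

So this example Guest pays 64 KB of PTE pages, tucked in just below the initrd as the H:505 comment describes.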
@@ -837,10 +959,16 @@ static unsigned long setup_pagetables(struct lguest *lg, | |||
837 | linear = (void *)pgdir - linear_pages * PAGE_SIZE; | 959 | linear = (void *)pgdir - linear_pages * PAGE_SIZE; |
838 | 960 | ||
839 | #ifdef CONFIG_X86_PAE | 961 | #ifdef CONFIG_X86_PAE |
962 | /* | ||
963 | * And the single mid page goes below that. We only use one, but | ||
964 | * that's enough to map 1G, which definitely gets us through boot. | ||
965 | */ | ||
840 | pmds = (void *)linear - PAGE_SIZE; | 966 | pmds = (void *)linear - PAGE_SIZE; |
841 | #endif | 967 | #endif |
842 | /* Linear mapping is easy: put every page's address into the | 968 | /* |
843 | * mapping in order. */ | 969 | * Linear mapping is easy: put every page's address into the |
970 | * mapping in order. | ||
971 | */ | ||
844 | for (i = 0; i < mapped_pages; i++) { | 972 | for (i = 0; i < mapped_pages; i++) { |
845 | pte_t pte; | 973 | pte_t pte; |
846 | pte = pfn_pte(i, __pgprot(_PAGE_PRESENT|_PAGE_RW|_PAGE_USER)); | 974 | pte = pfn_pte(i, __pgprot(_PAGE_PRESENT|_PAGE_RW|_PAGE_USER)); |
@@ -848,11 +976,14 @@ static unsigned long setup_pagetables(struct lguest *lg, | |||
848 | return -EFAULT; | 976 | return -EFAULT; |
849 | } | 977 | } |
850 | 978 | ||
851 | /* The top level points to the linear page table pages above. | ||
852 | * We setup the identity and linear mappings here. */ | ||
853 | #ifdef CONFIG_X86_PAE | 979 | #ifdef CONFIG_X86_PAE |
980 | /* | ||
981 | * Make the Guest PMD entries point to the corresponding place in the | ||
982 | * linear mapping (up to one page worth of PMD). | ||
983 | */ | ||
854 | for (i = j = 0; i < mapped_pages && j < PTRS_PER_PMD; | 984 | for (i = j = 0; i < mapped_pages && j < PTRS_PER_PMD; |
855 | i += PTRS_PER_PTE, j++) { | 985 | i += PTRS_PER_PTE, j++) { |
986 | /* FIXME: native_set_pmd is overkill here. */ | ||
856 | native_set_pmd(&pmd, __pmd(((unsigned long)(linear + i) | 987 | native_set_pmd(&pmd, __pmd(((unsigned long)(linear + i) |
857 | - mem_base) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER)); | 988 | - mem_base) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER)); |
858 | 989 | ||
@@ -860,18 +991,36 @@ static unsigned long setup_pagetables(struct lguest *lg, | |||
860 | return -EFAULT; | 991 | return -EFAULT; |
861 | } | 992 | } |
862 | 993 | ||
994 | /* One PGD entry, pointing to that PMD page. */ | ||
863 | set_pgd(&pgd, __pgd(((u32)pmds - mem_base) | _PAGE_PRESENT)); | 995 | set_pgd(&pgd, __pgd(((u32)pmds - mem_base) | _PAGE_PRESENT)); |
996 | /* Copy it in as the first PGD entry (ie. addresses 0-1G). */ | ||
864 | if (copy_to_user(&pgdir[0], &pgd, sizeof(pgd)) != 0) | 997 | if (copy_to_user(&pgdir[0], &pgd, sizeof(pgd)) != 0) |
865 | return -EFAULT; | 998 | return -EFAULT; |
999 | /* | ||
1000 | * And the third PGD entry (ie. addresses 3G-4G). | ||
1001 | * | ||
1002 | * FIXME: This assumes that PAGE_OFFSET for the Guest is 0xC0000000. | ||
1003 | */ | ||
866 | if (copy_to_user(&pgdir[3], &pgd, sizeof(pgd)) != 0) | 1004 | if (copy_to_user(&pgdir[3], &pgd, sizeof(pgd)) != 0) |
867 | return -EFAULT; | 1005 | return -EFAULT; |
868 | #else | 1006 | #else |
1007 | /* | ||
1008 | * The top level points to the linear page table pages above. | ||
1009 | * We setup the identity and linear mappings here. | ||
1010 | */ | ||
869 | phys_linear = (unsigned long)linear - mem_base; | 1011 | phys_linear = (unsigned long)linear - mem_base; |
870 | for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) { | 1012 | for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) { |
871 | pgd_t pgd; | 1013 | pgd_t pgd; |
1014 | /* | ||
1015 | * Create a PGD entry which points to the right part of the | ||
1016 | * linear PTE pages. | ||
1017 | */ | ||
872 | pgd = __pgd((phys_linear + i * sizeof(pte_t)) | | 1018 | pgd = __pgd((phys_linear + i * sizeof(pte_t)) | |
873 | (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER)); | 1019 | (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER)); |
874 | 1020 | ||
1021 | /* | ||
1022 | * Copy it into the PGD page at 0 and PAGE_OFFSET. | ||
1023 | */ | ||
875 | if (copy_to_user(&pgdir[i / PTRS_PER_PTE], &pgd, sizeof(pgd)) | 1024 | if (copy_to_user(&pgdir[i / PTRS_PER_PTE], &pgd, sizeof(pgd)) |
876 | || copy_to_user(&pgdir[pgd_index(PAGE_OFFSET) | 1025 | || copy_to_user(&pgdir[pgd_index(PAGE_OFFSET) |
877 | + i / PTRS_PER_PTE], | 1026 | + i / PTRS_PER_PTE], |
@@ -880,15 +1029,19 @@ static unsigned long setup_pagetables(struct lguest *lg, | |||
880 | } | 1029 | } |
881 | #endif | 1030 | #endif |
882 | 1031 | ||
883 | /* We return the top level (guest-physical) address: remember where | 1032 | /* |
884 | * this is. */ | 1033 | * We return the top level (guest-physical) address: we remember where |
1034 | * this is to write it into lguest_data when the Guest initializes. | ||
1035 | */ | ||
885 | return (unsigned long)pgdir - mem_base; | 1036 | return (unsigned long)pgdir - mem_base; |
886 | } | 1037 | } |
887 | 1038 | ||
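The index arithmetic in setup_pagetables() is easier to check with concrete numbers. Without PAE each PGD entry covers PTRS_PER_PTE * PAGE_SIZE = 4 MB, the identity entries start at index 0, and the linear map starts at pgd_index(PAGE_OFFSET); with PAE a single PMD page covers 512 entries of 2 MB each, the 1 GB the comment above mentions. The usual values (4 KB pages, PAGE_OFFSET 0xC0000000) are assumed here rather than pulled from a real config:

#include <stdio.h>

int main(void)
{
    unsigned long page_offset = 0xC0000000UL;

    /* Non-PAE: 1024 PTEs/page * 4 KB pages = 4 MB per PGD entry. */
    unsigned long per_pgd = 1024UL * 4096;
    unsigned long pgd_idx = page_offset / per_pgd;          /* pgd_index() */

    /* PAE: 512 PTEs/page * 4 KB = 2 MB per PMD entry, 512 entries/page. */
    unsigned long long per_pmd_page = 512ULL * 512 * 4096;  /* 1 GB */

    printf("non-PAE: 4MB per entry, pgd_index(0xC0000000) = %lu of 1024\n",
           pgd_idx);                                        /* prints 768  */
    printf("PAE: one PMD page maps %llu MB\n",
           per_pmd_page >> 20);                             /* prints 1024 */
    return 0;
}

Index 768 also shows why the identity copy at pgdir[0] and the linear copy at pgdir[768] never collide, and why both sit well clear of the top entry the Switcher claims for itself later on.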
888 | /*H:500 (vii) Setting up the page tables initially. | 1039 | /*H:500 |
1040 | * (vii) Setting up the page tables initially. | ||
889 | * | 1041 | * |
890 | * When a Guest is first created, the Launcher tells us where the toplevel of | 1042 | * When a Guest is first created, the Launcher tells us where the toplevel of |
891 | * its first page table is. We set some things up here: */ | 1043 | * its first page table is. We set some things up here: |
1044 | */ | ||
892 | int init_guest_pagetable(struct lguest *lg) | 1045 | int init_guest_pagetable(struct lguest *lg) |
893 | { | 1046 | { |
894 | u64 mem; | 1047 | u64 mem; |
@@ -898,21 +1051,27 @@ int init_guest_pagetable(struct lguest *lg) | |||
898 | pgd_t *pgd; | 1051 | pgd_t *pgd; |
899 | pmd_t *pmd_table; | 1052 | pmd_t *pmd_table; |
900 | #endif | 1053 | #endif |
901 | /* Get the Guest memory size and the ramdisk size from the boot header | 1054 | /* |
902 | * located at lg->mem_base (Guest address 0). */ | 1055 | * Get the Guest memory size and the ramdisk size from the boot header |
1056 | * located at lg->mem_base (Guest address 0). | ||
1057 | */ | ||
903 | if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem)) | 1058 | if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem)) |
904 | || get_user(initrd_size, &boot->hdr.ramdisk_size)) | 1059 | || get_user(initrd_size, &boot->hdr.ramdisk_size)) |
905 | return -EFAULT; | 1060 | return -EFAULT; |
906 | 1061 | ||
907 | /* We start on the first shadow page table, and give it a blank PGD | 1062 | /* |
908 | * page. */ | 1063 | * We start on the first shadow page table, and give it a blank PGD |
1064 | * page. | ||
1065 | */ | ||
909 | lg->pgdirs[0].gpgdir = setup_pagetables(lg, mem, initrd_size); | 1066 | lg->pgdirs[0].gpgdir = setup_pagetables(lg, mem, initrd_size); |
910 | if (IS_ERR_VALUE(lg->pgdirs[0].gpgdir)) | 1067 | if (IS_ERR_VALUE(lg->pgdirs[0].gpgdir)) |
911 | return lg->pgdirs[0].gpgdir; | 1068 | return lg->pgdirs[0].gpgdir; |
912 | lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL); | 1069 | lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL); |
913 | if (!lg->pgdirs[0].pgdir) | 1070 | if (!lg->pgdirs[0].pgdir) |
914 | return -ENOMEM; | 1071 | return -ENOMEM; |
1072 | |||
915 | #ifdef CONFIG_X86_PAE | 1073 | #ifdef CONFIG_X86_PAE |
1074 | /* For PAE, we also create the initial mid-level. */ | ||
916 | pgd = lg->pgdirs[0].pgdir; | 1075 | pgd = lg->pgdirs[0].pgdir; |
917 | pmd_table = (pmd_t *) get_zeroed_page(GFP_KERNEL); | 1076 | pmd_table = (pmd_t *) get_zeroed_page(GFP_KERNEL); |
918 | if (!pmd_table) | 1077 | if (!pmd_table) |
@@ -921,27 +1080,33 @@ int init_guest_pagetable(struct lguest *lg) | |||
921 | set_pgd(pgd + SWITCHER_PGD_INDEX, | 1080 | set_pgd(pgd + SWITCHER_PGD_INDEX, |
922 | __pgd(__pa(pmd_table) | _PAGE_PRESENT)); | 1081 | __pgd(__pa(pmd_table) | _PAGE_PRESENT)); |
923 | #endif | 1082 | #endif |
1083 | |||
1084 | /* This is the current page table. */ | ||
924 | lg->cpus[0].cpu_pgd = 0; | 1085 | lg->cpus[0].cpu_pgd = 0; |
925 | return 0; | 1086 | return 0; |
926 | } | 1087 | } |
927 | 1088 | ||
928 | /* When the Guest calls LHCALL_LGUEST_INIT we do more setup. */ | 1089 | /*H:508 When the Guest calls LHCALL_LGUEST_INIT we do more setup. */ |
929 | void page_table_guest_data_init(struct lg_cpu *cpu) | 1090 | void page_table_guest_data_init(struct lg_cpu *cpu) |
930 | { | 1091 | { |
931 | /* We get the kernel address: above this is all kernel memory. */ | 1092 | /* We get the kernel address: above this is all kernel memory. */ |
932 | if (get_user(cpu->lg->kernel_address, | 1093 | if (get_user(cpu->lg->kernel_address, |
933 | &cpu->lg->lguest_data->kernel_address) | 1094 | &cpu->lg->lguest_data->kernel_address) |
934 | /* We tell the Guest that it can't use the top 2 or 4 MB | 1095 | /* |
935 | * of virtual addresses used by the Switcher. */ | 1096 | * We tell the Guest that it can't use the top 2 or 4 MB |
1097 | * of virtual addresses used by the Switcher. | ||
1098 | */ | ||
936 | || put_user(RESERVE_MEM * 1024 * 1024, | 1099 | || put_user(RESERVE_MEM * 1024 * 1024, |
937 | &cpu->lg->lguest_data->reserve_mem) | 1100 | &cpu->lg->lguest_data->reserve_mem) |
938 | || put_user(cpu->lg->pgdirs[0].gpgdir, | 1101 | || put_user(cpu->lg->pgdirs[0].gpgdir, |
939 | &cpu->lg->lguest_data->pgdir)) | 1102 | &cpu->lg->lguest_data->pgdir)) |
940 | kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); | 1103 | kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); |
941 | 1104 | ||
942 | /* In flush_user_mappings() we loop from 0 to | 1105 | /* |
1106 | * In flush_user_mappings() we loop from 0 to | ||
943 | * "pgd_index(lg->kernel_address)". This assumes it won't hit the | 1107 | * "pgd_index(lg->kernel_address)". This assumes it won't hit the |
944 | * Switcher mappings, so check that now. */ | 1108 | * Switcher mappings, so check that now. |
1109 | */ | ||
945 | #ifdef CONFIG_X86_PAE | 1110 | #ifdef CONFIG_X86_PAE |
946 | if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX && | 1111 | if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX && |
947 | pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX) | 1112 | pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX) |
@@ -964,12 +1129,14 @@ void free_guest_pagetable(struct lguest *lg) | |||
964 | free_page((long)lg->pgdirs[i].pgdir); | 1129 | free_page((long)lg->pgdirs[i].pgdir); |
965 | } | 1130 | } |
966 | 1131 | ||
967 | /*H:480 (vi) Mapping the Switcher when the Guest is about to run. | 1132 | /*H:480 |
1133 | * (vi) Mapping the Switcher when the Guest is about to run. | ||
968 | * | 1134 | * |
969 | * The Switcher and the two pages for this CPU need to be visible in the | 1135 | * The Switcher and the two pages for this CPU need to be visible in the |
970 | * Guest (and not the pages for other CPUs). We have the appropriate PTE pages | 1136 | * Guest (and not the pages for other CPUs). We have the appropriate PTE pages |
971 | * for each CPU already set up, we just need to hook them in now we know which | 1137 | * for each CPU already set up, we just need to hook them in now we know which |
972 | * Guest is about to run on this CPU. */ | 1138 | * Guest is about to run on this CPU. |
1139 | */ | ||
973 | void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) | 1140 | void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) |
974 | { | 1141 | { |
975 | pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages); | 1142 | pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages); |
@@ -980,30 +1147,38 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) | |||
980 | pmd_t switcher_pmd; | 1147 | pmd_t switcher_pmd; |
981 | pmd_t *pmd_table; | 1148 | pmd_t *pmd_table; |
982 | 1149 | ||
1150 | /* FIXME: native_set_pmd is overkill here. */ | ||
983 | native_set_pmd(&switcher_pmd, pfn_pmd(__pa(switcher_pte_page) >> | 1151 | native_set_pmd(&switcher_pmd, pfn_pmd(__pa(switcher_pte_page) >> |
984 | PAGE_SHIFT, PAGE_KERNEL_EXEC)); | 1152 | PAGE_SHIFT, PAGE_KERNEL_EXEC)); |
985 | 1153 | ||
1154 | /* Figure out where the pmd page is, by reading the PGD, and converting | ||
1155 | * it to a virtual address. */ | ||
986 | pmd_table = __va(pgd_pfn(cpu->lg-> | 1156 | pmd_table = __va(pgd_pfn(cpu->lg-> |
987 | pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX]) | 1157 | pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX]) |
988 | << PAGE_SHIFT); | 1158 | << PAGE_SHIFT); |
1159 | /* Now write it into the shadow page table. */ | ||
989 | native_set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd); | 1160 | native_set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd); |
990 | #else | 1161 | #else |
991 | pgd_t switcher_pgd; | 1162 | pgd_t switcher_pgd; |
992 | 1163 | ||
993 | /* Make the last PGD entry for this Guest point to the Switcher's PTE | 1164 | /* |
994 | * page for this CPU (with appropriate flags). */ | 1165 | * Make the last PGD entry for this Guest point to the Switcher's PTE |
1166 | * page for this CPU (with appropriate flags). | ||
1167 | */ | ||
995 | switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC); | 1168 | switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC); |
996 | 1169 | ||
997 | cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd; | 1170 | cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd; |
998 | 1171 | ||
999 | #endif | 1172 | #endif |
1000 | /* We also change the Switcher PTE page. When we're running the Guest, | 1173 | /* |
1174 | * We also change the Switcher PTE page. When we're running the Guest, | ||
1001 | * we want the Guest's "regs" page to appear where the first Switcher | 1175 | * we want the Guest's "regs" page to appear where the first Switcher |
1002 | * page for this CPU is. This is an optimization: when the Switcher | 1176 | * page for this CPU is. This is an optimization: when the Switcher |
1003 | * saves the Guest registers, it saves them into the first page of this | 1177 | * saves the Guest registers, it saves them into the first page of this |
1004 | * CPU's "struct lguest_pages": if we make sure the Guest's register | 1178 | * CPU's "struct lguest_pages": if we make sure the Guest's register |
1005 | * page is already mapped there, we don't have to copy them out | 1179 | * page is already mapped there, we don't have to copy them out |
1006 | * again. */ | 1180 | * again. |
1181 | */ | ||
1007 | pfn = __pa(cpu->regs_page) >> PAGE_SHIFT; | 1182 | pfn = __pa(cpu->regs_page) >> PAGE_SHIFT; |
1008 | native_set_pte(®s_pte, pfn_pte(pfn, PAGE_KERNEL)); | 1183 | native_set_pte(®s_pte, pfn_pte(pfn, PAGE_KERNEL)); |
1009 | native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)], | 1184 | native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)], |
@@ -1019,10 +1194,12 @@ static void free_switcher_pte_pages(void) | |||
1019 | free_page((long)switcher_pte_page(i)); | 1194 | free_page((long)switcher_pte_page(i)); |
1020 | } | 1195 | } |
1021 | 1196 | ||
1022 | /*H:520 Setting up the Switcher PTE page for given CPU is fairly easy, given | 1197 | /*H:520 |
1198 | * Setting up the Switcher PTE page for given CPU is fairly easy, given | ||
1023 | * the CPU number and the "struct page"s for the Switcher code itself. | 1199 | * the CPU number and the "struct page"s for the Switcher code itself. |
1024 | * | 1200 | * |
1025 | * Currently the Switcher is less than a page long, so "pages" is always 1. */ | 1201 | * Currently the Switcher is less than a page long, so "pages" is always 1. |
1202 | */ | ||
1026 | static __init void populate_switcher_pte_page(unsigned int cpu, | 1203 | static __init void populate_switcher_pte_page(unsigned int cpu, |
1027 | struct page *switcher_page[], | 1204 | struct page *switcher_page[], |
1028 | unsigned int pages) | 1205 | unsigned int pages) |
@@ -1043,13 +1220,16 @@ static __init void populate_switcher_pte_page(unsigned int cpu, | |||
1043 | native_set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]), | 1220 | native_set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]), |
1044 | __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW))); | 1221 | __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW))); |
1045 | 1222 | ||
1046 | /* The second page contains the "struct lguest_ro_state", and is | 1223 | /* |
1047 | * read-only. */ | 1224 | * The second page contains the "struct lguest_ro_state", and is |
1225 | * read-only. | ||
1226 | */ | ||
1048 | native_set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]), | 1227 | native_set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]), |
1049 | __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED))); | 1228 | __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED))); |
1050 | } | 1229 | } |
1051 | 1230 | ||
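The two protection masks in populate_switcher_pte_page() differ only in _PAGE_RW: the first of the two per-CPU pages has to be writable because, as H:480 explained, the Switcher saves the Guest's registers into it, while the "struct lguest_ro_state" page is mapped read-only. With the standard x86 flag values this is plain bit arithmetic; the numeric values below are the conventional ones, assumed for illustration rather than taken from this tree:

#include <stdio.h>

#define PG_PRESENT  0x001u
#define PG_RW       0x002u
#define PG_ACCESSED 0x020u

int main(void)
{
    unsigned int regs_page = PG_PRESENT | PG_ACCESSED | PG_RW; /* 0x23 */
    unsigned int ro_state  = PG_PRESENT | PG_ACCESSED;         /* 0x21 */

    /* Same mapping, minus the write bit for the read-only state page. */
    printf("regs page prot = %#x, ro_state page prot = %#x\n",
           regs_page, ro_state);
    return 0;
}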
1052 | /* We've made it through the page table code. Perhaps our tired brains are | 1231 | /* |
1232 | * We've made it through the page table code. Perhaps our tired brains are | ||
1053 | * still processing the details, or perhaps we're simply glad it's over. | 1233 | * still processing the details, or perhaps we're simply glad it's over. |
1054 | * | 1234 | * |
1055 | * If nothing else, note that all this complexity in juggling shadow page tables | 1235 | * If nothing else, note that all this complexity in juggling shadow page tables |
@@ -1058,10 +1238,13 @@ static __init void populate_switcher_pte_page(unsigned int cpu, | |||
1058 | * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD | 1238 | * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD |
1059 | * have implemented shadow page table support directly into hardware. | 1239 | * have implemented shadow page table support directly into hardware. |
1060 | * | 1240 | * |
1061 | * There is just one file remaining in the Host. */ | 1241 | * There is just one file remaining in the Host. |
1242 | */ | ||
1062 | 1243 | ||
1063 | /*H:510 At boot or module load time, init_pagetables() allocates and populates | 1244 | /*H:510 |
1064 | * the Switcher PTE page for each CPU. */ | 1245 | * At boot or module load time, init_pagetables() allocates and populates |
1246 | * the Switcher PTE page for each CPU. | ||
1247 | */ | ||
1065 | __init int init_pagetables(struct page **switcher_page, unsigned int pages) | 1248 | __init int init_pagetables(struct page **switcher_page, unsigned int pages) |
1066 | { | 1249 | { |
1067 | unsigned int i; | 1250 | unsigned int i; |
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c index 482ed5a18750..951c57b0a7e0 100644 --- a/drivers/lguest/segments.c +++ b/drivers/lguest/segments.c | |||
@@ -1,4 +1,5 @@ | |||
1 | /*P:600 The x86 architecture has segments, which involve a table of descriptors | 1 | /*P:600 |
2 | * The x86 architecture has segments, which involve a table of descriptors | ||
2 | * which can be used to do funky things with virtual address interpretation. | 3 | * which can be used to do funky things with virtual address interpretation. |
3 | * We originally used to use segments so the Guest couldn't alter the | 4 | * We originally used to use segments so the Guest couldn't alter the |
4 | * Guest<->Host Switcher, and then we had to trim Guest segments, and restore | 5 | * Guest<->Host Switcher, and then we had to trim Guest segments, and restore |
@@ -8,7 +9,8 @@ | |||
8 | * | 9 | * |
9 | * In these modern times, the segment handling code consists of simple sanity | 10 | * In these modern times, the segment handling code consists of simple sanity |
10 | * checks, and the worst you'll experience reading this code is butterfly-rash | 11 | * checks, and the worst you'll experience reading this code is butterfly-rash |
11 | * from frolicking through its parklike serenity. :*/ | 12 | * from frolicking through its parklike serenity. |
13 | :*/ | ||
12 | #include "lg.h" | 14 | #include "lg.h" |
13 | 15 | ||
14 | /*H:600 | 16 | /*H:600 |
@@ -41,10 +43,12 @@ | |||
41 | * begin. | 43 | * begin. |
42 | */ | 44 | */ |
43 | 45 | ||
44 | /* There are several entries we don't let the Guest set. The TSS entry is the | 46 | /* |
47 | * There are several entries we don't let the Guest set. The TSS entry is the | ||
45 | * "Task State Segment" which controls all kinds of delicate things. The | 48 | * "Task State Segment" which controls all kinds of delicate things. The |
46 | * LGUEST_CS and LGUEST_DS entries are reserved for the Switcher, and the | 49 | * LGUEST_CS and LGUEST_DS entries are reserved for the Switcher, and the |
47 | * Guest can't be trusted to deal with double faults. */ | 50 | * Guest can't be trusted to deal with double faults. |
51 | */ | ||
48 | static bool ignored_gdt(unsigned int num) | 52 | static bool ignored_gdt(unsigned int num) |
49 | { | 53 | { |
50 | return (num == GDT_ENTRY_TSS | 54 | return (num == GDT_ENTRY_TSS |
@@ -53,42 +57,52 @@ static bool ignored_gdt(unsigned int num) | |||
53 | || num == GDT_ENTRY_DOUBLEFAULT_TSS); | 57 | || num == GDT_ENTRY_DOUBLEFAULT_TSS); |
54 | } | 58 | } |
55 | 59 | ||
56 | /*H:630 Once the Guest gave us new GDT entries, we fix them up a little. We | 60 | /*H:630 |
61 | * Once the Guest gave us new GDT entries, we fix them up a little. We | ||
57 | * don't care if they're invalid: the worst that can happen is a General | 62 | * don't care if they're invalid: the worst that can happen is a General |
58 | * Protection Fault in the Switcher when it restores a Guest segment register | 63 | * Protection Fault in the Switcher when it restores a Guest segment register |
59 | * which tries to use that entry. Then we kill the Guest for causing such a | 64 | * which tries to use that entry. Then we kill the Guest for causing such a |
60 | * mess: the message will be "unhandled trap 256". */ | 65 | * mess: the message will be "unhandled trap 256". |
66 | */ | ||
61 | static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end) | 67 | static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end) |
62 | { | 68 | { |
63 | unsigned int i; | 69 | unsigned int i; |
64 | 70 | ||
65 | for (i = start; i < end; i++) { | 71 | for (i = start; i < end; i++) { |
66 | /* We never copy these ones to real GDT, so we don't care what | 72 | /* |
67 | * they say */ | 73 | * We never copy these ones to real GDT, so we don't care what |
74 | * they say | ||
75 | */ | ||
68 | if (ignored_gdt(i)) | 76 | if (ignored_gdt(i)) |
69 | continue; | 77 | continue; |
70 | 78 | ||
71 | /* Segment descriptors contain a privilege level: the Guest is | 79 | /* |
80 | * Segment descriptors contain a privilege level: the Guest is | ||
72 | * sometimes careless and leaves this as 0, even though it's | 81 | * sometimes careless and leaves this as 0, even though it's |
73 | * running at privilege level 1. If so, we fix it here. */ | 82 | * running at privilege level 1. If so, we fix it here. |
83 | */ | ||
74 | if ((cpu->arch.gdt[i].b & 0x00006000) == 0) | 84 | if ((cpu->arch.gdt[i].b & 0x00006000) == 0) |
75 | cpu->arch.gdt[i].b |= (GUEST_PL << 13); | 85 | cpu->arch.gdt[i].b |= (GUEST_PL << 13); |
76 | 86 | ||
77 | /* Each descriptor has an "accessed" bit. If we don't set it | 87 | /* |
88 | * Each descriptor has an "accessed" bit. If we don't set it | ||
78 | * now, the CPU will try to set it when the Guest first loads | 89 | * now, the CPU will try to set it when the Guest first loads |
79 | * that entry into a segment register. But the GDT isn't | 90 | * that entry into a segment register. But the GDT isn't |
80 | * writable by the Guest, so bad things can happen. */ | 91 | * writable by the Guest, so bad things can happen. |
92 | */ | ||
81 | cpu->arch.gdt[i].b |= 0x00000100; | 93 | cpu->arch.gdt[i].b |= 0x00000100; |
82 | } | 94 | } |
83 | } | 95 | } |
84 | 96 | ||
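The two magic numbers in fixup_gdt_table() decode as follows: in the high word of a descriptor, bits 13 and 14 hold the DPL, so 0x00006000 is the DPL mask and (GUEST_PL << 13) with GUEST_PL == 1 writes ring 1; bit 8, i.e. 0x00000100, is the accessed bit of the type field. A quick standalone check of that arithmetic on a made-up descriptor word:

#include <stdio.h>

#define GUEST_PL 1u                     /* the Guest runs at ring 1 */

int main(void)
{
    unsigned int b = 0x00cf9a00;        /* example high word: DPL 0, not accessed */

    if ((b & 0x00006000) == 0)          /* DPL field (bits 13-14) is zero?        */
        b |= (GUEST_PL << 13);          /* ...then force it to ring 1 (0x2000)    */
    b |= 0x00000100;                    /* pre-set the "accessed" type bit        */

    printf("fixed-up high word = %#010x\n", b);     /* prints 0x00cfbb00 */
    return 0;
}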
85 | /*H:610 Like the IDT, we never simply use the GDT the Guest gives us. We keep | 97 | /*H:610 |
98 | * Like the IDT, we never simply use the GDT the Guest gives us. We keep | ||
86 | * a GDT for each CPU, and copy across the Guest's entries each time we want to | 99 | * a GDT for each CPU, and copy across the Guest's entries each time we want to |
87 | * run the Guest on that CPU. | 100 | * run the Guest on that CPU. |
88 | * | 101 | * |
89 | * This routine is called at boot or modprobe time for each CPU to set up the | 102 | * This routine is called at boot or modprobe time for each CPU to set up the |
90 | * constant GDT entries: the ones which are the same no matter what Guest we're | 103 | * constant GDT entries: the ones which are the same no matter what Guest we're |
91 | * running. */ | 104 | * running. |
105 | */ | ||
92 | void setup_default_gdt_entries(struct lguest_ro_state *state) | 106 | void setup_default_gdt_entries(struct lguest_ro_state *state) |
93 | { | 107 | { |
94 | struct desc_struct *gdt = state->guest_gdt; | 108 | struct desc_struct *gdt = state->guest_gdt; |
@@ -98,30 +112,37 @@ void setup_default_gdt_entries(struct lguest_ro_state *state) | |||
98 | gdt[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT; | 112 | gdt[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT; |
99 | gdt[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT; | 113 | gdt[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT; |
100 | 114 | ||
101 | /* The TSS segment refers to the TSS entry for this particular CPU. | 115 | /* |
116 | * The TSS segment refers to the TSS entry for this particular CPU. | ||
102 | * Forgive the magic flags: the 0x8900 means the entry is Present, it's | 117 | * Forgive the magic flags: the 0x8900 means the entry is Present, it's |
103 | * privilege level 0 Available 386 TSS system segment, and the 0x67 | 118 | * privilege level 0 Available 386 TSS system segment, and the 0x67 |
104 | * means Saturn is eclipsed by Mercury in the twelfth house. */ | 119 | * means Saturn is eclipsed by Mercury in the twelfth house. |
120 | */ | ||
105 | gdt[GDT_ENTRY_TSS].a = 0x00000067 | (tss << 16); | 121 | gdt[GDT_ENTRY_TSS].a = 0x00000067 | (tss << 16); |
106 | gdt[GDT_ENTRY_TSS].b = 0x00008900 | (tss & 0xFF000000) | 122 | gdt[GDT_ENTRY_TSS].b = 0x00008900 | (tss & 0xFF000000) |
107 | | ((tss >> 16) & 0x000000FF); | 123 | | ((tss >> 16) & 0x000000FF); |
108 | } | 124 | } |
109 | 125 | ||
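For the suspicious, the astrology decodes mundanely: 0x67 is the 104-byte x86 TSS limit minus one, sitting in the low word of .a with base bits 15:0 above it, while 0x8900 in .b sets Present (bit 15), DPL 0 and type 0x9, an available 32-bit TSS; the two masked terms slot in base bits 31:24 and 23:16. The same assembly, done on a made-up address outside the kernel:

#include <stdio.h>

int main(void)
{
    unsigned int tss = 0x12345678;      /* example TSS address (made up)   */
    unsigned int a, b;

    a = 0x00000067 | (tss << 16);                  /* limit 0x67, base 15:0 */
    b = 0x00008900                                 /* P=1, DPL=0, type 0x9  */
        | (tss & 0xFF000000)                       /* base 31:24            */
        | ((tss >> 16) & 0x000000FF);              /* base 23:16            */

    printf("a=%#010x b=%#010x\n", a, b);    /* a=0x56780067 b=0x12008934 */
    return 0;
}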
110 | /* This routine sets up the initial Guest GDT for booting. All entries start | 126 | /* |
111 | * as 0 (unusable). */ | 127 | * This routine sets up the initial Guest GDT for booting. All entries start |
128 | * as 0 (unusable). | ||
129 | */ | ||
112 | void setup_guest_gdt(struct lg_cpu *cpu) | 130 | void setup_guest_gdt(struct lg_cpu *cpu) |
113 | { | 131 | { |
114 | /* Start with full 0-4G segments... */ | 132 | /* |
133 | * Start with full 0-4G segments...except the Guest is allowed to use | ||
134 | * them, so set the privilege level appropriately in the flags. | ||
135 | */ | ||
115 | cpu->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT; | 136 | cpu->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT; |
116 | cpu->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT; | 137 | cpu->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT; |
117 | /* ...except the Guest is allowed to use them, so set the privilege | ||
118 | * level appropriately in the flags. */ | ||
119 | cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13); | 138 | cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13); |
120 | cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13); | 139 | cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13); |
121 | } | 140 | } |
122 | 141 | ||
123 | /*H:650 An optimization of copy_gdt(), for just the three "thread-local storage" | 142 | /*H:650 |
124 | * entries. */ | 143 | * An optimization of copy_gdt(), for just the three "thread-local storage" |
144 | * entries. | ||
145 | */ | ||
125 | void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt) | 146 | void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt) |
126 | { | 147 | { |
127 | unsigned int i; | 148 | unsigned int i; |
@@ -130,26 +151,34 @@ void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt) | |||
130 | gdt[i] = cpu->arch.gdt[i]; | 151 | gdt[i] = cpu->arch.gdt[i]; |
131 | } | 152 | } |
132 | 153 | ||
133 | /*H:640 When the Guest is run on a different CPU, or the GDT entries have | 154 | /*H:640 |
134 | * changed, copy_gdt() is called to copy the Guest's GDT entries across to this | 155 | * When the Guest is run on a different CPU, or the GDT entries have changed, |
135 | * CPU's GDT. */ | 156 | * copy_gdt() is called to copy the Guest's GDT entries across to this CPU's |
157 | * GDT. | ||
158 | */ | ||
136 | void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt) | 159 | void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt) |
137 | { | 160 | { |
138 | unsigned int i; | 161 | unsigned int i; |
139 | 162 | ||
140 | /* The default entries from setup_default_gdt_entries() are not | 163 | /* |
141 | * replaced. See ignored_gdt() above. */ | 164 | * The default entries from setup_default_gdt_entries() are not |
165 | * replaced. See ignored_gdt() above. | ||
166 | */ | ||
142 | for (i = 0; i < GDT_ENTRIES; i++) | 167 | for (i = 0; i < GDT_ENTRIES; i++) |
143 | if (!ignored_gdt(i)) | 168 | if (!ignored_gdt(i)) |
144 | gdt[i] = cpu->arch.gdt[i]; | 169 | gdt[i] = cpu->arch.gdt[i]; |
145 | } | 170 | } |
146 | 171 | ||
147 | /*H:620 This is where the Guest asks us to load a new GDT entry | 172 | /*H:620 |
148 | * (LHCALL_LOAD_GDT_ENTRY). We tweak the entry and copy it in. */ | 173 | * This is where the Guest asks us to load a new GDT entry |
174 | * (LHCALL_LOAD_GDT_ENTRY). We tweak the entry and copy it in. | ||
175 | */ | ||
149 | void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi) | 176 | void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi) |
150 | { | 177 | { |
151 | /* We assume the Guest has the same number of GDT entries as the | 178 | /* |
152 | * Host, otherwise we'd have to dynamically allocate the Guest GDT. */ | 179 | * We assume the Guest has the same number of GDT entries as the |
180 | * Host, otherwise we'd have to dynamically allocate the Guest GDT. | ||
181 | */ | ||
153 | if (num >= ARRAY_SIZE(cpu->arch.gdt)) | 182 | if (num >= ARRAY_SIZE(cpu->arch.gdt)) |
154 | kill_guest(cpu, "too many gdt entries %i", num); | 183 | kill_guest(cpu, "too many gdt entries %i", num); |
155 | 184 | ||
@@ -157,15 +186,19 @@ void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi) | |||
157 | cpu->arch.gdt[num].a = lo; | 186 | cpu->arch.gdt[num].a = lo; |
158 | cpu->arch.gdt[num].b = hi; | 187 | cpu->arch.gdt[num].b = hi; |
159 | fixup_gdt_table(cpu, num, num+1); | 188 | fixup_gdt_table(cpu, num, num+1); |
160 | /* Mark that the GDT changed so the core knows it has to copy it again, | 189 | /* |
161 | * even if the Guest is run on the same CPU. */ | 190 | * Mark that the GDT changed so the core knows it has to copy it again, |
191 | * even if the Guest is run on the same CPU. | ||
192 | */ | ||
162 | cpu->changed |= CHANGED_GDT; | 193 | cpu->changed |= CHANGED_GDT; |
163 | } | 194 | } |
164 | 195 | ||
165 | /* This is the fast-track version for just changing the three TLS entries. | 196 | /* |
197 | * This is the fast-track version for just changing the three TLS entries. | ||
166 | * Remember that this happens on every context switch, so it's worth | 198 | * Remember that this happens on every context switch, so it's worth |
167 | * optimizing. But wouldn't it be neater to have a single hypercall to cover | 199 | * optimizing. But wouldn't it be neater to have a single hypercall to cover |
168 | * both cases? */ | 200 | * both cases? |
201 | */ | ||
169 | void guest_load_tls(struct lg_cpu *cpu, unsigned long gtls) | 202 | void guest_load_tls(struct lg_cpu *cpu, unsigned long gtls) |
170 | { | 203 | { |
171 | struct desc_struct *tls = &cpu->arch.gdt[GDT_ENTRY_TLS_MIN]; | 204 | struct desc_struct *tls = &cpu->arch.gdt[GDT_ENTRY_TLS_MIN]; |
@@ -175,7 +208,6 @@ void guest_load_tls(struct lg_cpu *cpu, unsigned long gtls) | |||
175 | /* Note that just the TLS entries have changed. */ | 208 | /* Note that just the TLS entries have changed. */ |
176 | cpu->changed |= CHANGED_GDT_TLS; | 209 | cpu->changed |= CHANGED_GDT_TLS; |
177 | } | 210 | } |
178 | /*:*/ | ||
179 | 211 | ||
180 | /*H:660 | 212 | /*H:660 |
181 | * With this, we have finished the Host. | 213 | * With this, we have finished the Host. |
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index eaf722fe309a..6ae388849a3b 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c | |||
@@ -17,13 +17,15 @@ | |||
17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
19 | */ | 19 | */ |
20 | /*P:450 This file contains the x86-specific lguest code. It used to be all | 20 | /*P:450 |
21 | * This file contains the x86-specific lguest code. It used to be all | ||
21 | * mixed in with drivers/lguest/core.c but several foolhardy code slashers | 22 | * mixed in with drivers/lguest/core.c but several foolhardy code slashers |
22 | * wrestled most of the dependencies out to here in preparation for porting | 23 | * wrestled most of the dependencies out to here in preparation for porting |
23 | * lguest to other architectures (see what I mean by foolhardy?). | 24 | * lguest to other architectures (see what I mean by foolhardy?). |
24 | * | 25 | * |
25 | * This also contains a couple of non-obvious setup and teardown pieces which | 26 | * This also contains a couple of non-obvious setup and teardown pieces which |
26 | * were implemented after days of debugging pain. :*/ | 27 | * were implemented after days of debugging pain. |
28 | :*/ | ||
27 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
28 | #include <linux/start_kernel.h> | 30 | #include <linux/start_kernel.h> |
29 | #include <linux/string.h> | 31 | #include <linux/string.h> |
@@ -82,25 +84,33 @@ static DEFINE_PER_CPU(struct lg_cpu *, last_cpu); | |||
82 | */ | 84 | */ |
83 | static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages) | 85 | static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages) |
84 | { | 86 | { |
85 | /* Copying all this data can be quite expensive. We usually run the | 87 | /* |
88 | * Copying all this data can be quite expensive. We usually run the | ||
86 | * same Guest we ran last time (and that Guest hasn't run anywhere else | 89 | * same Guest we ran last time (and that Guest hasn't run anywhere else |
87 | * meanwhile). If that's not the case, we pretend everything in the | 90 | * meanwhile). If that's not the case, we pretend everything in the |
88 | * Guest has changed. */ | 91 | * Guest has changed. |
92 | */ | ||
89 | if (__get_cpu_var(last_cpu) != cpu || cpu->last_pages != pages) { | 93 | if (__get_cpu_var(last_cpu) != cpu || cpu->last_pages != pages) { |
90 | __get_cpu_var(last_cpu) = cpu; | 94 | __get_cpu_var(last_cpu) = cpu; |
91 | cpu->last_pages = pages; | 95 | cpu->last_pages = pages; |
92 | cpu->changed = CHANGED_ALL; | 96 | cpu->changed = CHANGED_ALL; |
93 | } | 97 | } |
94 | 98 | ||
95 | /* These copies are pretty cheap, so we do them unconditionally: */ | 99 | /* |
96 | /* Save the current Host top-level page directory. */ | 100 | * These copies are pretty cheap, so we do them unconditionally: |
101 | * Save the current Host top-level page directory. | ||
102 | */ | ||
97 | pages->state.host_cr3 = __pa(current->mm->pgd); | 103 | pages->state.host_cr3 = __pa(current->mm->pgd); |
98 | /* Set up the Guest's page tables to see this CPU's pages (and no | 104 | /* |
99 | * other CPU's pages). */ | 105 | * Set up the Guest's page tables to see this CPU's pages (and no |
106 | * other CPU's pages). | ||
107 | */ | ||
100 | map_switcher_in_guest(cpu, pages); | 108 | map_switcher_in_guest(cpu, pages); |
101 | /* Set up the two "TSS" members which tell the CPU what stack to use | 109 | /* |
110 | * Set up the two "TSS" members which tell the CPU what stack to use | ||
102 | * for traps which go directly into the Guest (ie. traps at privilege | 111 | * for traps which go directly into the Guest (ie. traps at privilege |
103 | * level 1). */ | 112 | * level 1). |
113 | */ | ||
104 | pages->state.guest_tss.sp1 = cpu->esp1; | 114 | pages->state.guest_tss.sp1 = cpu->esp1; |
105 | pages->state.guest_tss.ss1 = cpu->ss1; | 115 | pages->state.guest_tss.ss1 = cpu->ss1; |
106 | 116 | ||
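The test at the top of copy_in_guest_info() is a small per-CPU memoization: remember who used this CPU last, and if that changed (or the pages moved), assume everything is stale and recopy it all. A userspace skeleton of the same idea, with invented names and an ordinary pointer standing in for the per-CPU last_cpu variable:

#include <stdio.h>

#define CHANGED_ALL 0xFFFFu

struct owner { int id; unsigned int changed; };

static struct owner *last;              /* stand-in for the per-CPU last_cpu */

static void prepare(struct owner *o)
{
    /* New tenant on this CPU: assume everything it cares about is stale. */
    if (last != o) {
        last = o;
        o->changed = CHANGED_ALL;
    }
    if (o->changed)
        printf("owner %d: recopy mask %#x\n", o->id, o->changed);
    o->changed = 0;                     /* expensive copies done; clear the mask */
}

int main(void)
{
    struct owner a = { 1, 0 }, b = { 2, 0 };

    prepare(&a);    /* first run: copy everything       */
    prepare(&a);    /* same owner again: nothing to do  */
    prepare(&b);    /* different owner: copy everything */
    return 0;
}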
@@ -125,97 +135,126 @@ static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages) | |||
125 | /* This is a dummy value we need for GCC's sake. */ | 135 | /* This is a dummy value we need for GCC's sake. */ |
126 | unsigned int clobber; | 136 | unsigned int clobber; |
127 | 137 | ||
128 | /* Copy the guest-specific information into this CPU's "struct | 138 | /* |
129 | * lguest_pages". */ | 139 | * Copy the guest-specific information into this CPU's "struct |
140 | * lguest_pages". | ||
141 | */ | ||
130 | copy_in_guest_info(cpu, pages); | 142 | copy_in_guest_info(cpu, pages); |
131 | 143 | ||
132 | /* Set the trap number to 256 (impossible value). If we fault while | 144 | /* |
145 | * Set the trap number to 256 (impossible value). If we fault while | ||
133 | * switching to the Guest (bad segment registers or bug), this will | 146 | * switching to the Guest (bad segment registers or bug), this will |
134 | * cause us to abort the Guest. */ | 147 | * cause us to abort the Guest. |
148 | */ | ||
135 | cpu->regs->trapnum = 256; | 149 | cpu->regs->trapnum = 256; |
136 | 150 | ||
137 | /* Now: we push the "eflags" register on the stack, then do an "lcall". | 151 | /* |
152 | * Now: we push the "eflags" register on the stack, then do an "lcall". | ||
138 | * This is how we change from using the kernel code segment to using | 153 | * This is how we change from using the kernel code segment to using |
139 | * the dedicated lguest code segment, as well as jumping into the | 154 | * the dedicated lguest code segment, as well as jumping into the |
140 | * Switcher. | 155 | * Switcher. |
141 | * | 156 | * |
142 | * The lcall also pushes the old code segment (KERNEL_CS) onto the | 157 | * The lcall also pushes the old code segment (KERNEL_CS) onto the |
143 | * stack, then the address of this call. This stack layout happens to | 158 | * stack, then the address of this call. This stack layout happens to |
144 | * exactly match the stack layout created by an interrupt... */ | 159 | * exactly match the stack layout created by an interrupt... |
160 | */ | ||
145 | asm volatile("pushf; lcall *lguest_entry" | 161 | asm volatile("pushf; lcall *lguest_entry" |
146 | /* This is how we tell GCC that %eax ("a") and %ebx ("b") | 162 | /* |
147 | * are changed by this routine. The "=" means output. */ | 163 | * This is how we tell GCC that %eax ("a") and %ebx ("b") |
164 | * are changed by this routine. The "=" means output. | ||
165 | */ | ||
148 | : "=a"(clobber), "=b"(clobber) | 166 | : "=a"(clobber), "=b"(clobber) |
149 | /* %eax contains the pages pointer. ("0" refers to the | 167 | /* |
168 | * %eax contains the pages pointer. ("0" refers to the | ||
150 | * 0-th argument above, ie "a"). %ebx contains the | 169 | * 0-th argument above, ie "a"). %ebx contains the |
151 | * physical address of the Guest's top-level page | 170 | * physical address of the Guest's top-level page |
152 | * directory. */ | 171 | * directory. |
172 | */ | ||
153 | : "0"(pages), "1"(__pa(cpu->lg->pgdirs[cpu->cpu_pgd].pgdir)) | 173 | : "0"(pages), "1"(__pa(cpu->lg->pgdirs[cpu->cpu_pgd].pgdir)) |
154 | /* We tell gcc that all these registers could change, | 174 | /* |
175 | * We tell gcc that all these registers could change, | ||
155 | * which means we don't have to save and restore them in | 176 | * which means we don't have to save and restore them in |
156 | * the Switcher. */ | 177 | * the Switcher. |
178 | */ | ||
157 | : "memory", "%edx", "%ecx", "%edi", "%esi"); | 179 | : "memory", "%edx", "%ecx", "%edi", "%esi"); |
158 | } | 180 | } |
159 | /*:*/ | 181 | /*:*/ |
160 | 182 | ||
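The constraint strings in run_guest_once() are ordinary GCC extended asm: "=a" and "=b" declare %eax and %ebx as outputs, a bare digit such as "0" ties an input to the same register as that numbered operand, and the final list names clobbers the compiler must assume are trashed. A minimal x86-only illustration of just that syntax (gcc or clang; nothing lguest-specific in it):

#include <stdio.h>

int main(void)
{
    unsigned int in = 41, out, scratch;

    asm volatile("incl %0; movl $7, %1"
                 : "=a"(out), "=b"(scratch)  /* outputs live in %eax, %ebx    */
                 : "0"(in)                   /* input shares operand 0 (%eax) */
                 : "memory");                /* clobber: don't cache memory   */

    printf("out=%u scratch=%u\n", out, scratch);   /* out=42 scratch=7 */
    return 0;
}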
161 | /*M:002 There are hooks in the scheduler which we can register to tell when we | 183 | /*M:002 |
184 | * There are hooks in the scheduler which we can register to tell when we | ||
162 | * get kicked off the CPU (preempt_notifier_register()). This would allow us | 185 | * get kicked off the CPU (preempt_notifier_register()). This would allow us |
163 | * to lazily disable SYSENTER which would regain some performance, and should | 186 | * to lazily disable SYSENTER which would regain some performance, and should |
164 | * also simplify copy_in_guest_info(). Note that we'd still need to restore | 187 | * also simplify copy_in_guest_info(). Note that we'd still need to restore |
165 | * things when we exit to Launcher userspace, but that's fairly easy. | 188 | * things when we exit to Launcher userspace, but that's fairly easy. |
166 | * | 189 | * |
167 | * We could also try using this hooks for PGE, but that might be too expensive. | 190 | * We could also try using these hooks for PGE, but that might be too expensive. |
168 | * | 191 | * |
169 | * The hooks were designed for KVM, but we can also put them to good use. :*/ | 192 | * The hooks were designed for KVM, but we can also put them to good use. |
193 | :*/ | ||
170 | 194 | ||
171 | /*H:040 This is the i386-specific code to setup and run the Guest. Interrupts | 195 | /*H:040 |
172 | * are disabled: we own the CPU. */ | 196 | * This is the i386-specific code to setup and run the Guest. Interrupts |
197 | * are disabled: we own the CPU. | ||
198 | */ | ||
173 | void lguest_arch_run_guest(struct lg_cpu *cpu) | 199 | void lguest_arch_run_guest(struct lg_cpu *cpu) |
174 | { | 200 | { |
175 | /* Remember the awfully-named TS bit? If the Guest has asked to set it | 201 | /* |
202 | * Remember the awfully-named TS bit? If the Guest has asked to set it | ||
176 | * we set it now, so we can trap and pass that trap to the Guest if it | 203 | * we set it now, so we can trap and pass that trap to the Guest if it |
177 | * uses the FPU. */ | 204 | * uses the FPU. |
205 | */ | ||
178 | if (cpu->ts) | 206 | if (cpu->ts) |
179 | unlazy_fpu(current); | 207 | unlazy_fpu(current); |
180 | 208 | ||
181 | /* SYSENTER is an optimized way of doing system calls. We can't allow | 209 | /* |
210 | * SYSENTER is an optimized way of doing system calls. We can't allow | ||
182 | * it because it always jumps to privilege level 0. A normal Guest | 211 | * it because it always jumps to privilege level 0. A normal Guest |
183 | * won't try it because we don't advertise it in CPUID, but a malicious | 212 | * won't try it because we don't advertise it in CPUID, but a malicious |
184 | * Guest (or malicious Guest userspace program) could, so we tell the | 213 | * Guest (or malicious Guest userspace program) could, so we tell the |
185 | * CPU to disable it before running the Guest. */ | 214 | * CPU to disable it before running the Guest. |
215 | */ | ||
186 | if (boot_cpu_has(X86_FEATURE_SEP)) | 216 | if (boot_cpu_has(X86_FEATURE_SEP)) |
187 | wrmsr(MSR_IA32_SYSENTER_CS, 0, 0); | 217 | wrmsr(MSR_IA32_SYSENTER_CS, 0, 0); |
188 | 218 | ||
189 | /* Now we actually run the Guest. It will return when something | 219 | /* |
220 | * Now we actually run the Guest. It will return when something | ||
190 | * interesting happens, and we can examine its registers to see what it | 221 | * interesting happens, and we can examine its registers to see what it |
191 | * was doing. */ | 222 | * was doing. |
223 | */ | ||
192 | run_guest_once(cpu, lguest_pages(raw_smp_processor_id())); | 224 | run_guest_once(cpu, lguest_pages(raw_smp_processor_id())); |
193 | 225 | ||
194 | /* Note that the "regs" structure contains two extra entries which are | 226 | /* |
227 | * Note that the "regs" structure contains two extra entries which are | ||
195 | * not really registers: a trap number which says what interrupt or | 228 | * not really registers: a trap number which says what interrupt or |
196 | * trap made the switcher code come back, and an error code which some | 229 | * trap made the switcher code come back, and an error code which some |
197 | * traps set. */ | 230 | * traps set. |
231 | */ | ||
198 | 232 | ||
199 | /* Restore SYSENTER if it's supposed to be on. */ | 233 | /* Restore SYSENTER if it's supposed to be on. */ |
200 | if (boot_cpu_has(X86_FEATURE_SEP)) | 234 | if (boot_cpu_has(X86_FEATURE_SEP)) |
201 | wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0); | 235 | wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0); |
202 | 236 | ||
203 | /* If the Guest page faulted, then the cr2 register will tell us the | 237 | /* |
238 | * If the Guest page faulted, then the cr2 register will tell us the | ||
204 | * bad virtual address. We have to grab this now, because once we | 239 | * bad virtual address. We have to grab this now, because once we |
205 | * re-enable interrupts an interrupt could fault and thus overwrite | 240 | * re-enable interrupts an interrupt could fault and thus overwrite |
206 | * cr2, or we could even move off to a different CPU. */ | 241 | * cr2, or we could even move off to a different CPU. |
242 | */ | ||
207 | if (cpu->regs->trapnum == 14) | 243 | if (cpu->regs->trapnum == 14) |
208 | cpu->arch.last_pagefault = read_cr2(); | 244 | cpu->arch.last_pagefault = read_cr2(); |
209 | /* Similarly, if we took a trap because the Guest used the FPU, | 245 | /* |
246 | * Similarly, if we took a trap because the Guest used the FPU, | ||
210 | * we have to restore the FPU it expects to see. | 247 | * we have to restore the FPU it expects to see. |
211 | * math_state_restore() may sleep and we may even move off to | 248 | * math_state_restore() may sleep and we may even move off to |
212 | * a different CPU. So all the critical stuff should be done | 249 | * a different CPU. So all the critical stuff should be done |
213 | * before this. */ | 250 | * before this. |
251 | */ | ||
214 | else if (cpu->regs->trapnum == 7) | 252 | else if (cpu->regs->trapnum == 7) |
215 | math_state_restore(); | 253 | math_state_restore(); |
216 | } | 254 | } |
217 | 255 | ||
218 | /*H:130 Now we've examined the hypercall code; our Guest can make requests. | 256 | /*H:130 |
257 | * Now we've examined the hypercall code; our Guest can make requests. | ||
219 | * Our Guest is usually so well behaved; it never tries to do things it isn't | 258 | * Our Guest is usually so well behaved; it never tries to do things it isn't |
220 | * allowed to, and uses hypercalls instead. Unfortunately, Linux's paravirtual | 259 | * allowed to, and uses hypercalls instead. Unfortunately, Linux's paravirtual |
221 | * infrastructure isn't quite complete, because it doesn't contain replacements | 260 | * infrastructure isn't quite complete, because it doesn't contain replacements |
@@ -225,26 +264,33 @@ void lguest_arch_run_guest(struct lg_cpu *cpu) | |||
225 | * | 264 | * |
226 | * When the Guest uses one of these instructions, we get a trap (General | 265 | * When the Guest uses one of these instructions, we get a trap (General |
227 | * Protection Fault) and come here. We see if it's one of those troublesome | 266 | * Protection Fault) and come here. We see if it's one of those troublesome |
228 | * instructions and skip over it. We return true if we did. */ | 267 | * instructions and skip over it. We return true if we did. |
268 | */ | ||
229 | static int emulate_insn(struct lg_cpu *cpu) | 269 | static int emulate_insn(struct lg_cpu *cpu) |
230 | { | 270 | { |
231 | u8 insn; | 271 | u8 insn; |
232 | unsigned int insnlen = 0, in = 0, shift = 0; | 272 | unsigned int insnlen = 0, in = 0, shift = 0; |
233 | /* The eip contains the *virtual* address of the Guest's instruction: | 273 | /* |
234 | * guest_pa just subtracts the Guest's page_offset. */ | 274 | * The eip contains the *virtual* address of the Guest's instruction: |
275 | * guest_pa just subtracts the Guest's page_offset. | ||
276 | */ | ||
235 | unsigned long physaddr = guest_pa(cpu, cpu->regs->eip); | 277 | unsigned long physaddr = guest_pa(cpu, cpu->regs->eip); |
236 | 278 | ||
237 | /* This must be the Guest kernel trying to do something, not userspace! | 279 | /* |
280 | * This must be the Guest kernel trying to do something, not userspace! | ||
238 | * The bottom two bits of the CS segment register are the privilege | 281 | * The bottom two bits of the CS segment register are the privilege |
239 | * level. */ | 282 | * level. |
283 | */ | ||
240 | if ((cpu->regs->cs & 3) != GUEST_PL) | 284 | if ((cpu->regs->cs & 3) != GUEST_PL) |
241 | return 0; | 285 | return 0; |
242 | 286 | ||
243 | /* Decoding x86 instructions is icky. */ | 287 | /* Decoding x86 instructions is icky. */ |
244 | insn = lgread(cpu, physaddr, u8); | 288 | insn = lgread(cpu, physaddr, u8); |
245 | 289 | ||
246 | /* 0x66 is an "operand prefix". It means it's using the upper 16 bits | 290 | /* |
247 | of the eax register. */ | 291 | * 0x66 is an "operand prefix". It means it's using the upper 16 bits |
292 | * of the eax register. | ||
293 | */ | ||
248 | if (insn == 0x66) { | 294 | if (insn == 0x66) { |
249 | shift = 16; | 295 | shift = 16; |
250 | /* The instruction is 1 byte so far, read the next byte. */ | 296 | /* The instruction is 1 byte so far, read the next byte. */ |
@@ -252,8 +298,10 @@ static int emulate_insn(struct lg_cpu *cpu) | |||
252 | insn = lgread(cpu, physaddr + insnlen, u8); | 298 | insn = lgread(cpu, physaddr + insnlen, u8); |
253 | } | 299 | } |
254 | 300 | ||
255 | /* We can ignore the lower bit for the moment and decode the 4 opcodes | 301 | /* |
256 | * we need to emulate. */ | 302 | * We can ignore the lower bit for the moment and decode the 4 opcodes |
303 | * we need to emulate. | ||
304 | */ | ||
257 | switch (insn & 0xFE) { | 305 | switch (insn & 0xFE) { |
258 | case 0xE4: /* in <next byte>,%al */ | 306 | case 0xE4: /* in <next byte>,%al */ |
259 | insnlen += 2; | 307 | insnlen += 2; |
@@ -274,9 +322,11 @@ static int emulate_insn(struct lg_cpu *cpu) | |||
274 | return 0; | 322 | return 0; |
275 | } | 323 | } |
276 | 324 | ||
277 | /* If it was an "IN" instruction, they expect the result to be read | 325 | /* |
326 | * If it was an "IN" instruction, they expect the result to be read | ||
278 | * into %eax, so we change %eax. We always return all-ones, which | 327 | * into %eax, so we change %eax. We always return all-ones, which |
279 | * traditionally means "there's nothing there". */ | 328 | * traditionally means "there's nothing there". |
329 | */ | ||
280 | if (in) { | 330 | if (in) { |
281 | /* Lower bit tells us whether it's a 16 or 32 bit access */ | 331 | /* Lower bit tells us whether it's a 16 or 32 bit access */ |
282 | if (insn & 0x1) | 332 | if (insn & 0x1) |
@@ -290,7 +340,8 @@ static int emulate_insn(struct lg_cpu *cpu) | |||
290 | return 1; | 340 | return 1; |
291 | } | 341 | } |
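Editorial aside (not part of the patch): the decode that emulate_insn's comments walk through can be modelled in ordinary userspace C. The opcode facts are from the x86 ISA (port I/O is 0xE4-0xE7 with an immediate port and 0xEC-0xEF with the port in %dx, low bit selecting byte vs. word/long, 0x66 being the operand-size prefix); the function decode_io and everything else below are an illustrative sketch, not the lguest code.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Editorial sketch of the IN/OUT decode described above.
 * Returns the number of bytes to skip, or 0 if we don't emulate it.
 */
static unsigned decode_io(const uint8_t *bytes, uint32_t *eax)
{
        unsigned len = 0;
        bool prefixed = false, in = false;
        uint8_t insn = bytes[0];

        if (insn == 0x66) {             /* operand-size override prefix */
                prefixed = true;
                len = 1;
                insn = bytes[len];
        }

        switch (insn & 0xFE) {          /* ignore the width bit for now */
        case 0xE4: len += 2; in = true;  break;   /* in  al/ax/eax, imm8 */
        case 0xE6: len += 2; in = false; break;   /* out imm8, al/ax/eax */
        case 0xEC: len += 1; in = true;  break;   /* in  al/ax/eax, (%dx) */
        case 0xEE: len += 1; in = false; break;   /* out (%dx), al/ax/eax */
        default:   return 0;                      /* not something we emulate */
        }

        /* Reads from a port nobody answers traditionally return all-ones. */
        if (in) {
                if (!(insn & 0x1))
                        *eax |= 0xFFu;          /* byte form: only %al */
                else if (prefixed)
                        *eax |= 0xFFFFu;        /* 16-bit form: %ax */
                else
                        *eax = 0xFFFFFFFFu;     /* 32-bit form: %eax */
        }
        return len;
}

int main(void)
{
        const uint8_t prog[] = { 0xEC };        /* in (%dx),%al */
        uint32_t eax = 0;

        printf("skip %u byte(s), eax=%#x\n", decode_io(prog, &eax),
               (unsigned)eax);
        return 0;
}

Masking with 0xFE is what lets one case label cover both the byte and the word/long form of each instruction, which is exactly the "ignore the lower bit" step the comment mentions.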
292 | 342 | ||
293 | /* Our hypercalls mechanism used to be based on direct software interrupts. | 343 | /* |
344 | * Our hypercalls mechanism used to be based on direct software interrupts. | ||
294 | * After Anthony's "Refactor hypercall infrastructure" kvm patch, we decided to | 345 | * After Anthony's "Refactor hypercall infrastructure" kvm patch, we decided to |
295 | * change over to using kvm hypercalls. | 346 | * change over to using kvm hypercalls. |
296 | * | 347 | * |
@@ -318,16 +369,20 @@ static int emulate_insn(struct lg_cpu *cpu) | |||
318 | */ | 369 | */ |
319 | static void rewrite_hypercall(struct lg_cpu *cpu) | 370 | static void rewrite_hypercall(struct lg_cpu *cpu) |
320 | { | 371 | { |
321 | /* These are the opcodes we use to patch the Guest. The opcode for "int | 372 | /* |
373 | * These are the opcodes we use to patch the Guest. The opcode for "int | ||
322 | * $0x1f" is "0xcd 0x1f" but the vmcall instruction is 3 bytes long, so we | 374 | * $0x1f" is "0xcd 0x1f" but the vmcall instruction is 3 bytes long, so we |
323 | * complete the sequence with a NOP (0x90). */ | 375 | * complete the sequence with a NOP (0x90). |
376 | */ | ||
324 | u8 insn[3] = {0xcd, 0x1f, 0x90}; | 377 | u8 insn[3] = {0xcd, 0x1f, 0x90}; |
325 | 378 | ||
326 | __lgwrite(cpu, guest_pa(cpu, cpu->regs->eip), insn, sizeof(insn)); | 379 | __lgwrite(cpu, guest_pa(cpu, cpu->regs->eip), insn, sizeof(insn)); |
327 | /* The above write might have caused a copy of that page to be made | 380 | /* |
381 | * The above write might have caused a copy of that page to be made | ||
328 | * (if it was read-only). We need to make sure the Guest has | 382 | * (if it was read-only). We need to make sure the Guest has |
329 | * up-to-date pagetables. As this doesn't happen often, we can just | 383 | * up-to-date pagetables. As this doesn't happen often, we can just |
330 | * drop them all. */ | 384 | * drop them all. |
385 | */ | ||
331 | guest_pagetable_clear_all(cpu); | 386 | guest_pagetable_clear_all(cpu); |
332 | } | 387 | } |
333 | 388 | ||
@@ -335,9 +390,11 @@ static bool is_hypercall(struct lg_cpu *cpu) | |||
335 | { | 390 | { |
336 | u8 insn[3]; | 391 | u8 insn[3]; |
337 | 392 | ||
338 | /* This must be the Guest kernel trying to do something. | 393 | /* |
394 | * This must be the Guest kernel trying to do something. | ||
339 | * The bottom two bits of the CS segment register are the privilege | 395 | * The bottom two bits of the CS segment register are the privilege |
340 | * level. */ | 396 | * level. |
397 | */ | ||
341 | if ((cpu->regs->cs & 3) != GUEST_PL) | 398 | if ((cpu->regs->cs & 3) != GUEST_PL) |
342 | return false; | 399 | return false; |
343 | 400 | ||
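Editorial aside (not part of the patch): the byte comparison inside is_hypercall() falls outside this hunk. As a sketch of the detect-and-patch step, assuming only the Intel vmcall encoding 0f 01 c1 (an x86 ISA fact; the real helper may accept other sequences as well) and taking the replacement bytes (int $0x1f = cd 1f plus a nop = 90) from the rewrite_hypercall hunk above. looks_like_vmcall is an illustrative name.

#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <stdio.h>

static const uint8_t vmcall[3]     = { 0x0f, 0x01, 0xc1 };
static const uint8_t int_1f_nop[3] = { 0xcd, 0x1f, 0x90 };

static bool looks_like_vmcall(const uint8_t *ip)
{
        return memcmp(ip, vmcall, sizeof(vmcall)) == 0;
}

int main(void)
{
        uint8_t text[3] = { 0x0f, 0x01, 0xc1 };         /* a guest vmcall */

        if (looks_like_vmcall(text))
                memcpy(text, int_1f_nop, sizeof(int_1f_nop));
        printf("patched to: %02x %02x %02x\n", text[0], text[1], text[2]);
        return 0;
}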
@@ -351,86 +408,105 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu) | |||
351 | { | 408 | { |
352 | switch (cpu->regs->trapnum) { | 409 | switch (cpu->regs->trapnum) { |
353 | case 13: /* We've intercepted a General Protection Fault. */ | 410 | case 13: /* We've intercepted a General Protection Fault. */ |
354 | /* Check if this was one of those annoying IN or OUT | 411 | /* |
412 | * Check if this was one of those annoying IN or OUT | ||
355 | * instructions which we need to emulate. If so, we just go | 413 | * instructions which we need to emulate. If so, we just go |
356 | * back into the Guest after we've done it. */ | 414 | * back into the Guest after we've done it. |
415 | */ | ||
357 | if (cpu->regs->errcode == 0) { | 416 | if (cpu->regs->errcode == 0) { |
358 | if (emulate_insn(cpu)) | 417 | if (emulate_insn(cpu)) |
359 | return; | 418 | return; |
360 | } | 419 | } |
361 | /* If KVM is active, the vmcall instruction triggers a | 420 | /* |
362 | * General Protection Fault. Normally it triggers an | 421 | * If KVM is active, the vmcall instruction triggers a General |
363 | * invalid opcode fault (6): */ | 422 | * Protection Fault. Normally it triggers an invalid opcode |
423 | * fault (6): | ||
424 | */ | ||
364 | case 6: | 425 | case 6: |
365 | /* We need to check if ring == GUEST_PL and | 426 | /* |
366 | * faulting instruction == vmcall. */ | 427 | * We need to check if ring == GUEST_PL and faulting |
428 | * instruction == vmcall. | ||
429 | */ | ||
367 | if (is_hypercall(cpu)) { | 430 | if (is_hypercall(cpu)) { |
368 | rewrite_hypercall(cpu); | 431 | rewrite_hypercall(cpu); |
369 | return; | 432 | return; |
370 | } | 433 | } |
371 | break; | 434 | break; |
372 | case 14: /* We've intercepted a Page Fault. */ | 435 | case 14: /* We've intercepted a Page Fault. */ |
373 | /* The Guest accessed a virtual address that wasn't mapped. | 436 | /* |
437 | * The Guest accessed a virtual address that wasn't mapped. | ||
374 | * This happens a lot: we don't actually set up most of the page | 438 | * This happens a lot: we don't actually set up most of the page |
375 | * tables for the Guest at all when we start: as it runs it asks | 439 | * tables for the Guest at all when we start: as it runs it asks |
376 | * for more and more, and we set them up as required. In this | 440 | * for more and more, and we set them up as required. In this |
377 | * case, we don't even tell the Guest that the fault happened. | 441 | * case, we don't even tell the Guest that the fault happened. |
378 | * | 442 | * |
379 | * The errcode tells whether this was a read or a write, and | 443 | * The errcode tells whether this was a read or a write, and |
380 | * whether kernel or userspace code. */ | 444 | * whether kernel or userspace code. |
445 | */ | ||
381 | if (demand_page(cpu, cpu->arch.last_pagefault, | 446 | if (demand_page(cpu, cpu->arch.last_pagefault, |
382 | cpu->regs->errcode)) | 447 | cpu->regs->errcode)) |
383 | return; | 448 | return; |
384 | 449 | ||
385 | /* OK, it's really not there (or not OK): the Guest needs to | 450 | /* |
451 | * OK, it's really not there (or not OK): the Guest needs to | ||
386 | * know. We write out the cr2 value so it knows where the | 452 | * know. We write out the cr2 value so it knows where the |
387 | * fault occurred. | 453 | * fault occurred. |
388 | * | 454 | * |
389 | * Note that if the Guest were really messed up, this could | 455 | * Note that if the Guest were really messed up, this could |
390 | * happen before it's done the LHCALL_LGUEST_INIT hypercall, so | 456 | * happen before it's done the LHCALL_LGUEST_INIT hypercall, so |
391 | * lg->lguest_data could be NULL */ | 457 | * lg->lguest_data could be NULL |
458 | */ | ||
392 | if (cpu->lg->lguest_data && | 459 | if (cpu->lg->lguest_data && |
393 | put_user(cpu->arch.last_pagefault, | 460 | put_user(cpu->arch.last_pagefault, |
394 | &cpu->lg->lguest_data->cr2)) | 461 | &cpu->lg->lguest_data->cr2)) |
395 | kill_guest(cpu, "Writing cr2"); | 462 | kill_guest(cpu, "Writing cr2"); |
396 | break; | 463 | break; |
397 | case 7: /* We've intercepted a Device Not Available fault. */ | 464 | case 7: /* We've intercepted a Device Not Available fault. */ |
398 | /* If the Guest doesn't want to know, we already restored the | 465 | /* |
399 | * Floating Point Unit, so we just continue without telling | 466 | * If the Guest doesn't want to know, we already restored the |
400 | * it. */ | 467 | * Floating Point Unit, so we just continue without telling it. |
468 | */ | ||
401 | if (!cpu->ts) | 469 | if (!cpu->ts) |
402 | return; | 470 | return; |
403 | break; | 471 | break; |
404 | case 32 ... 255: | 472 | case 32 ... 255: |
405 | /* These values mean a real interrupt occurred, in which case | 473 | /* |
474 | * These values mean a real interrupt occurred, in which case | ||
406 | * the Host handler has already been run. We just do a | 475 | * the Host handler has already been run. We just do a |
407 | * friendly check if another process should now be run, then | 476 | * friendly check if another process should now be run, then |
408 | * return to run the Guest again */ | 477 | * return to run the Guest again |
478 | */ | ||
409 | cond_resched(); | 479 | cond_resched(); |
410 | return; | 480 | return; |
411 | case LGUEST_TRAP_ENTRY: | 481 | case LGUEST_TRAP_ENTRY: |
412 | /* Our 'struct hcall_args' maps directly over our regs: we set | 482 | /* |
413 | * up the pointer now to indicate a hypercall is pending. */ | 483 | * Our 'struct hcall_args' maps directly over our regs: we set |
484 | * up the pointer now to indicate a hypercall is pending. | ||
485 | */ | ||
414 | cpu->hcall = (struct hcall_args *)cpu->regs; | 486 | cpu->hcall = (struct hcall_args *)cpu->regs; |
415 | return; | 487 | return; |
416 | } | 488 | } |
417 | 489 | ||
418 | /* We didn't handle the trap, so it needs to go to the Guest. */ | 490 | /* We didn't handle the trap, so it needs to go to the Guest. */ |
419 | if (!deliver_trap(cpu, cpu->regs->trapnum)) | 491 | if (!deliver_trap(cpu, cpu->regs->trapnum)) |
420 | /* If the Guest doesn't have a handler (either it hasn't | 492 | /* |
493 | * If the Guest doesn't have a handler (either it hasn't | ||
421 | * registered any yet, or it's one of the faults we don't let | 494 | * registered any yet, or it's one of the faults we don't let |
422 | * it handle), it dies with this cryptic error message. */ | 495 | * it handle), it dies with this cryptic error message. |
496 | */ | ||
423 | kill_guest(cpu, "unhandled trap %li at %#lx (%#lx)", | 497 | kill_guest(cpu, "unhandled trap %li at %#lx (%#lx)", |
424 | cpu->regs->trapnum, cpu->regs->eip, | 498 | cpu->regs->trapnum, cpu->regs->eip, |
425 | cpu->regs->trapnum == 14 ? cpu->arch.last_pagefault | 499 | cpu->regs->trapnum == 14 ? cpu->arch.last_pagefault |
426 | : cpu->regs->errcode); | 500 | : cpu->regs->errcode); |
427 | } | 501 | } |
428 | 502 | ||
429 | /* Now we can look at each of the routines this calls, in increasing order of | 503 | /* |
504 | * Now we can look at each of the routines this calls, in increasing order of | ||
430 | * complexity: do_hypercalls(), emulate_insn(), maybe_do_interrupt(), | 505 | * complexity: do_hypercalls(), emulate_insn(), maybe_do_interrupt(), |
431 | * deliver_trap() and demand_page(). After all those, we'll be ready to | 506 | * deliver_trap() and demand_page(). After all those, we'll be ready to |
432 | * examine the Switcher, and our philosophical understanding of the Host/Guest | 507 | * examine the Switcher, and our philosophical understanding of the Host/Guest |
433 | * duality will be complete. :*/ | 508 | * duality will be complete. |
509 | :*/ | ||
434 | static void adjust_pge(void *on) | 510 | static void adjust_pge(void *on) |
435 | { | 511 | { |
436 | if (on) | 512 | if (on) |
@@ -439,13 +515,16 @@ static void adjust_pge(void *on) | |||
439 | write_cr4(read_cr4() & ~X86_CR4_PGE); | 515 | write_cr4(read_cr4() & ~X86_CR4_PGE); |
440 | } | 516 | } |
441 | 517 | ||
442 | /*H:020 Now the Switcher is mapped and everything else is ready, we need to do | 518 | /*H:020 |
443 | * some more i386-specific initialization. */ | 519 | * Now the Switcher is mapped and everything else is ready, we need to do |
520 | * some more i386-specific initialization. | ||
521 | */ | ||
444 | void __init lguest_arch_host_init(void) | 522 | void __init lguest_arch_host_init(void) |
445 | { | 523 | { |
446 | int i; | 524 | int i; |
447 | 525 | ||
448 | /* Most of the i386/switcher.S doesn't care that it's been moved; on | 526 | /* |
527 | * Most of the i386/switcher.S doesn't care that it's been moved; on | ||
449 | * Intel, jumps are relative, and it doesn't access any references to | 528 | * Intel, jumps are relative, and it doesn't access any references to |
450 | * external code or data. | 529 | * external code or data. |
451 | * | 530 | * |
@@ -453,7 +532,8 @@ void __init lguest_arch_host_init(void) | |||
453 | * addresses are placed in a table (default_idt_entries), so we need to | 532 | * addresses are placed in a table (default_idt_entries), so we need to |
454 | * update the table with the new addresses. switcher_offset() is a | 533 | * update the table with the new addresses. switcher_offset() is a |
455 | * convenience function which returns the distance between the | 534 | * convenience function which returns the distance between the |
456 | * compiled-in switcher code and the high-mapped copy we just made. */ | 535 | * compiled-in switcher code and the high-mapped copy we just made. |
536 | */ | ||
457 | for (i = 0; i < IDT_ENTRIES; i++) | 537 | for (i = 0; i < IDT_ENTRIES; i++) |
458 | default_idt_entries[i] += switcher_offset(); | 538 | default_idt_entries[i] += switcher_offset(); |
459 | 539 | ||
@@ -468,63 +548,81 @@ void __init lguest_arch_host_init(void) | |||
468 | for_each_possible_cpu(i) { | 548 | for_each_possible_cpu(i) { |
469 | /* lguest_pages() returns this CPU's two pages. */ | 549 | /* lguest_pages() returns this CPU's two pages. */ |
470 | struct lguest_pages *pages = lguest_pages(i); | 550 | struct lguest_pages *pages = lguest_pages(i); |
471 | /* This is a convenience pointer to make the code fit one | 551 | /* This is a convenience pointer to make the code neater. */ |
472 | * statement to a line. */ | ||
473 | struct lguest_ro_state *state = &pages->state; | 552 | struct lguest_ro_state *state = &pages->state; |
474 | 553 | ||
475 | /* The Global Descriptor Table: the Host has a different one | 554 | /* |
555 | * The Global Descriptor Table: the Host has a different one | ||
476 | * for each CPU. We keep a descriptor for the GDT which says | 556 | * for each CPU. We keep a descriptor for the GDT which says |
477 | * where it is and how big it is (the size is actually the last | 557 | * where it is and how big it is (the size is actually the last |
478 | * byte, not the size, hence the "-1"). */ | 558 | * byte, not the size, hence the "-1"). |
559 | */ | ||
479 | state->host_gdt_desc.size = GDT_SIZE-1; | 560 | state->host_gdt_desc.size = GDT_SIZE-1; |
480 | state->host_gdt_desc.address = (long)get_cpu_gdt_table(i); | 561 | state->host_gdt_desc.address = (long)get_cpu_gdt_table(i); |
481 | 562 | ||
482 | /* All CPUs on the Host use the same Interrupt Descriptor | 563 | /* |
564 | * All CPUs on the Host use the same Interrupt Descriptor | ||
483 | * Table, so we just use store_idt(), which gets this CPU's IDT | 565 | * Table, so we just use store_idt(), which gets this CPU's IDT |
484 | * descriptor. */ | 566 | * descriptor. |
567 | */ | ||
485 | store_idt(&state->host_idt_desc); | 568 | store_idt(&state->host_idt_desc); |
486 | 569 | ||
487 | /* The descriptors for the Guest's GDT and IDT can be filled | 570 | /* |
571 | * The descriptors for the Guest's GDT and IDT can be filled | ||
488 | * out now, too. We copy the GDT & IDT into ->guest_gdt and | 572 | * out now, too. We copy the GDT & IDT into ->guest_gdt and |
489 | * ->guest_idt before actually running the Guest. */ | 573 | * ->guest_idt before actually running the Guest. |
574 | */ | ||
490 | state->guest_idt_desc.size = sizeof(state->guest_idt)-1; | 575 | state->guest_idt_desc.size = sizeof(state->guest_idt)-1; |
491 | state->guest_idt_desc.address = (long)&state->guest_idt; | 576 | state->guest_idt_desc.address = (long)&state->guest_idt; |
492 | state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1; | 577 | state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1; |
493 | state->guest_gdt_desc.address = (long)&state->guest_gdt; | 578 | state->guest_gdt_desc.address = (long)&state->guest_gdt; |
494 | 579 | ||
495 | /* We know where we want the stack to be when the Guest enters | 580 | /* |
581 | * We know where we want the stack to be when the Guest enters | ||
496 | * the Switcher: in pages->regs. The stack grows upwards, so | 582 | * the Switcher: in pages->regs. The stack grows upwards, so |
497 | * we start it at the end of that structure. */ | 583 | * we start it at the end of that structure. |
584 | */ | ||
498 | state->guest_tss.sp0 = (long)(&pages->regs + 1); | 585 | state->guest_tss.sp0 = (long)(&pages->regs + 1); |
499 | /* And this is the GDT entry to use for the stack: we keep a | 586 | /* |
500 | * couple of special LGUEST entries. */ | 587 | * And this is the GDT entry to use for the stack: we keep a |
588 | * couple of special LGUEST entries. | ||
589 | */ | ||
501 | state->guest_tss.ss0 = LGUEST_DS; | 590 | state->guest_tss.ss0 = LGUEST_DS; |
502 | 591 | ||
503 | /* x86 can have a fine-grained bitmap which indicates what I/O | 592 | /* |
504 | * ports the process can use. We set it to the end of our | 593 | * x86 can have a fine-grained bitmap which indicates what I/O |
504 | * ports the process can use. We set it to the end of our | 594 | * ports the process can use. We set it to the end of our |
505 | * structure, meaning "none". */ | 595 | * structure, meaning "none". |
596 | */ | ||
506 | state->guest_tss.io_bitmap_base = sizeof(state->guest_tss); | 597 | state->guest_tss.io_bitmap_base = sizeof(state->guest_tss); |
507 | 598 | ||
508 | /* Some GDT entries are the same across all Guests, so we can | 599 | /* |
509 | * set them up now. */ | 600 | * Some GDT entries are the same across all Guests, so we can |
601 | * set them up now. | ||
602 | */ | ||
510 | setup_default_gdt_entries(state); | 603 | setup_default_gdt_entries(state); |
511 | /* Most IDT entries are the same for all Guests, too.*/ | 604 | /* Most IDT entries are the same for all Guests, too.*/ |
512 | setup_default_idt_entries(state, default_idt_entries); | 605 | setup_default_idt_entries(state, default_idt_entries); |
513 | 606 | ||
514 | /* The Host needs to be able to use the LGUEST segments on this | 607 | /* |
515 | * CPU, too, so put them in the Host GDT. */ | 608 | * The Host needs to be able to use the LGUEST segments on this |
609 | * CPU, too, so put them in the Host GDT. | ||
610 | */ | ||
516 | get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT; | 611 | get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT; |
517 | get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT; | 612 | get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT; |
518 | } | 613 | } |
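Editorial aside (not part of the patch): the "-1" in the descriptor sizes set up in the loop above exists because descriptor-table registers hold a limit, i.e. the offset of the last byte, not a byte count. The 32 eight-byte entries below are an assumed figure for 32-bit x86 of this era, not taken from the hunk.

#include <stdio.h>

int main(void)
{
        unsigned entries = 32, entry_bytes = 8;         /* assumed values */

        printf("table is %u bytes, descriptor limit = %u\n",
               entries * entry_bytes, entries * entry_bytes - 1);
        return 0;
}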
519 | 614 | ||
520 | /* In the Switcher, we want the %cs segment register to use the | 615 | /* |
616 | * In the Switcher, we want the %cs segment register to use the | ||
521 | * LGUEST_CS GDT entry: we've put that in the Host and Guest GDTs, so | 617 | * LGUEST_CS GDT entry: we've put that in the Host and Guest GDTs, so |
522 | * it will be undisturbed when we switch. To change %cs and jump we | 618 | * it will be undisturbed when we switch. To change %cs and jump we |
523 | * need this structure to feed to Intel's "lcall" instruction. */ | 619 | * need this structure to feed to Intel's "lcall" instruction. |
620 | */ | ||
524 | lguest_entry.offset = (long)switch_to_guest + switcher_offset(); | 621 | lguest_entry.offset = (long)switch_to_guest + switcher_offset(); |
525 | lguest_entry.segment = LGUEST_CS; | 622 | lguest_entry.segment = LGUEST_CS; |
526 | 623 | ||
527 | /* Finally, we need to turn off "Page Global Enable". PGE is an | 624 | /* |
625 | * Finally, we need to turn off "Page Global Enable". PGE is an | ||
528 | * optimization where page table entries are specially marked to show | 626 | * optimization where page table entries are specially marked to show |
529 | * they never change. The Host kernel marks all the kernel pages this | 627 | * they never change. The Host kernel marks all the kernel pages this |
530 | * way because it's always present, even when userspace is running. | 628 | * way because it's always present, even when userspace is running. |
@@ -534,16 +632,21 @@ void __init lguest_arch_host_init(void) | |||
534 | * you'll get really weird bugs that you'll chase for two days. | 632 | * you'll get really weird bugs that you'll chase for two days. |
535 | * | 633 | * |
536 | * I used to turn PGE off every time we switched to the Guest and back | 634 | * I used to turn PGE off every time we switched to the Guest and back |
537 | * on when we return, but that slowed the Switcher down noticeably. */ | 635 | * on when we return, but that slowed the Switcher down noticeably. |
636 | */ | ||
538 | 637 | ||
539 | /* We don't need the complexity of CPUs coming and going while we're | 638 | /* |
540 | * doing this. */ | 639 | * We don't need the complexity of CPUs coming and going while we're |
640 | * doing this. | ||
641 | */ | ||
541 | get_online_cpus(); | 642 | get_online_cpus(); |
542 | if (cpu_has_pge) { /* We have a broader idea of "global". */ | 643 | if (cpu_has_pge) { /* We have a broader idea of "global". */ |
543 | /* Remember that this was originally set (for cleanup). */ | 644 | /* Remember that this was originally set (for cleanup). */ |
544 | cpu_had_pge = 1; | 645 | cpu_had_pge = 1; |
545 | /* adjust_pge is a helper function which sets or unsets the PGE | 646 | /* |
546 | * bit on its CPU, depending on the argument (0 == unset). */ | 647 | * adjust_pge is a helper function which sets or unsets the PGE |
648 | * bit on its CPU, depending on the argument (0 == unset). | ||
649 | */ | ||
547 | on_each_cpu(adjust_pge, (void *)0, 1); | 650 | on_each_cpu(adjust_pge, (void *)0, 1); |
548 | /* Turn off the feature in the global feature set. */ | 651 | /* Turn off the feature in the global feature set. */ |
549 | clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE); | 652 | clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE); |
@@ -590,26 +693,32 @@ int lguest_arch_init_hypercalls(struct lg_cpu *cpu) | |||
590 | { | 693 | { |
591 | u32 tsc_speed; | 694 | u32 tsc_speed; |
592 | 695 | ||
593 | /* The pointer to the Guest's "struct lguest_data" is the only argument. | 696 | /* |
594 | * We check that address now. */ | 697 | * The pointer to the Guest's "struct lguest_data" is the only argument. |
698 | * We check that address now. | ||
699 | */ | ||
595 | if (!lguest_address_ok(cpu->lg, cpu->hcall->arg1, | 700 | if (!lguest_address_ok(cpu->lg, cpu->hcall->arg1, |
596 | sizeof(*cpu->lg->lguest_data))) | 701 | sizeof(*cpu->lg->lguest_data))) |
597 | return -EFAULT; | 702 | return -EFAULT; |
598 | 703 | ||
599 | /* Having checked it, we simply set lg->lguest_data to point straight | 704 | /* |
705 | * Having checked it, we simply set lg->lguest_data to point straight | ||
600 | * into the Launcher's memory at the right place and then use | 706 | * into the Launcher's memory at the right place and then use |
601 | * copy_to_user/from_user from now on, instead of lgread/write. I put | 707 | * copy_to_user/from_user from now on, instead of lgread/write. I put |
602 | * this in to show that I'm not immune to writing stupid | 708 | * this in to show that I'm not immune to writing stupid |
603 | * optimizations. */ | 709 | * optimizations. |
710 | */ | ||
604 | cpu->lg->lguest_data = cpu->lg->mem_base + cpu->hcall->arg1; | 711 | cpu->lg->lguest_data = cpu->lg->mem_base + cpu->hcall->arg1; |
605 | 712 | ||
606 | /* We insist that the Time Stamp Counter exists and doesn't change with | 713 | /* |
714 | * We insist that the Time Stamp Counter exists and doesn't change with | ||
607 | * cpu frequency. Some devious chip manufacturers decided that TSC | 715 | * cpu frequency. Some devious chip manufacturers decided that TSC |
608 | * changes could be handled in software. I decided that time going | 716 | * changes could be handled in software. I decided that time going |
609 | * backwards might be good for benchmarks, but it's bad for users. | 717 | * backwards might be good for benchmarks, but it's bad for users. |
610 | * | 718 | * |
611 | * We also insist that the TSC be stable: the kernel detects unreliable | 719 | * We also insist that the TSC be stable: the kernel detects unreliable |
612 | * TSCs for its own purposes, and we use that here. */ | 720 | * TSCs for its own purposes, and we use that here. |
721 | */ | ||
613 | if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && !check_tsc_unstable()) | 722 | if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && !check_tsc_unstable()) |
614 | tsc_speed = tsc_khz; | 723 | tsc_speed = tsc_khz; |
615 | else | 724 | else |
@@ -625,38 +734,47 @@ int lguest_arch_init_hypercalls(struct lg_cpu *cpu) | |||
625 | } | 734 | } |
626 | /*:*/ | 735 | /*:*/ |
627 | 736 | ||
628 | /*L:030 lguest_arch_setup_regs() | 737 | /*L:030 |
738 | * lguest_arch_setup_regs() | ||
629 | * | 739 | * |
630 | * Most of the Guest's registers are left alone: we used get_zeroed_page() to | 740 | * Most of the Guest's registers are left alone: we used get_zeroed_page() to |
631 | * allocate the structure, so they will be 0. */ | 741 | * allocate the structure, so they will be 0. |
742 | */ | ||
632 | void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start) | 743 | void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start) |
633 | { | 744 | { |
634 | struct lguest_regs *regs = cpu->regs; | 745 | struct lguest_regs *regs = cpu->regs; |
635 | 746 | ||
636 | /* There are four "segment" registers which the Guest needs to boot: | 747 | /* |
748 | * There are four "segment" registers which the Guest needs to boot: | ||
637 | * The "code segment" register (cs) refers to the kernel code segment | 749 | * The "code segment" register (cs) refers to the kernel code segment |
638 | * __KERNEL_CS, and the "data", "extra" and "stack" segment registers | 750 | * __KERNEL_CS, and the "data", "extra" and "stack" segment registers |
639 | * refer to the kernel data segment __KERNEL_DS. | 751 | * refer to the kernel data segment __KERNEL_DS. |
640 | * | 752 | * |
641 | * The privilege level is packed into the lower bits. The Guest runs | 753 | * The privilege level is packed into the lower bits. The Guest runs |
642 | * at privilege level 1 (GUEST_PL).*/ | 754 | * at privilege level 1 (GUEST_PL). |
755 | */ | ||
643 | regs->ds = regs->es = regs->ss = __KERNEL_DS|GUEST_PL; | 756 | regs->ds = regs->es = regs->ss = __KERNEL_DS|GUEST_PL; |
644 | regs->cs = __KERNEL_CS|GUEST_PL; | 757 | regs->cs = __KERNEL_CS|GUEST_PL; |
645 | 758 | ||
646 | /* The "eflags" register contains miscellaneous flags. Bit 1 (0x002) | 759 | /* |
760 | * The "eflags" register contains miscellaneous flags. Bit 1 (0x002) | ||
647 | * is supposed to always be "1". Bit 9 (0x200) controls whether | 761 | * is supposed to always be "1". Bit 9 (0x200) controls whether |
648 | * interrupts are enabled. We always leave interrupts enabled while | 762 | * interrupts are enabled. We always leave interrupts enabled while |
649 | * running the Guest. */ | 763 | * running the Guest. |
764 | */ | ||
650 | regs->eflags = X86_EFLAGS_IF | 0x2; | 765 | regs->eflags = X86_EFLAGS_IF | 0x2; |
651 | 766 | ||
652 | /* The "Extended Instruction Pointer" register says where the Guest is | 767 | /* |
653 | * running. */ | 768 | * The "Extended Instruction Pointer" register says where the Guest is |
769 | * running. | ||
770 | */ | ||
654 | regs->eip = start; | 771 | regs->eip = start; |
655 | 772 | ||
656 | /* %esi points to our boot information, at physical address 0, so don't | 773 | /* |
657 | * touch it. */ | 774 | * %esi points to our boot information, at physical address 0, so don't |
775 | * touch it. | ||
776 | */ | ||
658 | 777 | ||
659 | /* There are a couple of GDT entries the Guest expects when first | 778 | /* There are a couple of GDT entries the Guest expects at boot. */ |
660 | * booting. */ | ||
661 | setup_guest_gdt(cpu); | 779 | setup_guest_gdt(cpu); |
662 | } | 780 | } |
diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S index 3fc15318a80f..40634b0db9f7 100644 --- a/drivers/lguest/x86/switcher_32.S +++ b/drivers/lguest/x86/switcher_32.S | |||
@@ -1,12 +1,15 @@ | |||
1 | /*P:900 This is the Switcher: code which sits at 0xFFC00000 astride both the | 1 | /*P:900 |
2 | * Host and Guest to do the low-level Guest<->Host switch. It is as simple as | 2 | * This is the Switcher: code which sits at 0xFFC00000 (or 0xFFE00000) astride |
3 | * it can be made, but it's naturally very specific to x86. | 3 | * both the Host and Guest to do the low-level Guest<->Host switch. It is as |
4 | * simple as it can be made, but it's naturally very specific to x86. | ||
4 | * | 5 | * |
5 | * You have now completed Preparation. If this has whetted your appetite; if you | 6 | * You have now completed Preparation. If this has whetted your appetite; if you |
6 | * are feeling invigorated and refreshed then the next, more challenging stage | 7 | * are feeling invigorated and refreshed then the next, more challenging stage |
7 | * can be found in "make Guest". :*/ | 8 | * can be found in "make Guest". |
9 | :*/ | ||
8 | 10 | ||
9 | /*M:012 Lguest is meant to be simple: my rule of thumb is that 1% more LOC must | 11 | /*M:012 |
12 | * Lguest is meant to be simple: my rule of thumb is that 1% more LOC must | ||
10 | * gain at least 1% more performance. Since neither LOC nor performance can be | 13 | * gain at least 1% more performance. Since neither LOC nor performance can be |
11 | * measured beforehand, it generally means implementing a feature then deciding | 14 | * measured beforehand, it generally means implementing a feature then deciding |
12 | * if it's worth it. And once it's implemented, who can say no? | 15 | * if it's worth it. And once it's implemented, who can say no? |
@@ -31,11 +34,14 @@ | |||
31 | * Host (which is actually really easy). | 34 | * Host (which is actually really easy). |
32 | * | 35 | * |
33 | * Two questions remain. Would the performance gain outweigh the complexity? | 36 | * Two questions remain. Would the performance gain outweigh the complexity? |
34 | * And who would write the verse documenting it? :*/ | 37 | * And who would write the verse documenting it? |
38 | :*/ | ||
35 | 39 | ||
36 | /*M:011 Lguest64 handles NMI. This gave me NMI envy (until I looked at their | 40 | /*M:011 |
41 | * Lguest64 handles NMI. This gave me NMI envy (until I looked at their | ||
37 | * code). It's worth doing though, since it would let us use oprofile in the | 42 | * code). It's worth doing though, since it would let us use oprofile in the |
38 | * Host when a Guest is running. :*/ | 43 | * Host when a Guest is running. |
44 | :*/ | ||
39 | 45 | ||
40 | /*S:100 | 46 | /*S:100 |
41 | * Welcome to the Switcher itself! | 47 | * Welcome to the Switcher itself! |
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index b34cb5f79eea..2e535a0ccd5e 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c | |||
@@ -173,6 +173,7 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off, | |||
173 | unsigned segment; | 173 | unsigned segment; |
174 | unsigned offset = (unsigned) off; | 174 | unsigned offset = (unsigned) off; |
175 | u8 *cp = bounce + 1; | 175 | u8 *cp = bounce + 1; |
176 | int sr; | ||
176 | 177 | ||
177 | *cp = AT25_WREN; | 178 | *cp = AT25_WREN; |
178 | status = spi_write(at25->spi, cp, 1); | 179 | status = spi_write(at25->spi, cp, 1); |
@@ -214,7 +215,6 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off, | |||
214 | timeout = jiffies + msecs_to_jiffies(EE_TIMEOUT); | 215 | timeout = jiffies + msecs_to_jiffies(EE_TIMEOUT); |
215 | retries = 0; | 216 | retries = 0; |
216 | do { | 217 | do { |
217 | int sr; | ||
218 | 218 | ||
219 | sr = spi_w8r8(at25->spi, AT25_RDSR); | 219 | sr = spi_w8r8(at25->spi, AT25_RDSR); |
220 | if (sr < 0 || (sr & AT25_SR_nRDY)) { | 220 | if (sr < 0 || (sr & AT25_SR_nRDY)) { |
@@ -228,7 +228,7 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off, | |||
228 | break; | 228 | break; |
229 | } while (retries++ < 3 || time_before_eq(jiffies, timeout)); | 229 | } while (retries++ < 3 || time_before_eq(jiffies, timeout)); |
230 | 230 | ||
231 | if (time_after(jiffies, timeout)) { | 231 | if ((sr < 0) || (sr & AT25_SR_nRDY)) { |
232 | dev_err(&at25->spi->dev, | 232 | dev_err(&at25->spi->dev, |
233 | "write %d bytes offset %d, " | 233 | "write %d bytes offset %d, " |
234 | "timeout after %u msecs\n", | 234 | "timeout after %u msecs\n", |
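Editorial aside (not part of the patch): the at25 hunk hoists sr out of the polling loop and makes the failure test look at the last status read instead of the clock, presumably because the final poll can succeed even after the deadline has passed. A standalone sketch of that pattern; wait_until_ready, read_status and NOT_READY are illustrative stand-ins for the driver's loop, spi_w8r8(spi, AT25_RDSR) and AT25_SR_nRDY.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define NOT_READY 0x01          /* models the not-ready status bit */

/* Fake status source: busy for two polls, then ready. */
static int read_status(void)
{
        static int polls;

        return (++polls < 3) ? NOT_READY : 0;
}

/*
 * Poll until the part reports ready or the deadline passes, then judge
 * success by the *last status read*, not by the clock.
 */
static bool wait_until_ready(int timeout_sec)
{
        time_t deadline = time(NULL) + timeout_sec;
        int sr;         /* must outlive the loop: it decides the outcome */

        do {
                sr = read_status();
                if (sr >= 0 && !(sr & NOT_READY))
                        break;  /* device finished its internal write cycle */
        } while (time(NULL) <= deadline);

        return sr >= 0 && !(sr & NOT_READY);
}

int main(void)
{
        printf("ready: %s\n", wait_until_ready(1) ? "yes" : "no");
        return 0;
}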
diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of.c index d79fa55c3b89..908844327db0 100644 --- a/drivers/mmc/host/sdhci-of.c +++ b/drivers/mmc/host/sdhci-of.c | |||
@@ -158,6 +158,13 @@ static unsigned int esdhc_get_max_clock(struct sdhci_host *host) | |||
158 | return of_host->clock; | 158 | return of_host->clock; |
159 | } | 159 | } |
160 | 160 | ||
161 | static unsigned int esdhc_get_min_clock(struct sdhci_host *host) | ||
162 | { | ||
163 | struct sdhci_of_host *of_host = sdhci_priv(host); | ||
164 | |||
165 | return of_host->clock / 256 / 16; | ||
166 | } | ||
167 | |||
161 | static unsigned int esdhc_get_timeout_clock(struct sdhci_host *host) | 168 | static unsigned int esdhc_get_timeout_clock(struct sdhci_host *host) |
162 | { | 169 | { |
163 | struct sdhci_of_host *of_host = sdhci_priv(host); | 170 | struct sdhci_of_host *of_host = sdhci_priv(host); |
@@ -184,6 +191,7 @@ static struct sdhci_of_data sdhci_esdhc = { | |||
184 | .set_clock = esdhc_set_clock, | 191 | .set_clock = esdhc_set_clock, |
185 | .enable_dma = esdhc_enable_dma, | 192 | .enable_dma = esdhc_enable_dma, |
186 | .get_max_clock = esdhc_get_max_clock, | 193 | .get_max_clock = esdhc_get_max_clock, |
194 | .get_min_clock = esdhc_get_min_clock, | ||
187 | .get_timeout_clock = esdhc_get_timeout_clock, | 195 | .get_timeout_clock = esdhc_get_timeout_clock, |
188 | }, | 196 | }, |
189 | }; | 197 | }; |
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 6779b4ecab18..62041c7e9246 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
@@ -1766,7 +1766,10 @@ int sdhci_add_host(struct sdhci_host *host) | |||
1766 | * Set host parameters. | 1766 | * Set host parameters. |
1767 | */ | 1767 | */ |
1768 | mmc->ops = &sdhci_ops; | 1768 | mmc->ops = &sdhci_ops; |
1769 | mmc->f_min = host->max_clk / 256; | 1769 | if (host->ops->get_min_clock) |
1770 | mmc->f_min = host->ops->get_min_clock(host); | ||
1771 | else | ||
1772 | mmc->f_min = host->max_clk / 256; | ||
1770 | mmc->f_max = host->max_clk; | 1773 | mmc->f_max = host->max_clk; |
1771 | mmc->caps = MMC_CAP_SDIO_IRQ; | 1774 | mmc->caps = MMC_CAP_SDIO_IRQ; |
1772 | 1775 | ||
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 831ddf7dcb49..c77e9ff30223 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h | |||
@@ -302,6 +302,7 @@ struct sdhci_ops { | |||
302 | 302 | ||
303 | int (*enable_dma)(struct sdhci_host *host); | 303 | int (*enable_dma)(struct sdhci_host *host); |
304 | unsigned int (*get_max_clock)(struct sdhci_host *host); | 304 | unsigned int (*get_max_clock)(struct sdhci_host *host); |
305 | unsigned int (*get_min_clock)(struct sdhci_host *host); | ||
305 | unsigned int (*get_timeout_clock)(struct sdhci_host *host); | 306 | unsigned int (*get_timeout_clock)(struct sdhci_host *host); |
306 | }; | 307 | }; |
307 | 308 | ||
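Editorial aside (not part of the patch): the sdhci hunks above are the usual optional-callback-with-fallback pattern: if the controller driver supplies get_min_clock(), use it, otherwise fall back to the generic max_clk / 256 divisor. A standalone sketch with illustrative names (demo_host, demo_ops, min_clock, esdhc_min_clock); these are not the real sdhci structures, and the eSDHC figure below only mirrors the extra /16 division shown in the hunk.

#include <stdio.h>

struct demo_host;

struct demo_ops {
        unsigned int (*get_min_clock)(struct demo_host *host);  /* may be NULL */
};

struct demo_host {
        const struct demo_ops *ops;
        unsigned int max_clk;
};

static unsigned int min_clock(struct demo_host *host)
{
        if (host->ops && host->ops->get_min_clock)
                return host->ops->get_min_clock(host);
        return host->max_clk / 256;     /* generic fallback */
}

/* eSDHC-style override: the controller divides by 256 and then by 16. */
static unsigned int esdhc_min_clock(struct demo_host *host)
{
        return host->max_clk / 256 / 16;
}

static const struct demo_ops esdhc_ops = { .get_min_clock = esdhc_min_clock };

int main(void)
{
        struct demo_host h = { .ops = &esdhc_ops, .max_clk = 133000000 };

        printf("f_min = %u Hz\n", min_clock(&h));
        return 0;
}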
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 23e10b6263d6..f7a4701bf863 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c | |||
@@ -1174,23 +1174,34 @@ static struct platform_driver cmos_platform_driver = { | |||
1174 | } | 1174 | } |
1175 | }; | 1175 | }; |
1176 | 1176 | ||
1177 | #ifdef CONFIG_PNP | ||
1178 | static bool pnp_driver_registered; | ||
1179 | #endif | ||
1180 | static bool platform_driver_registered; | ||
1181 | |||
1177 | static int __init cmos_init(void) | 1182 | static int __init cmos_init(void) |
1178 | { | 1183 | { |
1179 | int retval = 0; | 1184 | int retval = 0; |
1180 | 1185 | ||
1181 | #ifdef CONFIG_PNP | 1186 | #ifdef CONFIG_PNP |
1182 | pnp_register_driver(&cmos_pnp_driver); | 1187 | retval = pnp_register_driver(&cmos_pnp_driver); |
1188 | if (retval == 0) | ||
1189 | pnp_driver_registered = true; | ||
1183 | #endif | 1190 | #endif |
1184 | 1191 | ||
1185 | if (!cmos_rtc.dev) | 1192 | if (!cmos_rtc.dev) { |
1186 | retval = platform_driver_probe(&cmos_platform_driver, | 1193 | retval = platform_driver_probe(&cmos_platform_driver, |
1187 | cmos_platform_probe); | 1194 | cmos_platform_probe); |
1195 | if (retval == 0) | ||
1196 | platform_driver_registered = true; | ||
1197 | } | ||
1188 | 1198 | ||
1189 | if (retval == 0) | 1199 | if (retval == 0) |
1190 | return 0; | 1200 | return 0; |
1191 | 1201 | ||
1192 | #ifdef CONFIG_PNP | 1202 | #ifdef CONFIG_PNP |
1193 | pnp_unregister_driver(&cmos_pnp_driver); | 1203 | if (pnp_driver_registered) |
1204 | pnp_unregister_driver(&cmos_pnp_driver); | ||
1194 | #endif | 1205 | #endif |
1195 | return retval; | 1206 | return retval; |
1196 | } | 1207 | } |
@@ -1199,9 +1210,11 @@ module_init(cmos_init); | |||
1199 | static void __exit cmos_exit(void) | 1210 | static void __exit cmos_exit(void) |
1200 | { | 1211 | { |
1201 | #ifdef CONFIG_PNP | 1212 | #ifdef CONFIG_PNP |
1202 | pnp_unregister_driver(&cmos_pnp_driver); | 1213 | if (pnp_driver_registered) |
1214 | pnp_unregister_driver(&cmos_pnp_driver); | ||
1203 | #endif | 1215 | #endif |
1204 | platform_driver_unregister(&cmos_platform_driver); | 1216 | if (platform_driver_registered) |
1217 | platform_driver_unregister(&cmos_platform_driver); | ||
1205 | } | 1218 | } |
1206 | module_exit(cmos_exit); | 1219 | module_exit(cmos_exit); |
1207 | 1220 | ||
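Editorial aside (not part of the patch): the rtc-cmos hunks add bookkeeping so the exit and error paths only unregister drivers whose registration actually succeeded. A standalone sketch of that pattern; register_a/register_b and their unregister counterparts are stand-ins for the PNP and platform driver calls, and demo_init/demo_exit for cmos_init/cmos_exit.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the registration calls (0 = success). */
static int  register_a(void)    { return 0; }
static int  register_b(void)    { return -1; }  /* pretend this one fails */
static void unregister_a(void)  { puts("unregister a"); }
static void unregister_b(void)  { puts("unregister b"); }

static bool a_registered, b_registered;

static int demo_init(void)
{
        int ret = register_a();

        if (ret == 0)
                a_registered = true;

        ret = register_b();
        if (ret == 0) {
                b_registered = true;
                return 0;
        }

        /* Roll back only what actually succeeded. */
        if (a_registered) {
                unregister_a();
                a_registered = false;
        }
        return ret;
}

static void demo_exit(void)
{
        if (b_registered)
                unregister_b();
        if (a_registered)
                unregister_a();
}

int main(void)
{
        if (demo_init() != 0)
                puts("init failed, rolled back cleanly");
        demo_exit();            /* nothing left to undo */
        return 0;
}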
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c index 338b15c0a548..607d43a31048 100644 --- a/drivers/serial/atmel_serial.c +++ b/drivers/serial/atmel_serial.c | |||
@@ -1551,6 +1551,7 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev) | |||
1551 | if (ret) | 1551 | if (ret) |
1552 | goto err_add_port; | 1552 | goto err_add_port; |
1553 | 1553 | ||
1554 | #ifdef CONFIG_SERIAL_ATMEL_CONSOLE | ||
1554 | if (atmel_is_console_port(&port->uart) | 1555 | if (atmel_is_console_port(&port->uart) |
1555 | && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) { | 1556 | && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) { |
1556 | /* | 1557 | /* |
@@ -1559,6 +1560,7 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev) | |||
1559 | */ | 1560 | */ |
1560 | clk_disable(port->clk); | 1561 | clk_disable(port->clk); |
1561 | } | 1562 | } |
1563 | #endif | ||
1562 | 1564 | ||
1563 | device_init_wakeup(&pdev->dev, 1); | 1565 | device_init_wakeup(&pdev->dev, 1); |
1564 | platform_set_drvdata(pdev, port); | 1566 | platform_set_drvdata(pdev, port); |
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c index eee4b6e0af2c..9b80ad36dbba 100644 --- a/drivers/spi/omap2_mcspi.c +++ b/drivers/spi/omap2_mcspi.c | |||
@@ -59,6 +59,8 @@ | |||
59 | 59 | ||
60 | /* per-register bitmasks: */ | 60 | /* per-register bitmasks: */ |
61 | 61 | ||
62 | #define OMAP2_MCSPI_SYSCONFIG_SMARTIDLE (2 << 3) | ||
63 | #define OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP (1 << 2) | ||
62 | #define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE (1 << 0) | 64 | #define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE (1 << 0) |
63 | #define OMAP2_MCSPI_SYSCONFIG_SOFTRESET (1 << 1) | 65 | #define OMAP2_MCSPI_SYSCONFIG_SOFTRESET (1 << 1) |
64 | 66 | ||
@@ -90,6 +92,7 @@ | |||
90 | 92 | ||
91 | #define OMAP2_MCSPI_CHCTRL_EN (1 << 0) | 93 | #define OMAP2_MCSPI_CHCTRL_EN (1 << 0) |
92 | 94 | ||
95 | #define OMAP2_MCSPI_WAKEUPENABLE_WKEN (1 << 0) | ||
93 | 96 | ||
94 | /* We have 2 DMA channels per CS, one for RX and one for TX */ | 97 | /* We have 2 DMA channels per CS, one for RX and one for TX */ |
95 | struct omap2_mcspi_dma { | 98 | struct omap2_mcspi_dma { |
@@ -269,7 +272,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) | |||
269 | 272 | ||
270 | if (rx != NULL) { | 273 | if (rx != NULL) { |
271 | omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel, | 274 | omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel, |
272 | data_type, element_count, 1, | 275 | data_type, element_count - 1, 1, |
273 | OMAP_DMA_SYNC_ELEMENT, | 276 | OMAP_DMA_SYNC_ELEMENT, |
274 | mcspi_dma->dma_rx_sync_dev, 1); | 277 | mcspi_dma->dma_rx_sync_dev, 1); |
275 | 278 | ||
@@ -300,6 +303,25 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) | |||
300 | if (rx != NULL) { | 303 | if (rx != NULL) { |
301 | wait_for_completion(&mcspi_dma->dma_rx_completion); | 304 | wait_for_completion(&mcspi_dma->dma_rx_completion); |
302 | dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE); | 305 | dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE); |
306 | omap2_mcspi_set_enable(spi, 0); | ||
307 | if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) | ||
308 | & OMAP2_MCSPI_CHSTAT_RXS)) { | ||
309 | u32 w; | ||
310 | |||
311 | w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0); | ||
312 | if (word_len <= 8) | ||
313 | ((u8 *)xfer->rx_buf)[element_count - 1] = w; | ||
314 | else if (word_len <= 16) | ||
315 | ((u16 *)xfer->rx_buf)[element_count - 1] = w; | ||
316 | else /* word_len <= 32 */ | ||
317 | ((u32 *)xfer->rx_buf)[element_count - 1] = w; | ||
318 | } else { | ||
319 | dev_err(&spi->dev, "DMA RX last word empty"); | ||
320 | count -= (word_len <= 8) ? 1 : | ||
321 | (word_len <= 16) ? 2 : | ||
322 | /* word_len <= 32 */ 4; | ||
323 | } | ||
324 | omap2_mcspi_set_enable(spi, 1); | ||
303 | } | 325 | } |
304 | return count; | 326 | return count; |
305 | } | 327 | } |
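Editorial aside (not part of the patch): after this change the RX DMA is programmed for element_count - 1 elements and the final word is fetched from the RX register by hand, stored at the last index with the right width for the configured word length. A standalone sketch of just that last step; store_last_word is an illustrative name and the DMA portion is simulated, whereas in the driver the read comes from mcspi_read_cs_reg().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void store_last_word(void *rx_buf, unsigned elements,
                            unsigned word_len, uint32_t rx_reg)
{
        if (word_len <= 8)
                ((uint8_t  *)rx_buf)[elements - 1] = (uint8_t)rx_reg;
        else if (word_len <= 16)
                ((uint16_t *)rx_buf)[elements - 1] = (uint16_t)rx_reg;
        else    /* word_len <= 32 */
                ((uint32_t *)rx_buf)[elements - 1] = rx_reg;
}

int main(void)
{
        uint8_t buf[4] = { 0 };

        /* Pretend DMA already filled elements 0..2; element 3 comes
         * from the RX register. */
        memset(buf, 0xAA, 3);
        store_last_word(buf, 4, 8, 0x5C);
        printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
        return 0;
}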
@@ -873,8 +895,12 @@ static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi) | |||
873 | } while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE)); | 895 | } while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE)); |
874 | 896 | ||
875 | mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, | 897 | mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, |
876 | /* (3 << 8) | (2 << 3) | */ | 898 | OMAP2_MCSPI_SYSCONFIG_AUTOIDLE | |
877 | OMAP2_MCSPI_SYSCONFIG_AUTOIDLE); | 899 | OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP | |
900 | OMAP2_MCSPI_SYSCONFIG_SMARTIDLE); | ||
901 | |||
902 | mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, | ||
903 | OMAP2_MCSPI_WAKEUPENABLE_WKEN); | ||
878 | 904 | ||
879 | omap2_mcspi_set_master_mode(master); | 905 | omap2_mcspi_set_master_mode(master); |
880 | 906 | ||
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 348bf61a8fec..975ecddbce30 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig | |||
@@ -103,8 +103,6 @@ source "drivers/staging/pohmelfs/Kconfig" | |||
103 | 103 | ||
104 | source "drivers/staging/stlc45xx/Kconfig" | 104 | source "drivers/staging/stlc45xx/Kconfig" |
105 | 105 | ||
106 | source "drivers/staging/uc2322/Kconfig" | ||
107 | |||
108 | source "drivers/staging/b3dfg/Kconfig" | 106 | source "drivers/staging/b3dfg/Kconfig" |
109 | 107 | ||
110 | source "drivers/staging/phison/Kconfig" | 108 | source "drivers/staging/phison/Kconfig" |
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 8d61d7b4debf..2241ae1b21ee 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile | |||
@@ -34,7 +34,6 @@ obj-$(CONFIG_ANDROID) += android/ | |||
34 | obj-$(CONFIG_DST) += dst/ | 34 | obj-$(CONFIG_DST) += dst/ |
35 | obj-$(CONFIG_POHMELFS) += pohmelfs/ | 35 | obj-$(CONFIG_POHMELFS) += pohmelfs/ |
36 | obj-$(CONFIG_STLC45XX) += stlc45xx/ | 36 | obj-$(CONFIG_STLC45XX) += stlc45xx/ |
37 | obj-$(CONFIG_USB_SERIAL_ATEN2011) += uc2322/ | ||
38 | obj-$(CONFIG_B3DFG) += b3dfg/ | 37 | obj-$(CONFIG_B3DFG) += b3dfg/ |
39 | obj-$(CONFIG_IDE_PHISON) += phison/ | 38 | obj-$(CONFIG_IDE_PHISON) += phison/ |
40 | obj-$(CONFIG_PLAN9AUTH) += p9auth/ | 39 | obj-$(CONFIG_PLAN9AUTH) += p9auth/ |
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index fe72240f5a9e..f934393f3959 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c | |||
@@ -96,19 +96,21 @@ static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask) | |||
96 | 96 | ||
97 | read_lock(&tasklist_lock); | 97 | read_lock(&tasklist_lock); |
98 | for_each_process(p) { | 98 | for_each_process(p) { |
99 | struct mm_struct *mm; | ||
99 | int oom_adj; | 100 | int oom_adj; |
100 | 101 | ||
101 | task_lock(p); | 102 | task_lock(p); |
102 | if (!p->mm) { | 103 | mm = p->mm; |
104 | if (!mm) { | ||
103 | task_unlock(p); | 105 | task_unlock(p); |
104 | continue; | 106 | continue; |
105 | } | 107 | } |
106 | oom_adj = p->oomkilladj; | 108 | oom_adj = mm->oom_adj; |
107 | if (oom_adj < min_adj) { | 109 | if (oom_adj < min_adj) { |
108 | task_unlock(p); | 110 | task_unlock(p); |
109 | continue; | 111 | continue; |
110 | } | 112 | } |
111 | tasksize = get_mm_rss(p->mm); | 113 | tasksize = get_mm_rss(mm); |
112 | task_unlock(p); | 114 | task_unlock(p); |
113 | if (tasksize <= 0) | 115 | if (tasksize <= 0) |
114 | continue; | 116 | continue; |
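Editorial aside (not part of the patch): besides switching the source of the adjustment value from p->oomkilladj to mm->oom_adj, the lowmemorykiller hunk samples the mm pointer once under task_lock() and reads everything it needs through that snapshot before unlocking. A standalone sketch of that locking pattern, using a pthread mutex as a stand-in for task_lock(); demo_mm, demo_task and sample_task are illustrative names.

#include <pthread.h>
#include <stdio.h>

struct demo_mm   { int oom_adj; long rss; };
struct demo_task {
        pthread_mutex_t lock;   /* stands in for task_lock() */
        struct demo_mm *mm;     /* only stable while the lock is held */
};

static int sample_task(struct demo_task *t, int *oom_adj, long *rss)
{
        struct demo_mm *mm;

        pthread_mutex_lock(&t->lock);
        mm = t->mm;                     /* snapshot once, under the lock */
        if (!mm) {
                pthread_mutex_unlock(&t->lock);
                return -1;              /* kernel thread or exiting task */
        }
        *oom_adj = mm->oom_adj;
        *rss = mm->rss;                 /* read through the snapshot */
        pthread_mutex_unlock(&t->lock);
        return 0;
}

int main(void)
{
        struct demo_mm mm = { .oom_adj = 4, .rss = 1234 };
        struct demo_task t = { .lock = PTHREAD_MUTEX_INITIALIZER, .mm = &mm };
        int adj;
        long rss;

        if (sample_task(&t, &adj, &rss) == 0)
                printf("oom_adj=%d rss=%ld\n", adj, rss);
        return 0;
}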
diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c index a9bd4106beb7..0fdf8c6dc648 100644 --- a/drivers/staging/serqt_usb2/serqt_usb2.c +++ b/drivers/staging/serqt_usb2/serqt_usb2.c | |||
@@ -360,18 +360,18 @@ static void qt_read_bulk_callback(struct urb *urb) | |||
360 | if (port_paranoia_check(port, __func__) != 0) { | 360 | if (port_paranoia_check(port, __func__) != 0) { |
361 | dbg("%s - port_paranoia_check, exiting\n", __func__); | 361 | dbg("%s - port_paranoia_check, exiting\n", __func__); |
362 | qt_port->ReadBulkStopped = 1; | 362 | qt_port->ReadBulkStopped = 1; |
363 | return; | 363 | goto exit; |
364 | } | 364 | } |
365 | 365 | ||
366 | if (!serial) { | 366 | if (!serial) { |
367 | dbg("%s - bad serial pointer, exiting\n", __func__); | 367 | dbg("%s - bad serial pointer, exiting\n", __func__); |
368 | return; | 368 | goto exit; |
369 | } | 369 | } |
370 | if (qt_port->closePending == 1) { | 370 | if (qt_port->closePending == 1) { |
371 | /* We're closing, stop reading */ | 371 | /* We're closing, stop reading */ |
372 | dbg("%s - (qt_port->closepending == 1\n", __func__); | 372 | dbg("%s - (qt_port->closepending == 1\n", __func__); |
373 | qt_port->ReadBulkStopped = 1; | 373 | qt_port->ReadBulkStopped = 1; |
374 | return; | 374 | goto exit; |
375 | } | 375 | } |
376 | 376 | ||
377 | /* | 377 | /* |
@@ -381,7 +381,7 @@ static void qt_read_bulk_callback(struct urb *urb) | |||
381 | */ | 381 | */ |
382 | if (qt_port->RxHolding == 1) { | 382 | if (qt_port->RxHolding == 1) { |
383 | qt_port->ReadBulkStopped = 1; | 383 | qt_port->ReadBulkStopped = 1; |
384 | return; | 384 | goto exit; |
385 | } | 385 | } |
386 | 386 | ||
387 | if (urb->status) { | 387 | if (urb->status) { |
@@ -389,7 +389,7 @@ static void qt_read_bulk_callback(struct urb *urb) | |||
389 | 389 | ||
390 | dbg("%s - nonzero read bulk status received: %d\n", | 390 | dbg("%s - nonzero read bulk status received: %d\n", |
391 | __func__, urb->status); | 391 | __func__, urb->status); |
392 | return; | 392 | goto exit; |
393 | } | 393 | } |
394 | 394 | ||
395 | if (tty && RxCount) { | 395 | if (tty && RxCount) { |
@@ -463,6 +463,8 @@ static void qt_read_bulk_callback(struct urb *urb) | |||
463 | } | 463 | } |
464 | 464 | ||
465 | schedule_work(&port->work); | 465 | schedule_work(&port->work); |
466 | exit: | ||
467 | tty_kref_put(tty); | ||
466 | } | 468 | } |
467 | 469 | ||
468 | /* | 470 | /* |
@@ -736,6 +738,11 @@ static int qt_startup(struct usb_serial *serial) | |||
736 | if (!qt_port) { | 738 | if (!qt_port) { |
737 | dbg("%s: kmalloc for quatech_port (%d) failed!.", | 739 | dbg("%s: kmalloc for quatech_port (%d) failed!.", |
738 | __func__, i); | 740 | __func__, i); |
741 | for(--i; i >= 0; i--) { | ||
742 | port = serial->port[i]; | ||
743 | kfree(usb_get_serial_port_data(port)); | ||
744 | usb_set_serial_port_data(port, NULL); | ||
745 | } | ||
739 | return -ENOMEM; | 746 | return -ENOMEM; |
740 | } | 747 | } |
741 | spin_lock_init(&qt_port->lock); | 748 | spin_lock_init(&qt_port->lock); |
@@ -1041,7 +1048,7 @@ static void qt_block_until_empty(struct tty_struct *tty, | |||
1041 | } | 1048 | } |
1042 | } | 1049 | } |
1043 | 1050 | ||
1044 | static void qt_close( struct usb_serial_port *port) | 1051 | static void qt_close(struct usb_serial_port *port) |
1045 | { | 1052 | { |
1046 | struct usb_serial *serial = port->serial; | 1053 | struct usb_serial *serial = port->serial; |
1047 | struct quatech_port *qt_port; | 1054 | struct quatech_port *qt_port; |
@@ -1068,6 +1075,7 @@ static void qt_close( struct usb_serial_port *port) | |||
1068 | /* wait up to for transmitter to empty */ | 1075 | /* wait up to for transmitter to empty */ |
1069 | if (serial->dev) | 1076 | if (serial->dev) |
1070 | qt_block_until_empty(tty, qt_port); | 1077 | qt_block_until_empty(tty, qt_port); |
1078 | tty_kref_put(tty); | ||
1071 | 1079 | ||
1072 | /* Close uart channel */ | 1080 | /* Close uart channel */ |
1073 | status = qt_close_channel(serial, index); | 1081 | status = qt_close_channel(serial, index); |
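Editorial aside (not part of the patch): the serqt_usb2 hunks route the callback's early returns through a single exit label so tty_kref_put() runs on every path, and unwind already-allocated per-port state when a later allocation fails in qt_startup(). A standalone sketch of the unwind loop; NPORTS, port_data and attach_all are illustrative stand-ins for the driver's port array and private data.

#include <stdlib.h>
#include <stdio.h>

#define NPORTS 4

static void *port_data[NPORTS];

static int attach_all(void)
{
        int i;

        for (i = 0; i < NPORTS; i++) {
                port_data[i] = calloc(1, 64);   /* per-port private state */
                if (!port_data[i]) {
                        /* Unwind everything attached so far. */
                        for (--i; i >= 0; i--) {
                                free(port_data[i]);
                                port_data[i] = NULL;
                        }
                        return -1;              /* -ENOMEM in the driver */
                }
        }
        return 0;
}

int main(void)
{
        printf("attach_all: %d\n", attach_all());
        return 0;
}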
diff --git a/drivers/staging/uc2322/Kconfig b/drivers/staging/uc2322/Kconfig deleted file mode 100644 index 2e0c6e79df2b..000000000000 --- a/drivers/staging/uc2322/Kconfig +++ /dev/null | |||
@@ -1,10 +0,0 @@ | |||
1 | config USB_SERIAL_ATEN2011 | ||
2 | tristate "ATEN 2011 USB to serial device support" | ||
3 | depends on USB_SERIAL | ||
4 | default N | ||
5 | ---help--- | ||
6 | Say Y here if you want to use a ATEN 2011 dual port USB to serial | ||
7 | adapter. | ||
8 | |||
9 | To compile this driver as a module, choose M here: the module will be | ||
10 | called aten2011. | ||
diff --git a/drivers/staging/uc2322/Makefile b/drivers/staging/uc2322/Makefile deleted file mode 100644 index 49c18d6e579f..000000000000 --- a/drivers/staging/uc2322/Makefile +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | obj-$(CONFIG_USB_SERIAL_ATEN2011) += aten2011.o | ||
diff --git a/drivers/staging/uc2322/TODO b/drivers/staging/uc2322/TODO deleted file mode 100644 index c189a64c4185..000000000000 --- a/drivers/staging/uc2322/TODO +++ /dev/null | |||
@@ -1,7 +0,0 @@ | |||
1 | TODO: | ||
2 | - checkpatch.pl cleanups | ||
3 | - remove dead and useless code (auditing the tty ioctls to | ||
4 | verify that they really are correct and needed.) | ||
5 | |||
6 | Please send any patches to Greg Kroah-Hartman <greg@kroah.com> and | ||
7 | Russell Lang <gsview@ghostgum.com.au>. | ||
diff --git a/drivers/staging/uc2322/aten2011.c b/drivers/staging/uc2322/aten2011.c deleted file mode 100644 index 39d0926d1a90..000000000000 --- a/drivers/staging/uc2322/aten2011.c +++ /dev/null | |||
@@ -1,2430 +0,0 @@ | |||
1 | /* | ||
2 | * Aten 2011 USB serial driver for 4 port devices | ||
3 | * | ||
4 | * Copyright (C) 2000 Inside Out Networks | ||
5 | * Copyright (C) 2001-2002, 2009 Greg Kroah-Hartman <greg@kroah.com> | ||
6 | * Copyright (C) 2009 Novell Inc. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/tty.h> | ||
20 | #include <linux/tty_driver.h> | ||
21 | #include <linux/tty_flip.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/serial.h> | ||
24 | #include <linux/uaccess.h> | ||
25 | #include <linux/usb.h> | ||
26 | #include <linux/usb/serial.h> | ||
27 | |||
28 | |||
29 | #define ZLP_REG1 0x3A /* Zero_Flag_Reg1 58 */ | ||
30 | #define ZLP_REG2 0x3B /* Zero_Flag_Reg2 59 */ | ||
31 | #define ZLP_REG3 0x3C /* Zero_Flag_Reg3 60 */ | ||
32 | #define ZLP_REG4 0x3D /* Zero_Flag_Reg4 61 */ | ||
33 | #define ZLP_REG5 0x3E /* Zero_Flag_Reg5 62 */ | ||
34 | |||
35 | /* Interrupt Rotinue Defines */ | ||
36 | #define SERIAL_IIR_RLS 0x06 | ||
37 | #define SERIAL_IIR_RDA 0x04 | ||
38 | #define SERIAL_IIR_CTI 0x0c | ||
39 | #define SERIAL_IIR_THR 0x02 | ||
40 | #define SERIAL_IIR_MS 0x00 | ||
41 | |||
42 | /* Emulation of the bit mask on the LINE STATUS REGISTER. */ | ||
43 | #define SERIAL_LSR_DR 0x0001 | ||
44 | #define SERIAL_LSR_OE 0x0002 | ||
45 | #define SERIAL_LSR_PE 0x0004 | ||
46 | #define SERIAL_LSR_FE 0x0008 | ||
47 | #define SERIAL_LSR_BI 0x0010 | ||
48 | #define SERIAL_LSR_THRE 0x0020 | ||
49 | #define SERIAL_LSR_TEMT 0x0040 | ||
50 | #define SERIAL_LSR_FIFOERR 0x0080 | ||
51 | |||
52 | /* MSR bit defines(place holders) */ | ||
53 | #define ATEN_MSR_DELTA_CTS 0x10 | ||
54 | #define ATEN_MSR_DELTA_DSR 0x20 | ||
55 | #define ATEN_MSR_DELTA_RI 0x40 | ||
56 | #define ATEN_MSR_DELTA_CD 0x80 | ||
57 | |||
58 | /* Serial Port register Address */ | ||
59 | #define RECEIVE_BUFFER_REGISTER ((__u16)(0x00)) | ||
60 | #define TRANSMIT_HOLDING_REGISTER ((__u16)(0x00)) | ||
61 | #define INTERRUPT_ENABLE_REGISTER ((__u16)(0x01)) | ||
62 | #define INTERRUPT_IDENT_REGISTER ((__u16)(0x02)) | ||
63 | #define FIFO_CONTROL_REGISTER ((__u16)(0x02)) | ||
64 | #define LINE_CONTROL_REGISTER ((__u16)(0x03)) | ||
65 | #define MODEM_CONTROL_REGISTER ((__u16)(0x04)) | ||
66 | #define LINE_STATUS_REGISTER ((__u16)(0x05)) | ||
67 | #define MODEM_STATUS_REGISTER ((__u16)(0x06)) | ||
68 | #define SCRATCH_PAD_REGISTER ((__u16)(0x07)) | ||
69 | #define DIVISOR_LATCH_LSB ((__u16)(0x00)) | ||
70 | #define DIVISOR_LATCH_MSB ((__u16)(0x01)) | ||
71 | |||
72 | #define SP1_REGISTER ((__u16)(0x00)) | ||
73 | #define CONTROL1_REGISTER ((__u16)(0x01)) | ||
74 | #define CLK_MULTI_REGISTER ((__u16)(0x02)) | ||
75 | #define CLK_START_VALUE_REGISTER ((__u16)(0x03)) | ||
76 | #define DCR1_REGISTER ((__u16)(0x04)) | ||
77 | #define GPIO_REGISTER ((__u16)(0x07)) | ||
78 | |||
79 | #define SERIAL_LCR_DLAB ((__u16)(0x0080)) | ||
80 | |||
81 | /* | ||
82 | * URB POOL related defines | ||
83 | */ | ||
84 | #define NUM_URBS 16 /* URB Count */ | ||
85 | #define URB_TRANSFER_BUFFER_SIZE 32 /* URB Size */ | ||
86 | |||
87 | #define USB_VENDOR_ID_ATENINTL 0x0557 | ||
88 | #define ATENINTL_DEVICE_ID_2011 0x2011 | ||
89 | #define ATENINTL_DEVICE_ID_7820 0x7820 | ||
90 | |||
91 | static struct usb_device_id id_table[] = { | ||
92 | { USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_2011) }, | ||
93 | { USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_7820) }, | ||
94 | { } /* terminating entry */ | ||
95 | }; | ||
96 | MODULE_DEVICE_TABLE(usb, id_table); | ||
97 | |||
98 | /* This structure holds all of the local port information */ | ||
99 | struct ATENINTL_port { | ||
100 | int port_num; /* Actual port number in the device (1, 2, etc.) */ | ||
101 | __u8 bulk_out_endpoint; /* the bulk out endpoint handle */ | ||
102 | unsigned char *bulk_out_buffer; /* buffer used for the bulk out endpoint */ | ||
103 | struct urb *write_urb; /* write URB for this port */ | ||
104 | __u8 bulk_in_endpoint; /* the bulk in endpoint handle */ | ||
105 | unsigned char *bulk_in_buffer; /* the buffer we use for the bulk in endpoint */ | ||
106 | struct urb *read_urb; /* read URB for this port */ | ||
107 | __u8 shadowLCR; /* last LCR value received */ | ||
108 | __u8 shadowMCR; /* last MCR value received */ | ||
109 | char open; | ||
110 | char chaseResponsePending; | ||
111 | wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ | ||
112 | wait_queue_head_t wait_command; /* for handling sleeping while waiting for command to finish */ | ||
113 | struct async_icount icount; | ||
114 | struct usb_serial_port *port; /* loop back to the owner of this object */ | ||
115 | /*Offsets*/ | ||
116 | __u8 SpRegOffset; | ||
117 | __u8 ControlRegOffset; | ||
118 | __u8 DcrRegOffset; | ||
119 | /* for processing control URBS in interrupt context */ | ||
120 | struct urb *control_urb; | ||
121 | char *ctrl_buf; | ||
122 | int MsrLsr; | ||
123 | |||
124 | struct urb *write_urb_pool[NUM_URBS]; | ||
125 | /* we pass a pointer to this as the argument sent to cypress_set_termios old_termios */ | ||
126 | struct ktermios tmp_termios; /* stores the old termios settings */ | ||
127 | spinlock_t lock; /* private lock */ | ||
128 | }; | ||
129 | |||
130 | /* This structure holds all of the individual serial device information */ | ||
131 | struct ATENINTL_serial { | ||
132 | __u8 interrupt_in_endpoint; /* the interrupt endpoint handle */ | ||
133 | unsigned char *interrupt_in_buffer; /* the buffer we use for the interrupt endpoint */ | ||
134 | struct urb *interrupt_read_urb; /* our interrupt urb */ | ||
135 | __u8 bulk_in_endpoint; /* the bulk in endpoint handle */ | ||
136 | unsigned char *bulk_in_buffer; /* the buffer we use for the bulk in endpoint */ | ||
137 | struct urb *read_urb; /* our bulk read urb */ | ||
138 | __u8 bulk_out_endpoint; /* the bulk out endpoint handle */ | ||
139 | struct usb_serial *serial; /* loop back to the owner of this object */ | ||
140 | int ATEN2011_spectrum_2or4ports; /* this says the number of ports in the device */ | ||
141 | /* Indicates the number of open ports on an individual USB-serial adapter. */ | ||
142 | unsigned int NoOfOpenPorts; | ||
143 | /* a flag for Status endpoint polling */ | ||
144 | unsigned char status_polling_started; | ||
145 | }; | ||
146 | |||
147 | static void ATEN2011_set_termios(struct tty_struct *tty, | ||
148 | struct usb_serial_port *port, | ||
149 | struct ktermios *old_termios); | ||
150 | static void ATEN2011_change_port_settings(struct tty_struct *tty, | ||
151 | struct ATENINTL_port *ATEN2011_port, | ||
152 | struct ktermios *old_termios); | ||
153 | |||
154 | /************************************* | ||
155 | * Bit definitions for each register * | ||
156 | *************************************/ | ||
157 | #define LCR_BITS_5 0x00 /* 5 bits/char */ | ||
158 | #define LCR_BITS_6 0x01 /* 6 bits/char */ | ||
159 | #define LCR_BITS_7 0x02 /* 7 bits/char */ | ||
160 | #define LCR_BITS_8 0x03 /* 8 bits/char */ | ||
161 | #define LCR_BITS_MASK 0x03 /* Mask for bits/char field */ | ||
162 | |||
163 | #define LCR_STOP_1 0x00 /* 1 stop bit */ | ||
164 | #define LCR_STOP_1_5 0x04 /* 1.5 stop bits (if 5 bits/char) */ | ||
165 | #define LCR_STOP_2 0x04 /* 2 stop bits (if 6-8 bits/char) */ | ||
166 | #define LCR_STOP_MASK 0x04 /* Mask for stop bits field */ | ||
167 | |||
168 | #define LCR_PAR_NONE 0x00 /* No parity */ | ||
169 | #define LCR_PAR_ODD 0x08 /* Odd parity */ | ||
170 | #define LCR_PAR_EVEN 0x18 /* Even parity */ | ||
171 | #define LCR_PAR_MARK 0x28 /* Force parity bit to 1 */ | ||
172 | #define LCR_PAR_SPACE 0x38 /* Force parity bit to 0 */ | ||
173 | #define LCR_PAR_MASK 0x38 /* Mask for parity field */ | ||
174 | |||
175 | #define LCR_SET_BREAK 0x40 /* Set Break condition */ | ||
176 | #define LCR_DL_ENABLE 0x80 /* Enable access to divisor latch */ | ||
177 | |||
178 | #define MCR_DTR 0x01 /* Assert DTR */ | ||
179 | #define MCR_RTS 0x02 /* Assert RTS */ | ||
180 | #define MCR_OUT1 0x04 /* Loopback only: Sets state of RI */ | ||
181 | #define MCR_MASTER_IE 0x08 /* Enable interrupt outputs */ | ||
182 | #define MCR_LOOPBACK 0x10 /* Set internal (digital) loopback mode */ | ||
183 | #define MCR_XON_ANY 0x20 /* Enable any char to exit XOFF mode */ | ||
184 | |||
185 | #define ATEN2011_MSR_CTS 0x10 /* Current state of CTS */ | ||
186 | #define ATEN2011_MSR_DSR 0x20 /* Current state of DSR */ | ||
187 | #define ATEN2011_MSR_RI 0x40 /* Current state of RI */ | ||
188 | #define ATEN2011_MSR_CD 0x80 /* Current state of CD */ | ||
189 | |||
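A quick standalone sketch (not part of the driver) of how the LCR masks above combine: an 8N1 line setting is just the OR of the three relevant fields, which is why ATEN2011_open() later writes 0x03 to LINE_CONTROL_REGISTER.

#include <stdio.h>

/* The values mirror the driver's LCR defines above. */
#define LCR_BITS_8   0x03	/* 8 bits/char */
#define LCR_STOP_1   0x00	/* 1 stop bit */
#define LCR_PAR_NONE 0x00	/* no parity */

int main(void)
{
	unsigned char lcr = LCR_BITS_8 | LCR_STOP_1 | LCR_PAR_NONE;

	printf("8N1 LCR value: 0x%02x\n", lcr);	/* prints 0x03 */
	return 0;
}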
190 | |||
191 | static int debug; | ||
192 | |||
193 | /* | ||
194 | * Version Information | ||
195 | */ | ||
196 | #define DRIVER_VERSION "2.0" | ||
197 | #define DRIVER_DESC "ATENINTL 2011 USB Serial Adapter" | ||
198 | |||
199 | /* | ||
200 | * Defines used for sending commands to port | ||
201 | */ | ||
202 | |||
203 | #define ATEN_WDR_TIMEOUT (50) /* default urb timeout */ | ||
204 | |||
205 | /* Requests */ | ||
206 | #define ATEN_RD_RTYPE 0xC0 | ||
207 | #define ATEN_WR_RTYPE 0x40 | ||
208 | #define ATEN_RDREQ 0x0D | ||
209 | #define ATEN_WRREQ 0x0E | ||
210 | #define ATEN_CTRL_TIMEOUT 500 | ||
211 | #define VENDOR_READ_LENGTH (0x01) | ||
212 | |||
213 | /* set to 1 for RS485 mode and 0 for RS232 mode */ | ||
214 | /* FIXME make this somehow dynamic and not build time specific */ | ||
215 | static int RS485mode; | ||
216 | |||
217 | static int set_reg_sync(struct usb_serial_port *port, __u16 reg, __u16 val) | ||
218 | { | ||
219 | struct usb_device *dev = port->serial->dev; | ||
220 | val = val & 0x00ff; | ||
221 | |||
222 | dbg("%s: reg is %x, value %x", __func__, reg, val); | ||
223 | |||
224 | return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ATEN_WRREQ, | ||
225 | ATEN_WR_RTYPE, val, reg, NULL, 0, | ||
226 | ATEN_WDR_TIMEOUT); | ||
227 | } | ||
228 | |||
229 | static int get_reg_sync(struct usb_serial_port *port, __u16 reg, __u16 *val) | ||
230 | { | ||
231 | struct usb_device *dev = port->serial->dev; | ||
232 | int ret; | ||
233 | |||
234 | ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), ATEN_RDREQ, | ||
235 | ATEN_RD_RTYPE, 0, reg, val, VENDOR_READ_LENGTH, | ||
236 | ATEN_WDR_TIMEOUT); | ||
237 | dbg("%s: offset is %x, return val %x", __func__, reg, *val); | ||
238 | *val = (*val) & 0x00ff; | ||
239 | return ret; | ||
240 | } | ||
241 | |||
242 | static int set_uart_reg(struct usb_serial_port *port, __u16 reg, __u16 val) | ||
243 | { | ||
244 | struct usb_device *dev = port->serial->dev; | ||
245 | struct ATENINTL_serial *a_serial; | ||
246 | __u16 minor; | ||
247 | |||
248 | a_serial = usb_get_serial_data(port->serial); | ||
249 | minor = port->serial->minor; | ||
250 | if (minor == SERIAL_TTY_NO_MINOR) | ||
251 | minor = 0; | ||
252 | val = val & 0x00ff; | ||
253 | |||
254 | /* | ||
255 | * For the UART control registers, | ||
256 | * the application number needs to be OR'ed in | ||
257 | */ | ||
258 | if (a_serial->ATEN2011_spectrum_2or4ports == 4) | ||
259 | val |= (((__u16)port->number - minor) + 1) << 8; | ||
260 | else { | ||
261 | if (((__u16) port->number - minor) == 0) | ||
262 | val |= (((__u16)port->number - minor) + 1) << 8; | ||
263 | else | ||
264 | val |= (((__u16)port->number - minor) + 2) << 8; | ||
265 | } | ||
266 | dbg("%s: application number is %x", __func__, val); | ||
267 | |||
268 | return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ATEN_WRREQ, | ||
269 | ATEN_WR_RTYPE, val, reg, NULL, 0, | ||
270 | ATEN_WDR_TIMEOUT); | ||
271 | } | ||
272 | |||
273 | static int get_uart_reg(struct usb_serial_port *port, __u16 reg, __u16 *val) | ||
274 | { | ||
275 | struct usb_device *dev = port->serial->dev; | ||
276 | int ret = 0; | ||
277 | __u16 wval; | ||
278 | struct ATENINTL_serial *a_serial; | ||
279 | __u16 minor = port->serial->minor; | ||
280 | |||
281 | a_serial = usb_get_serial_data(port->serial); | ||
282 | if (minor == SERIAL_TTY_NO_MINOR) | ||
283 | minor = 0; | ||
284 | |||
285 | /* wval is the same as the application number */ | ||
286 | if (a_serial->ATEN2011_spectrum_2or4ports == 4) | ||
287 | wval = (((__u16)port->number - minor) + 1) << 8; | ||
288 | else { | ||
289 | if (((__u16) port->number - minor) == 0) | ||
290 | wval = (((__u16) port->number - minor) + 1) << 8; | ||
291 | else | ||
292 | wval = (((__u16) port->number - minor) + 2) << 8; | ||
293 | } | ||
294 | dbg("%s: application number is %x", __func__, wval); | ||
295 | ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), ATEN_RDREQ, | ||
296 | ATEN_RD_RTYPE, wval, reg, val, VENDOR_READ_LENGTH, | ||
297 | ATEN_WDR_TIMEOUT); | ||
298 | *val = (*val) & 0x00ff; | ||
299 | return ret; | ||
300 | } | ||
301 | |||
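The wValue "application number" computed in set_uart_reg() and get_uart_reg() above follows one mapping for 4-port devices and another for 2-port devices. A minimal standalone sketch (the helper name is hypothetical, not part of the driver) makes the two cases easier to compare:

#include <stdio.h>

/*
 * Hypothetical helper: derive the "application number" that the driver
 * folds into the high byte of wValue, from the zero-based port index
 * and the number of ports on the device.
 */
static unsigned int aten_app_number(unsigned int port_index, int num_ports)
{
	if (num_ports == 4 || port_index == 0)
		return (port_index + 1) << 8;
	return (port_index + 2) << 8;	/* 2-port device, second port */
}

int main(void)
{
	/* 2-port device: index 0 -> 0x0100, index 1 -> 0x0300 */
	printf("0x%04x 0x%04x\n", aten_app_number(0, 2), aten_app_number(1, 2));
	/* 4-port device: index 3 -> 0x0400 */
	printf("0x%04x\n", aten_app_number(3, 4));
	return 0;
}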
302 | static int handle_newMsr(struct ATENINTL_port *port, __u8 newMsr) | ||
303 | { | ||
304 | struct ATENINTL_port *ATEN2011_port; | ||
305 | struct async_icount *icount; | ||
306 | ATEN2011_port = port; | ||
307 | icount = &ATEN2011_port->icount; | ||
308 | if (newMsr & | ||
309 | (ATEN_MSR_DELTA_CTS | ATEN_MSR_DELTA_DSR | ATEN_MSR_DELTA_RI | | ||
310 | ATEN_MSR_DELTA_CD)) { | ||
311 | icount = &ATEN2011_port->icount; | ||
312 | |||
313 | /* update input line counters */ | ||
314 | if (newMsr & ATEN_MSR_DELTA_CTS) | ||
315 | icount->cts++; | ||
316 | if (newMsr & ATEN_MSR_DELTA_DSR) | ||
317 | icount->dsr++; | ||
318 | if (newMsr & ATEN_MSR_DELTA_CD) | ||
319 | icount->dcd++; | ||
320 | if (newMsr & ATEN_MSR_DELTA_RI) | ||
321 | icount->rng++; | ||
322 | } | ||
323 | |||
324 | return 0; | ||
325 | } | ||
326 | |||
327 | static int handle_newLsr(struct ATENINTL_port *port, __u8 newLsr) | ||
328 | { | ||
329 | struct async_icount *icount; | ||
330 | |||
331 | dbg("%s - %02x", __func__, newLsr); | ||
332 | |||
333 | if (newLsr & SERIAL_LSR_BI) { | ||
334 | /* | ||
335 | * Parity and Framing errors only count if they occur exclusive | ||
336 | * of a break being received. | ||
337 | */ | ||
338 | newLsr &= (__u8) (SERIAL_LSR_OE | SERIAL_LSR_BI); | ||
339 | } | ||
340 | |||
341 | /* update input line counters */ | ||
342 | icount = &port->icount; | ||
343 | if (newLsr & SERIAL_LSR_BI) | ||
344 | icount->brk++; | ||
345 | if (newLsr & SERIAL_LSR_OE) | ||
346 | icount->overrun++; | ||
347 | if (newLsr & SERIAL_LSR_PE) | ||
348 | icount->parity++; | ||
349 | if (newLsr & SERIAL_LSR_FE) | ||
350 | icount->frame++; | ||
351 | |||
352 | return 0; | ||
353 | } | ||
354 | |||
355 | static void ATEN2011_control_callback(struct urb *urb) | ||
356 | { | ||
357 | unsigned char *data; | ||
358 | struct ATENINTL_port *ATEN2011_port; | ||
359 | __u8 regval = 0x0; | ||
360 | |||
361 | switch (urb->status) { | ||
362 | case 0: | ||
363 | /* success */ | ||
364 | break; | ||
365 | case -ECONNRESET: | ||
366 | case -ENOENT: | ||
367 | case -ESHUTDOWN: | ||
368 | /* this urb is terminated, clean up */ | ||
369 | dbg("%s - urb shutting down with status: %d", __func__, | ||
370 | urb->status); | ||
371 | return; | ||
372 | default: | ||
373 | dbg("%s - nonzero urb status received: %d", __func__, | ||
374 | urb->status); | ||
375 | goto exit; | ||
376 | } | ||
377 | |||
378 | ATEN2011_port = (struct ATENINTL_port *)urb->context; | ||
379 | |||
380 | dbg("%s urb buffer size is %d", __func__, urb->actual_length); | ||
381 | dbg("%s ATEN2011_port->MsrLsr is %d port %d", __func__, | ||
382 | ATEN2011_port->MsrLsr, ATEN2011_port->port_num); | ||
383 | data = urb->transfer_buffer; | ||
384 | regval = (__u8) data[0]; | ||
385 | dbg("%s data is %x", __func__, regval); | ||
386 | if (ATEN2011_port->MsrLsr == 0) | ||
387 | handle_newMsr(ATEN2011_port, regval); | ||
388 | else if (ATEN2011_port->MsrLsr == 1) | ||
389 | handle_newLsr(ATEN2011_port, regval); | ||
390 | |||
391 | exit: | ||
392 | return; | ||
393 | } | ||
394 | |||
395 | static int ATEN2011_get_reg(struct ATENINTL_port *ATEN, __u16 Wval, __u16 reg, | ||
396 | __u16 *val) | ||
397 | { | ||
398 | struct usb_device *dev = ATEN->port->serial->dev; | ||
399 | struct usb_ctrlrequest *dr = NULL; | ||
400 | unsigned char *buffer = NULL; | ||
401 | int ret = 0; | ||
402 | buffer = (__u8 *) ATEN->ctrl_buf; | ||
403 | |||
404 | dr = (void *)(buffer + 2); | ||
405 | dr->bRequestType = ATEN_RD_RTYPE; | ||
406 | dr->bRequest = ATEN_RDREQ; | ||
407 | dr->wValue = cpu_to_le16(Wval); | ||
408 | dr->wIndex = cpu_to_le16(reg); | ||
409 | dr->wLength = cpu_to_le16(2); | ||
410 | |||
411 | usb_fill_control_urb(ATEN->control_urb, dev, usb_rcvctrlpipe(dev, 0), | ||
412 | (unsigned char *)dr, buffer, 2, | ||
413 | ATEN2011_control_callback, ATEN); | ||
414 | ATEN->control_urb->transfer_buffer_length = 2; | ||
415 | ret = usb_submit_urb(ATEN->control_urb, GFP_ATOMIC); | ||
416 | return ret; | ||
417 | } | ||
418 | |||
419 | static void ATEN2011_interrupt_callback(struct urb *urb) | ||
420 | { | ||
421 | int result; | ||
422 | int length; | ||
423 | struct ATENINTL_port *ATEN2011_port; | ||
424 | struct ATENINTL_serial *ATEN2011_serial; | ||
425 | struct usb_serial *serial; | ||
426 | __u16 Data; | ||
427 | unsigned char *data; | ||
428 | __u8 sp[5], st; | ||
429 | int i; | ||
430 | __u16 wval; | ||
431 | int minor; | ||
432 | |||
433 | dbg("%s", " : Entering"); | ||
434 | |||
435 | ATEN2011_serial = (struct ATENINTL_serial *)urb->context; | ||
436 | |||
437 | switch (urb->status) { | ||
438 | case 0: | ||
439 | /* success */ | ||
440 | break; | ||
441 | case -ECONNRESET: | ||
442 | case -ENOENT: | ||
443 | case -ESHUTDOWN: | ||
444 | /* this urb is terminated, clean up */ | ||
445 | dbg("%s - urb shutting down with status: %d", __func__, | ||
446 | urb->status); | ||
447 | return; | ||
448 | default: | ||
449 | dbg("%s - nonzero urb status received: %d", __func__, | ||
450 | urb->status); | ||
451 | goto exit; | ||
452 | } | ||
453 | length = urb->actual_length; | ||
454 | data = urb->transfer_buffer; | ||
455 | |||
456 | serial = ATEN2011_serial->serial; | ||
457 | |||
458 | /* The ATENINTL device sends 5 bytes | ||
459 | * Byte 1 IIR Port 1 (port.number is 0) | ||
460 | * Byte 2 IIR Port 2 (port.number is 1) | ||
461 | * Byte 3 IIR Port 3 (port.number is 2) | ||
462 | * Byte 4 IIR Port 4 (port.number is 3) | ||
463 | * Byte 5 FIFO status for both */ | ||
464 | |||
465 | if (length > 5) { | ||
466 | dbg("%s", "Wrong data !!!"); | ||
467 | return; | ||
468 | } | ||
469 | |||
470 | /* MATRIX */ | ||
471 | if (ATEN2011_serial->ATEN2011_spectrum_2or4ports == 4) { | ||
472 | sp[0] = (__u8) data[0]; | ||
473 | sp[1] = (__u8) data[1]; | ||
474 | sp[2] = (__u8) data[2]; | ||
475 | sp[3] = (__u8) data[3]; | ||
476 | st = (__u8) data[4]; | ||
477 | } else { | ||
478 | sp[0] = (__u8) data[0]; | ||
479 | sp[1] = (__u8) data[2]; | ||
480 | /* sp[2]=(__u8)data[2]; */ | ||
481 | /* sp[3]=(__u8)data[3]; */ | ||
482 | st = (__u8) data[4]; | ||
483 | |||
484 | } | ||
485 | for (i = 0; i < serial->num_ports; i++) { | ||
486 | ATEN2011_port = usb_get_serial_port_data(serial->port[i]); | ||
487 | minor = serial->minor; | ||
488 | if (minor == SERIAL_TTY_NO_MINOR) | ||
489 | minor = 0; | ||
490 | if ((ATEN2011_serial->ATEN2011_spectrum_2or4ports == 2) | ||
491 | && (i != 0)) | ||
492 | wval = | ||
493 | (((__u16) serial->port[i]->number - | ||
494 | (__u16) (minor)) + 2) << 8; | ||
495 | else | ||
496 | wval = | ||
497 | (((__u16) serial->port[i]->number - | ||
498 | (__u16) (minor)) + 1) << 8; | ||
499 | if (ATEN2011_port->open != 0) { | ||
500 | if (sp[i] & 0x01) { | ||
501 | dbg("SP%d No Interrupt !!!", i); | ||
502 | } else { | ||
503 | switch (sp[i] & 0x0f) { | ||
504 | case SERIAL_IIR_RLS: | ||
505 | dbg("Serial Port %d: Receiver status error or address bit detected in 9-bit mode", i); | ||
506 | ATEN2011_port->MsrLsr = 1; | ||
507 | ATEN2011_get_reg(ATEN2011_port, wval, | ||
508 | LINE_STATUS_REGISTER, | ||
509 | &Data); | ||
510 | break; | ||
511 | case SERIAL_IIR_MS: | ||
512 | dbg("Serial Port %d: Modem status change", i); | ||
513 | ATEN2011_port->MsrLsr = 0; | ||
514 | ATEN2011_get_reg(ATEN2011_port, wval, | ||
515 | MODEM_STATUS_REGISTER, | ||
516 | &Data); | ||
517 | break; | ||
518 | } | ||
519 | } | ||
520 | } | ||
521 | |||
522 | } | ||
523 | exit: | ||
524 | if (ATEN2011_serial->status_polling_started == 0) | ||
525 | return; | ||
526 | |||
527 | result = usb_submit_urb(urb, GFP_ATOMIC); | ||
528 | if (result) { | ||
529 | dev_err(&urb->dev->dev, | ||
530 | "%s - Error %d submitting interrupt urb\n", | ||
531 | __func__, result); | ||
532 | } | ||
533 | |||
534 | return; | ||
535 | } | ||
536 | |||
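The interrupt callback above parses that 5-byte status packet: one IIR byte per port followed by a FIFO status byte, where bit 0 set means "no interrupt pending" and the low nibble otherwise selects LSR or MSR handling. A standalone decoding sketch (hypothetical helper, shown for the 4-port layout; the 2-port path picks bytes 0 and 2 instead):

#include <stdio.h>

/* Mirror the driver's IIR defines for a self-contained sketch. */
#define SERIAL_IIR_RLS 0x06
#define SERIAL_IIR_MS  0x00

static void decode_status_packet(const unsigned char *data, int num_ports)
{
	int i;

	for (i = 0; i < num_ports; i++) {
		if (data[i] & 0x01) {
			printf("port %d: no interrupt pending\n", i);
			continue;
		}
		switch (data[i] & 0x0f) {
		case SERIAL_IIR_RLS:
			printf("port %d: line status change -> read LSR\n", i);
			break;
		case SERIAL_IIR_MS:
			printf("port %d: modem status change -> read MSR\n", i);
			break;
		default:
			printf("port %d: IIR 0x%02x not handled\n", i, data[i]);
			break;
		}
	}
	printf("FIFO status byte: 0x%02x\n", data[4]);
}

int main(void)
{
	const unsigned char pkt[5] = { 0x06, 0x01, 0x00, 0x01, 0x00 };

	decode_status_packet(pkt, 4);
	return 0;
}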
537 | static void ATEN2011_bulk_in_callback(struct urb *urb) | ||
538 | { | ||
539 | int status; | ||
540 | unsigned char *data; | ||
541 | struct usb_serial *serial; | ||
542 | struct usb_serial_port *port; | ||
543 | struct ATENINTL_serial *ATEN2011_serial; | ||
544 | struct ATENINTL_port *ATEN2011_port; | ||
545 | struct tty_struct *tty; | ||
546 | |||
547 | if (urb->status) { | ||
548 | dbg("nonzero read bulk status received: %d", urb->status); | ||
549 | return; | ||
550 | } | ||
551 | |||
552 | ATEN2011_port = (struct ATENINTL_port *)urb->context; | ||
553 | |||
554 | port = (struct usb_serial_port *)ATEN2011_port->port; | ||
555 | serial = port->serial; | ||
556 | |||
557 | dbg("%s", "Entering..."); | ||
558 | |||
559 | data = urb->transfer_buffer; | ||
560 | ATEN2011_serial = usb_get_serial_data(serial); | ||
561 | |||
562 | if (urb->actual_length) { | ||
563 | tty = tty_port_tty_get(&ATEN2011_port->port->port); | ||
564 | if (tty) { | ||
565 | tty_buffer_request_room(tty, urb->actual_length); | ||
566 | tty_insert_flip_string(tty, data, urb->actual_length); | ||
567 | tty_flip_buffer_push(tty); | ||
568 | tty_kref_put(tty); | ||
569 | } | ||
570 | |||
571 | ATEN2011_port->icount.rx += urb->actual_length; | ||
572 | dbg("ATEN2011_port->icount.rx is %d:", | ||
573 | ATEN2011_port->icount.rx); | ||
574 | } | ||
575 | |||
576 | if (!ATEN2011_port->read_urb) { | ||
577 | dbg("%s", "URB KILLED !!!"); | ||
578 | return; | ||
579 | } | ||
580 | |||
581 | if (ATEN2011_port->read_urb->status != -EINPROGRESS) { | ||
582 | ATEN2011_port->read_urb->dev = serial->dev; | ||
583 | |||
584 | status = usb_submit_urb(ATEN2011_port->read_urb, GFP_ATOMIC); | ||
585 | if (status) | ||
586 | dbg("usb_submit_urb(read bulk) failed, status = %d", status); | ||
587 | } | ||
588 | } | ||
589 | |||
590 | static void ATEN2011_bulk_out_data_callback(struct urb *urb) | ||
591 | { | ||
592 | struct ATENINTL_port *ATEN2011_port; | ||
593 | struct tty_struct *tty; | ||
594 | |||
595 | if (urb->status) { | ||
596 | dbg("nonzero write bulk status received:%d", urb->status); | ||
597 | return; | ||
598 | } | ||
599 | |||
600 | ATEN2011_port = (struct ATENINTL_port *)urb->context; | ||
601 | |||
602 | dbg("%s", "Entering ........."); | ||
603 | |||
604 | tty = tty_port_tty_get(&ATEN2011_port->port->port); | ||
605 | |||
606 | if (tty && ATEN2011_port->open) | ||
607 | /* tell the tty driver that something has changed */ | ||
608 | tty_wakeup(tty); | ||
609 | |||
610 | /* schedule_work(&ATEN2011_port->port->work); */ | ||
611 | tty_kref_put(tty); | ||
612 | |||
613 | } | ||
614 | |||
615 | #ifdef ATENSerialProbe | ||
616 | static int ATEN2011_serial_probe(struct usb_serial *serial, | ||
617 | const struct usb_device_id *id) | ||
618 | { | ||
619 | |||
620 | /* need to implement the mode_reg reading and updating of the | ||
621 | usb_serial_device_type structures | ||
622 | (i.e. num_ports, num_bulkin, num_bulkout, etc.) */ | ||
623 | /* Also we can update the changes attach */ | ||
624 | return 1; | ||
625 | } | ||
626 | #endif | ||
627 | |||
628 | static int ATEN2011_open(struct tty_struct *tty, struct usb_serial_port *port, | ||
629 | struct file *filp) | ||
630 | { | ||
631 | int response; | ||
632 | int j; | ||
633 | struct usb_serial *serial; | ||
634 | struct urb *urb; | ||
635 | __u16 Data; | ||
636 | int status; | ||
637 | struct ATENINTL_serial *ATEN2011_serial; | ||
638 | struct ATENINTL_port *ATEN2011_port; | ||
639 | struct ktermios tmp_termios; | ||
640 | int minor; | ||
641 | |||
642 | serial = port->serial; | ||
643 | |||
644 | ATEN2011_port = usb_get_serial_port_data(port); | ||
645 | |||
646 | if (ATEN2011_port == NULL) | ||
647 | return -ENODEV; | ||
648 | |||
649 | ATEN2011_serial = usb_get_serial_data(serial); | ||
650 | if (ATEN2011_serial == NULL) | ||
651 | return -ENODEV; | ||
652 | |||
653 | /* increment the number of opened ports counter here */ | ||
654 | ATEN2011_serial->NoOfOpenPorts++; | ||
655 | |||
656 | usb_clear_halt(serial->dev, port->write_urb->pipe); | ||
657 | usb_clear_halt(serial->dev, port->read_urb->pipe); | ||
658 | |||
659 | /* Initialising the write urb pool */ | ||
660 | for (j = 0; j < NUM_URBS; ++j) { | ||
661 | urb = usb_alloc_urb(0, GFP_ATOMIC); | ||
662 | ATEN2011_port->write_urb_pool[j] = urb; | ||
663 | |||
664 | if (urb == NULL) { | ||
665 | err("No more urbs???"); | ||
666 | continue; | ||
667 | } | ||
668 | |||
669 | urb->transfer_buffer = NULL; | ||
670 | urb->transfer_buffer = | ||
671 | kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL); | ||
672 | if (!urb->transfer_buffer) { | ||
673 | err("%s-out of memory for urb buffers.", __func__); | ||
674 | continue; | ||
675 | } | ||
676 | } | ||
677 | |||
678 | /***************************************************************************** | ||
679 | * Initialize ATEN2011 -- Write Init values to corresponding Registers | ||
680 | * | ||
681 | * Register Index | ||
682 | * 1 : IER | ||
683 | * 2 : FCR | ||
684 | * 3 : LCR | ||
685 | * 4 : MCR | ||
686 | * | ||
687 | * 0x08 : SP1/2 Control Reg | ||
688 | *****************************************************************************/ | ||
689 | |||
690 | /* NEED to check the following block */ | ||
691 | |||
692 | Data = 0x0; | ||
693 | status = get_reg_sync(port, ATEN2011_port->SpRegOffset, &Data); | ||
694 | if (status < 0) { | ||
695 | dbg("Reading Spreg failed"); | ||
696 | return -1; | ||
697 | } | ||
698 | Data |= 0x80; | ||
699 | status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data); | ||
700 | if (status < 0) { | ||
701 | dbg("writing Spreg failed"); | ||
702 | return -1; | ||
703 | } | ||
704 | |||
705 | Data &= ~0x80; | ||
706 | status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data); | ||
707 | if (status < 0) { | ||
708 | dbg("writing Spreg failed"); | ||
709 | return -1; | ||
710 | } | ||
711 | |||
712 | /* End of block to be checked */ | ||
713 | /**************************CHECK***************************/ | ||
714 | |||
715 | if (RS485mode == 0) | ||
716 | Data = 0xC0; | ||
717 | else | ||
718 | Data = 0x00; | ||
719 | status = set_uart_reg(port, SCRATCH_PAD_REGISTER, Data); | ||
720 | if (status < 0) { | ||
721 | dbg("Writing SCRATCH_PAD_REGISTER failed status-0x%x", status); | ||
722 | return -1; | ||
723 | } else | ||
724 | dbg("SCRATCH_PAD_REGISTER Writing success status%d", status); | ||
725 | |||
726 | /**************************CHECK***************************/ | ||
727 | |||
728 | Data = 0x0; | ||
729 | status = get_reg_sync(port, ATEN2011_port->ControlRegOffset, &Data); | ||
730 | if (status < 0) { | ||
731 | dbg("Reading Controlreg failed"); | ||
732 | return -1; | ||
733 | } | ||
734 | Data |= 0x08; /* Driver done bit */ | ||
735 | Data |= 0x20; /* rx_disable */ | ||
736 | status = 0; | ||
737 | status = | ||
738 | set_reg_sync(port, ATEN2011_port->ControlRegOffset, Data); | ||
739 | if (status < 0) { | ||
740 | dbg("writing Controlreg failed"); | ||
741 | return -1; | ||
742 | } | ||
743 | /* | ||
744 | * do register settings here | ||
745 | * Set all regs to the device default values. | ||
746 | * First Disable all interrupts. | ||
747 | */ | ||
748 | |||
749 | Data = 0x00; | ||
750 | status = set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); | ||
751 | if (status < 0) { | ||
752 | dbg("disableing interrupts failed"); | ||
753 | return -1; | ||
754 | } | ||
755 | /* Set FIFO_CONTROL_REGISTER to the default value */ | ||
756 | Data = 0x00; | ||
757 | status = set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); | ||
758 | if (status < 0) { | ||
759 | dbg("Writing FIFO_CONTROL_REGISTER failed"); | ||
760 | return -1; | ||
761 | } | ||
762 | |||
763 | Data = 0xcf; /* chk */ | ||
764 | status = set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); | ||
765 | if (status < 0) { | ||
766 | dbg("Writing FIFO_CONTROL_REGISTER failed"); | ||
767 | return -1; | ||
768 | } | ||
769 | |||
770 | Data = 0x03; /* LCR_BITS_8 */ | ||
771 | status = set_uart_reg(port, LINE_CONTROL_REGISTER, Data); | ||
772 | ATEN2011_port->shadowLCR = Data; | ||
773 | |||
774 | Data = 0x0b; /* MCR_DTR|MCR_RTS|MCR_MASTER_IE */ | ||
775 | status = set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); | ||
776 | ATEN2011_port->shadowMCR = Data; | ||
777 | |||
778 | #ifdef Check | ||
779 | Data = 0x00; | ||
780 | status = get_uart_reg(port, LINE_CONTROL_REGISTER, &Data); | ||
781 | ATEN2011_port->shadowLCR = Data; | ||
782 | |||
783 | Data |= SERIAL_LCR_DLAB; /* data latch enable in LCR 0x80 */ | ||
784 | status = set_uart_reg(port, LINE_CONTROL_REGISTER, Data); | ||
785 | |||
786 | Data = 0x0c; | ||
787 | status = set_uart_reg(port, DIVISOR_LATCH_LSB, Data); | ||
788 | |||
789 | Data = 0x0; | ||
790 | status = set_uart_reg(port, DIVISOR_LATCH_MSB, Data); | ||
791 | |||
792 | Data = 0x00; | ||
793 | status = get_uart_reg(port, LINE_CONTROL_REGISTER, &Data); | ||
794 | |||
795 | /* Data = ATEN2011_port->shadowLCR; */ /* data latch disable */ | ||
796 | Data = Data & ~SERIAL_LCR_DLAB; | ||
797 | status = set_uart_reg(port, LINE_CONTROL_REGISTER, Data); | ||
798 | ATEN2011_port->shadowLCR = Data; | ||
799 | #endif | ||
800 | /* clearing Bulkin and Bulkout Fifo */ | ||
801 | Data = 0x0; | ||
802 | status = get_reg_sync(port, ATEN2011_port->SpRegOffset, &Data); | ||
803 | |||
804 | Data = Data | 0x0c; | ||
805 | status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data); | ||
806 | |||
807 | Data = Data & ~0x0c; | ||
808 | status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data); | ||
809 | /* Finally enable all interrupts */ | ||
810 | Data = 0x0; | ||
811 | Data = 0x0c; | ||
812 | status = set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); | ||
813 | |||
814 | /* clearing rx_disable */ | ||
815 | Data = 0x0; | ||
816 | status = get_reg_sync(port, ATEN2011_port->ControlRegOffset, &Data); | ||
817 | Data = Data & ~0x20; | ||
818 | status = set_reg_sync(port, ATEN2011_port->ControlRegOffset, Data); | ||
819 | |||
820 | /* rx_negate */ | ||
821 | Data = 0x0; | ||
822 | status = get_reg_sync(port, ATEN2011_port->ControlRegOffset, &Data); | ||
823 | Data = Data | 0x10; | ||
824 | status = 0; | ||
825 | status = set_reg_sync(port, ATEN2011_port->ControlRegOffset, Data); | ||
826 | |||
827 | /* | ||
828 | * Check to see if we've set up our endpoint info yet | ||
829 | * (can't set it up in ATEN2011_startup as the structures | ||
830 | * were not set up at that time.) | ||
831 | */ | ||
832 | if (ATEN2011_serial->NoOfOpenPorts == 1) { | ||
833 | /* start the status polling here */ | ||
834 | ATEN2011_serial->status_polling_started = 1; | ||
835 | /* If not yet set, Set here */ | ||
836 | ATEN2011_serial->interrupt_in_buffer = | ||
837 | serial->port[0]->interrupt_in_buffer; | ||
838 | ATEN2011_serial->interrupt_in_endpoint = | ||
839 | serial->port[0]->interrupt_in_endpointAddress; | ||
840 | ATEN2011_serial->interrupt_read_urb = | ||
841 | serial->port[0]->interrupt_in_urb; | ||
842 | |||
843 | /* set up interrupt urb */ | ||
844 | usb_fill_int_urb(ATEN2011_serial->interrupt_read_urb, | ||
845 | serial->dev, | ||
846 | usb_rcvintpipe(serial->dev, | ||
847 | ATEN2011_serial-> | ||
848 | interrupt_in_endpoint), | ||
849 | ATEN2011_serial->interrupt_in_buffer, | ||
850 | ATEN2011_serial->interrupt_read_urb-> | ||
851 | transfer_buffer_length, | ||
852 | ATEN2011_interrupt_callback, ATEN2011_serial, | ||
853 | ATEN2011_serial->interrupt_read_urb->interval); | ||
854 | |||
855 | /* start interrupt read for ATEN2011 * | ||
856 | * will continue as long as ATEN2011 is connected */ | ||
857 | |||
858 | response = | ||
859 | usb_submit_urb(ATEN2011_serial->interrupt_read_urb, | ||
860 | GFP_KERNEL); | ||
861 | if (response) { | ||
862 | dbg("%s - Error %d submitting interrupt urb", | ||
863 | __func__, response); | ||
864 | } | ||
865 | |||
866 | } | ||
867 | |||
868 | /* | ||
869 | * See if we've set up our endpoint info yet | ||
870 | * (can't set it up in ATEN2011_startup as the | ||
871 | * structures were not set up at that time.) | ||
872 | */ | ||
873 | |||
874 | dbg("port number is %d", port->number); | ||
875 | dbg("serial number is %d", port->serial->minor); | ||
876 | dbg("Bulkin endpoint is %d", port->bulk_in_endpointAddress); | ||
877 | dbg("BulkOut endpoint is %d", port->bulk_out_endpointAddress); | ||
878 | dbg("Interrupt endpoint is %d", | ||
879 | port->interrupt_in_endpointAddress); | ||
880 | dbg("port's number in the device is %d", ATEN2011_port->port_num); | ||
881 | ATEN2011_port->bulk_in_buffer = port->bulk_in_buffer; | ||
882 | ATEN2011_port->bulk_in_endpoint = port->bulk_in_endpointAddress; | ||
883 | ATEN2011_port->read_urb = port->read_urb; | ||
884 | ATEN2011_port->bulk_out_endpoint = port->bulk_out_endpointAddress; | ||
885 | |||
886 | minor = port->serial->minor; | ||
887 | if (minor == SERIAL_TTY_NO_MINOR) | ||
888 | minor = 0; | ||
889 | |||
890 | /* set up our bulk in urb */ | ||
891 | if ((ATEN2011_serial->ATEN2011_spectrum_2or4ports == 2) | ||
892 | && (((__u16) port->number - (__u16) (minor)) != 0)) { | ||
893 | usb_fill_bulk_urb(ATEN2011_port->read_urb, serial->dev, | ||
894 | usb_rcvbulkpipe(serial->dev, | ||
895 | (port-> | ||
896 | bulk_in_endpointAddress + | ||
897 | 2)), port->bulk_in_buffer, | ||
898 | ATEN2011_port->read_urb-> | ||
899 | transfer_buffer_length, | ||
900 | ATEN2011_bulk_in_callback, ATEN2011_port); | ||
901 | } else | ||
902 | usb_fill_bulk_urb(ATEN2011_port->read_urb, | ||
903 | serial->dev, | ||
904 | usb_rcvbulkpipe(serial->dev, | ||
905 | port-> | ||
906 | bulk_in_endpointAddress), | ||
907 | port->bulk_in_buffer, | ||
908 | ATEN2011_port->read_urb-> | ||
909 | transfer_buffer_length, | ||
910 | ATEN2011_bulk_in_callback, ATEN2011_port); | ||
911 | |||
912 | dbg("ATEN2011_open: bulkin endpoint is %d", | ||
913 | port->bulk_in_endpointAddress); | ||
914 | response = usb_submit_urb(ATEN2011_port->read_urb, GFP_KERNEL); | ||
915 | if (response) { | ||
916 | err("%s - Error %d submitting control urb", __func__, | ||
917 | response); | ||
918 | } | ||
919 | |||
920 | /* initialize our wait queues */ | ||
921 | init_waitqueue_head(&ATEN2011_port->wait_chase); | ||
922 | init_waitqueue_head(&ATEN2011_port->wait_command); | ||
923 | |||
924 | /* initialize our icount structure */ | ||
925 | memset(&(ATEN2011_port->icount), 0x00, sizeof(ATEN2011_port->icount)); | ||
926 | |||
927 | /* initialize our port settings */ | ||
928 | ATEN2011_port->shadowMCR = MCR_MASTER_IE; /* Must set to enable ints! */ | ||
929 | ATEN2011_port->chaseResponsePending = 0; | ||
930 | /* send an open port command */ | ||
931 | ATEN2011_port->open = 1; | ||
932 | /* ATEN2011_change_port_settings(ATEN2011_port,old_termios); */ | ||
933 | /* Setup termios */ | ||
934 | ATEN2011_set_termios(tty, port, &tmp_termios); | ||
935 | ATEN2011_port->icount.tx = 0; | ||
936 | ATEN2011_port->icount.rx = 0; | ||
937 | |||
938 | dbg("usb_serial serial:%x ATEN2011_port:%x\nATEN2011_serial:%x usb_serial_port port:%x", | ||
939 | (unsigned int)serial, (unsigned int)ATEN2011_port, | ||
940 | (unsigned int)ATEN2011_serial, (unsigned int)port); | ||
941 | |||
942 | return 0; | ||
943 | |||
944 | } | ||
945 | |||
946 | static int ATEN2011_chars_in_buffer(struct tty_struct *tty) | ||
947 | { | ||
948 | struct usb_serial_port *port = tty->driver_data; | ||
949 | int i; | ||
950 | int chars = 0; | ||
951 | struct ATENINTL_port *ATEN2011_port; | ||
952 | |||
953 | /* dbg("%s"," ATEN2011_chars_in_buffer:entering ..........."); */ | ||
954 | |||
955 | ATEN2011_port = usb_get_serial_port_data(port); | ||
956 | if (ATEN2011_port == NULL) { | ||
957 | dbg("%s", "ATEN2011_chars_in_buffer:leaving ..........."); | ||
958 | return -1; | ||
959 | } | ||
960 | |||
961 | for (i = 0; i < NUM_URBS; ++i) | ||
962 | if (ATEN2011_port->write_urb_pool[i]->status == -EINPROGRESS) | ||
963 | chars += URB_TRANSFER_BUFFER_SIZE; | ||
964 | |||
965 | dbg("%s - returns %d", __func__, chars); | ||
966 | return chars; | ||
967 | |||
968 | } | ||
969 | |||
970 | static void ATEN2011_block_until_tx_empty(struct tty_struct *tty, | ||
971 | struct ATENINTL_port *ATEN2011_port) | ||
972 | { | ||
973 | int timeout = HZ / 10; | ||
974 | int wait = 30; | ||
975 | int count; | ||
976 | |||
977 | while (1) { | ||
978 | count = ATEN2011_chars_in_buffer(tty); | ||
979 | |||
980 | /* Check for Buffer status */ | ||
981 | if (count <= 0) | ||
982 | return; | ||
983 | |||
984 | /* Block the thread for a while */ | ||
985 | interruptible_sleep_on_timeout(&ATEN2011_port->wait_chase, | ||
986 | timeout); | ||
987 | |||
988 | /* No activity.. count down section */ | ||
989 | wait--; | ||
990 | if (wait == 0) { | ||
991 | dbg("%s - TIMEOUT", __func__); | ||
992 | return; | ||
993 | } else { | ||
994 | /* Reset timeout value back to seconds */ | ||
995 | wait = 30; | ||
996 | } | ||
997 | } | ||
998 | } | ||
999 | |||
1000 | static void ATEN2011_close(struct tty_struct *tty, struct usb_serial_port *port, | ||
1001 | struct file *filp) | ||
1002 | { | ||
1003 | struct usb_serial *serial; | ||
1004 | struct ATENINTL_serial *ATEN2011_serial; | ||
1005 | struct ATENINTL_port *ATEN2011_port; | ||
1006 | int no_urbs; | ||
1007 | __u16 Data; | ||
1008 | |||
1009 | dbg("%s", "ATEN2011_close:entering..."); | ||
1010 | serial = port->serial; | ||
1011 | |||
1012 | /* take the adapter's and port's private data */ | ||
1013 | ATEN2011_serial = usb_get_serial_data(serial); | ||
1014 | ATEN2011_port = usb_get_serial_port_data(port); | ||
1015 | if ((ATEN2011_serial == NULL) || (ATEN2011_port == NULL)) | ||
1016 | return; | ||
1017 | |||
1018 | if (serial->dev) { | ||
1019 | /* flush and block(wait) until tx is empty */ | ||
1020 | ATEN2011_block_until_tx_empty(tty, ATEN2011_port); | ||
1021 | } | ||
1022 | /* kill the ports URB's */ | ||
1023 | for (no_urbs = 0; no_urbs < NUM_URBS; no_urbs++) | ||
1024 | usb_kill_urb(ATEN2011_port->write_urb_pool[no_urbs]); | ||
1025 | /* Freeing Write URBs */ | ||
1026 | for (no_urbs = 0; no_urbs < NUM_URBS; ++no_urbs) { | ||
1027 | kfree(ATEN2011_port->write_urb_pool[no_urbs]->transfer_buffer); | ||
1028 | usb_free_urb(ATEN2011_port->write_urb_pool[no_urbs]); | ||
1029 | } | ||
1030 | /* While closing port, shutdown all bulk read, write * | ||
1031 | * and interrupt read if they exist */ | ||
1032 | if (serial->dev) { | ||
1033 | if (ATEN2011_port->write_urb) { | ||
1034 | dbg("%s", "Shutdown bulk write"); | ||
1035 | usb_kill_urb(ATEN2011_port->write_urb); | ||
1036 | } | ||
1037 | if (ATEN2011_port->read_urb) { | ||
1038 | dbg("%s", "Shutdown bulk read"); | ||
1039 | usb_kill_urb(ATEN2011_port->read_urb); | ||
1040 | } | ||
1041 | if ((&ATEN2011_port->control_urb)) { | ||
1042 | dbg("%s", "Shutdown control read"); | ||
1043 | /* usb_kill_urb (ATEN2011_port->control_urb); */ | ||
1044 | |||
1045 | } | ||
1046 | } | ||
1047 | /* if(ATEN2011_port->ctrl_buf != NULL) */ | ||
1048 | /* kfree(ATEN2011_port->ctrl_buf); */ | ||
1049 | /* decrement the count of open ports on an individual USB-serial adapter. */ | ||
1050 | ATEN2011_serial->NoOfOpenPorts--; | ||
1051 | dbg("NoOfOpenPorts in close%d:in port%d", | ||
1052 | ATEN2011_serial->NoOfOpenPorts, port->number); | ||
1053 | if (ATEN2011_serial->NoOfOpenPorts == 0) { | ||
1054 | /* stop the status polling here */ | ||
1055 | ATEN2011_serial->status_polling_started = 0; | ||
1056 | if (ATEN2011_serial->interrupt_read_urb) { | ||
1057 | dbg("%s", "Shutdown interrupt_read_urb"); | ||
1058 | /* ATEN2011_serial->interrupt_in_buffer=NULL; */ | ||
1059 | /* usb_kill_urb (ATEN2011_serial->interrupt_read_urb); */ | ||
1060 | } | ||
1061 | } | ||
1062 | if (ATEN2011_port->write_urb) { | ||
1063 | /* if this urb had a transfer buffer already (old tx) free it */ | ||
1064 | kfree(ATEN2011_port->write_urb->transfer_buffer); | ||
1065 | usb_free_urb(ATEN2011_port->write_urb); | ||
1066 | } | ||
1067 | |||
1068 | /* clear the MCR & IER */ | ||
1069 | Data = 0x00; | ||
1070 | set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); | ||
1071 | Data = 0x00; | ||
1072 | set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); | ||
1073 | |||
1074 | ATEN2011_port->open = 0; | ||
1075 | dbg("%s", "Leaving ............"); | ||
1076 | |||
1077 | } | ||
1078 | |||
1079 | static void ATEN2011_block_until_chase_response(struct tty_struct *tty, | ||
1080 | struct ATENINTL_port | ||
1081 | *ATEN2011_port) | ||
1082 | { | ||
1083 | int timeout = 1 * HZ; | ||
1084 | int wait = 10; | ||
1085 | int count; | ||
1086 | |||
1087 | while (1) { | ||
1088 | count = ATEN2011_chars_in_buffer(tty); | ||
1089 | |||
1090 | /* Check for Buffer status */ | ||
1091 | if (count <= 0) { | ||
1092 | ATEN2011_port->chaseResponsePending = 0; | ||
1093 | return; | ||
1094 | } | ||
1095 | |||
1096 | /* Block the thread for a while */ | ||
1097 | interruptible_sleep_on_timeout(&ATEN2011_port->wait_chase, | ||
1098 | timeout); | ||
1099 | /* No activity.. count down section */ | ||
1100 | wait--; | ||
1101 | if (wait == 0) { | ||
1102 | dbg("%s - TIMEOUT", __func__); | ||
1103 | return; | ||
1104 | } else { | ||
1105 | /* Reset timeout value back to seconds */ | ||
1106 | wait = 10; | ||
1107 | } | ||
1108 | } | ||
1109 | |||
1110 | } | ||
1111 | |||
1112 | static void ATEN2011_break(struct tty_struct *tty, int break_state) | ||
1113 | { | ||
1114 | struct usb_serial_port *port = tty->driver_data; | ||
1115 | unsigned char data; | ||
1116 | struct usb_serial *serial; | ||
1117 | struct ATENINTL_serial *ATEN2011_serial; | ||
1118 | struct ATENINTL_port *ATEN2011_port; | ||
1119 | |||
1120 | dbg("%s", "Entering ..........."); | ||
1121 | dbg("ATEN2011_break: Start"); | ||
1122 | |||
1123 | serial = port->serial; | ||
1124 | |||
1125 | ATEN2011_serial = usb_get_serial_data(serial); | ||
1126 | ATEN2011_port = usb_get_serial_port_data(port); | ||
1127 | |||
1128 | if ((ATEN2011_serial == NULL) || (ATEN2011_port == NULL)) | ||
1129 | return; | ||
1130 | |||
1131 | /* flush and chase */ | ||
1132 | ATEN2011_port->chaseResponsePending = 1; | ||
1133 | |||
1134 | if (serial->dev) { | ||
1135 | /* flush and block until tx is empty */ | ||
1136 | ATEN2011_block_until_chase_response(tty, ATEN2011_port); | ||
1137 | } | ||
1138 | |||
1139 | if (break_state == -1) | ||
1140 | data = ATEN2011_port->shadowLCR | LCR_SET_BREAK; | ||
1141 | else | ||
1142 | data = ATEN2011_port->shadowLCR & ~LCR_SET_BREAK; | ||
1143 | |||
1144 | ATEN2011_port->shadowLCR = data; | ||
1145 | dbg("ATEN2011_break ATEN2011_port->shadowLCR is %x", | ||
1146 | ATEN2011_port->shadowLCR); | ||
1147 | set_uart_reg(port, LINE_CONTROL_REGISTER, ATEN2011_port->shadowLCR); | ||
1148 | |||
1149 | return; | ||
1150 | } | ||
1151 | |||
1152 | static int ATEN2011_write_room(struct tty_struct *tty) | ||
1153 | { | ||
1154 | struct usb_serial_port *port = tty->driver_data; | ||
1155 | int i; | ||
1156 | int room = 0; | ||
1157 | struct ATENINTL_port *ATEN2011_port; | ||
1158 | |||
1159 | ATEN2011_port = usb_get_serial_port_data(port); | ||
1160 | if (ATEN2011_port == NULL) { | ||
1161 | dbg("%s", "ATEN2011_write_room:leaving ..........."); | ||
1162 | return -1; | ||
1163 | } | ||
1164 | |||
1165 | for (i = 0; i < NUM_URBS; ++i) | ||
1166 | if (ATEN2011_port->write_urb_pool[i]->status != -EINPROGRESS) | ||
1167 | room += URB_TRANSFER_BUFFER_SIZE; | ||
1168 | |||
1169 | dbg("%s - returns %d", __func__, room); | ||
1170 | return room; | ||
1171 | |||
1172 | } | ||
1173 | |||
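chars_in_buffer() and write_room() above both estimate from the same write URB pool: every URB whose status is -EINPROGRESS counts as a full buffer, so the two figures always add up to the pool capacity. A toy model (not part of the driver) of that accounting:

#include <stdio.h>

#define NUM_URBS                 16	/* same pool size as the driver */
#define URB_TRANSFER_BUFFER_SIZE 32

int main(void)
{
	int in_flight = 5;	/* URBs currently marked -EINPROGRESS */
	int chars = in_flight * URB_TRANSFER_BUFFER_SIZE;
	int room = (NUM_URBS - in_flight) * URB_TRANSFER_BUFFER_SIZE;

	printf("chars_in_buffer=%d write_room=%d capacity=%d\n",
	       chars, room, NUM_URBS * URB_TRANSFER_BUFFER_SIZE);
	return 0;
}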
1174 | static int ATEN2011_write(struct tty_struct *tty, struct usb_serial_port *port, | ||
1175 | const unsigned char *data, int count) | ||
1176 | { | ||
1177 | int status; | ||
1178 | int i; | ||
1179 | int bytes_sent = 0; | ||
1180 | int transfer_size; | ||
1181 | int minor; | ||
1182 | |||
1183 | struct ATENINTL_port *ATEN2011_port; | ||
1184 | struct usb_serial *serial; | ||
1185 | struct ATENINTL_serial *ATEN2011_serial; | ||
1186 | struct urb *urb; | ||
1187 | const unsigned char *current_position = data; | ||
1188 | unsigned char *data1; | ||
1189 | dbg("%s", "entering ..........."); | ||
1190 | |||
1191 | serial = port->serial; | ||
1192 | |||
1193 | ATEN2011_port = usb_get_serial_port_data(port); | ||
1194 | if (ATEN2011_port == NULL) { | ||
1195 | dbg("%s", "ATEN2011_port is NULL"); | ||
1196 | return -1; | ||
1197 | } | ||
1198 | |||
1199 | ATEN2011_serial = usb_get_serial_data(serial); | ||
1200 | if (ATEN2011_serial == NULL) { | ||
1201 | dbg("%s", "ATEN2011_serial is NULL"); | ||
1202 | return -1; | ||
1203 | } | ||
1204 | |||
1205 | /* try to find a free urb in the list */ | ||
1206 | urb = NULL; | ||
1207 | |||
1208 | for (i = 0; i < NUM_URBS; ++i) { | ||
1209 | if (ATEN2011_port->write_urb_pool[i]->status != -EINPROGRESS) { | ||
1210 | urb = ATEN2011_port->write_urb_pool[i]; | ||
1211 | dbg("URB:%d", i); | ||
1212 | break; | ||
1213 | } | ||
1214 | } | ||
1215 | |||
1216 | if (urb == NULL) { | ||
1217 | dbg("%s - no more free urbs", __func__); | ||
1218 | goto exit; | ||
1219 | } | ||
1220 | |||
1221 | if (urb->transfer_buffer == NULL) { | ||
1222 | urb->transfer_buffer = | ||
1223 | kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL); | ||
1224 | |||
1225 | if (urb->transfer_buffer == NULL) { | ||
1226 | err("%s no more kernel memory...", __func__); | ||
1227 | goto exit; | ||
1228 | } | ||
1229 | } | ||
1230 | transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE); | ||
1231 | |||
1232 | memcpy(urb->transfer_buffer, current_position, transfer_size); | ||
1233 | /* usb_serial_debug_data (__FILE__, __func__, transfer_size, urb->transfer_buffer); */ | ||
1234 | |||
1235 | /* fill urb with data and submit */ | ||
1236 | minor = port->serial->minor; | ||
1237 | if (minor == SERIAL_TTY_NO_MINOR) | ||
1238 | minor = 0; | ||
1239 | if ((ATEN2011_serial->ATEN2011_spectrum_2or4ports == 2) | ||
1240 | && (((__u16) port->number - (__u16) (minor)) != 0)) { | ||
1241 | usb_fill_bulk_urb(urb, ATEN2011_serial->serial->dev, | ||
1242 | usb_sndbulkpipe(ATEN2011_serial->serial->dev, | ||
1243 | (port-> | ||
1244 | bulk_out_endpointAddress) + | ||
1245 | 2), urb->transfer_buffer, | ||
1246 | transfer_size, | ||
1247 | ATEN2011_bulk_out_data_callback, | ||
1248 | ATEN2011_port); | ||
1249 | } else | ||
1250 | |||
1251 | usb_fill_bulk_urb(urb, | ||
1252 | ATEN2011_serial->serial->dev, | ||
1253 | usb_sndbulkpipe(ATEN2011_serial->serial->dev, | ||
1254 | port-> | ||
1255 | bulk_out_endpointAddress), | ||
1256 | urb->transfer_buffer, transfer_size, | ||
1257 | ATEN2011_bulk_out_data_callback, | ||
1258 | ATEN2011_port); | ||
1259 | |||
1260 | data1 = urb->transfer_buffer; | ||
1261 | dbg("bulkout endpoint is %d", port->bulk_out_endpointAddress); | ||
1262 | /* for(i=0;i < urb->actual_length;i++) */ | ||
1263 | /* dbg("Data is %c ",data1[i]); */ | ||
1264 | |||
1265 | /* send it down the pipe */ | ||
1266 | status = usb_submit_urb(urb, GFP_ATOMIC); | ||
1267 | |||
1268 | if (status) { | ||
1269 | err("%s - usb_submit_urb(write bulk) failed with status = %d", | ||
1270 | __func__, status); | ||
1271 | bytes_sent = status; | ||
1272 | goto exit; | ||
1273 | } | ||
1274 | bytes_sent = transfer_size; | ||
1275 | ATEN2011_port->icount.tx += transfer_size; | ||
1276 | dbg("ATEN2011_port->icount.tx is %d:", ATEN2011_port->icount.tx); | ||
1277 | |||
1278 | exit: | ||
1279 | return bytes_sent; | ||
1280 | } | ||
1281 | |||
1282 | static void ATEN2011_throttle(struct tty_struct *tty) | ||
1283 | { | ||
1284 | struct usb_serial_port *port = tty->driver_data; | ||
1285 | struct ATENINTL_port *ATEN2011_port; | ||
1286 | int status; | ||
1287 | |||
1288 | dbg("- port %d", port->number); | ||
1289 | |||
1290 | ATEN2011_port = usb_get_serial_port_data(port); | ||
1291 | |||
1292 | if (ATEN2011_port == NULL) | ||
1293 | return; | ||
1294 | |||
1295 | if (!ATEN2011_port->open) { | ||
1296 | dbg("%s", "port not opened"); | ||
1297 | return; | ||
1298 | } | ||
1299 | |||
1300 | dbg("%s", "Entering .......... "); | ||
1301 | |||
1302 | if (!tty) { | ||
1303 | dbg("%s - no tty available", __func__); | ||
1304 | return; | ||
1305 | } | ||
1306 | |||
1307 | /* if we are implementing XON/XOFF, send the stop character */ | ||
1308 | if (I_IXOFF(tty)) { | ||
1309 | unsigned char stop_char = STOP_CHAR(tty); | ||
1310 | status = ATEN2011_write(tty, port, &stop_char, 1); | ||
1311 | if (status <= 0) | ||
1312 | return; | ||
1313 | } | ||
1314 | |||
1315 | /* if we are implementing RTS/CTS, toggle that line */ | ||
1316 | if (tty->termios->c_cflag & CRTSCTS) { | ||
1317 | ATEN2011_port->shadowMCR &= ~MCR_RTS; | ||
1318 | status = set_uart_reg(port, MODEM_CONTROL_REGISTER, | ||
1319 | ATEN2011_port->shadowMCR); | ||
1320 | if (status < 0) | ||
1321 | return; | ||
1322 | } | ||
1323 | |||
1324 | return; | ||
1325 | } | ||
1326 | |||
1327 | static void ATEN2011_unthrottle(struct tty_struct *tty) | ||
1328 | { | ||
1329 | struct usb_serial_port *port = tty->driver_data; | ||
1330 | int status; | ||
1331 | struct ATENINTL_port *ATEN2011_port = usb_get_serial_port_data(port); | ||
1332 | |||
1333 | if (ATEN2011_port == NULL) | ||
1334 | return; | ||
1335 | |||
1336 | if (!ATEN2011_port->open) { | ||
1337 | dbg("%s - port not opened", __func__); | ||
1338 | return; | ||
1339 | } | ||
1340 | |||
1341 | dbg("%s", "Entering .......... "); | ||
1342 | |||
1343 | if (!tty) { | ||
1344 | dbg("%s - no tty available", __func__); | ||
1345 | return; | ||
1346 | } | ||
1347 | |||
1348 | /* if we are implementing XON/XOFF, send the start character */ | ||
1349 | if (I_IXOFF(tty)) { | ||
1350 | unsigned char start_char = START_CHAR(tty); | ||
1351 | status = ATEN2011_write(tty, port, &start_char, 1); | ||
1352 | if (status <= 0) | ||
1353 | return; | ||
1354 | } | ||
1355 | |||
1356 | /* if we are implementing RTS/CTS, toggle that line */ | ||
1357 | if (tty->termios->c_cflag & CRTSCTS) { | ||
1358 | ATEN2011_port->shadowMCR |= MCR_RTS; | ||
1359 | status = set_uart_reg(port, MODEM_CONTROL_REGISTER, | ||
1360 | ATEN2011_port->shadowMCR); | ||
1361 | if (status < 0) | ||
1362 | return; | ||
1363 | } | ||
1364 | |||
1365 | return; | ||
1366 | } | ||
1367 | |||
1368 | static int ATEN2011_tiocmget(struct tty_struct *tty, struct file *file) | ||
1369 | { | ||
1370 | struct usb_serial_port *port = tty->driver_data; | ||
1371 | struct ATENINTL_port *ATEN2011_port; | ||
1372 | unsigned int result; | ||
1373 | __u16 msr; | ||
1374 | __u16 mcr; | ||
1375 | /* unsigned int mcr; */ | ||
1376 | int status = 0; | ||
1377 | ATEN2011_port = usb_get_serial_port_data(port); | ||
1378 | |||
1379 | dbg("%s - port %d", __func__, port->number); | ||
1380 | |||
1381 | if (ATEN2011_port == NULL) | ||
1382 | return -ENODEV; | ||
1383 | |||
1384 | status = get_uart_reg(port, MODEM_STATUS_REGISTER, &msr); | ||
1385 | status = get_uart_reg(port, MODEM_CONTROL_REGISTER, &mcr); | ||
1386 | /* mcr = ATEN2011_port->shadowMCR; */ | ||
1387 | /* COMMENT2: the following three lines are commented out so that only the MSR values are updated */ | ||
1388 | result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0) | ||
1389 | | ((mcr & MCR_RTS) ? TIOCM_RTS : 0) | ||
1390 | | ((mcr & MCR_LOOPBACK) ? TIOCM_LOOP : 0) | ||
1391 | | ((msr & ATEN2011_MSR_CTS) ? TIOCM_CTS : 0) | ||
1392 | | ((msr & ATEN2011_MSR_CD) ? TIOCM_CAR : 0) | ||
1393 | | ((msr & ATEN2011_MSR_RI) ? TIOCM_RI : 0) | ||
1394 | | ((msr & ATEN2011_MSR_DSR) ? TIOCM_DSR : 0); | ||
1395 | |||
1396 | dbg("%s - 0x%04X", __func__, result); | ||
1397 | |||
1398 | return result; | ||
1399 | } | ||
1400 | |||
1401 | static int ATEN2011_tiocmset(struct tty_struct *tty, struct file *file, | ||
1402 | unsigned int set, unsigned int clear) | ||
1403 | { | ||
1404 | struct usb_serial_port *port = tty->driver_data; | ||
1405 | struct ATENINTL_port *ATEN2011_port; | ||
1406 | unsigned int mcr; | ||
1407 | unsigned int status; | ||
1408 | |||
1409 | dbg("%s - port %d", __func__, port->number); | ||
1410 | |||
1411 | ATEN2011_port = usb_get_serial_port_data(port); | ||
1412 | |||
1413 | if (ATEN2011_port == NULL) | ||
1414 | return -ENODEV; | ||
1415 | |||
1416 | mcr = ATEN2011_port->shadowMCR; | ||
1417 | if (clear & TIOCM_RTS) | ||
1418 | mcr &= ~MCR_RTS; | ||
1419 | if (clear & TIOCM_DTR) | ||
1420 | mcr &= ~MCR_DTR; | ||
1421 | if (clear & TIOCM_LOOP) | ||
1422 | mcr &= ~MCR_LOOPBACK; | ||
1423 | |||
1424 | if (set & TIOCM_RTS) | ||
1425 | mcr |= MCR_RTS; | ||
1426 | if (set & TIOCM_DTR) | ||
1427 | mcr |= MCR_DTR; | ||
1428 | if (set & TIOCM_LOOP) | ||
1429 | mcr |= MCR_LOOPBACK; | ||
1430 | |||
1431 | ATEN2011_port->shadowMCR = mcr; | ||
1432 | |||
1433 | status = set_uart_reg(port, MODEM_CONTROL_REGISTER, mcr); | ||
1434 | if (status < 0) { | ||
1435 | dbg("setting MODEM_CONTROL_REGISTER Failed"); | ||
1436 | return -1; | ||
1437 | } | ||
1438 | |||
1439 | return 0; | ||
1440 | } | ||
1441 | |||
1442 | static void ATEN2011_set_termios(struct tty_struct *tty, | ||
1443 | struct usb_serial_port *port, | ||
1444 | struct ktermios *old_termios) | ||
1445 | { | ||
1446 | int status; | ||
1447 | unsigned int cflag; | ||
1448 | struct usb_serial *serial; | ||
1449 | struct ATENINTL_port *ATEN2011_port; | ||
1450 | |||
1451 | dbg("ATEN2011_set_termios: START"); | ||
1452 | |||
1453 | serial = port->serial; | ||
1454 | |||
1455 | ATEN2011_port = usb_get_serial_port_data(port); | ||
1456 | |||
1457 | if (ATEN2011_port == NULL) | ||
1458 | return; | ||
1459 | |||
1460 | if (!ATEN2011_port->open) { | ||
1461 | dbg("%s - port not opened", __func__); | ||
1462 | return; | ||
1463 | } | ||
1464 | |||
1465 | dbg("%s", "setting termios - "); | ||
1466 | |||
1467 | cflag = tty->termios->c_cflag; | ||
1468 | |||
1469 | dbg("%s - cflag %08x iflag %08x", __func__, | ||
1470 | tty->termios->c_cflag, RELEVANT_IFLAG(tty->termios->c_iflag)); | ||
1471 | |||
1472 | if (old_termios) { | ||
1473 | dbg("%s - old clfag %08x old iflag %08x", __func__, | ||
1474 | old_termios->c_cflag, RELEVANT_IFLAG(old_termios->c_iflag)); | ||
1475 | } | ||
1476 | |||
1477 | dbg("%s - port %d", __func__, port->number); | ||
1478 | |||
1479 | /* change the port settings to the new ones specified */ | ||
1480 | |||
1481 | ATEN2011_change_port_settings(tty, ATEN2011_port, old_termios); | ||
1482 | |||
1483 | if (!ATEN2011_port->read_urb) { | ||
1484 | dbg("%s", "URB KILLED !!!!!"); | ||
1485 | return; | ||
1486 | } | ||
1487 | |||
1488 | if (ATEN2011_port->read_urb->status != -EINPROGRESS) { | ||
1489 | ATEN2011_port->read_urb->dev = serial->dev; | ||
1490 | status = usb_submit_urb(ATEN2011_port->read_urb, GFP_ATOMIC); | ||
1491 | if (status) { | ||
1492 | dbg | ||
1493 | (" usb_submit_urb(read bulk) failed, status = %d", | ||
1494 | status); | ||
1495 | } | ||
1496 | } | ||
1497 | return; | ||
1498 | } | ||
1499 | |||
1500 | static int get_lsr_info(struct tty_struct *tty, | ||
1501 | struct ATENINTL_port *ATEN2011_port, | ||
1502 | unsigned int __user *value) | ||
1503 | { | ||
1504 | int count; | ||
1505 | unsigned int result = 0; | ||
1506 | |||
1507 | count = ATEN2011_chars_in_buffer(tty); | ||
1508 | if (count == 0) { | ||
1509 | dbg("%s -- Empty", __func__); | ||
1510 | result = TIOCSER_TEMT; | ||
1511 | } | ||
1512 | |||
1513 | if (copy_to_user(value, &result, sizeof(int))) | ||
1514 | return -EFAULT; | ||
1515 | return 0; | ||
1516 | } | ||
1517 | |||
1518 | static int get_number_bytes_avail(struct tty_struct *tty, | ||
1519 | struct ATENINTL_port *ATEN2011_port, | ||
1520 | unsigned int __user *value) | ||
1521 | { | ||
1522 | unsigned int result = 0; | ||
1523 | |||
1524 | if (!tty) | ||
1525 | return -ENOIOCTLCMD; | ||
1526 | |||
1527 | result = tty->read_cnt; | ||
1528 | |||
1529 | dbg("%s(%d) = %d", __func__, ATEN2011_port->port->number, result); | ||
1530 | if (copy_to_user(value, &result, sizeof(int))) | ||
1531 | return -EFAULT; | ||
1532 | |||
1533 | return -ENOIOCTLCMD; | ||
1534 | } | ||
1535 | |||
1536 | static int set_modem_info(struct ATENINTL_port *ATEN2011_port, unsigned int cmd, | ||
1537 | unsigned int __user *value) | ||
1538 | { | ||
1539 | unsigned int mcr; | ||
1540 | unsigned int arg; | ||
1541 | __u16 Data; | ||
1542 | int status; | ||
1543 | struct usb_serial_port *port; | ||
1544 | |||
1545 | if (ATEN2011_port == NULL) | ||
1546 | return -1; | ||
1547 | |||
1548 | port = (struct usb_serial_port *)ATEN2011_port->port; | ||
1549 | |||
1550 | mcr = ATEN2011_port->shadowMCR; | ||
1551 | |||
1552 | if (copy_from_user(&arg, value, sizeof(int))) | ||
1553 | return -EFAULT; | ||
1554 | |||
1555 | switch (cmd) { | ||
1556 | case TIOCMBIS: | ||
1557 | if (arg & TIOCM_RTS) | ||
1558 | mcr |= MCR_RTS; | ||
1559 | if (arg & TIOCM_DTR) | ||
1560 | mcr |= MCR_DTR; | ||
1561 | if (arg & TIOCM_LOOP) | ||
1562 | mcr |= MCR_LOOPBACK; | ||
1563 | break; | ||
1564 | |||
1565 | case TIOCMBIC: | ||
1566 | if (arg & TIOCM_RTS) | ||
1567 | mcr &= ~MCR_RTS; | ||
1568 | if (arg & TIOCM_DTR) | ||
1569 | mcr &= ~MCR_DTR; | ||
1570 | if (arg & TIOCM_LOOP) | ||
1571 | mcr &= ~MCR_LOOPBACK; | ||
1572 | break; | ||
1573 | |||
1574 | case TIOCMSET: | ||
1575 | /* turn off the RTS and DTR and LOOPBACK | ||
1576 | * and then only turn on what was asked for */ | ||
1577 | mcr &= ~(MCR_RTS | MCR_DTR | MCR_LOOPBACK); | ||
1578 | mcr |= ((arg & TIOCM_RTS) ? MCR_RTS : 0); | ||
1579 | mcr |= ((arg & TIOCM_DTR) ? MCR_DTR : 0); | ||
1580 | mcr |= ((arg & TIOCM_LOOP) ? MCR_LOOPBACK : 0); | ||
1581 | break; | ||
1582 | } | ||
1583 | |||
1584 | ATEN2011_port->shadowMCR = mcr; | ||
1585 | |||
1586 | Data = ATEN2011_port->shadowMCR; | ||
1587 | status = set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); | ||
1588 | if (status < 0) { | ||
1589 | dbg("setting MODEM_CONTROL_REGISTER Failed"); | ||
1590 | return -1; | ||
1591 | } | ||
1592 | |||
1593 | return 0; | ||
1594 | } | ||
1595 | |||
1596 | static int get_modem_info(struct ATENINTL_port *ATEN2011_port, | ||
1597 | unsigned int __user *value) | ||
1598 | { | ||
1599 | unsigned int result = 0; | ||
1600 | __u16 msr; | ||
1601 | unsigned int mcr = ATEN2011_port->shadowMCR; | ||
1602 | int status; | ||
1603 | |||
1604 | status = get_uart_reg(ATEN2011_port->port, MODEM_STATUS_REGISTER, &msr); | ||
1605 | result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0) /* 0x002 */ | ||
1606 | |((mcr & MCR_RTS) ? TIOCM_RTS : 0) /* 0x004 */ | ||
1607 | |((msr & ATEN2011_MSR_CTS) ? TIOCM_CTS : 0) /* 0x020 */ | ||
1608 | |((msr & ATEN2011_MSR_CD) ? TIOCM_CAR : 0) /* 0x040 */ | ||
1609 | |((msr & ATEN2011_MSR_RI) ? TIOCM_RI : 0) /* 0x080 */ | ||
1610 | |((msr & ATEN2011_MSR_DSR) ? TIOCM_DSR : 0); /* 0x100 */ | ||
1611 | |||
1612 | dbg("%s -- %x", __func__, result); | ||
1613 | |||
1614 | if (copy_to_user(value, &result, sizeof(int))) | ||
1615 | return -EFAULT; | ||
1616 | return 0; | ||
1617 | } | ||
1618 | |||
1619 | static int get_serial_info(struct ATENINTL_port *ATEN2011_port, | ||
1620 | struct serial_struct __user *retinfo) | ||
1621 | { | ||
1622 | struct serial_struct tmp; | ||
1623 | |||
1624 | if (ATEN2011_port == NULL) | ||
1625 | return -1; | ||
1626 | |||
1627 | if (!retinfo) | ||
1628 | return -EFAULT; | ||
1629 | |||
1630 | memset(&tmp, 0, sizeof(tmp)); | ||
1631 | |||
1632 | tmp.type = PORT_16550A; | ||
1633 | tmp.line = ATEN2011_port->port->serial->minor; | ||
1634 | if (tmp.line == SERIAL_TTY_NO_MINOR) | ||
1635 | tmp.line = 0; | ||
1636 | tmp.port = ATEN2011_port->port->number; | ||
1637 | tmp.irq = 0; | ||
1638 | tmp.flags = ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ; | ||
1639 | tmp.xmit_fifo_size = NUM_URBS * URB_TRANSFER_BUFFER_SIZE; | ||
1640 | tmp.baud_base = 9600; | ||
1641 | tmp.close_delay = 5 * HZ; | ||
1642 | tmp.closing_wait = 30 * HZ; | ||
1643 | |||
1644 | if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) | ||
1645 | return -EFAULT; | ||
1646 | return 0; | ||
1647 | } | ||
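
For context, the serial_struct filled in by get_serial_info() above is normally read back from user space with TIOCGSERIAL; a minimal sketch, assuming a /dev/ttyUSB0 device node (illustrative only, not part of the driver):

	/* Hypothetical user-space sketch; /dev/ttyUSB0 is an assumed node. */
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/serial.h>

	int main(void)
	{
		struct serial_struct ss;
		int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

		if (fd < 0)
			return 1;
		/* Serviced by get_serial_info() above. */
		if (ioctl(fd, TIOCGSERIAL, &ss) == 0)
			printf("type=%d line=%d baud_base=%d\n",
			       ss.type, ss.line, ss.baud_base);
		close(fd);
		return 0;
	}
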
1648 | |||
1649 | static int ATEN2011_ioctl(struct tty_struct *tty, struct file *file, | ||
1650 | unsigned int cmd, unsigned long arg) | ||
1651 | { | ||
1652 | struct usb_serial_port *port = tty->driver_data; | ||
1653 | struct ATENINTL_port *ATEN2011_port; | ||
1654 | struct async_icount cnow; | ||
1655 | struct async_icount cprev; | ||
1656 | struct serial_icounter_struct icount; | ||
1657 | int ATENret = 0; | ||
1658 | unsigned int __user *user_arg = (unsigned int __user *)arg; | ||
1659 | |||
1660 | ATEN2011_port = usb_get_serial_port_data(port); | ||
1661 | |||
1662 | if (ATEN2011_port == NULL) | ||
1663 | return -1; | ||
1664 | |||
1665 | dbg("%s - port %d, cmd = 0x%x", __func__, port->number, cmd); | ||
1666 | |||
1667 | switch (cmd) { | ||
1668 | /* return number of bytes available */ | ||
1669 | |||
1670 | case TIOCINQ: | ||
1671 | dbg("%s (%d) TIOCINQ", __func__, port->number); | ||
1672 | return get_number_bytes_avail(tty, ATEN2011_port, user_arg); | ||
1673 | break; | ||
1674 | |||
1675 | case TIOCOUTQ: | ||
1676 | dbg("%s (%d) TIOCOUTQ", __func__, port->number); | ||
1677 | return put_user(ATEN2011_chars_in_buffer(tty), user_arg); | ||
1678 | break; | ||
1679 | |||
1680 | case TIOCSERGETLSR: | ||
1681 | dbg("%s (%d) TIOCSERGETLSR", __func__, port->number); | ||
1682 | return get_lsr_info(tty, ATEN2011_port, user_arg); | ||
1683 | return 0; | ||
1684 | |||
1685 | case TIOCMBIS: | ||
1686 | case TIOCMBIC: | ||
1687 | case TIOCMSET: | ||
1688 | dbg("%s (%d) TIOCMSET/TIOCMBIC/TIOCMSET", __func__, | ||
1689 | port->number); | ||
1690 | ATENret = set_modem_info(ATEN2011_port, cmd, user_arg); | ||
1691 | return ATENret; | ||
1692 | |||
1693 | case TIOCMGET: | ||
1694 | dbg("%s (%d) TIOCMGET", __func__, port->number); | ||
1695 | return get_modem_info(ATEN2011_port, user_arg); | ||
1696 | |||
1697 | case TIOCGSERIAL: | ||
1698 | dbg("%s (%d) TIOCGSERIAL", __func__, port->number); | ||
1699 | return get_serial_info(ATEN2011_port, | ||
1700 | (struct serial_struct __user *)arg); | ||
1701 | |||
1702 | case TIOCSSERIAL: | ||
1703 | dbg("%s (%d) TIOCSSERIAL", __func__, port->number); | ||
1704 | break; | ||
1705 | |||
1706 | case TIOCMIWAIT: | ||
1707 | dbg("%s (%d) TIOCMIWAIT", __func__, port->number); | ||
1708 | cprev = ATEN2011_port->icount; | ||
1709 | while (1) { | ||
1710 | /* see if a signal did it */ | ||
1711 | if (signal_pending(current)) | ||
1712 | return -ERESTARTSYS; | ||
1713 | cnow = ATEN2011_port->icount; | ||
1714 | if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && | ||
1715 | cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) | ||
1716 | return -EIO; /* no change => error */ | ||
1717 | if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || | ||
1718 | ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || | ||
1719 | ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) || | ||
1720 | ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) { | ||
1721 | return 0; | ||
1722 | } | ||
1723 | cprev = cnow; | ||
1724 | } | ||
1725 | /* NOTREACHED */ | ||
1726 | break; | ||
1727 | |||
1728 | case TIOCGICOUNT: | ||
1729 | cnow = ATEN2011_port->icount; | ||
1730 | icount.cts = cnow.cts; | ||
1731 | icount.dsr = cnow.dsr; | ||
1732 | icount.rng = cnow.rng; | ||
1733 | icount.dcd = cnow.dcd; | ||
1734 | icount.rx = cnow.rx; | ||
1735 | icount.tx = cnow.tx; | ||
1736 | icount.frame = cnow.frame; | ||
1737 | icount.overrun = cnow.overrun; | ||
1738 | icount.parity = cnow.parity; | ||
1739 | icount.brk = cnow.brk; | ||
1740 | icount.buf_overrun = cnow.buf_overrun; | ||
1741 | |||
1742 | dbg("%s (%d) TIOCGICOUNT RX=%d, TX=%d", __func__, | ||
1743 | port->number, icount.rx, icount.tx); | ||
1744 | if (copy_to_user((void __user *)arg, &icount, sizeof(icount))) | ||
1745 | return -EFAULT; | ||
1746 | return 0; | ||
1747 | |||
1748 | default: | ||
1749 | break; | ||
1750 | } | ||
1751 | |||
1752 | return -ENOIOCTLCMD; | ||
1753 | } | ||
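
The TIOCMGET and TIOCMIWAIT branches handled above are typically driven from user space as in the following sketch (illustrative only; the device node and the line watched are assumptions, not taken from the driver):

	/* Hypothetical user-space sketch; /dev/ttyUSB0 is an assumed node. */
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>

	int main(void)
	{
		int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);
		int status;

		if (fd < 0)
			return 1;

		/* Read the modem lines; serviced by get_modem_info() above. */
		if (ioctl(fd, TIOCMGET, &status) == 0)
			printf("CTS=%d DSR=%d CD=%d\n",
			       !!(status & TIOCM_CTS), !!(status & TIOCM_DSR),
			       !!(status & TIOCM_CAR));

		/* Block until CTS changes; serviced by the TIOCMIWAIT loop. */
		ioctl(fd, TIOCMIWAIT, TIOCM_CTS);

		close(fd);
		return 0;
	}
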
1754 | |||
1755 | static int ATEN2011_calc_baud_rate_divisor(int baudRate, int *divisor, | ||
1756 | __u16 *clk_sel_val) | ||
1757 | { | ||
1758 | dbg("%s - %d", __func__, baudRate); | ||
1759 | |||
1760 | if (baudRate <= 115200) { | ||
1761 | *divisor = 115200 / baudRate; | ||
1762 | *clk_sel_val = 0x0; | ||
1763 | } | ||
1764 | if ((baudRate > 115200) && (baudRate <= 230400)) { | ||
1765 | *divisor = 230400 / baudRate; | ||
1766 | *clk_sel_val = 0x10; | ||
1767 | } else if ((baudRate > 230400) && (baudRate <= 403200)) { | ||
1768 | *divisor = 403200 / baudRate; | ||
1769 | *clk_sel_val = 0x20; | ||
1770 | } else if ((baudRate > 403200) && (baudRate <= 460800)) { | ||
1771 | *divisor = 460800 / baudRate; | ||
1772 | *clk_sel_val = 0x30; | ||
1773 | } else if ((baudRate > 460800) && (baudRate <= 806400)) { | ||
1774 | *divisor = 806400 / baudRate; | ||
1775 | *clk_sel_val = 0x40; | ||
1776 | } else if ((baudRate > 806400) && (baudRate <= 921600)) { | ||
1777 | *divisor = 921600 / baudRate; | ||
1778 | *clk_sel_val = 0x50; | ||
1779 | } else if ((baudRate > 921600) && (baudRate <= 1572864)) { | ||
1780 | *divisor = 1572864 / baudRate; | ||
1781 | *clk_sel_val = 0x60; | ||
1782 | } else if ((baudRate > 1572864) && (baudRate <= 3145728)) { | ||
1783 | *divisor = 3145728 / baudRate; | ||
1784 | *clk_sel_val = 0x70; | ||
1785 | } | ||
1786 | return 0; | ||
1787 | } | ||
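
The chain above selects the smallest base clock that covers the requested rate and divides it down, with clk_sel_val stepping 0x00, 0x10, ... 0x70. A stand-alone sketch that mirrors this scheme for a quick sanity check (illustrative only, not driver code):

	/* Stand-alone illustration of the divisor/clock-select scheme above. */
	#include <stdio.h>

	static void calc_divisor(int baud, int *divisor, unsigned short *clk_sel_val)
	{
		/* Base clocks matching the branches in the driver function. */
		static const int base_clk[] = {
			115200, 230400, 403200, 460800,
			806400, 921600, 1572864, 3145728,
		};
		int i;

		for (i = 0; i < 8; i++) {
			if (baud <= base_clk[i]) {
				*divisor = base_clk[i] / baud;
				*clk_sel_val = i << 4;	/* 0x00, 0x10, ..., 0x70 */
				return;
			}
		}
	}

	int main(void)
	{
		int div;
		unsigned short clk;

		calc_divisor(9600, &div, &clk);   /* div = 12, clk = 0x00 */
		printf("9600   -> divisor %d, clk_sel 0x%02x\n", div, clk);
		calc_divisor(460800, &div, &clk); /* div = 1,  clk = 0x30 */
		printf("460800 -> divisor %d, clk_sel 0x%02x\n", div, clk);
		return 0;
	}
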
1788 | |||
1789 | static int ATEN2011_send_cmd_write_baud_rate(struct ATENINTL_port | ||
1790 | *ATEN2011_port, int baudRate) | ||
1791 | { | ||
1792 | int divisor = 0; | ||
1793 | int status; | ||
1794 | __u16 Data; | ||
1795 | unsigned char number; | ||
1796 | __u16 clk_sel_val; | ||
1797 | struct usb_serial_port *port; | ||
1798 | int minor; | ||
1799 | |||
1800 | if (ATEN2011_port == NULL) | ||
1801 | return -1; | ||
1802 | |||
1803 | port = (struct usb_serial_port *)ATEN2011_port->port; | ||
1804 | |||
1805 | dbg("%s", "Entering .......... "); | ||
1806 | |||
1807 | minor = ATEN2011_port->port->serial->minor; | ||
1808 | if (minor == SERIAL_TTY_NO_MINOR) | ||
1809 | minor = 0; | ||
1810 | number = ATEN2011_port->port->number - minor; | ||
1811 | |||
1812 | dbg("%s - port = %d, baud = %d", __func__, | ||
1813 | ATEN2011_port->port->number, baudRate); | ||
1814 | /* reset clk_uart_sel in spregOffset */ | ||
1815 | if (baudRate > 115200) { | ||
1816 | #ifdef HW_flow_control | ||
1817 | /* | ||
1818 | * NOTE: need to check the other register when modifying; | ||
1819 | * here we set the h/w flow control bit to 1. | ||
1820 | */ | ||
1821 | /* Data = ATEN2011_port->shadowMCR; */ | ||
1822 | Data = 0x2b; | ||
1823 | ATEN2011_port->shadowMCR = Data; | ||
1824 | status = set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); | ||
1825 | if (status < 0) { | ||
1826 | dbg("Writing spreg failed in set_serial_baud"); | ||
1827 | return -1; | ||
1828 | } | ||
1829 | #endif | ||
1830 | |||
1831 | } else { | ||
1832 | #ifdef HW_flow_control | ||
1833 | /* setting h/w flow control bit to 0; */ | ||
1834 | /* Data = ATEN2011_port->shadowMCR; */ | ||
1835 | Data = 0xb; | ||
1836 | ATEN2011_port->shadowMCR = Data; | ||
1837 | status = set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); | ||
1838 | if (status < 0) { | ||
1839 | dbg("Writing spreg failed in set_serial_baud"); | ||
1840 | return -1; | ||
1841 | } | ||
1842 | #endif | ||
1843 | |||
1844 | } | ||
1845 | |||
1846 | if (1) /* baudRate <= 115200 */ { | ||
1847 | clk_sel_val = 0x0; | ||
1848 | Data = 0x0; | ||
1849 | status = | ||
1850 | ATEN2011_calc_baud_rate_divisor(baudRate, &divisor, | ||
1851 | &clk_sel_val); | ||
1852 | status = get_reg_sync(port, ATEN2011_port->SpRegOffset, &Data); | ||
1853 | if (status < 0) { | ||
1854 | dbg("reading spreg failed in set_serial_baud"); | ||
1855 | return -1; | ||
1856 | } | ||
1857 | Data = (Data & 0x8f) | clk_sel_val; | ||
1858 | status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data); | ||
1859 | if (status < 0) { | ||
1860 | dbg("Writing spreg failed in set_serial_baud"); | ||
1861 | return -1; | ||
1862 | } | ||
1863 | /* Calculate the Divisor */ | ||
1864 | |||
1865 | if (status) { | ||
1866 | err("%s - bad baud rate", __func__); | ||
1867 | dbg("%s", "bad baud rate"); | ||
1868 | return status; | ||
1869 | } | ||
1870 | /* Enable access to divisor latch */ | ||
1871 | Data = ATEN2011_port->shadowLCR | SERIAL_LCR_DLAB; | ||
1872 | ATEN2011_port->shadowLCR = Data; | ||
1873 | set_uart_reg(port, LINE_CONTROL_REGISTER, Data); | ||
1874 | |||
1875 | /* Write the divisor */ | ||
1876 | Data = (unsigned char)(divisor & 0xff); | ||
1877 | dbg("set_serial_baud Value to write DLL is %x", Data); | ||
1878 | set_uart_reg(port, DIVISOR_LATCH_LSB, Data); | ||
1879 | |||
1880 | Data = (unsigned char)((divisor & 0xff00) >> 8); | ||
1881 | dbg("set_serial_baud Value to write DLM is %x", Data); | ||
1882 | set_uart_reg(port, DIVISOR_LATCH_MSB, Data); | ||
1883 | |||
1884 | /* Disable access to divisor latch */ | ||
1885 | Data = ATEN2011_port->shadowLCR & ~SERIAL_LCR_DLAB; | ||
1886 | ATEN2011_port->shadowLCR = Data; | ||
1887 | set_uart_reg(port, LINE_CONTROL_REGISTER, Data); | ||
1888 | |||
1889 | } | ||
1890 | |||
1891 | return status; | ||
1892 | } | ||
1893 | |||
1894 | static void ATEN2011_change_port_settings(struct tty_struct *tty, | ||
1895 | struct ATENINTL_port *ATEN2011_port, | ||
1896 | struct ktermios *old_termios) | ||
1897 | { | ||
1898 | int baud; | ||
1899 | unsigned cflag; | ||
1900 | unsigned iflag; | ||
1901 | __u8 lData; | ||
1902 | __u8 lParity; | ||
1903 | __u8 lStop; | ||
1904 | int status; | ||
1905 | __u16 Data; | ||
1906 | struct usb_serial_port *port; | ||
1907 | struct usb_serial *serial; | ||
1908 | |||
1909 | if (ATEN2011_port == NULL) | ||
1910 | return; | ||
1911 | |||
1912 | port = (struct usb_serial_port *)ATEN2011_port->port; | ||
1913 | |||
1914 | serial = port->serial; | ||
1915 | |||
1916 | dbg("%s - port %d", __func__, ATEN2011_port->port->number); | ||
1917 | |||
1918 | if (!ATEN2011_port->open) { | ||
1919 | dbg("%s - port not opened", __func__); | ||
1920 | return; | ||
1921 | } | ||
1922 | |||
1923 | if ((!tty) || (!tty->termios)) { | ||
1924 | dbg("%s - no tty structures", __func__); | ||
1925 | return; | ||
1926 | } | ||
1927 | |||
1928 | dbg("%s", "Entering .......... "); | ||
1929 | |||
1930 | lData = LCR_BITS_8; | ||
1931 | lStop = LCR_STOP_1; | ||
1932 | lParity = LCR_PAR_NONE; | ||
1933 | |||
1934 | cflag = tty->termios->c_cflag; | ||
1935 | iflag = tty->termios->c_iflag; | ||
1936 | |||
1937 | /* Change the number of bits */ | ||
1938 | |||
1939 | /* COMMENT1: the "if (cflag & CSIZE)" check below is commented out because it caused errors in the serial loopback test (serial_loopback.pl -v) */ | ||
1940 | /* if(cflag & CSIZE) */ | ||
1941 | { | ||
1942 | switch (cflag & CSIZE) { | ||
1943 | case CS5: | ||
1944 | lData = LCR_BITS_5; | ||
1945 | break; | ||
1946 | |||
1947 | case CS6: | ||
1948 | lData = LCR_BITS_6; | ||
1949 | break; | ||
1950 | |||
1951 | case CS7: | ||
1952 | lData = LCR_BITS_7; | ||
1953 | break; | ||
1954 | default: | ||
1955 | case CS8: | ||
1956 | lData = LCR_BITS_8; | ||
1957 | break; | ||
1958 | } | ||
1959 | } | ||
1960 | /* Change the Parity bit */ | ||
1961 | if (cflag & PARENB) { | ||
1962 | if (cflag & PARODD) { | ||
1963 | lParity = LCR_PAR_ODD; | ||
1964 | dbg("%s - parity = odd", __func__); | ||
1965 | } else { | ||
1966 | lParity = LCR_PAR_EVEN; | ||
1967 | dbg("%s - parity = even", __func__); | ||
1968 | } | ||
1969 | |||
1970 | } else { | ||
1971 | dbg("%s - parity = none", __func__); | ||
1972 | } | ||
1973 | |||
1974 | if (cflag & CMSPAR) | ||
1975 | lParity = lParity | 0x20; | ||
1976 | |||
1977 | /* Change the Stop bit */ | ||
1978 | if (cflag & CSTOPB) { | ||
1979 | lStop = LCR_STOP_2; | ||
1980 | dbg("%s - stop bits = 2", __func__); | ||
1981 | } else { | ||
1982 | lStop = LCR_STOP_1; | ||
1983 | dbg("%s - stop bits = 1", __func__); | ||
1984 | } | ||
1985 | |||
1986 | /* Update the LCR with the correct value */ | ||
1987 | ATEN2011_port->shadowLCR &= | ||
1988 | ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK); | ||
1989 | ATEN2011_port->shadowLCR |= (lData | lParity | lStop); | ||
1990 | |||
1991 | dbg | ||
1992 | ("ATEN2011_change_port_settings ATEN2011_port->shadowLCR is %x", | ||
1993 | ATEN2011_port->shadowLCR); | ||
1994 | /* Disable Interrupts */ | ||
1995 | Data = 0x00; | ||
1996 | set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); | ||
1997 | |||
1998 | Data = 0x00; | ||
1999 | set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); | ||
2000 | |||
2001 | Data = 0xcf; | ||
2002 | set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); | ||
2003 | |||
2004 | /* Send the updated LCR value to the ATEN2011 */ | ||
2005 | Data = ATEN2011_port->shadowLCR; | ||
2006 | |||
2007 | set_uart_reg(port, LINE_CONTROL_REGISTER, Data); | ||
2008 | |||
2009 | Data = 0x00b; | ||
2010 | ATEN2011_port->shadowMCR = Data; | ||
2011 | set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); | ||
2012 | Data = 0x00b; | ||
2013 | set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); | ||
2014 | |||
2015 | /* set up the MCR register and send it to the ATEN2011 */ | ||
2016 | |||
2017 | ATEN2011_port->shadowMCR = MCR_MASTER_IE; | ||
2018 | if (cflag & CBAUD) | ||
2019 | ATEN2011_port->shadowMCR |= (MCR_DTR | MCR_RTS); | ||
2020 | |||
2021 | if (cflag & CRTSCTS) | ||
2022 | ATEN2011_port->shadowMCR |= (MCR_XON_ANY); | ||
2023 | else | ||
2024 | ATEN2011_port->shadowMCR &= ~(MCR_XON_ANY); | ||
2025 | |||
2026 | Data = ATEN2011_port->shadowMCR; | ||
2027 | set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); | ||
2028 | |||
2029 | /* Determine divisor based on baud rate */ | ||
2030 | baud = tty_get_baud_rate(tty); | ||
2031 | |||
2032 | if (!baud) { | ||
2033 | /* pick a default, any default... */ | ||
2034 | dbg("%s", "Picked default baud..."); | ||
2035 | baud = 9600; | ||
2036 | } | ||
2037 | |||
2038 | dbg("%s - baud rate = %d", __func__, baud); | ||
2039 | status = ATEN2011_send_cmd_write_baud_rate(ATEN2011_port, baud); | ||
2040 | |||
2041 | /* Enable Interrupts */ | ||
2042 | Data = 0x0c; | ||
2043 | set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); | ||
2044 | |||
2045 | if (ATEN2011_port->read_urb->status != -EINPROGRESS) { | ||
2046 | ATEN2011_port->read_urb->dev = serial->dev; | ||
2047 | |||
2048 | status = usb_submit_urb(ATEN2011_port->read_urb, GFP_ATOMIC); | ||
2049 | |||
2050 | if (status) { | ||
2051 | dbg | ||
2052 | (" usb_submit_urb(read bulk) failed, status = %d", | ||
2053 | status); | ||
2054 | } | ||
2055 | } | ||
2056 | dbg | ||
2057 | ("ATEN2011_change_port_settings ATEN2011_port->shadowLCR is End %x", | ||
2058 | ATEN2011_port->shadowLCR); | ||
2059 | |||
2060 | return; | ||
2061 | } | ||
2062 | |||
2063 | static int ATEN2011_calc_num_ports(struct usb_serial *serial) | ||
2064 | { | ||
2065 | |||
2066 | __u16 Data = 0x00; | ||
2067 | int ret = 0; | ||
2068 | int ATEN2011_2or4ports; | ||
2069 | ret = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), | ||
2070 | ATEN_RDREQ, ATEN_RD_RTYPE, 0, GPIO_REGISTER, | ||
2071 | &Data, VENDOR_READ_LENGTH, ATEN_WDR_TIMEOUT); | ||
2072 | |||
2073 | /* ghostgum: here is where the problem appears to be */ | ||
2074 | /* Which of the following are needed? */ | ||
2075 | /* Greg used the serial->type->num_ports=2 */ | ||
2076 | /* But the code in the ATEN2011_open relies on serial->num_ports=2 */ | ||
2077 | if ((Data & 0x01) == 0) { | ||
2078 | ATEN2011_2or4ports = 2; | ||
2079 | serial->type->num_ports = 2; | ||
2080 | serial->num_ports = 2; | ||
2081 | } | ||
2082 | /* else if(serial->interface->cur_altsetting->desc.bNumEndpoints == 9) */ | ||
2083 | else { | ||
2084 | ATEN2011_2or4ports = 4; | ||
2085 | serial->type->num_ports = 4; | ||
2086 | serial->num_ports = 4; | ||
2087 | |||
2088 | } | ||
2089 | |||
2090 | return ATEN2011_2or4ports; | ||
2091 | } | ||
2092 | |||
2093 | static int ATEN2011_startup(struct usb_serial *serial) | ||
2094 | { | ||
2095 | struct ATENINTL_serial *ATEN2011_serial; | ||
2096 | struct ATENINTL_port *ATEN2011_port; | ||
2097 | struct usb_device *dev; | ||
2098 | int i, status; | ||
2099 | int minor; | ||
2100 | |||
2101 | __u16 Data; | ||
2102 | dbg("%s", " ATEN2011_startup :entering.........."); | ||
2103 | |||
2104 | if (!serial) { | ||
2105 | dbg("%s", "Invalid Handler"); | ||
2106 | return -1; | ||
2107 | } | ||
2108 | |||
2109 | dev = serial->dev; | ||
2110 | |||
2111 | dbg("%s", "Entering..."); | ||
2112 | |||
2113 | /* create our private serial structure */ | ||
2114 | ATEN2011_serial = kzalloc(sizeof(struct ATENINTL_serial), GFP_KERNEL); | ||
2115 | if (ATEN2011_serial == NULL) { | ||
2116 | err("%s - Out of memory", __func__); | ||
2117 | return -ENOMEM; | ||
2118 | } | ||
2119 | |||
2120 | /* resetting the private structure field values to zero */ | ||
2121 | memset(ATEN2011_serial, 0, sizeof(struct ATENINTL_serial)); | ||
2122 | |||
2123 | ATEN2011_serial->serial = serial; | ||
2124 | /* initialize status polling flag to 0 */ | ||
2125 | ATEN2011_serial->status_polling_started = 0; | ||
2126 | |||
2127 | usb_set_serial_data(serial, ATEN2011_serial); | ||
2128 | ATEN2011_serial->ATEN2011_spectrum_2or4ports = | ||
2129 | ATEN2011_calc_num_ports(serial); | ||
2130 | /* we set up the pointers to the endpoints in the ATEN2011_open * | ||
2131 | * function, as the structures aren't created yet. */ | ||
2132 | |||
2133 | /* set up port private structures */ | ||
2134 | for (i = 0; i < serial->num_ports; ++i) { | ||
2135 | ATEN2011_port = | ||
2136 | kmalloc(sizeof(struct ATENINTL_port), GFP_KERNEL); | ||
2137 | if (ATEN2011_port == NULL) { | ||
2138 | err("%s - Out of memory", __func__); | ||
2139 | usb_set_serial_data(serial, NULL); | ||
2140 | kfree(ATEN2011_serial); | ||
2141 | return -ENOMEM; | ||
2142 | } | ||
2143 | memset(ATEN2011_port, 0, sizeof(struct ATENINTL_port)); | ||
2144 | |||
2145 | /* | ||
2146 | * Initialize each port's interrupt endpoint to the port 0 | ||
2147 | * interrupt endpoint. Our device has only one interrupt endpoint | ||
2148 | * common to all ports. | ||
2149 | */ | ||
2150 | /* serial->port[i]->interrupt_in_endpointAddress = serial->port[0]->interrupt_in_endpointAddress; */ | ||
2151 | |||
2152 | ATEN2011_port->port = serial->port[i]; | ||
2153 | usb_set_serial_port_data(serial->port[i], ATEN2011_port); | ||
2154 | |||
2155 | minor = serial->port[i]->serial->minor; | ||
2156 | if (minor == SERIAL_TTY_NO_MINOR) | ||
2157 | minor = 0; | ||
2158 | ATEN2011_port->port_num = | ||
2159 | ((serial->port[i]->number - minor) + 1); | ||
2160 | |||
2161 | if (ATEN2011_port->port_num == 1) { | ||
2162 | ATEN2011_port->SpRegOffset = 0x0; | ||
2163 | ATEN2011_port->ControlRegOffset = 0x1; | ||
2164 | ATEN2011_port->DcrRegOffset = 0x4; | ||
2165 | } else if ((ATEN2011_port->port_num == 2) | ||
2166 | && (ATEN2011_serial->ATEN2011_spectrum_2or4ports == | ||
2167 | 4)) { | ||
2168 | ATEN2011_port->SpRegOffset = 0x8; | ||
2169 | ATEN2011_port->ControlRegOffset = 0x9; | ||
2170 | ATEN2011_port->DcrRegOffset = 0x16; | ||
2171 | } else if ((ATEN2011_port->port_num == 2) | ||
2172 | && (ATEN2011_serial->ATEN2011_spectrum_2or4ports == | ||
2173 | 2)) { | ||
2174 | ATEN2011_port->SpRegOffset = 0xa; | ||
2175 | ATEN2011_port->ControlRegOffset = 0xb; | ||
2176 | ATEN2011_port->DcrRegOffset = 0x19; | ||
2177 | } else if ((ATEN2011_port->port_num == 3) | ||
2178 | && (ATEN2011_serial->ATEN2011_spectrum_2or4ports == | ||
2179 | 4)) { | ||
2180 | ATEN2011_port->SpRegOffset = 0xa; | ||
2181 | ATEN2011_port->ControlRegOffset = 0xb; | ||
2182 | ATEN2011_port->DcrRegOffset = 0x19; | ||
2183 | } else if ((ATEN2011_port->port_num == 4) | ||
2184 | && (ATEN2011_serial->ATEN2011_spectrum_2or4ports == | ||
2185 | 4)) { | ||
2186 | ATEN2011_port->SpRegOffset = 0xc; | ||
2187 | ATEN2011_port->ControlRegOffset = 0xd; | ||
2188 | ATEN2011_port->DcrRegOffset = 0x1c; | ||
2189 | } | ||
2190 | |||
2191 | usb_set_serial_port_data(serial->port[i], ATEN2011_port); | ||
2192 | |||
2193 | /* enable rx_disable bit in control register */ | ||
2194 | |||
2195 | status = get_reg_sync(serial->port[i], | ||
2196 | ATEN2011_port->ControlRegOffset, &Data); | ||
2197 | if (status < 0) { | ||
2198 | dbg("Reading ControlReg failed status-0x%x", | ||
2199 | status); | ||
2200 | break; | ||
2201 | } else | ||
2202 | dbg | ||
2203 | ("ControlReg Reading success val is %x, status%d", | ||
2204 | Data, status); | ||
2205 | Data |= 0x08; /* set the driver done bit */ | ||
2206 | Data |= 0x04; /* sp1 bit: reflect CTS changes in the modem status reg */ | ||
2207 | |||
2208 | /* Data |= 0x20; */ /* rx_disable bit */ | ||
2209 | status = set_reg_sync(serial->port[i], | ||
2210 | ATEN2011_port->ControlRegOffset, Data); | ||
2211 | if (status < 0) { | ||
2212 | dbg | ||
2213 | ("Writing ControlReg failed(rx_disable) status-0x%x", | ||
2214 | status); | ||
2215 | break; | ||
2216 | } else | ||
2217 | dbg | ||
2218 | ("ControlReg Writing success(rx_disable) status%d", | ||
2219 | status); | ||
2220 | |||
2221 | /* | ||
2222 | * Write default values in DCR (i.e 0x01 in DCR0, 0x05 in DCR2 | ||
2223 | * and 0x24 in DCR3 | ||
2224 | */ | ||
2225 | Data = 0x01; | ||
2226 | status = set_reg_sync(serial->port[i], | ||
2227 | (__u16)(ATEN2011_port->DcrRegOffset + 0), | ||
2228 | Data); | ||
2229 | if (status < 0) { | ||
2230 | dbg("Writing DCR0 failed status-0x%x", status); | ||
2231 | break; | ||
2232 | } else | ||
2233 | dbg("DCR0 Writing success status%d", status); | ||
2234 | |||
2235 | Data = 0x05; | ||
2236 | status = set_reg_sync(serial->port[i], | ||
2237 | (__u16)(ATEN2011_port->DcrRegOffset + 1), | ||
2238 | Data); | ||
2239 | if (status < 0) { | ||
2240 | dbg("Writing DCR1 failed status-0x%x", status); | ||
2241 | break; | ||
2242 | } else | ||
2243 | dbg("DCR1 Writing success status%d", status); | ||
2244 | |||
2245 | Data = 0x24; | ||
2246 | status = set_reg_sync(serial->port[i], | ||
2247 | (__u16)(ATEN2011_port->DcrRegOffset + 2), | ||
2248 | Data); | ||
2249 | if (status < 0) { | ||
2250 | dbg("Writing DCR2 failed status-0x%x", status); | ||
2251 | break; | ||
2252 | } else | ||
2253 | dbg("DCR2 Writing success status%d", status); | ||
2254 | |||
2255 | /* write clkstart = 0x0 and clkmulti = 0x20 */ | ||
2256 | Data = 0x0; | ||
2257 | status = set_reg_sync(serial->port[i], CLK_START_VALUE_REGISTER, | ||
2258 | Data); | ||
2259 | if (status < 0) { | ||
2260 | dbg | ||
2261 | ("Writing CLK_START_VALUE_REGISTER failed status-0x%x", | ||
2262 | status); | ||
2263 | break; | ||
2264 | } else | ||
2265 | dbg | ||
2266 | ("CLK_START_VALUE_REGISTER Writing success status%d", | ||
2267 | status); | ||
2268 | |||
2269 | Data = 0x20; | ||
2270 | status = set_reg_sync(serial->port[i], CLK_MULTI_REGISTER, | ||
2271 | Data); | ||
2272 | if (status < 0) { | ||
2273 | dbg | ||
2274 | ("Writing CLK_MULTI_REGISTER failed status-0x%x", | ||
2275 | status); | ||
2276 | break; | ||
2277 | } else | ||
2278 | dbg("CLK_MULTI_REGISTER Writing success status%d", | ||
2279 | status); | ||
2280 | |||
2281 | /* Zero Length flag register */ | ||
2282 | if ((ATEN2011_port->port_num != 1) | ||
2283 | && (ATEN2011_serial->ATEN2011_spectrum_2or4ports == 2)) { | ||
2284 | |||
2285 | Data = 0xff; | ||
2286 | status = set_reg_sync(serial->port[i], | ||
2287 | (__u16)(ZLP_REG1 + ((__u16)ATEN2011_port->port_num)), | ||
2288 | Data); | ||
2289 | dbg("ZLIP offset%x", | ||
2290 | (__u16) (ZLP_REG1 + | ||
2291 | ((__u16) ATEN2011_port->port_num))); | ||
2292 | if (status < 0) { | ||
2293 | dbg | ||
2294 | ("Writing ZLP_REG%d failed status-0x%x", | ||
2295 | i + 2, status); | ||
2296 | break; | ||
2297 | } else | ||
2298 | dbg("ZLP_REG%d Writing success status%d", | ||
2299 | i + 2, status); | ||
2300 | } else { | ||
2301 | Data = 0xff; | ||
2302 | status = set_reg_sync(serial->port[i], | ||
2303 | (__u16)(ZLP_REG1 + ((__u16)ATEN2011_port->port_num) - 0x1), | ||
2304 | Data); | ||
2305 | dbg("ZLIP offset%x", | ||
2306 | (__u16) (ZLP_REG1 + | ||
2307 | ((__u16) ATEN2011_port->port_num) - | ||
2308 | 0x1)); | ||
2309 | if (status < 0) { | ||
2310 | dbg | ||
2311 | ("Writing ZLP_REG%d failed status-0x%x", | ||
2312 | i + 1, status); | ||
2313 | break; | ||
2314 | } else | ||
2315 | dbg("ZLP_REG%d Writing success status%d", | ||
2316 | i + 1, status); | ||
2317 | |||
2318 | } | ||
2319 | ATEN2011_port->control_urb = usb_alloc_urb(0, GFP_ATOMIC); | ||
2320 | ATEN2011_port->ctrl_buf = kmalloc(16, GFP_KERNEL); | ||
2321 | |||
2322 | } | ||
2323 | |||
2324 | /* Zero Length flag enable */ | ||
2325 | Data = 0x0f; | ||
2326 | status = set_reg_sync(serial->port[0], ZLP_REG5, Data); | ||
2327 | if (status < 0) { | ||
2328 | dbg("Writing ZLP_REG5 failed status-0x%x", status); | ||
2329 | return -1; | ||
2330 | } else | ||
2331 | dbg("ZLP_REG5 Writing success status%d", status); | ||
2332 | |||
2333 | /* setting configuration feature to one */ | ||
2334 | usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), | ||
2335 | (__u8) 0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5 * HZ); | ||
2336 | return 0; | ||
2337 | } | ||
2338 | |||
2339 | static void ATEN2011_release(struct usb_serial *serial) | ||
2340 | { | ||
2341 | int i; | ||
2342 | struct ATENINTL_port *ATEN2011_port; | ||
2343 | |||
2344 | /* check for the ports to be closed, close the ports and disconnect */ | ||
2345 | |||
2346 | /* free private structure allocated for serial port * | ||
2347 | * stop reads and writes on all ports */ | ||
2348 | |||
2349 | for (i = 0; i < serial->num_ports; ++i) { | ||
2350 | ATEN2011_port = usb_get_serial_port_data(serial->port[i]); | ||
2351 | kfree(ATEN2011_port->ctrl_buf); | ||
2352 | usb_kill_urb(ATEN2011_port->control_urb); | ||
2353 | kfree(ATEN2011_port); | ||
2354 | usb_set_serial_port_data(serial->port[i], NULL); | ||
2355 | } | ||
2356 | |||
2357 | /* free private structure allocated for serial device */ | ||
2358 | |||
2359 | kfree(usb_get_serial_data(serial)); | ||
2360 | usb_set_serial_data(serial, NULL); | ||
2361 | } | ||
2362 | |||
2363 | static struct usb_serial_driver aten_serial_driver = { | ||
2364 | .driver = { | ||
2365 | .owner = THIS_MODULE, | ||
2366 | .name = "aten2011", | ||
2367 | }, | ||
2368 | .description = DRIVER_DESC, | ||
2369 | .id_table = id_table, | ||
2370 | .open = ATEN2011_open, | ||
2371 | .close = ATEN2011_close, | ||
2372 | .write = ATEN2011_write, | ||
2373 | .write_room = ATEN2011_write_room, | ||
2374 | .chars_in_buffer = ATEN2011_chars_in_buffer, | ||
2375 | .throttle = ATEN2011_throttle, | ||
2376 | .unthrottle = ATEN2011_unthrottle, | ||
2377 | .calc_num_ports = ATEN2011_calc_num_ports, | ||
2378 | |||
2379 | .ioctl = ATEN2011_ioctl, | ||
2380 | .set_termios = ATEN2011_set_termios, | ||
2381 | .break_ctl = ATEN2011_break, | ||
2382 | .tiocmget = ATEN2011_tiocmget, | ||
2383 | .tiocmset = ATEN2011_tiocmset, | ||
2384 | .attach = ATEN2011_startup, | ||
2385 | .release = ATEN2011_release, | ||
2386 | .read_bulk_callback = ATEN2011_bulk_in_callback, | ||
2387 | .read_int_callback = ATEN2011_interrupt_callback, | ||
2388 | }; | ||
2389 | |||
2390 | static struct usb_driver aten_driver = { | ||
2391 | .name = "aten2011", | ||
2392 | .probe = usb_serial_probe, | ||
2393 | .disconnect = usb_serial_disconnect, | ||
2394 | .id_table = id_table, | ||
2395 | }; | ||
2396 | |||
2397 | static int __init aten_init(void) | ||
2398 | { | ||
2399 | int retval; | ||
2400 | |||
2401 | /* Register with the usb serial */ | ||
2402 | retval = usb_serial_register(&aten_serial_driver); | ||
2403 | if (retval) | ||
2404 | return retval; | ||
2405 | |||
2406 | printk(KERN_INFO KBUILD_MODNAME ":" | ||
2407 | DRIVER_DESC " " DRIVER_VERSION "\n"); | ||
2408 | |||
2409 | /* Register with the usb */ | ||
2410 | retval = usb_register(&aten_driver); | ||
2411 | if (retval) | ||
2412 | usb_serial_deregister(&aten_serial_driver); | ||
2413 | |||
2414 | return retval; | ||
2415 | } | ||
2416 | |||
2417 | static void __exit aten_exit(void) | ||
2418 | { | ||
2419 | usb_deregister(&aten_driver); | ||
2420 | usb_serial_deregister(&aten_serial_driver); | ||
2421 | } | ||
2422 | |||
2423 | module_init(aten_init); | ||
2424 | module_exit(aten_exit); | ||
2425 | |||
2426 | /* Module information */ | ||
2427 | MODULE_DESCRIPTION(DRIVER_DESC); | ||
2428 | MODULE_LICENSE("GPL"); | ||
2429 | |||
2430 | MODULE_PARM_DESC(debug, "Debug enabled or not"); | ||
diff --git a/drivers/staging/udlfb/udlfb.c b/drivers/staging/udlfb/udlfb.c index 0ab9d15f3439..f5416af1e902 100644 --- a/drivers/staging/udlfb/udlfb.c +++ b/drivers/staging/udlfb/udlfb.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
22 | #include <linux/fb.h> | 22 | #include <linux/fb.h> |
23 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
24 | #include <linux/vmalloc.h> | ||
24 | 25 | ||
25 | #include "udlfb.h" | 26 | #include "udlfb.h" |
26 | 27 | ||
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 24dfb33f90cb..a16c538d0132 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c | |||
@@ -80,38 +80,18 @@ static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, | |||
80 | int max_tx; | 80 | int max_tx; |
81 | int i; | 81 | int i; |
82 | 82 | ||
83 | /* Allocate space for the SS endpoint companion descriptor */ | ||
84 | ep->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp), | ||
85 | GFP_KERNEL); | ||
86 | if (!ep->ss_ep_comp) | ||
87 | return -ENOMEM; | ||
88 | desc = (struct usb_ss_ep_comp_descriptor *) buffer; | 83 | desc = (struct usb_ss_ep_comp_descriptor *) buffer; |
89 | if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) { | 84 | if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) { |
90 | dev_warn(ddev, "No SuperSpeed endpoint companion for config %d " | 85 | dev_warn(ddev, "No SuperSpeed endpoint companion for config %d " |
91 | " interface %d altsetting %d ep %d: " | 86 | " interface %d altsetting %d ep %d: " |
92 | "using minimum values\n", | 87 | "using minimum values\n", |
93 | cfgno, inum, asnum, ep->desc.bEndpointAddress); | 88 | cfgno, inum, asnum, ep->desc.bEndpointAddress); |
94 | ep->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE; | ||
95 | ep->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP; | ||
96 | ep->ss_ep_comp->desc.bMaxBurst = 0; | ||
97 | /* | ||
98 | * Leave bmAttributes as zero, which will mean no streams for | ||
99 | * bulk, and isoc won't support multiple bursts of packets. | ||
100 | * With bursts of only one packet, and a Mult of 1, the max | ||
101 | * amount of data moved per endpoint service interval is one | ||
102 | * packet. | ||
103 | */ | ||
104 | if (usb_endpoint_xfer_isoc(&ep->desc) || | ||
105 | usb_endpoint_xfer_int(&ep->desc)) | ||
106 | ep->ss_ep_comp->desc.wBytesPerInterval = | ||
107 | ep->desc.wMaxPacketSize; | ||
108 | /* | 89 | /* |
109 | * The next descriptor is for an Endpoint or Interface, | 90 | * The next descriptor is for an Endpoint or Interface, |
110 | * no extra descriptors to copy into the companion structure, | 91 | * no extra descriptors to copy into the companion structure, |
111 | * and we didn't eat up any of the buffer. | 92 | * and we didn't eat up any of the buffer. |
112 | */ | 93 | */ |
113 | retval = 0; | 94 | return 0; |
114 | goto valid; | ||
115 | } | 95 | } |
116 | memcpy(&ep->ss_ep_comp->desc, desc, USB_DT_SS_EP_COMP_SIZE); | 96 | memcpy(&ep->ss_ep_comp->desc, desc, USB_DT_SS_EP_COMP_SIZE); |
117 | desc = &ep->ss_ep_comp->desc; | 97 | desc = &ep->ss_ep_comp->desc; |
@@ -320,6 +300,28 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, | |||
320 | buffer += i; | 300 | buffer += i; |
321 | size -= i; | 301 | size -= i; |
322 | 302 | ||
303 | /* Allocate space for the SS endpoint companion descriptor */ | ||
304 | endpoint->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp), | ||
305 | GFP_KERNEL); | ||
306 | if (!endpoint->ss_ep_comp) | ||
307 | return -ENOMEM; | ||
308 | |||
309 | /* Fill in some default values (may be overwritten later) */ | ||
310 | endpoint->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE; | ||
311 | endpoint->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP; | ||
312 | endpoint->ss_ep_comp->desc.bMaxBurst = 0; | ||
313 | /* | ||
314 | * Leave bmAttributes as zero, which will mean no streams for | ||
315 | * bulk, and isoc won't support multiple bursts of packets. | ||
316 | * With bursts of only one packet, and a Mult of 1, the max | ||
317 | * amount of data moved per endpoint service interval is one | ||
318 | * packet. | ||
319 | */ | ||
320 | if (usb_endpoint_xfer_isoc(&endpoint->desc) || | ||
321 | usb_endpoint_xfer_int(&endpoint->desc)) | ||
322 | endpoint->ss_ep_comp->desc.wBytesPerInterval = | ||
323 | endpoint->desc.wMaxPacketSize; | ||
324 | |||
323 | if (size > 0) { | 325 | if (size > 0) { |
324 | retval = usb_parse_ss_endpoint_companion(ddev, cfgno, | 326 | retval = usb_parse_ss_endpoint_companion(ddev, cfgno, |
325 | inum, asnum, endpoint, num_ep, buffer, | 327 | inum, asnum, endpoint, num_ep, buffer, |
@@ -329,6 +331,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, | |||
329 | retval = buffer - buffer0; | 331 | retval = buffer - buffer0; |
330 | } | 332 | } |
331 | } else { | 333 | } else { |
334 | dev_warn(ddev, "config %d interface %d altsetting %d " | ||
335 | "endpoint 0x%X has no " | ||
336 | "SuperSpeed companion descriptor\n", | ||
337 | cfgno, inum, asnum, d->bEndpointAddress); | ||
332 | retval = buffer - buffer0; | 338 | retval = buffer - buffer0; |
333 | } | 339 | } |
334 | } else { | 340 | } else { |
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c index dc2ac613a9d1..1d283e1b2b8d 100644 --- a/drivers/usb/host/ehci-orion.c +++ b/drivers/usb/host/ehci-orion.c | |||
@@ -105,6 +105,7 @@ static int ehci_orion_setup(struct usb_hcd *hcd) | |||
105 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | 105 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); |
106 | int retval; | 106 | int retval; |
107 | 107 | ||
108 | ehci_reset(ehci); | ||
108 | retval = ehci_halt(ehci); | 109 | retval = ehci_halt(ehci); |
109 | if (retval) | 110 | if (retval) |
110 | return retval; | 111 | return retval; |
@@ -118,7 +119,6 @@ static int ehci_orion_setup(struct usb_hcd *hcd) | |||
118 | 119 | ||
119 | hcd->has_tt = 1; | 120 | hcd->has_tt = 1; |
120 | 121 | ||
121 | ehci_reset(ehci); | ||
122 | ehci_port_power(ehci, 0); | 122 | ehci_port_power(ehci, 0); |
123 | 123 | ||
124 | return retval; | 124 | return retval; |
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c index f3aaba35e912..83cbecd2a1ed 100644 --- a/drivers/usb/host/ohci-omap.c +++ b/drivers/usb/host/ohci-omap.c | |||
@@ -282,6 +282,7 @@ static int ohci_omap_init(struct usb_hcd *hcd) | |||
282 | static void ohci_omap_stop(struct usb_hcd *hcd) | 282 | static void ohci_omap_stop(struct usb_hcd *hcd) |
283 | { | 283 | { |
284 | dev_dbg(hcd->self.controller, "stopping USB Controller\n"); | 284 | dev_dbg(hcd->self.controller, "stopping USB Controller\n"); |
285 | ohci_stop(hcd); | ||
285 | omap_ohci_clock_power(0); | 286 | omap_ohci_clock_power(0); |
286 | } | 287 | } |
287 | 288 | ||
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index 2501c571f855..705e34324156 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c | |||
@@ -173,6 +173,7 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int | |||
173 | { | 173 | { |
174 | void *addr; | 174 | void *addr; |
175 | u32 temp; | 175 | u32 temp; |
176 | u64 temp_64; | ||
176 | 177 | ||
177 | addr = &ir_set->irq_pending; | 178 | addr = &ir_set->irq_pending; |
178 | temp = xhci_readl(xhci, addr); | 179 | temp = xhci_readl(xhci, addr); |
@@ -200,25 +201,15 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int | |||
200 | xhci_dbg(xhci, " WARN: %p: ir_set.rsvd = 0x%x\n", | 201 | xhci_dbg(xhci, " WARN: %p: ir_set.rsvd = 0x%x\n", |
201 | addr, (unsigned int)temp); | 202 | addr, (unsigned int)temp); |
202 | 203 | ||
203 | addr = &ir_set->erst_base[0]; | 204 | addr = &ir_set->erst_base; |
204 | temp = xhci_readl(xhci, addr); | 205 | temp_64 = xhci_read_64(xhci, addr); |
205 | xhci_dbg(xhci, " %p: ir_set.erst_base[0] = 0x%x\n", | 206 | xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n", |
206 | addr, (unsigned int) temp); | 207 | addr, temp_64); |
207 | |||
208 | addr = &ir_set->erst_base[1]; | ||
209 | temp = xhci_readl(xhci, addr); | ||
210 | xhci_dbg(xhci, " %p: ir_set.erst_base[1] = 0x%x\n", | ||
211 | addr, (unsigned int) temp); | ||
212 | 208 | ||
213 | addr = &ir_set->erst_dequeue[0]; | 209 | addr = &ir_set->erst_dequeue; |
214 | temp = xhci_readl(xhci, addr); | 210 | temp_64 = xhci_read_64(xhci, addr); |
215 | xhci_dbg(xhci, " %p: ir_set.erst_dequeue[0] = 0x%x\n", | 211 | xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n", |
216 | addr, (unsigned int) temp); | 212 | addr, temp_64); |
217 | |||
218 | addr = &ir_set->erst_dequeue[1]; | ||
219 | temp = xhci_readl(xhci, addr); | ||
220 | xhci_dbg(xhci, " %p: ir_set.erst_dequeue[1] = 0x%x\n", | ||
221 | addr, (unsigned int) temp); | ||
222 | } | 213 | } |
223 | 214 | ||
224 | void xhci_print_run_regs(struct xhci_hcd *xhci) | 215 | void xhci_print_run_regs(struct xhci_hcd *xhci) |
@@ -268,8 +259,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb) | |||
268 | xhci_dbg(xhci, "Link TRB:\n"); | 259 | xhci_dbg(xhci, "Link TRB:\n"); |
269 | xhci_print_trb_offsets(xhci, trb); | 260 | xhci_print_trb_offsets(xhci, trb); |
270 | 261 | ||
271 | address = trb->link.segment_ptr[0] + | 262 | address = trb->link.segment_ptr; |
272 | (((u64) trb->link.segment_ptr[1]) << 32); | ||
273 | xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address); | 263 | xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address); |
274 | 264 | ||
275 | xhci_dbg(xhci, "Interrupter target = 0x%x\n", | 265 | xhci_dbg(xhci, "Interrupter target = 0x%x\n", |
@@ -282,8 +272,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb) | |||
282 | (unsigned int) (trb->link.control & TRB_NO_SNOOP)); | 272 | (unsigned int) (trb->link.control & TRB_NO_SNOOP)); |
283 | break; | 273 | break; |
284 | case TRB_TYPE(TRB_TRANSFER): | 274 | case TRB_TYPE(TRB_TRANSFER): |
285 | address = trb->trans_event.buffer[0] + | 275 | address = trb->trans_event.buffer; |
286 | (((u64) trb->trans_event.buffer[1]) << 32); | ||
287 | /* | 276 | /* |
288 | * FIXME: look at flags to figure out if it's an address or if | 277 | * FIXME: look at flags to figure out if it's an address or if |
289 | * the data is directly in the buffer field. | 278 | * the data is directly in the buffer field. |
@@ -291,8 +280,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb) | |||
291 | xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address); | 280 | xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address); |
292 | break; | 281 | break; |
293 | case TRB_TYPE(TRB_COMPLETION): | 282 | case TRB_TYPE(TRB_COMPLETION): |
294 | address = trb->event_cmd.cmd_trb[0] + | 283 | address = trb->event_cmd.cmd_trb; |
295 | (((u64) trb->event_cmd.cmd_trb[1]) << 32); | ||
296 | xhci_dbg(xhci, "Command TRB pointer = %llu\n", address); | 284 | xhci_dbg(xhci, "Command TRB pointer = %llu\n", address); |
297 | xhci_dbg(xhci, "Completion status = %u\n", | 285 | xhci_dbg(xhci, "Completion status = %u\n", |
298 | (unsigned int) GET_COMP_CODE(trb->event_cmd.status)); | 286 | (unsigned int) GET_COMP_CODE(trb->event_cmd.status)); |
@@ -328,8 +316,8 @@ void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg) | |||
328 | for (i = 0; i < TRBS_PER_SEGMENT; ++i) { | 316 | for (i = 0; i < TRBS_PER_SEGMENT; ++i) { |
329 | trb = &seg->trbs[i]; | 317 | trb = &seg->trbs[i]; |
330 | xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr, | 318 | xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr, |
331 | (unsigned int) trb->link.segment_ptr[0], | 319 | lower_32_bits(trb->link.segment_ptr), |
332 | (unsigned int) trb->link.segment_ptr[1], | 320 | upper_32_bits(trb->link.segment_ptr), |
333 | (unsigned int) trb->link.intr_target, | 321 | (unsigned int) trb->link.intr_target, |
334 | (unsigned int) trb->link.control); | 322 | (unsigned int) trb->link.control); |
335 | addr += sizeof(*trb); | 323 | addr += sizeof(*trb); |
@@ -386,8 +374,8 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst) | |||
386 | entry = &erst->entries[i]; | 374 | entry = &erst->entries[i]; |
387 | xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", | 375 | xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", |
388 | (unsigned int) addr, | 376 | (unsigned int) addr, |
389 | (unsigned int) entry->seg_addr[0], | 377 | lower_32_bits(entry->seg_addr), |
390 | (unsigned int) entry->seg_addr[1], | 378 | upper_32_bits(entry->seg_addr), |
391 | (unsigned int) entry->seg_size, | 379 | (unsigned int) entry->seg_size, |
392 | (unsigned int) entry->rsvd); | 380 | (unsigned int) entry->rsvd); |
393 | addr += sizeof(*entry); | 381 | addr += sizeof(*entry); |
@@ -396,90 +384,147 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst) | |||
396 | 384 | ||
397 | void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci) | 385 | void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci) |
398 | { | 386 | { |
399 | u32 val; | 387 | u64 val; |
400 | 388 | ||
401 | val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]); | 389 | val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); |
402 | xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = 0x%x\n", val); | 390 | xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n", |
403 | val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]); | 391 | lower_32_bits(val)); |
404 | xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val); | 392 | xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n", |
393 | upper_32_bits(val)); | ||
405 | } | 394 | } |
406 | 395 | ||
407 | void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep) | 396 | /* Print the last 32 bytes for 64-byte contexts */ |
397 | static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma) | ||
398 | { | ||
399 | int i; | ||
400 | for (i = 0; i < 4; ++i) { | ||
401 | xhci_dbg(xhci, "@%p (virt) @%08llx " | ||
402 | "(dma) %#08llx - rsvd64[%d]\n", | ||
403 | &ctx[4 + i], (unsigned long long)dma, | ||
404 | ctx[4 + i], i); | ||
405 | dma += 8; | ||
406 | } | ||
407 | } | ||
408 | |||
409 | void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) | ||
408 | { | 410 | { |
409 | int i, j; | ||
410 | int last_ep_ctx = 31; | ||
411 | /* Fields are 32 bits wide, DMA addresses are in bytes */ | 411 | /* Fields are 32 bits wide, DMA addresses are in bytes */ |
412 | int field_size = 32 / 8; | 412 | int field_size = 32 / 8; |
413 | int i; | ||
413 | 414 | ||
414 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n", | 415 | struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx); |
415 | &ctx->drop_flags, (unsigned long long)dma, | 416 | dma_addr_t dma = ctx->dma + ((unsigned long)slot_ctx - (unsigned long)ctx); |
416 | ctx->drop_flags); | 417 | int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params); |
417 | dma += field_size; | ||
418 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n", | ||
419 | &ctx->add_flags, (unsigned long long)dma, | ||
420 | ctx->add_flags); | ||
421 | dma += field_size; | ||
422 | for (i = 0; i > 6; ++i) { | ||
423 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n", | ||
424 | &ctx->rsvd[i], (unsigned long long)dma, | ||
425 | ctx->rsvd[i], i); | ||
426 | dma += field_size; | ||
427 | } | ||
428 | 418 | ||
429 | xhci_dbg(xhci, "Slot Context:\n"); | 419 | xhci_dbg(xhci, "Slot Context:\n"); |
430 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n", | 420 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n", |
431 | &ctx->slot.dev_info, | 421 | &slot_ctx->dev_info, |
432 | (unsigned long long)dma, ctx->slot.dev_info); | 422 | (unsigned long long)dma, slot_ctx->dev_info); |
433 | dma += field_size; | 423 | dma += field_size; |
434 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n", | 424 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n", |
435 | &ctx->slot.dev_info2, | 425 | &slot_ctx->dev_info2, |
436 | (unsigned long long)dma, ctx->slot.dev_info2); | 426 | (unsigned long long)dma, slot_ctx->dev_info2); |
437 | dma += field_size; | 427 | dma += field_size; |
438 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n", | 428 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n", |
439 | &ctx->slot.tt_info, | 429 | &slot_ctx->tt_info, |
440 | (unsigned long long)dma, ctx->slot.tt_info); | 430 | (unsigned long long)dma, slot_ctx->tt_info); |
441 | dma += field_size; | 431 | dma += field_size; |
442 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n", | 432 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n", |
443 | &ctx->slot.dev_state, | 433 | &slot_ctx->dev_state, |
444 | (unsigned long long)dma, ctx->slot.dev_state); | 434 | (unsigned long long)dma, slot_ctx->dev_state); |
445 | dma += field_size; | 435 | dma += field_size; |
446 | for (i = 0; i > 4; ++i) { | 436 | for (i = 0; i < 4; ++i) { |
447 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n", | 437 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n", |
448 | &ctx->slot.reserved[i], (unsigned long long)dma, | 438 | &slot_ctx->reserved[i], (unsigned long long)dma, |
449 | ctx->slot.reserved[i], i); | 439 | slot_ctx->reserved[i], i); |
450 | dma += field_size; | 440 | dma += field_size; |
451 | } | 441 | } |
452 | 442 | ||
443 | if (csz) | ||
444 | dbg_rsvd64(xhci, (u64 *)slot_ctx, dma); | ||
445 | } | ||
446 | |||
447 | void xhci_dbg_ep_ctx(struct xhci_hcd *xhci, | ||
448 | struct xhci_container_ctx *ctx, | ||
449 | unsigned int last_ep) | ||
450 | { | ||
451 | int i, j; | ||
452 | int last_ep_ctx = 31; | ||
453 | /* Fields are 32 bits wide, DMA addresses are in bytes */ | ||
454 | int field_size = 32 / 8; | ||
455 | int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params); | ||
456 | |||
453 | if (last_ep < 31) | 457 | if (last_ep < 31) |
454 | last_ep_ctx = last_ep + 1; | 458 | last_ep_ctx = last_ep + 1; |
455 | for (i = 0; i < last_ep_ctx; ++i) { | 459 | for (i = 0; i < last_ep_ctx; ++i) { |
460 | struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i); | ||
461 | dma_addr_t dma = ctx->dma + | ||
462 | ((unsigned long)ep_ctx - (unsigned long)ctx); | ||
463 | |||
456 | xhci_dbg(xhci, "Endpoint %02d Context:\n", i); | 464 | xhci_dbg(xhci, "Endpoint %02d Context:\n", i); |
457 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n", | 465 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n", |
458 | &ctx->ep[i].ep_info, | 466 | &ep_ctx->ep_info, |
459 | (unsigned long long)dma, ctx->ep[i].ep_info); | 467 | (unsigned long long)dma, ep_ctx->ep_info); |
460 | dma += field_size; | 468 | dma += field_size; |
461 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n", | 469 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n", |
462 | &ctx->ep[i].ep_info2, | 470 | &ep_ctx->ep_info2, |
463 | (unsigned long long)dma, ctx->ep[i].ep_info2); | 471 | (unsigned long long)dma, ep_ctx->ep_info2); |
464 | dma += field_size; | ||
465 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[0]\n", | ||
466 | &ctx->ep[i].deq[0], | ||
467 | (unsigned long long)dma, ctx->ep[i].deq[0]); | ||
468 | dma += field_size; | ||
469 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[1]\n", | ||
470 | &ctx->ep[i].deq[1], | ||
471 | (unsigned long long)dma, ctx->ep[i].deq[1]); | ||
472 | dma += field_size; | 472 | dma += field_size; |
473 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n", | ||
474 | &ep_ctx->deq, | ||
475 | (unsigned long long)dma, ep_ctx->deq); | ||
476 | dma += 2*field_size; | ||
473 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n", | 477 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n", |
474 | &ctx->ep[i].tx_info, | 478 | &ep_ctx->tx_info, |
475 | (unsigned long long)dma, ctx->ep[i].tx_info); | 479 | (unsigned long long)dma, ep_ctx->tx_info); |
476 | dma += field_size; | 480 | dma += field_size; |
477 | for (j = 0; j < 3; ++j) { | 481 | for (j = 0; j < 3; ++j) { |
478 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n", | 482 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n", |
479 | &ctx->ep[i].reserved[j], | 483 | &ep_ctx->reserved[j], |
480 | (unsigned long long)dma, | 484 | (unsigned long long)dma, |
481 | ctx->ep[i].reserved[j], j); | 485 | ep_ctx->reserved[j], j); |
486 | dma += field_size; | ||
487 | } | ||
488 | |||
489 | if (csz) | ||
490 | dbg_rsvd64(xhci, (u64 *)ep_ctx, dma); | ||
491 | } | ||
492 | } | ||
493 | |||
494 | void xhci_dbg_ctx(struct xhci_hcd *xhci, | ||
495 | struct xhci_container_ctx *ctx, | ||
496 | unsigned int last_ep) | ||
497 | { | ||
498 | int i; | ||
499 | /* Fields are 32 bits wide, DMA addresses are in bytes */ | ||
500 | int field_size = 32 / 8; | ||
501 | struct xhci_slot_ctx *slot_ctx; | ||
502 | dma_addr_t dma = ctx->dma; | ||
503 | int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params); | ||
504 | |||
505 | if (ctx->type == XHCI_CTX_TYPE_INPUT) { | ||
506 | struct xhci_input_control_ctx *ctrl_ctx = | ||
507 | xhci_get_input_control_ctx(xhci, ctx); | ||
508 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n", | ||
509 | &ctrl_ctx->drop_flags, (unsigned long long)dma, | ||
510 | ctrl_ctx->drop_flags); | ||
511 | dma += field_size; | ||
512 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n", | ||
513 | &ctrl_ctx->add_flags, (unsigned long long)dma, | ||
514 | ctrl_ctx->add_flags); | ||
515 | dma += field_size; | ||
516 | for (i = 0; i < 6; ++i) { | ||
517 | xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n", | ||
518 | &ctrl_ctx->rsvd2[i], (unsigned long long)dma, | ||
519 | ctrl_ctx->rsvd2[i], i); | ||
482 | dma += field_size; | 520 | dma += field_size; |
483 | } | 521 | } |
522 | |||
523 | if (csz) | ||
524 | dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma); | ||
484 | } | 525 | } |
526 | |||
527 | slot_ctx = xhci_get_slot_ctx(xhci, ctx); | ||
528 | xhci_dbg_slot_ctx(xhci, ctx); | ||
529 | xhci_dbg_ep_ctx(xhci, ctx, last_ep); | ||
485 | } | 530 | } |
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c index dba3e07ccd09..816c39caca1c 100644 --- a/drivers/usb/host/xhci-hcd.c +++ b/drivers/usb/host/xhci-hcd.c | |||
@@ -103,7 +103,10 @@ int xhci_reset(struct xhci_hcd *xhci) | |||
103 | u32 state; | 103 | u32 state; |
104 | 104 | ||
105 | state = xhci_readl(xhci, &xhci->op_regs->status); | 105 | state = xhci_readl(xhci, &xhci->op_regs->status); |
106 | BUG_ON((state & STS_HALT) == 0); | 106 | if ((state & STS_HALT) == 0) { |
107 | xhci_warn(xhci, "Host controller not halted, aborting reset.\n"); | ||
108 | return 0; | ||
109 | } | ||
107 | 110 | ||
108 | xhci_dbg(xhci, "// Reset the HC\n"); | 111 | xhci_dbg(xhci, "// Reset the HC\n"); |
109 | command = xhci_readl(xhci, &xhci->op_regs->command); | 112 | command = xhci_readl(xhci, &xhci->op_regs->command); |
@@ -226,6 +229,7 @@ int xhci_init(struct usb_hcd *hcd) | |||
226 | static void xhci_work(struct xhci_hcd *xhci) | 229 | static void xhci_work(struct xhci_hcd *xhci) |
227 | { | 230 | { |
228 | u32 temp; | 231 | u32 temp; |
232 | u64 temp_64; | ||
229 | 233 | ||
230 | /* | 234 | /* |
231 | * Clear the op reg interrupt status first, | 235 | * Clear the op reg interrupt status first, |
@@ -248,9 +252,9 @@ static void xhci_work(struct xhci_hcd *xhci) | |||
248 | /* FIXME this should be a delayed service routine that clears the EHB */ | 252 | /* FIXME this should be a delayed service routine that clears the EHB */ |
249 | xhci_handle_event(xhci); | 253 | xhci_handle_event(xhci); |
250 | 254 | ||
251 | /* Clear the event handler busy flag; the event ring should be empty. */ | 255 | /* Clear the event handler busy flag (RW1C); the event ring should be empty. */ |
252 | temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); | 256 | temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); |
253 | xhci_writel(xhci, temp & ~ERST_EHB, &xhci->ir_set->erst_dequeue[0]); | 257 | xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue); |
254 | /* Flush posted writes -- FIXME is this necessary? */ | 258 | /* Flush posted writes -- FIXME is this necessary? */ |
255 | xhci_readl(xhci, &xhci->ir_set->irq_pending); | 259 | xhci_readl(xhci, &xhci->ir_set->irq_pending); |
256 | } | 260 | } |
@@ -266,19 +270,34 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) | |||
266 | { | 270 | { |
267 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 271 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
268 | u32 temp, temp2; | 272 | u32 temp, temp2; |
273 | union xhci_trb *trb; | ||
269 | 274 | ||
270 | spin_lock(&xhci->lock); | 275 | spin_lock(&xhci->lock); |
276 | trb = xhci->event_ring->dequeue; | ||
271 | /* Check if the xHC generated the interrupt, or the irq is shared */ | 277 | /* Check if the xHC generated the interrupt, or the irq is shared */ |
272 | temp = xhci_readl(xhci, &xhci->op_regs->status); | 278 | temp = xhci_readl(xhci, &xhci->op_regs->status); |
273 | temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending); | 279 | temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending); |
280 | if (temp == 0xffffffff && temp2 == 0xffffffff) | ||
281 | goto hw_died; | ||
282 | |||
274 | if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) { | 283 | if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) { |
275 | spin_unlock(&xhci->lock); | 284 | spin_unlock(&xhci->lock); |
276 | return IRQ_NONE; | 285 | return IRQ_NONE; |
277 | } | 286 | } |
287 | xhci_dbg(xhci, "op reg status = %08x\n", temp); | ||
288 | xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2); | ||
289 | xhci_dbg(xhci, "Event ring dequeue ptr:\n"); | ||
290 | xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n", | ||
291 | (unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb), | ||
292 | lower_32_bits(trb->link.segment_ptr), | ||
293 | upper_32_bits(trb->link.segment_ptr), | ||
294 | (unsigned int) trb->link.intr_target, | ||
295 | (unsigned int) trb->link.control); | ||
278 | 296 | ||
279 | if (temp & STS_FATAL) { | 297 | if (temp & STS_FATAL) { |
280 | xhci_warn(xhci, "WARNING: Host System Error\n"); | 298 | xhci_warn(xhci, "WARNING: Host System Error\n"); |
281 | xhci_halt(xhci); | 299 | xhci_halt(xhci); |
300 | hw_died: | ||
282 | xhci_to_hcd(xhci)->state = HC_STATE_HALT; | 301 | xhci_to_hcd(xhci)->state = HC_STATE_HALT; |
283 | spin_unlock(&xhci->lock); | 302 | spin_unlock(&xhci->lock); |
284 | return -ESHUTDOWN; | 303 | return -ESHUTDOWN; |
@@ -295,6 +314,7 @@ void xhci_event_ring_work(unsigned long arg) | |||
295 | { | 314 | { |
296 | unsigned long flags; | 315 | unsigned long flags; |
297 | int temp; | 316 | int temp; |
317 | u64 temp_64; | ||
298 | struct xhci_hcd *xhci = (struct xhci_hcd *) arg; | 318 | struct xhci_hcd *xhci = (struct xhci_hcd *) arg; |
299 | int i, j; | 319 | int i, j; |
300 | 320 | ||
@@ -311,9 +331,9 @@ void xhci_event_ring_work(unsigned long arg) | |||
311 | xhci_dbg(xhci, "Event ring:\n"); | 331 | xhci_dbg(xhci, "Event ring:\n"); |
312 | xhci_debug_segment(xhci, xhci->event_ring->deq_seg); | 332 | xhci_debug_segment(xhci, xhci->event_ring->deq_seg); |
313 | xhci_dbg_ring_ptrs(xhci, xhci->event_ring); | 333 | xhci_dbg_ring_ptrs(xhci, xhci->event_ring); |
314 | temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); | 334 | temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); |
315 | temp &= ERST_PTR_MASK; | 335 | temp_64 &= ~ERST_PTR_MASK; |
316 | xhci_dbg(xhci, "ERST deq = 0x%x\n", temp); | 336 | xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64); |
317 | xhci_dbg(xhci, "Command ring:\n"); | 337 | xhci_dbg(xhci, "Command ring:\n"); |
318 | xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg); | 338 | xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg); |
319 | xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); | 339 | xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); |
@@ -356,6 +376,7 @@ void xhci_event_ring_work(unsigned long arg) | |||
356 | int xhci_run(struct usb_hcd *hcd) | 376 | int xhci_run(struct usb_hcd *hcd) |
357 | { | 377 | { |
358 | u32 temp; | 378 | u32 temp; |
379 | u64 temp_64; | ||
359 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 380 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
360 | void (*doorbell)(struct xhci_hcd *) = NULL; | 381 | void (*doorbell)(struct xhci_hcd *) = NULL; |
361 | 382 | ||
@@ -382,6 +403,20 @@ int xhci_run(struct usb_hcd *hcd) | |||
382 | add_timer(&xhci->event_ring_timer); | 403 | add_timer(&xhci->event_ring_timer); |
383 | #endif | 404 | #endif |
384 | 405 | ||
406 | xhci_dbg(xhci, "Command ring memory map follows:\n"); | ||
407 | xhci_debug_ring(xhci, xhci->cmd_ring); | ||
408 | xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); | ||
409 | xhci_dbg_cmd_ptrs(xhci); | ||
410 | |||
411 | xhci_dbg(xhci, "ERST memory map follows:\n"); | ||
412 | xhci_dbg_erst(xhci, &xhci->erst); | ||
413 | xhci_dbg(xhci, "Event ring:\n"); | ||
414 | xhci_debug_ring(xhci, xhci->event_ring); | ||
415 | xhci_dbg_ring_ptrs(xhci, xhci->event_ring); | ||
416 | temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); | ||
417 | temp_64 &= ~ERST_PTR_MASK; | ||
418 | xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64); | ||
419 | |||
385 | xhci_dbg(xhci, "// Set the interrupt modulation register\n"); | 420 | xhci_dbg(xhci, "// Set the interrupt modulation register\n"); |
386 | temp = xhci_readl(xhci, &xhci->ir_set->irq_control); | 421 | temp = xhci_readl(xhci, &xhci->ir_set->irq_control); |
387 | temp &= ~ER_IRQ_INTERVAL_MASK; | 422 | temp &= ~ER_IRQ_INTERVAL_MASK; |
@@ -406,22 +441,6 @@ int xhci_run(struct usb_hcd *hcd) | |||
406 | if (NUM_TEST_NOOPS > 0) | 441 | if (NUM_TEST_NOOPS > 0) |
407 | doorbell = xhci_setup_one_noop(xhci); | 442 | doorbell = xhci_setup_one_noop(xhci); |
408 | 443 | ||
409 | xhci_dbg(xhci, "Command ring memory map follows:\n"); | ||
410 | xhci_debug_ring(xhci, xhci->cmd_ring); | ||
411 | xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); | ||
412 | xhci_dbg_cmd_ptrs(xhci); | ||
413 | |||
414 | xhci_dbg(xhci, "ERST memory map follows:\n"); | ||
415 | xhci_dbg_erst(xhci, &xhci->erst); | ||
416 | xhci_dbg(xhci, "Event ring:\n"); | ||
417 | xhci_debug_ring(xhci, xhci->event_ring); | ||
418 | xhci_dbg_ring_ptrs(xhci, xhci->event_ring); | ||
419 | temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); | ||
420 | temp &= ERST_PTR_MASK; | ||
421 | xhci_dbg(xhci, "ERST deq = 0x%x\n", temp); | ||
422 | temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[1]); | ||
423 | xhci_dbg(xhci, "ERST deq upper = 0x%x\n", temp); | ||
424 | |||
425 | temp = xhci_readl(xhci, &xhci->op_regs->command); | 444 | temp = xhci_readl(xhci, &xhci->op_regs->command); |
426 | temp |= (CMD_RUN); | 445 | temp |= (CMD_RUN); |
427 | xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n", | 446 | xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n", |
@@ -601,10 +620,13 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) | |||
601 | goto exit; | 620 | goto exit; |
602 | } | 621 | } |
603 | if (usb_endpoint_xfer_control(&urb->ep->desc)) | 622 | if (usb_endpoint_xfer_control(&urb->ep->desc)) |
604 | ret = xhci_queue_ctrl_tx(xhci, mem_flags, urb, | 623 | /* We have a spinlock and interrupts disabled, so we must pass |
624 | * atomic context to this function, which may allocate memory. | ||
625 | */ | ||
626 | ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, | ||
605 | slot_id, ep_index); | 627 | slot_id, ep_index); |
606 | else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) | 628 | else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) |
607 | ret = xhci_queue_bulk_tx(xhci, mem_flags, urb, | 629 | ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, |
608 | slot_id, ep_index); | 630 | slot_id, ep_index); |
609 | else | 631 | else |
610 | ret = -EINVAL; | 632 | ret = -EINVAL; |
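Note on the switch from the caller-supplied mem_flags to GFP_ATOMIC: as the new comment says, xhci->lock is held with interrupts disabled while the transfer is queued, and the queueing helpers may allocate memory, so only non-sleeping allocations are legal on this path. A toy model of that rule, with an assertion standing in for the kernel's might_sleep() machinery and all names invented for the sketch:

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>

/* Once a spinlock has been taken with interrupts disabled, any allocation
 * on that path must be non-sleeping.  Nothing here is kernel API. */
static bool in_atomic_context;

static void toy_spin_lock_irqsave(void)      { in_atomic_context = true;  }
static void toy_spin_unlock_irqrestore(void) { in_atomic_context = false; }

static void *toy_alloc(size_t size, bool may_sleep)
{
	/* A sleeping (GFP_KERNEL-style) allocation under the lock is a bug. */
	assert(!(may_sleep && in_atomic_context));
	return malloc(size);
}

int main(void)
{
	void *trb;

	toy_spin_lock_irqsave();
	trb = toy_alloc(64, false);	/* GFP_ATOMIC analogue: allowed */
	toy_spin_unlock_irqrestore();
	free(trb);
	return 0;
}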
@@ -661,8 +683,12 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |||
661 | goto done; | 683 | goto done; |
662 | 684 | ||
663 | xhci_dbg(xhci, "Cancel URB %p\n", urb); | 685 | xhci_dbg(xhci, "Cancel URB %p\n", urb); |
686 | xhci_dbg(xhci, "Event ring:\n"); | ||
687 | xhci_debug_ring(xhci, xhci->event_ring); | ||
664 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); | 688 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); |
665 | ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index]; | 689 | ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index]; |
690 | xhci_dbg(xhci, "Endpoint ring:\n"); | ||
691 | xhci_debug_ring(xhci, ep_ring); | ||
666 | td = (struct xhci_td *) urb->hcpriv; | 692 | td = (struct xhci_td *) urb->hcpriv; |
667 | 693 | ||
668 | ep_ring->cancels_pending++; | 694 | ep_ring->cancels_pending++; |
@@ -696,7 +722,9 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |||
696 | struct usb_host_endpoint *ep) | 722 | struct usb_host_endpoint *ep) |
697 | { | 723 | { |
698 | struct xhci_hcd *xhci; | 724 | struct xhci_hcd *xhci; |
699 | struct xhci_device_control *in_ctx; | 725 | struct xhci_container_ctx *in_ctx, *out_ctx; |
726 | struct xhci_input_control_ctx *ctrl_ctx; | ||
727 | struct xhci_slot_ctx *slot_ctx; | ||
700 | unsigned int last_ctx; | 728 | unsigned int last_ctx; |
701 | unsigned int ep_index; | 729 | unsigned int ep_index; |
702 | struct xhci_ep_ctx *ep_ctx; | 730 | struct xhci_ep_ctx *ep_ctx; |
@@ -724,31 +752,34 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |||
724 | } | 752 | } |
725 | 753 | ||
726 | in_ctx = xhci->devs[udev->slot_id]->in_ctx; | 754 | in_ctx = xhci->devs[udev->slot_id]->in_ctx; |
755 | out_ctx = xhci->devs[udev->slot_id]->out_ctx; | ||
756 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); | ||
727 | ep_index = xhci_get_endpoint_index(&ep->desc); | 757 | ep_index = xhci_get_endpoint_index(&ep->desc); |
728 | ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index]; | 758 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); |
729 | /* If the HC already knows the endpoint is disabled, | 759 | /* If the HC already knows the endpoint is disabled, |
730 | * or the HCD has noted it is disabled, ignore this request | 760 | * or the HCD has noted it is disabled, ignore this request |
731 | */ | 761 | */ |
732 | if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED || | 762 | if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED || |
733 | in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) { | 763 | ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) { |
734 | xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", | 764 | xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", |
735 | __func__, ep); | 765 | __func__, ep); |
736 | return 0; | 766 | return 0; |
737 | } | 767 | } |
738 | 768 | ||
739 | in_ctx->drop_flags |= drop_flag; | 769 | ctrl_ctx->drop_flags |= drop_flag; |
740 | new_drop_flags = in_ctx->drop_flags; | 770 | new_drop_flags = ctrl_ctx->drop_flags; |
741 | 771 | ||
742 | in_ctx->add_flags = ~drop_flag; | 772 | ctrl_ctx->add_flags = ~drop_flag; |
743 | new_add_flags = in_ctx->add_flags; | 773 | new_add_flags = ctrl_ctx->add_flags; |
744 | 774 | ||
745 | last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags); | 775 | last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags); |
776 | slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); | ||
746 | /* Update the last valid endpoint context, if we deleted the last one */ | 777 | /* Update the last valid endpoint context, if we deleted the last one */ |
747 | if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) { | 778 | if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) { |
748 | in_ctx->slot.dev_info &= ~LAST_CTX_MASK; | 779 | slot_ctx->dev_info &= ~LAST_CTX_MASK; |
749 | in_ctx->slot.dev_info |= LAST_CTX(last_ctx); | 780 | slot_ctx->dev_info |= LAST_CTX(last_ctx); |
750 | } | 781 | } |
751 | new_slot_info = in_ctx->slot.dev_info; | 782 | new_slot_info = slot_ctx->dev_info; |
752 | 783 | ||
753 | xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); | 784 | xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); |
754 | 785 | ||
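Note on the dev_info update in this hunk: it maintains the slot context's "context entries" field, which records the index of the last valid endpoint context; dropping the highest-numbered endpoint shrinks it, and adding a higher one (in the next hunk) grows it. A small sketch of the shrink case, with the field position treated as illustrative rather than quoted from xhci.h:

#include <stdint.h>
#include <stdio.h>

#define TOY_LAST_CTX(p)		((uint32_t)(p) << 27)
#define TOY_LAST_CTX_MASK	TOY_LAST_CTX(0x1f)

static uint32_t shrink_last_ctx(uint32_t dev_info, unsigned int last_valid)
{
	/* Only rewrite the field if it currently claims more contexts than
	 * remain valid, exactly as the drop path does. */
	if ((dev_info & TOY_LAST_CTX_MASK) > TOY_LAST_CTX(last_valid)) {
		dev_info &= ~TOY_LAST_CTX_MASK;
		dev_info |= TOY_LAST_CTX(last_valid);
	}
	return dev_info;
}

int main(void)
{
	uint32_t dev_info = TOY_LAST_CTX(5);	 /* endpoint contexts up to 5 were valid */
	dev_info = shrink_last_ctx(dev_info, 3); /* the two highest were dropped */
	printf("last valid context = %u\n", dev_info >> 27);
	return 0;
}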
@@ -778,17 +809,22 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |||
778 | struct usb_host_endpoint *ep) | 809 | struct usb_host_endpoint *ep) |
779 | { | 810 | { |
780 | struct xhci_hcd *xhci; | 811 | struct xhci_hcd *xhci; |
781 | struct xhci_device_control *in_ctx; | 812 | struct xhci_container_ctx *in_ctx, *out_ctx; |
782 | unsigned int ep_index; | 813 | unsigned int ep_index; |
783 | struct xhci_ep_ctx *ep_ctx; | 814 | struct xhci_ep_ctx *ep_ctx; |
815 | struct xhci_slot_ctx *slot_ctx; | ||
816 | struct xhci_input_control_ctx *ctrl_ctx; | ||
784 | u32 added_ctxs; | 817 | u32 added_ctxs; |
785 | unsigned int last_ctx; | 818 | unsigned int last_ctx; |
786 | u32 new_add_flags, new_drop_flags, new_slot_info; | 819 | u32 new_add_flags, new_drop_flags, new_slot_info; |
787 | int ret = 0; | 820 | int ret = 0; |
788 | 821 | ||
789 | ret = xhci_check_args(hcd, udev, ep, 1, __func__); | 822 | ret = xhci_check_args(hcd, udev, ep, 1, __func__); |
790 | if (ret <= 0) | 823 | if (ret <= 0) { |
824 | /* So we won't queue a reset ep command for a root hub */ | ||
825 | ep->hcpriv = NULL; | ||
791 | return ret; | 826 | return ret; |
827 | } | ||
792 | xhci = hcd_to_xhci(hcd); | 828 | xhci = hcd_to_xhci(hcd); |
793 | 829 | ||
794 | added_ctxs = xhci_get_endpoint_flag(&ep->desc); | 830 | added_ctxs = xhci_get_endpoint_flag(&ep->desc); |
@@ -810,12 +846,14 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |||
810 | } | 846 | } |
811 | 847 | ||
812 | in_ctx = xhci->devs[udev->slot_id]->in_ctx; | 848 | in_ctx = xhci->devs[udev->slot_id]->in_ctx; |
849 | out_ctx = xhci->devs[udev->slot_id]->out_ctx; | ||
850 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); | ||
813 | ep_index = xhci_get_endpoint_index(&ep->desc); | 851 | ep_index = xhci_get_endpoint_index(&ep->desc); |
814 | ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index]; | 852 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); |
815 | /* If the HCD has already noted the endpoint is enabled, | 853 | /* If the HCD has already noted the endpoint is enabled, |
816 | * ignore this request. | 854 | * ignore this request. |
817 | */ | 855 | */ |
818 | if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) { | 856 | if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) { |
819 | xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", | 857 | xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", |
820 | __func__, ep); | 858 | __func__, ep); |
821 | return 0; | 859 | return 0; |
@@ -833,8 +871,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |||
833 | return -ENOMEM; | 871 | return -ENOMEM; |
834 | } | 872 | } |
835 | 873 | ||
836 | in_ctx->add_flags |= added_ctxs; | 874 | ctrl_ctx->add_flags |= added_ctxs; |
837 | new_add_flags = in_ctx->add_flags; | 875 | new_add_flags = ctrl_ctx->add_flags; |
838 | 876 | ||
839 | /* If xhci_endpoint_disable() was called for this endpoint, but the | 877 | /* If xhci_endpoint_disable() was called for this endpoint, but the |
840 | * xHC hasn't been notified yet through the check_bandwidth() call, | 878 | * xHC hasn't been notified yet through the check_bandwidth() call, |
@@ -842,14 +880,18 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |||
842 | * descriptors. We must drop and re-add this endpoint, so we leave the | 880 | * descriptors. We must drop and re-add this endpoint, so we leave the |
843 | * drop flags alone. | 881 | * drop flags alone. |
844 | */ | 882 | */ |
845 | new_drop_flags = in_ctx->drop_flags; | 883 | new_drop_flags = ctrl_ctx->drop_flags; |
846 | 884 | ||
885 | slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); | ||
847 | /* Update the last valid endpoint context, if we just added one past */ | 886 | /* Update the last valid endpoint context, if we just added one past */ |
848 | if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) { | 887 | if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) { |
849 | in_ctx->slot.dev_info &= ~LAST_CTX_MASK; | 888 | slot_ctx->dev_info &= ~LAST_CTX_MASK; |
850 | in_ctx->slot.dev_info |= LAST_CTX(last_ctx); | 889 | slot_ctx->dev_info |= LAST_CTX(last_ctx); |
851 | } | 890 | } |
852 | new_slot_info = in_ctx->slot.dev_info; | 891 | new_slot_info = slot_ctx->dev_info; |
892 | |||
893 | /* Store the usb_device pointer for later use */ | ||
894 | ep->hcpriv = udev; | ||
853 | 895 | ||
854 | xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n", | 896 | xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n", |
855 | (unsigned int) ep->desc.bEndpointAddress, | 897 | (unsigned int) ep->desc.bEndpointAddress, |
@@ -860,9 +902,11 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |||
860 | return 0; | 902 | return 0; |
861 | } | 903 | } |
862 | 904 | ||
863 | static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev) | 905 | static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) |
864 | { | 906 | { |
907 | struct xhci_input_control_ctx *ctrl_ctx; | ||
865 | struct xhci_ep_ctx *ep_ctx; | 908 | struct xhci_ep_ctx *ep_ctx; |
909 | struct xhci_slot_ctx *slot_ctx; | ||
866 | int i; | 910 | int i; |
867 | 911 | ||
868 | /* When a device's add flag and drop flag are zero, any subsequent | 912 | /* When a device's add flag and drop flag are zero, any subsequent |
@@ -870,17 +914,18 @@ static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev) | |||
870 | * untouched. Make sure we don't leave any old state in the input | 914 | * untouched. Make sure we don't leave any old state in the input |
871 | * endpoint contexts. | 915 | * endpoint contexts. |
872 | */ | 916 | */ |
873 | virt_dev->in_ctx->drop_flags = 0; | 917 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); |
874 | virt_dev->in_ctx->add_flags = 0; | 918 | ctrl_ctx->drop_flags = 0; |
875 | virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK; | 919 | ctrl_ctx->add_flags = 0; |
920 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); | ||
921 | slot_ctx->dev_info &= ~LAST_CTX_MASK; | ||
876 | /* Endpoint 0 is always valid */ | 922 | /* Endpoint 0 is always valid */ |
877 | virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1); | 923 | slot_ctx->dev_info |= LAST_CTX(1); |
878 | for (i = 1; i < 31; ++i) { | 924 | for (i = 1; i < 31; ++i) { |
879 | ep_ctx = &virt_dev->in_ctx->ep[i]; | 925 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); |
880 | ep_ctx->ep_info = 0; | 926 | ep_ctx->ep_info = 0; |
881 | ep_ctx->ep_info2 = 0; | 927 | ep_ctx->ep_info2 = 0; |
882 | ep_ctx->deq[0] = 0; | 928 | ep_ctx->deq = 0; |
883 | ep_ctx->deq[1] = 0; | ||
884 | ep_ctx->tx_info = 0; | 929 | ep_ctx->tx_info = 0; |
885 | } | 930 | } |
886 | } | 931 | } |
@@ -903,6 +948,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) | |||
903 | unsigned long flags; | 948 | unsigned long flags; |
904 | struct xhci_hcd *xhci; | 949 | struct xhci_hcd *xhci; |
905 | struct xhci_virt_device *virt_dev; | 950 | struct xhci_virt_device *virt_dev; |
951 | struct xhci_input_control_ctx *ctrl_ctx; | ||
952 | struct xhci_slot_ctx *slot_ctx; | ||
906 | 953 | ||
907 | ret = xhci_check_args(hcd, udev, NULL, 0, __func__); | 954 | ret = xhci_check_args(hcd, udev, NULL, 0, __func__); |
908 | if (ret <= 0) | 955 | if (ret <= 0) |
@@ -918,16 +965,18 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) | |||
918 | virt_dev = xhci->devs[udev->slot_id]; | 965 | virt_dev = xhci->devs[udev->slot_id]; |
919 | 966 | ||
920 | /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ | 967 | /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ |
921 | virt_dev->in_ctx->add_flags |= SLOT_FLAG; | 968 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); |
922 | virt_dev->in_ctx->add_flags &= ~EP0_FLAG; | 969 | ctrl_ctx->add_flags |= SLOT_FLAG; |
923 | virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG; | 970 | ctrl_ctx->add_flags &= ~EP0_FLAG; |
924 | virt_dev->in_ctx->drop_flags &= ~EP0_FLAG; | 971 | ctrl_ctx->drop_flags &= ~SLOT_FLAG; |
972 | ctrl_ctx->drop_flags &= ~EP0_FLAG; | ||
925 | xhci_dbg(xhci, "New Input Control Context:\n"); | 973 | xhci_dbg(xhci, "New Input Control Context:\n"); |
926 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, | 974 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
927 | LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info)); | 975 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, |
976 | LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); | ||
928 | 977 | ||
929 | spin_lock_irqsave(&xhci->lock, flags); | 978 | spin_lock_irqsave(&xhci->lock, flags); |
930 | ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma, | 979 | ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma, |
931 | udev->slot_id); | 980 | udev->slot_id); |
932 | if (ret < 0) { | 981 | if (ret < 0) { |
933 | spin_unlock_irqrestore(&xhci->lock, flags); | 982 | spin_unlock_irqrestore(&xhci->lock, flags); |
@@ -982,10 +1031,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) | |||
982 | } | 1031 | } |
983 | 1032 | ||
984 | xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); | 1033 | xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); |
985 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, | 1034 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, |
986 | LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info)); | 1035 | LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); |
987 | 1036 | ||
988 | xhci_zero_in_ctx(virt_dev); | 1037 | xhci_zero_in_ctx(xhci, virt_dev); |
989 | /* Free any old rings */ | 1038 | /* Free any old rings */ |
990 | for (i = 1; i < 31; ++i) { | 1039 | for (i = 1; i < 31; ++i) { |
991 | if (virt_dev->new_ep_rings[i]) { | 1040 | if (virt_dev->new_ep_rings[i]) { |
@@ -1023,7 +1072,67 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) | |||
1023 | virt_dev->new_ep_rings[i] = NULL; | 1072 | virt_dev->new_ep_rings[i] = NULL; |
1024 | } | 1073 | } |
1025 | } | 1074 | } |
1026 | xhci_zero_in_ctx(virt_dev); | 1075 | xhci_zero_in_ctx(xhci, virt_dev); |
1076 | } | ||
1077 | |||
1078 | /* Deal with stalled endpoints. The core should have sent the control message | ||
1079 | * to clear the halt condition. However, we need to make the xHCI hardware | ||
1080 | * reset its sequence number, since a device will expect a sequence number of | ||
1081 | * zero after the halt condition is cleared. | ||
1082 | * Context: in_interrupt | ||
1083 | */ | ||
1084 | void xhci_endpoint_reset(struct usb_hcd *hcd, | ||
1085 | struct usb_host_endpoint *ep) | ||
1086 | { | ||
1087 | struct xhci_hcd *xhci; | ||
1088 | struct usb_device *udev; | ||
1089 | unsigned int ep_index; | ||
1090 | unsigned long flags; | ||
1091 | int ret; | ||
1092 | struct xhci_dequeue_state deq_state; | ||
1093 | struct xhci_ring *ep_ring; | ||
1094 | |||
1095 | xhci = hcd_to_xhci(hcd); | ||
1096 | udev = (struct usb_device *) ep->hcpriv; | ||
1097 | /* Called with a root hub endpoint (or an endpoint that wasn't added | ||
1098 | * with xhci_add_endpoint()) | ||
1099 | */ | ||
1100 | if (!ep->hcpriv) | ||
1101 | return; | ||
1102 | ep_index = xhci_get_endpoint_index(&ep->desc); | ||
1103 | ep_ring = xhci->devs[udev->slot_id]->ep_rings[ep_index]; | ||
1104 | if (!ep_ring->stopped_td) { | ||
1105 | xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n", | ||
1106 | ep->desc.bEndpointAddress); | ||
1107 | return; | ||
1108 | } | ||
1109 | |||
1110 | xhci_dbg(xhci, "Queueing reset endpoint command\n"); | ||
1111 | spin_lock_irqsave(&xhci->lock, flags); | ||
1112 | ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index); | ||
1113 | /* | ||
1114 | * Can't change the ring dequeue pointer until it's transitioned to the | ||
1115 | * stopped state, which is only upon a successful reset endpoint | ||
1116 | * command. Better hope that last command worked! | ||
1117 | */ | ||
1118 | if (!ret) { | ||
1119 | xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n"); | ||
1120 | /* We need to move the HW's dequeue pointer past this TD, | ||
1121 | * or it will attempt to resend it on the next doorbell ring. | ||
1122 | */ | ||
1123 | xhci_find_new_dequeue_state(xhci, udev->slot_id, | ||
1124 | ep_index, ep_ring->stopped_td, &deq_state); | ||
1125 | xhci_dbg(xhci, "Queueing new dequeue state\n"); | ||
1126 | xhci_queue_new_dequeue_state(xhci, ep_ring, | ||
1127 | udev->slot_id, | ||
1128 | ep_index, &deq_state); | ||
1129 | kfree(ep_ring->stopped_td); | ||
1130 | xhci_ring_cmd_db(xhci); | ||
1131 | } | ||
1132 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
1133 | |||
1134 | if (ret) | ||
1135 | xhci_warn(xhci, "FIXME allocate a new ring segment\n"); | ||
1027 | } | 1136 | } |
1028 | 1137 | ||
1029 | /* | 1138 | /* |
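Note on the new xhci_endpoint_reset(): its comments describe an ordering constraint, in that the hardware only lets the driver move an endpoint's dequeue pointer after a Reset Endpoint command has taken the endpoint out of the Halted state, so the reset command, the new dequeue state, and the Set TR Dequeue Pointer command are all queued before the command doorbell is rung. A stub-only sketch of that ordering; every call below is a placeholder, not a driver function:

#include <stdio.h>

static int  queue_reset_ep(void)         { puts("Reset Endpoint command queued");                return 0; }
static void find_new_dequeue_state(void) { puts("new dequeue state computed past the stalled TD");         }
static void queue_set_tr_dequeue(void)   { puts("Set TR Dequeue Pointer command queued");                  }
static void ring_command_doorbell(void)  { puts("command ring doorbell rung");                             }

int main(void)
{
	/* The dequeue pointer may only move once the endpoint has left the
	 * Halted state, so both commands are queued first and the doorbell
	 * is rung last, mirroring xhci_endpoint_reset() above. */
	if (queue_reset_ep() == 0) {
		find_new_dequeue_state();
		queue_set_tr_dequeue();
		ring_command_doorbell();
	} else {
		puts("no room on the command ring; a new segment would be needed");
	}
	return 0;
}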
@@ -1120,7 +1229,9 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
1120 | struct xhci_virt_device *virt_dev; | 1229 | struct xhci_virt_device *virt_dev; |
1121 | int ret = 0; | 1230 | int ret = 0; |
1122 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 1231 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
1123 | u32 temp; | 1232 | struct xhci_slot_ctx *slot_ctx; |
1233 | struct xhci_input_control_ctx *ctrl_ctx; | ||
1234 | u64 temp_64; | ||
1124 | 1235 | ||
1125 | if (!udev->slot_id) { | 1236 | if (!udev->slot_id) { |
1126 | xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); | 1237 | xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); |
@@ -1133,10 +1244,12 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
1133 | if (!udev->config) | 1244 | if (!udev->config) |
1134 | xhci_setup_addressable_virt_dev(xhci, udev); | 1245 | xhci_setup_addressable_virt_dev(xhci, udev); |
1135 | /* Otherwise, assume the core has the device configured how it wants */ | 1246 | /* Otherwise, assume the core has the device configured how it wants */ |
1247 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); | ||
1248 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); | ||
1136 | 1249 | ||
1137 | spin_lock_irqsave(&xhci->lock, flags); | 1250 | spin_lock_irqsave(&xhci->lock, flags); |
1138 | ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma, | 1251 | ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, |
1139 | udev->slot_id); | 1252 | udev->slot_id); |
1140 | if (ret) { | 1253 | if (ret) { |
1141 | spin_unlock_irqrestore(&xhci->lock, flags); | 1254 | spin_unlock_irqrestore(&xhci->lock, flags); |
1142 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); | 1255 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); |
@@ -1176,41 +1289,37 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
1176 | default: | 1289 | default: |
1177 | xhci_err(xhci, "ERROR: unexpected command completion " | 1290 | xhci_err(xhci, "ERROR: unexpected command completion " |
1178 | "code 0x%x.\n", virt_dev->cmd_status); | 1291 | "code 0x%x.\n", virt_dev->cmd_status); |
1292 | xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); | ||
1293 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); | ||
1179 | ret = -EINVAL; | 1294 | ret = -EINVAL; |
1180 | break; | 1295 | break; |
1181 | } | 1296 | } |
1182 | if (ret) { | 1297 | if (ret) { |
1183 | return ret; | 1298 | return ret; |
1184 | } | 1299 | } |
1185 | temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]); | 1300 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); |
1186 | xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp); | 1301 | xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64); |
1187 | temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]); | 1302 | xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n", |
1188 | xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp); | ||
1189 | xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%p = %#08x\n", | ||
1190 | udev->slot_id, | ||
1191 | &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id], | ||
1192 | xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]); | ||
1193 | xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%p = %#08x\n", | ||
1194 | udev->slot_id, | 1303 | udev->slot_id, |
1195 | &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1], | 1304 | &xhci->dcbaa->dev_context_ptrs[udev->slot_id], |
1196 | xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]); | 1305 | (unsigned long long) |
1306 | xhci->dcbaa->dev_context_ptrs[udev->slot_id]); | ||
1197 | xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", | 1307 | xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", |
1198 | (unsigned long long)virt_dev->out_ctx_dma); | 1308 | (unsigned long long)virt_dev->out_ctx->dma); |
1199 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); | 1309 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); |
1200 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2); | 1310 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); |
1201 | xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); | 1311 | xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); |
1202 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2); | 1312 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); |
1203 | /* | 1313 | /* |
1204 | * USB core uses address 1 for the roothubs, so we add one to the | 1314 | * USB core uses address 1 for the roothubs, so we add one to the |
1205 | * address given back to us by the HC. | 1315 | * address given back to us by the HC. |
1206 | */ | 1316 | */ |
1207 | udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1; | 1317 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
1318 | udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1; | ||
1208 | /* Zero the input context control for later use */ | 1319 | /* Zero the input context control for later use */ |
1209 | virt_dev->in_ctx->add_flags = 0; | 1320 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); |
1210 | virt_dev->in_ctx->drop_flags = 0; | 1321 | ctrl_ctx->add_flags = 0; |
1211 | /* Mirror flags in the output context for future ep enable/disable */ | 1322 | ctrl_ctx->drop_flags = 0; |
1212 | virt_dev->out_ctx->add_flags = SLOT_FLAG | EP0_FLAG; | ||
1213 | virt_dev->out_ctx->drop_flags = 0; | ||
1214 | 1323 | ||
1215 | xhci_dbg(xhci, "Device address = %d\n", udev->devnum); | 1324 | xhci_dbg(xhci, "Device address = %d\n", udev->devnum); |
1216 | /* XXX Meh, not sure if anyone else but choose_address uses this. */ | 1325 | /* XXX Meh, not sure if anyone else but choose_address uses this. */ |
@@ -1252,7 +1361,6 @@ static int __init xhci_hcd_init(void) | |||
1252 | /* xhci_device_control has eight fields, and also | 1361 | /* xhci_device_control has eight fields, and also |
1253 | * embeds one xhci_slot_ctx and 31 xhci_ep_ctx | 1362 | * embeds one xhci_slot_ctx and 31 xhci_ep_ctx |
1254 | */ | 1363 | */ |
1255 | BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8+8*31)*32/8); | ||
1256 | BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); | 1364 | BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); |
1257 | BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); | 1365 | BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); |
1258 | BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); | 1366 | BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); |
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index c8a72de1c508..e6b9a1c6002d 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -88,7 +88,7 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev, | |||
88 | return; | 88 | return; |
89 | prev->next = next; | 89 | prev->next = next; |
90 | if (link_trbs) { | 90 | if (link_trbs) { |
91 | prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma; | 91 | prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma; |
92 | 92 | ||
93 | /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ | 93 | /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ |
94 | val = prev->trbs[TRBS_PER_SEGMENT-1].link.control; | 94 | val = prev->trbs[TRBS_PER_SEGMENT-1].link.control; |
@@ -189,6 +189,63 @@ fail: | |||
189 | return 0; | 189 | return 0; |
190 | } | 190 | } |
191 | 191 | ||
192 | #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32) | ||
193 | |||
194 | struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, | ||
195 | int type, gfp_t flags) | ||
196 | { | ||
197 | struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags); | ||
198 | if (!ctx) | ||
199 | return NULL; | ||
200 | |||
201 | BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT)); | ||
202 | ctx->type = type; | ||
203 | ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024; | ||
204 | if (type == XHCI_CTX_TYPE_INPUT) | ||
205 | ctx->size += CTX_SIZE(xhci->hcc_params); | ||
206 | |||
207 | ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma); | ||
208 | memset(ctx->bytes, 0, ctx->size); | ||
209 | return ctx; | ||
210 | } | ||
211 | |||
212 | void xhci_free_container_ctx(struct xhci_hcd *xhci, | ||
213 | struct xhci_container_ctx *ctx) | ||
214 | { | ||
215 | dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma); | ||
216 | kfree(ctx); | ||
217 | } | ||
218 | |||
219 | struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, | ||
220 | struct xhci_container_ctx *ctx) | ||
221 | { | ||
222 | BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT); | ||
223 | return (struct xhci_input_control_ctx *)ctx->bytes; | ||
224 | } | ||
225 | |||
226 | struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, | ||
227 | struct xhci_container_ctx *ctx) | ||
228 | { | ||
229 | if (ctx->type == XHCI_CTX_TYPE_DEVICE) | ||
230 | return (struct xhci_slot_ctx *)ctx->bytes; | ||
231 | |||
232 | return (struct xhci_slot_ctx *) | ||
233 | (ctx->bytes + CTX_SIZE(xhci->hcc_params)); | ||
234 | } | ||
235 | |||
236 | struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, | ||
237 | struct xhci_container_ctx *ctx, | ||
238 | unsigned int ep_index) | ||
239 | { | ||
240 | /* increment ep index by offset of start of ep ctx array */ | ||
241 | ep_index++; | ||
242 | if (ctx->type == XHCI_CTX_TYPE_INPUT) | ||
243 | ep_index++; | ||
244 | |||
245 | return (struct xhci_ep_ctx *) | ||
246 | (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params))); | ||
247 | } | ||
248 | |||
192 | /* All the xhci_tds in the ring's TD list should be freed at this point */ | 249 | /* All the xhci_tds in the ring's TD list should be freed at this point */ |
193 | void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) | 250 | void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) |
194 | { | 251 | { |
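Note on the container-context helpers added in this hunk: they replace hard-coded struct offsets with arithmetic on CTX_SIZE, which is 32 or 64 bytes depending on HCC_64BYTE_CONTEXT. An output context is the slot context followed by 31 endpoint contexts; an input context carries an extra input control context at offset 0, which is why ep_index is bumped once for the slot context and once more for input contexts, and why the device pool element later in the patch grows to 2112 bytes (32 contexts of 64 bytes plus one input control context). A small standalone sketch of the offset math:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* ctx_size is 32 or 64 bytes depending on HCC_64BYTE_CONTEXT. */
static size_t slot_ctx_offset(bool is_input, size_t ctx_size)
{
	/* An input container starts with an input control context, which
	 * pushes the slot context down by one entry. */
	return is_input ? ctx_size : 0;
}

static size_t ep_ctx_offset(bool is_input, size_t ctx_size, unsigned int ep_index)
{
	/* Skip the slot context, plus the input control context if any. */
	return (ep_index + 1 + (is_input ? 1 : 0)) * ctx_size;
}

int main(void)
{
	/* 64-byte input container: slot at byte 64, EP0 at byte 128. */
	printf("slot @ %zu, ep0 @ %zu\n",
	       slot_ctx_offset(true, 64), ep_ctx_offset(true, 64, 0));
	/* 32-byte output container: slot at byte 0, EP0 at byte 32. */
	printf("slot @ %zu, ep0 @ %zu\n",
	       slot_ctx_offset(false, 32), ep_ctx_offset(false, 32, 0));
	return 0;
}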
@@ -200,8 +257,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) | |||
200 | return; | 257 | return; |
201 | 258 | ||
202 | dev = xhci->devs[slot_id]; | 259 | dev = xhci->devs[slot_id]; |
203 | xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0; | 260 | xhci->dcbaa->dev_context_ptrs[slot_id] = 0; |
204 | xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0; | ||
205 | if (!dev) | 261 | if (!dev) |
206 | return; | 262 | return; |
207 | 263 | ||
@@ -210,11 +266,10 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) | |||
210 | xhci_ring_free(xhci, dev->ep_rings[i]); | 266 | xhci_ring_free(xhci, dev->ep_rings[i]); |
211 | 267 | ||
212 | if (dev->in_ctx) | 268 | if (dev->in_ctx) |
213 | dma_pool_free(xhci->device_pool, | 269 | xhci_free_container_ctx(xhci, dev->in_ctx); |
214 | dev->in_ctx, dev->in_ctx_dma); | ||
215 | if (dev->out_ctx) | 270 | if (dev->out_ctx) |
216 | dma_pool_free(xhci->device_pool, | 271 | xhci_free_container_ctx(xhci, dev->out_ctx); |
217 | dev->out_ctx, dev->out_ctx_dma); | 272 | |
218 | kfree(xhci->devs[slot_id]); | 273 | kfree(xhci->devs[slot_id]); |
219 | xhci->devs[slot_id] = 0; | 274 | xhci->devs[slot_id] = 0; |
220 | } | 275 | } |
@@ -222,7 +277,6 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) | |||
222 | int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, | 277 | int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, |
223 | struct usb_device *udev, gfp_t flags) | 278 | struct usb_device *udev, gfp_t flags) |
224 | { | 279 | { |
225 | dma_addr_t dma; | ||
226 | struct xhci_virt_device *dev; | 280 | struct xhci_virt_device *dev; |
227 | 281 | ||
228 | /* Slot ID 0 is reserved */ | 282 | /* Slot ID 0 is reserved */ |
@@ -236,23 +290,21 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, | |||
236 | return 0; | 290 | return 0; |
237 | dev = xhci->devs[slot_id]; | 291 | dev = xhci->devs[slot_id]; |
238 | 292 | ||
239 | /* Allocate the (output) device context that will be used in the HC */ | 293 | /* Allocate the (output) device context that will be used in the HC. */ |
240 | dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma); | 294 | dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); |
241 | if (!dev->out_ctx) | 295 | if (!dev->out_ctx) |
242 | goto fail; | 296 | goto fail; |
243 | dev->out_ctx_dma = dma; | 297 | |
244 | xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id, | 298 | xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id, |
245 | (unsigned long long)dma); | 299 | (unsigned long long)dev->out_ctx->dma); |
246 | memset(dev->out_ctx, 0, sizeof(*dev->out_ctx)); | ||
247 | 300 | ||
248 | /* Allocate the (input) device context for address device command */ | 301 | /* Allocate the (input) device context for address device command */ |
249 | dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma); | 302 | dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags); |
250 | if (!dev->in_ctx) | 303 | if (!dev->in_ctx) |
251 | goto fail; | 304 | goto fail; |
252 | dev->in_ctx_dma = dma; | 305 | |
253 | xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, | 306 | xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, |
254 | (unsigned long long)dma); | 307 | (unsigned long long)dev->in_ctx->dma); |
255 | memset(dev->in_ctx, 0, sizeof(*dev->in_ctx)); | ||
256 | 308 | ||
257 | /* Allocate endpoint 0 ring */ | 309 | /* Allocate endpoint 0 ring */ |
258 | dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags); | 310 | dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags); |
@@ -261,17 +313,12 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, | |||
261 | 313 | ||
262 | init_completion(&dev->cmd_completion); | 314 | init_completion(&dev->cmd_completion); |
263 | 315 | ||
264 | /* | 316 | /* Point to output device context in dcbaa. */ |
265 | * Point to output device context in dcbaa; skip the output control | 317 | xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma; |
266 | * context, which is eight 32 bit fields (or 32 bytes long) | ||
267 | */ | ||
268 | xhci->dcbaa->dev_context_ptrs[2*slot_id] = | ||
269 | (u32) dev->out_ctx_dma + (32); | ||
270 | xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", | 318 | xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", |
271 | slot_id, | 319 | slot_id, |
272 | &xhci->dcbaa->dev_context_ptrs[2*slot_id], | 320 | &xhci->dcbaa->dev_context_ptrs[slot_id], |
273 | (unsigned long long)dev->out_ctx_dma); | 321 | (unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]); |
274 | xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0; | ||
275 | 322 | ||
276 | return 1; | 323 | return 1; |
277 | fail: | 324 | fail: |
@@ -285,6 +332,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
285 | struct xhci_virt_device *dev; | 332 | struct xhci_virt_device *dev; |
286 | struct xhci_ep_ctx *ep0_ctx; | 333 | struct xhci_ep_ctx *ep0_ctx; |
287 | struct usb_device *top_dev; | 334 | struct usb_device *top_dev; |
335 | struct xhci_slot_ctx *slot_ctx; | ||
336 | struct xhci_input_control_ctx *ctrl_ctx; | ||
288 | 337 | ||
289 | dev = xhci->devs[udev->slot_id]; | 338 | dev = xhci->devs[udev->slot_id]; |
290 | /* Slot ID 0 is reserved */ | 339 | /* Slot ID 0 is reserved */ |
@@ -293,27 +342,29 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
293 | udev->slot_id); | 342 | udev->slot_id); |
294 | return -EINVAL; | 343 | return -EINVAL; |
295 | } | 344 | } |
296 | ep0_ctx = &dev->in_ctx->ep[0]; | 345 | ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0); |
346 | ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx); | ||
347 | slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); | ||
297 | 348 | ||
298 | /* 2) New slot context and endpoint 0 context are valid*/ | 349 | /* 2) New slot context and endpoint 0 context are valid*/ |
299 | dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG; | 350 | ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG; |
300 | 351 | ||
301 | /* 3) Only the control endpoint is valid - one endpoint context */ | 352 | /* 3) Only the control endpoint is valid - one endpoint context */ |
302 | dev->in_ctx->slot.dev_info |= LAST_CTX(1); | 353 | slot_ctx->dev_info |= LAST_CTX(1); |
303 | 354 | ||
304 | switch (udev->speed) { | 355 | switch (udev->speed) { |
305 | case USB_SPEED_SUPER: | 356 | case USB_SPEED_SUPER: |
306 | dev->in_ctx->slot.dev_info |= (u32) udev->route; | 357 | slot_ctx->dev_info |= (u32) udev->route; |
307 | dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS; | 358 | slot_ctx->dev_info |= (u32) SLOT_SPEED_SS; |
308 | break; | 359 | break; |
309 | case USB_SPEED_HIGH: | 360 | case USB_SPEED_HIGH: |
310 | dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS; | 361 | slot_ctx->dev_info |= (u32) SLOT_SPEED_HS; |
311 | break; | 362 | break; |
312 | case USB_SPEED_FULL: | 363 | case USB_SPEED_FULL: |
313 | dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS; | 364 | slot_ctx->dev_info |= (u32) SLOT_SPEED_FS; |
314 | break; | 365 | break; |
315 | case USB_SPEED_LOW: | 366 | case USB_SPEED_LOW: |
316 | dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS; | 367 | slot_ctx->dev_info |= (u32) SLOT_SPEED_LS; |
317 | break; | 368 | break; |
318 | case USB_SPEED_VARIABLE: | 369 | case USB_SPEED_VARIABLE: |
319 | xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); | 370 | xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); |
@@ -327,7 +378,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
327 | for (top_dev = udev; top_dev->parent && top_dev->parent->parent; | 378 | for (top_dev = udev; top_dev->parent && top_dev->parent->parent; |
328 | top_dev = top_dev->parent) | 379 | top_dev = top_dev->parent) |
329 | /* Found device below root hub */; | 380 | /* Found device below root hub */; |
330 | dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum); | 381 | slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum); |
331 | xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum); | 382 | xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum); |
332 | 383 | ||
333 | /* Is this a LS/FS device under a HS hub? */ | 384 | /* Is this a LS/FS device under a HS hub? */ |
@@ -337,8 +388,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
337 | */ | 388 | */ |
338 | if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && | 389 | if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && |
339 | udev->tt) { | 390 | udev->tt) { |
340 | dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id; | 391 | slot_ctx->tt_info = udev->tt->hub->slot_id; |
341 | dev->in_ctx->slot.tt_info |= udev->ttport << 8; | 392 | slot_ctx->tt_info |= udev->ttport << 8; |
342 | } | 393 | } |
343 | xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); | 394 | xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); |
344 | xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); | 395 | xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); |
@@ -360,10 +411,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud | |||
360 | ep0_ctx->ep_info2 |= MAX_BURST(0); | 411 | ep0_ctx->ep_info2 |= MAX_BURST(0); |
361 | ep0_ctx->ep_info2 |= ERROR_COUNT(3); | 412 | ep0_ctx->ep_info2 |= ERROR_COUNT(3); |
362 | 413 | ||
363 | ep0_ctx->deq[0] = | 414 | ep0_ctx->deq = |
364 | dev->ep_rings[0]->first_seg->dma; | 415 | dev->ep_rings[0]->first_seg->dma; |
365 | ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state; | 416 | ep0_ctx->deq |= dev->ep_rings[0]->cycle_state; |
366 | ep0_ctx->deq[1] = 0; | ||
367 | 417 | ||
368 | /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ | 418 | /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ |
369 | 419 | ||
@@ -470,25 +520,26 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, | |||
470 | unsigned int max_burst; | 520 | unsigned int max_burst; |
471 | 521 | ||
472 | ep_index = xhci_get_endpoint_index(&ep->desc); | 522 | ep_index = xhci_get_endpoint_index(&ep->desc); |
473 | ep_ctx = &virt_dev->in_ctx->ep[ep_index]; | 523 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); |
474 | 524 | ||
475 | /* Set up the endpoint ring */ | 525 | /* Set up the endpoint ring */ |
476 | virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags); | 526 | virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags); |
477 | if (!virt_dev->new_ep_rings[ep_index]) | 527 | if (!virt_dev->new_ep_rings[ep_index]) |
478 | return -ENOMEM; | 528 | return -ENOMEM; |
479 | ep_ring = virt_dev->new_ep_rings[ep_index]; | 529 | ep_ring = virt_dev->new_ep_rings[ep_index]; |
480 | ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state; | 530 | ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state; |
481 | ep_ctx->deq[1] = 0; | ||
482 | 531 | ||
483 | ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); | 532 | ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); |
484 | 533 | ||
485 | /* FIXME dig Mult and streams info out of ep companion desc */ | 534 | /* FIXME dig Mult and streams info out of ep companion desc */ |
486 | 535 | ||
487 | /* Allow 3 retries for everything but isoc */ | 536 | /* Allow 3 retries for everything but isoc; |
537 | * error count = 0 means infinite retries. | ||
538 | */ | ||
488 | if (!usb_endpoint_xfer_isoc(&ep->desc)) | 539 | if (!usb_endpoint_xfer_isoc(&ep->desc)) |
489 | ep_ctx->ep_info2 = ERROR_COUNT(3); | 540 | ep_ctx->ep_info2 = ERROR_COUNT(3); |
490 | else | 541 | else |
491 | ep_ctx->ep_info2 = ERROR_COUNT(0); | 542 | ep_ctx->ep_info2 = ERROR_COUNT(1); |
492 | 543 | ||
493 | ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep); | 544 | ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep); |
494 | 545 | ||
@@ -498,7 +549,12 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, | |||
498 | max_packet = ep->desc.wMaxPacketSize; | 549 | max_packet = ep->desc.wMaxPacketSize; |
499 | ep_ctx->ep_info2 |= MAX_PACKET(max_packet); | 550 | ep_ctx->ep_info2 |= MAX_PACKET(max_packet); |
500 | /* dig out max burst from ep companion desc */ | 551 | /* dig out max burst from ep companion desc */ |
501 | max_packet = ep->ss_ep_comp->desc.bMaxBurst; | 552 | if (!ep->ss_ep_comp) { |
553 | xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n"); | ||
554 | max_packet = 0; | ||
555 | } else { | ||
556 | max_packet = ep->ss_ep_comp->desc.bMaxBurst; | ||
557 | } | ||
502 | ep_ctx->ep_info2 |= MAX_BURST(max_packet); | 558 | ep_ctx->ep_info2 |= MAX_BURST(max_packet); |
503 | break; | 559 | break; |
504 | case USB_SPEED_HIGH: | 560 | case USB_SPEED_HIGH: |
@@ -531,18 +587,114 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci, | |||
531 | struct xhci_ep_ctx *ep_ctx; | 587 | struct xhci_ep_ctx *ep_ctx; |
532 | 588 | ||
533 | ep_index = xhci_get_endpoint_index(&ep->desc); | 589 | ep_index = xhci_get_endpoint_index(&ep->desc); |
534 | ep_ctx = &virt_dev->in_ctx->ep[ep_index]; | 590 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); |
535 | 591 | ||
536 | ep_ctx->ep_info = 0; | 592 | ep_ctx->ep_info = 0; |
537 | ep_ctx->ep_info2 = 0; | 593 | ep_ctx->ep_info2 = 0; |
538 | ep_ctx->deq[0] = 0; | 594 | ep_ctx->deq = 0; |
539 | ep_ctx->deq[1] = 0; | ||
540 | ep_ctx->tx_info = 0; | 595 | ep_ctx->tx_info = 0; |
541 | /* Don't free the endpoint ring until the set interface or configuration | 596 | /* Don't free the endpoint ring until the set interface or configuration |
542 | * request succeeds. | 597 | * request succeeds. |
543 | */ | 598 | */ |
544 | } | 599 | } |
545 | 600 | ||
601 | /* Set up the scratchpad buffer array and scratchpad buffers, if needed. */ | ||
602 | static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) | ||
603 | { | ||
604 | int i; | ||
605 | struct device *dev = xhci_to_hcd(xhci)->self.controller; | ||
606 | int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); | ||
607 | |||
608 | xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp); | ||
609 | |||
610 | if (!num_sp) | ||
611 | return 0; | ||
612 | |||
613 | xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags); | ||
614 | if (!xhci->scratchpad) | ||
615 | goto fail_sp; | ||
616 | |||
617 | xhci->scratchpad->sp_array = | ||
618 | pci_alloc_consistent(to_pci_dev(dev), | ||
619 | num_sp * sizeof(u64), | ||
620 | &xhci->scratchpad->sp_dma); | ||
621 | if (!xhci->scratchpad->sp_array) | ||
622 | goto fail_sp2; | ||
623 | |||
624 | xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags); | ||
625 | if (!xhci->scratchpad->sp_buffers) | ||
626 | goto fail_sp3; | ||
627 | |||
628 | xhci->scratchpad->sp_dma_buffers = | ||
629 | kzalloc(sizeof(dma_addr_t) * num_sp, flags); | ||
630 | |||
631 | if (!xhci->scratchpad->sp_dma_buffers) | ||
632 | goto fail_sp4; | ||
633 | |||
634 | xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma; | ||
635 | for (i = 0; i < num_sp; i++) { | ||
636 | dma_addr_t dma; | ||
637 | void *buf = pci_alloc_consistent(to_pci_dev(dev), | ||
638 | xhci->page_size, &dma); | ||
639 | if (!buf) | ||
640 | goto fail_sp5; | ||
641 | |||
642 | xhci->scratchpad->sp_array[i] = dma; | ||
643 | xhci->scratchpad->sp_buffers[i] = buf; | ||
644 | xhci->scratchpad->sp_dma_buffers[i] = dma; | ||
645 | } | ||
646 | |||
647 | return 0; | ||
648 | |||
649 | fail_sp5: | ||
650 | for (i = i - 1; i >= 0; i--) { | ||
651 | pci_free_consistent(to_pci_dev(dev), xhci->page_size, | ||
652 | xhci->scratchpad->sp_buffers[i], | ||
653 | xhci->scratchpad->sp_dma_buffers[i]); | ||
654 | } | ||
655 | kfree(xhci->scratchpad->sp_dma_buffers); | ||
656 | |||
657 | fail_sp4: | ||
658 | kfree(xhci->scratchpad->sp_buffers); | ||
659 | |||
660 | fail_sp3: | ||
661 | pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64), | ||
662 | xhci->scratchpad->sp_array, | ||
663 | xhci->scratchpad->sp_dma); | ||
664 | |||
665 | fail_sp2: | ||
666 | kfree(xhci->scratchpad); | ||
667 | xhci->scratchpad = NULL; | ||
668 | |||
669 | fail_sp: | ||
670 | return -ENOMEM; | ||
671 | } | ||
672 | |||
673 | static void scratchpad_free(struct xhci_hcd *xhci) | ||
674 | { | ||
675 | int num_sp; | ||
676 | int i; | ||
677 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); | ||
678 | |||
679 | if (!xhci->scratchpad) | ||
680 | return; | ||
681 | |||
682 | num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); | ||
683 | |||
684 | for (i = 0; i < num_sp; i++) { | ||
685 | pci_free_consistent(pdev, xhci->page_size, | ||
686 | xhci->scratchpad->sp_buffers[i], | ||
687 | xhci->scratchpad->sp_dma_buffers[i]); | ||
688 | } | ||
689 | kfree(xhci->scratchpad->sp_dma_buffers); | ||
690 | kfree(xhci->scratchpad->sp_buffers); | ||
691 | pci_free_consistent(pdev, num_sp * sizeof(u64), | ||
692 | xhci->scratchpad->sp_array, | ||
693 | xhci->scratchpad->sp_dma); | ||
694 | kfree(xhci->scratchpad); | ||
695 | xhci->scratchpad = NULL; | ||
696 | } | ||
697 | |||
546 | void xhci_mem_cleanup(struct xhci_hcd *xhci) | 698 | void xhci_mem_cleanup(struct xhci_hcd *xhci) |
547 | { | 699 | { |
548 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); | 700 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); |
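Note on scratchpad_alloc() above: the controller may ask for HCS_MAX_SCRATCHPAD pages of memory for its own use, and entry 0 of the device context base address array must then point to an array of 64-bit DMA addresses, one per donated page. A userspace model of that layout, with malloc()/calloc() standing in for pci_alloc_consistent() and plain pointer values standing in for DMA addresses:

#include <stdint.h>
#include <stdlib.h>

struct toy_scratchpad {
	uint64_t *sp_array;	/* what dcbaa->dev_context_ptrs[0] points to */
	void	**sp_buffers;	/* CPU-side pointers, kept only for freeing */
	int	  num_sp;
};

static struct toy_scratchpad *toy_scratchpad_alloc(int num_sp, size_t page_size)
{
	struct toy_scratchpad *sp = calloc(1, sizeof(*sp));
	int i;

	if (!sp)
		return NULL;
	sp->num_sp = num_sp;
	sp->sp_array = calloc(num_sp, sizeof(*sp->sp_array));
	sp->sp_buffers = calloc(num_sp, sizeof(*sp->sp_buffers));
	if (!sp->sp_array || !sp->sp_buffers)
		goto fail;
	for (i = 0; i < num_sp; i++) {
		sp->sp_buffers[i] = calloc(1, page_size);
		if (!sp->sp_buffers[i])
			goto fail;
		sp->sp_array[i] = (uint64_t)(uintptr_t)sp->sp_buffers[i];
	}
	return sp;

fail:	/* unwind in reverse order, as scratchpad_alloc() does */
	if (sp->sp_buffers)
		for (i = 0; i < num_sp; i++)
			free(sp->sp_buffers[i]);
	free(sp->sp_buffers);
	free(sp->sp_array);
	free(sp);
	return NULL;
}

int main(void)
{
	return toy_scratchpad_alloc(4, 4096) ? 0 : 1;
}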
@@ -551,10 +703,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) | |||
551 | 703 | ||
552 | /* Free the Event Ring Segment Table and the actual Event Ring */ | 704 | /* Free the Event Ring Segment Table and the actual Event Ring */ |
553 | xhci_writel(xhci, 0, &xhci->ir_set->erst_size); | 705 | xhci_writel(xhci, 0, &xhci->ir_set->erst_size); |
554 | xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]); | 706 | xhci_write_64(xhci, 0, &xhci->ir_set->erst_base); |
555 | xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]); | 707 | xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue); |
556 | xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]); | ||
557 | xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]); | ||
558 | size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); | 708 | size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); |
559 | if (xhci->erst.entries) | 709 | if (xhci->erst.entries) |
560 | pci_free_consistent(pdev, size, | 710 | pci_free_consistent(pdev, size, |
@@ -566,8 +716,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) | |||
566 | xhci->event_ring = NULL; | 716 | xhci->event_ring = NULL; |
567 | xhci_dbg(xhci, "Freed event ring\n"); | 717 | xhci_dbg(xhci, "Freed event ring\n"); |
568 | 718 | ||
569 | xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]); | 719 | xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring); |
570 | xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]); | ||
571 | if (xhci->cmd_ring) | 720 | if (xhci->cmd_ring) |
572 | xhci_ring_free(xhci, xhci->cmd_ring); | 721 | xhci_ring_free(xhci, xhci->cmd_ring); |
573 | xhci->cmd_ring = NULL; | 722 | xhci->cmd_ring = NULL; |
@@ -586,8 +735,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) | |||
586 | xhci->device_pool = NULL; | 735 | xhci->device_pool = NULL; |
587 | xhci_dbg(xhci, "Freed device context pool\n"); | 736 | xhci_dbg(xhci, "Freed device context pool\n"); |
588 | 737 | ||
589 | xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]); | 738 | xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr); |
590 | xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]); | ||
591 | if (xhci->dcbaa) | 739 | if (xhci->dcbaa) |
592 | pci_free_consistent(pdev, sizeof(*xhci->dcbaa), | 740 | pci_free_consistent(pdev, sizeof(*xhci->dcbaa), |
593 | xhci->dcbaa, xhci->dcbaa->dma); | 741 | xhci->dcbaa, xhci->dcbaa->dma); |
@@ -595,6 +743,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) | |||
595 | 743 | ||
596 | xhci->page_size = 0; | 744 | xhci->page_size = 0; |
597 | xhci->page_shift = 0; | 745 | xhci->page_shift = 0; |
746 | scratchpad_free(xhci); | ||
598 | } | 747 | } |
599 | 748 | ||
600 | int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | 749 | int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) |
@@ -602,6 +751,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
602 | dma_addr_t dma; | 751 | dma_addr_t dma; |
603 | struct device *dev = xhci_to_hcd(xhci)->self.controller; | 752 | struct device *dev = xhci_to_hcd(xhci)->self.controller; |
604 | unsigned int val, val2; | 753 | unsigned int val, val2; |
754 | u64 val_64; | ||
605 | struct xhci_segment *seg; | 755 | struct xhci_segment *seg; |
606 | u32 page_size; | 756 | u32 page_size; |
607 | int i; | 757 | int i; |
@@ -647,8 +797,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
647 | xhci->dcbaa->dma = dma; | 797 | xhci->dcbaa->dma = dma; |
648 | xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n", | 798 | xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n", |
649 | (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); | 799 | (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); |
650 | xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]); | 800 | xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); |
651 | xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]); | ||
652 | 801 | ||
653 | /* | 802 | /* |
654 | * Initialize the ring segment pool. The ring must be a contiguous | 803 | * Initialize the ring segment pool. The ring must be a contiguous |
@@ -658,11 +807,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
658 | */ | 807 | */ |
659 | xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, | 808 | xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, |
660 | SEGMENT_SIZE, 64, xhci->page_size); | 809 | SEGMENT_SIZE, 64, xhci->page_size); |
810 | |||
661 | /* See Table 46 and Note on Figure 55 */ | 811 | /* See Table 46 and Note on Figure 55 */ |
662 | /* FIXME support 64-byte contexts */ | ||
663 | xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, | 812 | xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, |
664 | sizeof(struct xhci_device_control), | 813 | 2112, 64, xhci->page_size); |
665 | 64, xhci->page_size); | ||
666 | if (!xhci->segment_pool || !xhci->device_pool) | 814 | if (!xhci->segment_pool || !xhci->device_pool) |
667 | goto fail; | 815 | goto fail; |
668 | 816 | ||
@@ -675,14 +823,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
675 | (unsigned long long)xhci->cmd_ring->first_seg->dma); | 823 | (unsigned long long)xhci->cmd_ring->first_seg->dma); |
676 | 824 | ||
677 | /* Set the address in the Command Ring Control register */ | 825 | /* Set the address in the Command Ring Control register */ |
678 | val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]); | 826 | val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); |
679 | val = (val & ~CMD_RING_ADDR_MASK) | | 827 | val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | |
680 | (xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) | | 828 | (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | |
681 | xhci->cmd_ring->cycle_state; | 829 | xhci->cmd_ring->cycle_state; |
682 | xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val); | 830 | xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n", (unsigned long long) val_64); |
683 | xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]); | 831 | xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); |
684 | xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n"); | ||
685 | xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]); | ||
686 | xhci_dbg_cmd_ptrs(xhci); | 832 | xhci_dbg_cmd_ptrs(xhci); |
687 | 833 | ||
688 | val = xhci_readl(xhci, &xhci->cap_regs->db_off); | 834 | val = xhci_readl(xhci, &xhci->cap_regs->db_off); |
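Note on the command ring setup above: the register is now programmed with a single 64-bit write, but its low bits are control and status bits rather than address bits, so the current value is read back, the reserved bits are preserved, and the 64-byte-aligned ring address plus the cycle state are merged in. A sketch of that read-modify-write, with the mask width taken from the xHCI register layout rather than quoted from xhci.h:

#include <stdint.h>
#include <stdio.h>

/* The low six bits of the command ring control register hold RCS, CS, CA
 * and CRR, mirroring what CMD_RING_RSVD_BITS masks off in the driver. */
#define TOY_CMD_RING_RSVD_BITS	0x3fULL

static uint64_t compose_crcr(uint64_t old_val, uint64_t ring_dma, unsigned int cycle)
{
	return (old_val & TOY_CMD_RING_RSVD_BITS) |
	       (ring_dma & ~TOY_CMD_RING_RSVD_BITS) |
	       (cycle & 1);
}

int main(void)
{
	/* A 64-byte aligned ring segment at 0x1000 with cycle state 1. */
	printf("CRCR = 0x%llx\n",
	       (unsigned long long)compose_crcr(0x8, 0x1000, 1));
	return 0;
}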
@@ -722,8 +868,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
722 | /* set ring base address and size for each segment table entry */ | 868 | /* set ring base address and size for each segment table entry */ |
723 | for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { | 869 | for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { |
724 | struct xhci_erst_entry *entry = &xhci->erst.entries[val]; | 870 | struct xhci_erst_entry *entry = &xhci->erst.entries[val]; |
725 | entry->seg_addr[0] = seg->dma; | 871 | entry->seg_addr = seg->dma; |
726 | entry->seg_addr[1] = 0; | ||
727 | entry->seg_size = TRBS_PER_SEGMENT; | 872 | entry->seg_size = TRBS_PER_SEGMENT; |
728 | entry->rsvd = 0; | 873 | entry->rsvd = 0; |
729 | seg = seg->next; | 874 | seg = seg->next; |
@@ -741,11 +886,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
741 | /* set the segment table base address */ | 886 | /* set the segment table base address */ |
742 | xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n", | 887 | xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n", |
743 | (unsigned long long)xhci->erst.erst_dma_addr); | 888 | (unsigned long long)xhci->erst.erst_dma_addr); |
744 | val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]); | 889 | val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); |
745 | val &= ERST_PTR_MASK; | 890 | val_64 &= ERST_PTR_MASK; |
746 | val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK); | 891 | val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); |
747 | xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]); | 892 | xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base); |
748 | xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]); | ||
749 | 893 | ||
750 | /* Set the event ring dequeue address */ | 894 | /* Set the event ring dequeue address */ |
751 | xhci_set_hc_event_deq(xhci); | 895 | xhci_set_hc_event_deq(xhci); |
@@ -761,7 +905,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
761 | for (i = 0; i < MAX_HC_SLOTS; ++i) | 905 | for (i = 0; i < MAX_HC_SLOTS; ++i) |
762 | xhci->devs[i] = 0; | 906 | xhci->devs[i] = 0; |
763 | 907 | ||
908 | if (scratchpad_alloc(xhci, flags)) | ||
909 | goto fail; | ||
910 | |||
764 | return 0; | 911 | return 0; |
912 | |||
765 | fail: | 913 | fail: |
766 | xhci_warn(xhci, "Couldn't initialize memory\n"); | 914 | xhci_warn(xhci, "Couldn't initialize memory\n"); |
767 | xhci_mem_cleanup(xhci); | 915 | xhci_mem_cleanup(xhci); |
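The xhci-mem.c hunks above replace paired 32-bit register writes (cmd_ring[0]/cmd_ring[1], erst_base[0]/[1]) with single 64-bit values, preserving the low reserved/flag bits of the Command Ring Control register. Below is a minimal, self-contained sketch of that read-modify-write pattern; only CMD_RING_RSVD_BITS and the cycle-state OR come from the hunk, while the register and addresses are simulated stand-ins rather than real MMIO.

#include <stdio.h>
#include <stdint.h>

#define CMD_RING_RSVD_BITS 0x3fULL  /* low 6 bits are flags/reserved, per the hunk */

/* Simulated Command Ring Control register; the driver uses xhci_read_64()/
 * xhci_write_64() on MMIO space instead of a plain variable. */
static uint64_t crcr;

static void program_cmd_ring(uint64_t ring_dma, unsigned int cycle_state)
{
	uint64_t val = crcr;

	/* Preserve the reserved low bits, install the 64-byte-aligned segment
	 * address, and set the producer cycle state bit. */
	val = (val & CMD_RING_RSVD_BITS) |
	      (ring_dma & ~CMD_RING_RSVD_BITS) |
	      cycle_state;
	crcr = val;
}

int main(void)
{
	crcr = 0x28;                        /* pretend some flag bits were already set */
	program_cmd_ring(0x12345640ULL, 1); /* hypothetical 64-byte-aligned DMA address */
	printf("CRCR = 0x%llx\n", (unsigned long long)crcr);
	return 0;
}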
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 1462709e26c0..592fe7e623f7 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -117,6 +117,7 @@ static const struct hc_driver xhci_pci_hc_driver = { | |||
117 | .free_dev = xhci_free_dev, | 117 | .free_dev = xhci_free_dev, |
118 | .add_endpoint = xhci_add_endpoint, | 118 | .add_endpoint = xhci_add_endpoint, |
119 | .drop_endpoint = xhci_drop_endpoint, | 119 | .drop_endpoint = xhci_drop_endpoint, |
120 | .endpoint_reset = xhci_endpoint_reset, | ||
120 | .check_bandwidth = xhci_check_bandwidth, | 121 | .check_bandwidth = xhci_check_bandwidth, |
121 | .reset_bandwidth = xhci_reset_bandwidth, | 122 | .reset_bandwidth = xhci_reset_bandwidth, |
122 | .address_device = xhci_address_device, | 123 | .address_device = xhci_address_device, |
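The single xhci-pci.c change wires the new xhci_endpoint_reset() handler into the hc_driver operations table with a designated initializer. A toy sketch of that callback-table pattern follows; this is not the real struct hc_driver, and the names and call site are illustrative only.

#include <stdio.h>

/* Toy stand-in for an hc_driver-style ops table; the real structure and the
 * xhci_endpoint_reset() implementation live in the USB core and xhci driver. */
struct toy_hc_driver {
	void (*add_endpoint)(int ep);
	void (*drop_endpoint)(int ep);
	void (*endpoint_reset)(int ep);   /* newly wired hook */
};

static void toy_endpoint_reset(int ep)
{
	printf("reset endpoint %d\n", ep);
}

static const struct toy_hc_driver toy_driver = {
	.endpoint_reset = toy_endpoint_reset,
	/* other hooks omitted */
};

int main(void)
{
	/* Core code only calls a hook the driver actually filled in. */
	if (toy_driver.endpoint_reset)
		toy_driver.endpoint_reset(3);
	return 0;
}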
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 02d81985c454..aa88a067148b 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -135,6 +135,7 @@ static void next_trb(struct xhci_hcd *xhci, | |||
135 | static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer) | 135 | static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer) |
136 | { | 136 | { |
137 | union xhci_trb *next = ++(ring->dequeue); | 137 | union xhci_trb *next = ++(ring->dequeue); |
138 | unsigned long long addr; | ||
138 | 139 | ||
139 | ring->deq_updates++; | 140 | ring->deq_updates++; |
140 | /* Update the dequeue pointer further if that was a link TRB or we're at | 141 | /* Update the dequeue pointer further if that was a link TRB or we're at |
@@ -152,6 +153,13 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer | |||
152 | ring->dequeue = ring->deq_seg->trbs; | 153 | ring->dequeue = ring->deq_seg->trbs; |
153 | next = ring->dequeue; | 154 | next = ring->dequeue; |
154 | } | 155 | } |
156 | addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue); | ||
157 | if (ring == xhci->event_ring) | ||
158 | xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr); | ||
159 | else if (ring == xhci->cmd_ring) | ||
160 | xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr); | ||
161 | else | ||
162 | xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr); | ||
155 | } | 163 | } |
156 | 164 | ||
157 | /* | 165 | /* |
@@ -171,6 +179,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer | |||
171 | { | 179 | { |
172 | u32 chain; | 180 | u32 chain; |
173 | union xhci_trb *next; | 181 | union xhci_trb *next; |
182 | unsigned long long addr; | ||
174 | 183 | ||
175 | chain = ring->enqueue->generic.field[3] & TRB_CHAIN; | 184 | chain = ring->enqueue->generic.field[3] & TRB_CHAIN; |
176 | next = ++(ring->enqueue); | 185 | next = ++(ring->enqueue); |
@@ -204,6 +213,13 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer | |||
204 | ring->enqueue = ring->enq_seg->trbs; | 213 | ring->enqueue = ring->enq_seg->trbs; |
205 | next = ring->enqueue; | 214 | next = ring->enqueue; |
206 | } | 215 | } |
216 | addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue); | ||
217 | if (ring == xhci->event_ring) | ||
218 | xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr); | ||
219 | else if (ring == xhci->cmd_ring) | ||
220 | xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr); | ||
221 | else | ||
222 | xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr); | ||
207 | } | 223 | } |
208 | 224 | ||
209 | /* | 225 | /* |
@@ -237,7 +253,7 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, | |||
237 | 253 | ||
238 | void xhci_set_hc_event_deq(struct xhci_hcd *xhci) | 254 | void xhci_set_hc_event_deq(struct xhci_hcd *xhci) |
239 | { | 255 | { |
240 | u32 temp; | 256 | u64 temp; |
241 | dma_addr_t deq; | 257 | dma_addr_t deq; |
242 | 258 | ||
243 | deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, | 259 | deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, |
@@ -246,13 +262,15 @@ void xhci_set_hc_event_deq(struct xhci_hcd *xhci) | |||
246 | xhci_warn(xhci, "WARN something wrong with SW event ring " | 262 | xhci_warn(xhci, "WARN something wrong with SW event ring " |
247 | "dequeue ptr.\n"); | 263 | "dequeue ptr.\n"); |
248 | /* Update HC event ring dequeue pointer */ | 264 | /* Update HC event ring dequeue pointer */ |
249 | temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); | 265 | temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); |
250 | temp &= ERST_PTR_MASK; | 266 | temp &= ERST_PTR_MASK; |
251 | if (!in_interrupt()) | 267 | /* Don't clear the EHB bit (which is RW1C) because |
252 | xhci_dbg(xhci, "// Write event ring dequeue pointer\n"); | 268 | * there might be more events to service. |
253 | xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]); | 269 | */ |
254 | xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp, | 270 | temp &= ~ERST_EHB; |
255 | &xhci->ir_set->erst_dequeue[0]); | 271 | xhci_dbg(xhci, "// Write event ring dequeue pointer, preserving EHB bit\n"); |
272 | xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp, | ||
273 | &xhci->ir_set->erst_dequeue); | ||
256 | } | 274 | } |
257 | 275 | ||
258 | /* Ring the host controller doorbell after placing a command on the ring */ | 276 | /* Ring the host controller doorbell after placing a command on the ring */ |
@@ -279,7 +297,8 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci, | |||
279 | /* Don't ring the doorbell for this endpoint if there are pending | 297 | /* Don't ring the doorbell for this endpoint if there are pending |
280 | * cancellations because we don't want to interrupt processing. | 298 | * cancellations because we don't want to interrupt processing. |

281 | */ | 299 | */ |
282 | if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)) { | 300 | if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING) |
301 | && !(ep_ring->state & EP_HALTED)) { | ||
283 | field = xhci_readl(xhci, db_addr) & DB_MASK; | 302 | field = xhci_readl(xhci, db_addr) & DB_MASK; |
284 | xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr); | 303 | xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr); |
285 | /* Flush PCI posted writes - FIXME Matthew Wilcox says this | 304 | /* Flush PCI posted writes - FIXME Matthew Wilcox says this |
@@ -316,12 +335,6 @@ static struct xhci_segment *find_trb_seg( | |||
316 | return cur_seg; | 335 | return cur_seg; |
317 | } | 336 | } |
318 | 337 | ||
319 | struct dequeue_state { | ||
320 | struct xhci_segment *new_deq_seg; | ||
321 | union xhci_trb *new_deq_ptr; | ||
322 | int new_cycle_state; | ||
323 | }; | ||
324 | |||
325 | /* | 338 | /* |
326 | * Move the xHC's endpoint ring dequeue pointer past cur_td. | 339 | * Move the xHC's endpoint ring dequeue pointer past cur_td. |
327 | * Record the new state of the xHC's endpoint ring dequeue segment, | 340 | * Record the new state of the xHC's endpoint ring dequeue segment, |
@@ -336,24 +349,30 @@ struct dequeue_state { | |||
336 | * - Finally we move the dequeue state one TRB further, toggling the cycle bit | 349 | * - Finally we move the dequeue state one TRB further, toggling the cycle bit |
337 | * if we've moved it past a link TRB with the toggle cycle bit set. | 350 | * if we've moved it past a link TRB with the toggle cycle bit set. |
338 | */ | 351 | */ |
339 | static void find_new_dequeue_state(struct xhci_hcd *xhci, | 352 | void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, |
340 | unsigned int slot_id, unsigned int ep_index, | 353 | unsigned int slot_id, unsigned int ep_index, |
341 | struct xhci_td *cur_td, struct dequeue_state *state) | 354 | struct xhci_td *cur_td, struct xhci_dequeue_state *state) |
342 | { | 355 | { |
343 | struct xhci_virt_device *dev = xhci->devs[slot_id]; | 356 | struct xhci_virt_device *dev = xhci->devs[slot_id]; |
344 | struct xhci_ring *ep_ring = dev->ep_rings[ep_index]; | 357 | struct xhci_ring *ep_ring = dev->ep_rings[ep_index]; |
345 | struct xhci_generic_trb *trb; | 358 | struct xhci_generic_trb *trb; |
359 | struct xhci_ep_ctx *ep_ctx; | ||
360 | dma_addr_t addr; | ||
346 | 361 | ||
347 | state->new_cycle_state = 0; | 362 | state->new_cycle_state = 0; |
363 | xhci_dbg(xhci, "Finding segment containing stopped TRB.\n"); | ||
348 | state->new_deq_seg = find_trb_seg(cur_td->start_seg, | 364 | state->new_deq_seg = find_trb_seg(cur_td->start_seg, |
349 | ep_ring->stopped_trb, | 365 | ep_ring->stopped_trb, |
350 | &state->new_cycle_state); | 366 | &state->new_cycle_state); |
351 | if (!state->new_deq_seg) | 367 | if (!state->new_deq_seg) |
352 | BUG(); | 368 | BUG(); |
353 | /* Dig out the cycle state saved by the xHC during the stop ep cmd */ | 369 | /* Dig out the cycle state saved by the xHC during the stop ep cmd */ |
354 | state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0]; | 370 | xhci_dbg(xhci, "Finding endpoint context\n"); |
371 | ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); | ||
372 | state->new_cycle_state = 0x1 & ep_ctx->deq; | ||
355 | 373 | ||
356 | state->new_deq_ptr = cur_td->last_trb; | 374 | state->new_deq_ptr = cur_td->last_trb; |
375 | xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n"); | ||
357 | state->new_deq_seg = find_trb_seg(state->new_deq_seg, | 376 | state->new_deq_seg = find_trb_seg(state->new_deq_seg, |
358 | state->new_deq_ptr, | 377 | state->new_deq_ptr, |
359 | &state->new_cycle_state); | 378 | &state->new_cycle_state); |
@@ -367,6 +386,12 @@ static void find_new_dequeue_state(struct xhci_hcd *xhci, | |||
367 | next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); | 386 | next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); |
368 | 387 | ||
369 | /* Don't update the ring cycle state for the producer (us). */ | 388 | /* Don't update the ring cycle state for the producer (us). */ |
389 | xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n", | ||
390 | state->new_deq_seg); | ||
391 | addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr); | ||
392 | xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n", | ||
393 | (unsigned long long) addr); | ||
394 | xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n"); | ||
370 | ep_ring->dequeue = state->new_deq_ptr; | 395 | ep_ring->dequeue = state->new_deq_ptr; |
371 | ep_ring->deq_seg = state->new_deq_seg; | 396 | ep_ring->deq_seg = state->new_deq_seg; |
372 | } | 397 | } |
@@ -416,6 +441,30 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, | |||
416 | unsigned int ep_index, struct xhci_segment *deq_seg, | 441 | unsigned int ep_index, struct xhci_segment *deq_seg, |
417 | union xhci_trb *deq_ptr, u32 cycle_state); | 442 | union xhci_trb *deq_ptr, u32 cycle_state); |
418 | 443 | ||
444 | void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, | ||
445 | struct xhci_ring *ep_ring, unsigned int slot_id, | ||
446 | unsigned int ep_index, struct xhci_dequeue_state *deq_state) | ||
447 | { | ||
448 | xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " | ||
449 | "new deq ptr = %p (0x%llx dma), new cycle = %u\n", | ||
450 | deq_state->new_deq_seg, | ||
451 | (unsigned long long)deq_state->new_deq_seg->dma, | ||
452 | deq_state->new_deq_ptr, | ||
453 | (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr), | ||
454 | deq_state->new_cycle_state); | ||
455 | queue_set_tr_deq(xhci, slot_id, ep_index, | ||
456 | deq_state->new_deq_seg, | ||
457 | deq_state->new_deq_ptr, | ||
458 | (u32) deq_state->new_cycle_state); | ||
459 | /* Stop the TD queueing code from ringing the doorbell until | ||
460 | * this command completes. The HC won't set the dequeue pointer | ||
461 | * if the ring is running, and ringing the doorbell starts the | ||
462 | * ring running. | ||
463 | */ | ||
464 | ep_ring->state |= SET_DEQ_PENDING; | ||
465 | xhci_ring_cmd_db(xhci); | ||
466 | } | ||
467 | |||
419 | /* | 468 | /* |
420 | * When we get a command completion for a Stop Endpoint Command, we need to | 469 | * When we get a command completion for a Stop Endpoint Command, we need to |
421 | * unlink any cancelled TDs from the ring. There are two ways to do that: | 470 | * unlink any cancelled TDs from the ring. There are two ways to do that: |
@@ -436,7 +485,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
436 | struct xhci_td *cur_td = 0; | 485 | struct xhci_td *cur_td = 0; |
437 | struct xhci_td *last_unlinked_td; | 486 | struct xhci_td *last_unlinked_td; |
438 | 487 | ||
439 | struct dequeue_state deq_state; | 488 | struct xhci_dequeue_state deq_state; |
440 | #ifdef CONFIG_USB_HCD_STAT | 489 | #ifdef CONFIG_USB_HCD_STAT |
441 | ktime_t stop_time = ktime_get(); | 490 | ktime_t stop_time = ktime_get(); |
442 | #endif | 491 | #endif |
@@ -464,7 +513,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
464 | * move the xHC endpoint ring dequeue pointer past this TD. | 513 | * move the xHC endpoint ring dequeue pointer past this TD. |
465 | */ | 514 | */ |
466 | if (cur_td == ep_ring->stopped_td) | 515 | if (cur_td == ep_ring->stopped_td) |
467 | find_new_dequeue_state(xhci, slot_id, ep_index, cur_td, | 516 | xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td, |
468 | &deq_state); | 517 | &deq_state); |
469 | else | 518 | else |
470 | td_to_noop(xhci, ep_ring, cur_td); | 519 | td_to_noop(xhci, ep_ring, cur_td); |
@@ -480,24 +529,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, | |||
480 | 529 | ||
481 | /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ | 530 | /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ |
482 | if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { | 531 | if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { |
483 | xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " | 532 | xhci_queue_new_dequeue_state(xhci, ep_ring, |
484 | "new deq ptr = %p (0x%llx dma), new cycle = %u\n", | 533 | slot_id, ep_index, &deq_state); |
485 | deq_state.new_deq_seg, | ||
486 | (unsigned long long)deq_state.new_deq_seg->dma, | ||
487 | deq_state.new_deq_ptr, | ||
488 | (unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr), | ||
489 | deq_state.new_cycle_state); | ||
490 | queue_set_tr_deq(xhci, slot_id, ep_index, | ||
491 | deq_state.new_deq_seg, | ||
492 | deq_state.new_deq_ptr, | ||
493 | (u32) deq_state.new_cycle_state); | ||
494 | /* Stop the TD queueing code from ringing the doorbell until | ||
495 | * this command completes. The HC won't set the dequeue pointer | ||
496 | * if the ring is running, and ringing the doorbell starts the | ||
497 | * ring running. | ||
498 | */ | ||
499 | ep_ring->state |= SET_DEQ_PENDING; | ||
500 | xhci_ring_cmd_db(xhci); | ||
501 | } else { | 534 | } else { |
502 | /* Otherwise just ring the doorbell to restart the ring */ | 535 | /* Otherwise just ring the doorbell to restart the ring */ |
503 | ring_ep_doorbell(xhci, slot_id, ep_index); | 536 | ring_ep_doorbell(xhci, slot_id, ep_index); |
@@ -551,11 +584,15 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, | |||
551 | unsigned int ep_index; | 584 | unsigned int ep_index; |
552 | struct xhci_ring *ep_ring; | 585 | struct xhci_ring *ep_ring; |
553 | struct xhci_virt_device *dev; | 586 | struct xhci_virt_device *dev; |
587 | struct xhci_ep_ctx *ep_ctx; | ||
588 | struct xhci_slot_ctx *slot_ctx; | ||
554 | 589 | ||
555 | slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); | 590 | slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); |
556 | ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); | 591 | ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); |
557 | dev = xhci->devs[slot_id]; | 592 | dev = xhci->devs[slot_id]; |
558 | ep_ring = dev->ep_rings[ep_index]; | 593 | ep_ring = dev->ep_rings[ep_index]; |
594 | ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); | ||
595 | slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); | ||
559 | 596 | ||
560 | if (GET_COMP_CODE(event->status) != COMP_SUCCESS) { | 597 | if (GET_COMP_CODE(event->status) != COMP_SUCCESS) { |
561 | unsigned int ep_state; | 598 | unsigned int ep_state; |
@@ -569,9 +606,9 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, | |||
569 | case COMP_CTX_STATE: | 606 | case COMP_CTX_STATE: |
570 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due " | 607 | xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due " |
571 | "to incorrect slot or ep state.\n"); | 608 | "to incorrect slot or ep state.\n"); |
572 | ep_state = dev->out_ctx->ep[ep_index].ep_info; | 609 | ep_state = ep_ctx->ep_info; |
573 | ep_state &= EP_STATE_MASK; | 610 | ep_state &= EP_STATE_MASK; |
574 | slot_state = dev->out_ctx->slot.dev_state; | 611 | slot_state = slot_ctx->dev_state; |
575 | slot_state = GET_SLOT_STATE(slot_state); | 612 | slot_state = GET_SLOT_STATE(slot_state); |
576 | xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", | 613 | xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", |
577 | slot_state, ep_state); | 614 | slot_state, ep_state); |
@@ -593,16 +630,33 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, | |||
593 | * cancelling URBs, which might not be an error... | 630 | * cancelling URBs, which might not be an error... |
594 | */ | 631 | */ |
595 | } else { | 632 | } else { |
596 | xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, " | 633 | xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", |
597 | "deq[1] = 0x%x.\n", | 634 | ep_ctx->deq); |
598 | dev->out_ctx->ep[ep_index].deq[0], | ||
599 | dev->out_ctx->ep[ep_index].deq[1]); | ||
600 | } | 635 | } |
601 | 636 | ||
602 | ep_ring->state &= ~SET_DEQ_PENDING; | 637 | ep_ring->state &= ~SET_DEQ_PENDING; |
603 | ring_ep_doorbell(xhci, slot_id, ep_index); | 638 | ring_ep_doorbell(xhci, slot_id, ep_index); |
604 | } | 639 | } |
605 | 640 | ||
641 | static void handle_reset_ep_completion(struct xhci_hcd *xhci, | ||
642 | struct xhci_event_cmd *event, | ||
643 | union xhci_trb *trb) | ||
644 | { | ||
645 | int slot_id; | ||
646 | unsigned int ep_index; | ||
647 | |||
648 | slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); | ||
649 | ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); | ||
650 | /* This command will only fail if the endpoint wasn't halted, | ||
651 | * but we don't care. | ||
652 | */ | ||
653 | xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n", | ||
654 | (unsigned int) GET_COMP_CODE(event->status)); | ||
655 | |||
656 | /* Clear our internal halted state and restart the ring */ | ||
657 | xhci->devs[slot_id]->ep_rings[ep_index]->state &= ~EP_HALTED; | ||
658 | ring_ep_doorbell(xhci, slot_id, ep_index); | ||
659 | } | ||
606 | 660 | ||
607 | static void handle_cmd_completion(struct xhci_hcd *xhci, | 661 | static void handle_cmd_completion(struct xhci_hcd *xhci, |
608 | struct xhci_event_cmd *event) | 662 | struct xhci_event_cmd *event) |
@@ -611,7 +665,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
611 | u64 cmd_dma; | 665 | u64 cmd_dma; |
612 | dma_addr_t cmd_dequeue_dma; | 666 | dma_addr_t cmd_dequeue_dma; |
613 | 667 | ||
614 | cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0]; | 668 | cmd_dma = event->cmd_trb; |
615 | cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, | 669 | cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, |
616 | xhci->cmd_ring->dequeue); | 670 | xhci->cmd_ring->dequeue); |
617 | /* Is the command ring deq ptr out of sync with the deq seg ptr? */ | 671 | /* Is the command ring deq ptr out of sync with the deq seg ptr? */ |
@@ -653,6 +707,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
653 | case TRB_TYPE(TRB_CMD_NOOP): | 707 | case TRB_TYPE(TRB_CMD_NOOP): |
654 | ++xhci->noops_handled; | 708 | ++xhci->noops_handled; |
655 | break; | 709 | break; |
710 | case TRB_TYPE(TRB_RESET_EP): | ||
711 | handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue); | ||
712 | break; | ||
656 | default: | 713 | default: |
657 | /* Skip over unknown commands on the event ring */ | 714 | /* Skip over unknown commands on the event ring */ |
658 | xhci->error_bitmask |= 1 << 6; | 715 | xhci->error_bitmask |= 1 << 6; |
@@ -756,7 +813,9 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
756 | union xhci_trb *event_trb; | 813 | union xhci_trb *event_trb; |
757 | struct urb *urb = 0; | 814 | struct urb *urb = 0; |
758 | int status = -EINPROGRESS; | 815 | int status = -EINPROGRESS; |
816 | struct xhci_ep_ctx *ep_ctx; | ||
759 | 817 | ||
818 | xhci_dbg(xhci, "In %s\n", __func__); | ||
760 | xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)]; | 819 | xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)]; |
761 | if (!xdev) { | 820 | if (!xdev) { |
762 | xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); | 821 | xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); |
@@ -765,17 +824,17 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
765 | 824 | ||
766 | /* Endpoint ID is 1 based, our index is zero based */ | 825 | /* Endpoint ID is 1 based, our index is zero based */ |
767 | ep_index = TRB_TO_EP_ID(event->flags) - 1; | 826 | ep_index = TRB_TO_EP_ID(event->flags) - 1; |
827 | xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index); | ||
768 | ep_ring = xdev->ep_rings[ep_index]; | 828 | ep_ring = xdev->ep_rings[ep_index]; |
769 | if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { | 829 | ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); |
830 | if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { | ||
770 | xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n"); | 831 | xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n"); |
771 | return -ENODEV; | 832 | return -ENODEV; |
772 | } | 833 | } |
773 | 834 | ||
774 | event_dma = event->buffer[0]; | 835 | event_dma = event->buffer; |
775 | if (event->buffer[1] != 0) | ||
776 | xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n"); | ||
777 | |||
778 | /* This TRB should be in the TD at the head of this ring's TD list */ | 836 | /* This TRB should be in the TD at the head of this ring's TD list */ |
837 | xhci_dbg(xhci, "%s - checking for list empty\n", __func__); | ||
779 | if (list_empty(&ep_ring->td_list)) { | 838 | if (list_empty(&ep_ring->td_list)) { |
780 | xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", | 839 | xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", |
781 | TRB_TO_SLOT_ID(event->flags), ep_index); | 840 | TRB_TO_SLOT_ID(event->flags), ep_index); |
@@ -785,11 +844,14 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
785 | urb = NULL; | 844 | urb = NULL; |
786 | goto cleanup; | 845 | goto cleanup; |
787 | } | 846 | } |
847 | xhci_dbg(xhci, "%s - getting list entry\n", __func__); | ||
788 | td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); | 848 | td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); |
789 | 849 | ||
790 | /* Is this a TRB in the currently executing TD? */ | 850 | /* Is this a TRB in the currently executing TD? */ |
851 | xhci_dbg(xhci, "%s - looking for TD\n", __func__); | ||
791 | event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, | 852 | event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, |
792 | td->last_trb, event_dma); | 853 | td->last_trb, event_dma); |
854 | xhci_dbg(xhci, "%s - found event_seg = %p\n", __func__, event_seg); | ||
793 | if (!event_seg) { | 855 | if (!event_seg) { |
794 | /* HC is busted, give up! */ | 856 | /* HC is busted, give up! */ |
795 | xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n"); | 857 | xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n"); |
@@ -798,10 +860,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
798 | event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)]; | 860 | event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)]; |
799 | xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", | 861 | xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", |
800 | (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10); | 862 | (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10); |
801 | xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n", | 863 | xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n", |
802 | (unsigned int) event->buffer[0]); | 864 | lower_32_bits(event->buffer)); |
803 | xhci_dbg(xhci, "Offset 0x04 (buffer[0]) = 0x%x\n", | 865 | xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n", |
804 | (unsigned int) event->buffer[1]); | 866 | upper_32_bits(event->buffer)); |
805 | xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n", | 867 | xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n", |
806 | (unsigned int) event->transfer_len); | 868 | (unsigned int) event->transfer_len); |
807 | xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n", | 869 | xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n", |
@@ -823,6 +885,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
823 | break; | 885 | break; |
824 | case COMP_STALL: | 886 | case COMP_STALL: |
825 | xhci_warn(xhci, "WARN: Stalled endpoint\n"); | 887 | xhci_warn(xhci, "WARN: Stalled endpoint\n"); |
888 | ep_ring->state |= EP_HALTED; | ||
826 | status = -EPIPE; | 889 | status = -EPIPE; |
827 | break; | 890 | break; |
828 | case COMP_TRB_ERR: | 891 | case COMP_TRB_ERR: |
@@ -833,6 +896,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
833 | xhci_warn(xhci, "WARN: transfer error on endpoint\n"); | 896 | xhci_warn(xhci, "WARN: transfer error on endpoint\n"); |
834 | status = -EPROTO; | 897 | status = -EPROTO; |
835 | break; | 898 | break; |
899 | case COMP_BABBLE: | ||
900 | xhci_warn(xhci, "WARN: babble error on endpoint\n"); | ||
901 | status = -EOVERFLOW; | ||
902 | break; | ||
836 | case COMP_DB_ERR: | 903 | case COMP_DB_ERR: |
837 | xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n"); | 904 | xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n"); |
838 | status = -ENOSR; | 905 | status = -ENOSR; |
@@ -874,15 +941,26 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
874 | if (event_trb != ep_ring->dequeue) { | 941 | if (event_trb != ep_ring->dequeue) { |
875 | /* The event was for the status stage */ | 942 | /* The event was for the status stage */ |
876 | if (event_trb == td->last_trb) { | 943 | if (event_trb == td->last_trb) { |
877 | td->urb->actual_length = | 944 | if (td->urb->actual_length != 0) { |
878 | td->urb->transfer_buffer_length; | 945 | /* Don't overwrite a previously set error code */ |
946 | if (status == -EINPROGRESS || status == 0) | ||
947 | /* Did we already see a short data stage? */ | ||
948 | status = -EREMOTEIO; | ||
949 | } else { | ||
950 | td->urb->actual_length = | ||
951 | td->urb->transfer_buffer_length; | ||
952 | } | ||
879 | } else { | 953 | } else { |
880 | /* Maybe the event was for the data stage? */ | 954 | /* Maybe the event was for the data stage? */ |
881 | if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) | 955 | if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) { |
882 | /* We didn't stop on a link TRB in the middle */ | 956 | /* We didn't stop on a link TRB in the middle */ |
883 | td->urb->actual_length = | 957 | td->urb->actual_length = |
884 | td->urb->transfer_buffer_length - | 958 | td->urb->transfer_buffer_length - |
885 | TRB_LEN(event->transfer_len); | 959 | TRB_LEN(event->transfer_len); |
960 | xhci_dbg(xhci, "Waiting for status stage event\n"); | ||
961 | urb = NULL; | ||
962 | goto cleanup; | ||
963 | } | ||
886 | } | 964 | } |
887 | } | 965 | } |
888 | } else { | 966 | } else { |
@@ -929,16 +1007,20 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
929 | TRB_LEN(event->transfer_len)); | 1007 | TRB_LEN(event->transfer_len)); |
930 | td->urb->actual_length = 0; | 1008 | td->urb->actual_length = 0; |
931 | } | 1009 | } |
932 | if (td->urb->transfer_flags & URB_SHORT_NOT_OK) | 1010 | /* Don't overwrite a previously set error code */ |
933 | status = -EREMOTEIO; | 1011 | if (status == -EINPROGRESS) { |
934 | else | 1012 | if (td->urb->transfer_flags & URB_SHORT_NOT_OK) |
935 | status = 0; | 1013 | status = -EREMOTEIO; |
1014 | else | ||
1015 | status = 0; | ||
1016 | } | ||
936 | } else { | 1017 | } else { |
937 | td->urb->actual_length = td->urb->transfer_buffer_length; | 1018 | td->urb->actual_length = td->urb->transfer_buffer_length; |
938 | /* Ignore a short packet completion if the | 1019 | /* Ignore a short packet completion if the |
939 | * untransferred length was zero. | 1020 | * untransferred length was zero. |
940 | */ | 1021 | */ |
941 | status = 0; | 1022 | if (status == -EREMOTEIO) |
1023 | status = 0; | ||
942 | } | 1024 | } |
943 | } else { | 1025 | } else { |
944 | /* Slow path - walk the list, starting from the dequeue | 1026 | /* Slow path - walk the list, starting from the dequeue |
@@ -965,19 +1047,30 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
965 | TRB_LEN(event->transfer_len); | 1047 | TRB_LEN(event->transfer_len); |
966 | } | 1048 | } |
967 | } | 1049 | } |
968 | /* The Endpoint Stop Command completion will take care of | ||
969 | * any stopped TDs. A stopped TD may be restarted, so don't update the | ||
970 | * ring dequeue pointer or take this TD off any lists yet. | ||
971 | */ | ||
972 | if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL || | 1050 | if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL || |
973 | GET_COMP_CODE(event->transfer_len) == COMP_STOP) { | 1051 | GET_COMP_CODE(event->transfer_len) == COMP_STOP) { |
1052 | /* The Endpoint Stop Command completion will take care of any | ||
1053 | * stopped TDs. A stopped TD may be restarted, so don't update | ||
1054 | * the ring dequeue pointer or take this TD off any lists yet. | ||
1055 | */ | ||
974 | ep_ring->stopped_td = td; | 1056 | ep_ring->stopped_td = td; |
975 | ep_ring->stopped_trb = event_trb; | 1057 | ep_ring->stopped_trb = event_trb; |
976 | } else { | 1058 | } else { |
977 | /* Update ring dequeue pointer */ | 1059 | if (GET_COMP_CODE(event->transfer_len) == COMP_STALL) { |
978 | while (ep_ring->dequeue != td->last_trb) | 1060 | /* The transfer is completed from the driver's |
1061 | * perspective, but we need to issue a set dequeue | ||
1062 | * command for this stalled endpoint to move the dequeue | ||
1063 | * pointer past the TD. We can't do that here because | ||
1064 | * the halt condition must be cleared first. | ||
1065 | */ | ||
1066 | ep_ring->stopped_td = td; | ||
1067 | ep_ring->stopped_trb = event_trb; | ||
1068 | } else { | ||
1069 | /* Update ring dequeue pointer */ | ||
1070 | while (ep_ring->dequeue != td->last_trb) | ||
1071 | inc_deq(xhci, ep_ring, false); | ||
979 | inc_deq(xhci, ep_ring, false); | 1072 | inc_deq(xhci, ep_ring, false); |
980 | inc_deq(xhci, ep_ring, false); | 1073 | } |
981 | 1074 | ||
982 | /* Clean up the endpoint's TD list */ | 1075 | /* Clean up the endpoint's TD list */ |
983 | urb = td->urb; | 1076 | urb = td->urb; |
@@ -987,7 +1080,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
987 | list_del(&td->cancelled_td_list); | 1080 | list_del(&td->cancelled_td_list); |
988 | ep_ring->cancels_pending--; | 1081 | ep_ring->cancels_pending--; |
989 | } | 1082 | } |
990 | kfree(td); | 1083 | /* Leave the TD around for the reset endpoint function to use */ |
1084 | if (GET_COMP_CODE(event->transfer_len) != COMP_STALL) { | ||
1085 | kfree(td); | ||
1086 | } | ||
991 | urb->hcpriv = NULL; | 1087 | urb->hcpriv = NULL; |
992 | } | 1088 | } |
993 | cleanup: | 1089 | cleanup: |
@@ -997,6 +1093,8 @@ cleanup: | |||
997 | /* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */ | 1093 | /* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */ |
998 | if (urb) { | 1094 | if (urb) { |
999 | usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb); | 1095 | usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb); |
1096 | xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n", | ||
1097 | urb, td->urb->actual_length, status); | ||
1000 | spin_unlock(&xhci->lock); | 1098 | spin_unlock(&xhci->lock); |
1001 | usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status); | 1099 | usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status); |
1002 | spin_lock(&xhci->lock); | 1100 | spin_lock(&xhci->lock); |
@@ -1014,6 +1112,7 @@ void xhci_handle_event(struct xhci_hcd *xhci) | |||
1014 | int update_ptrs = 1; | 1112 | int update_ptrs = 1; |
1015 | int ret; | 1113 | int ret; |
1016 | 1114 | ||
1115 | xhci_dbg(xhci, "In %s\n", __func__); | ||
1017 | if (!xhci->event_ring || !xhci->event_ring->dequeue) { | 1116 | if (!xhci->event_ring || !xhci->event_ring->dequeue) { |
1018 | xhci->error_bitmask |= 1 << 1; | 1117 | xhci->error_bitmask |= 1 << 1; |
1019 | return; | 1118 | return; |
@@ -1026,18 +1125,25 @@ void xhci_handle_event(struct xhci_hcd *xhci) | |||
1026 | xhci->error_bitmask |= 1 << 2; | 1125 | xhci->error_bitmask |= 1 << 2; |
1027 | return; | 1126 | return; |
1028 | } | 1127 | } |
1128 | xhci_dbg(xhci, "%s - OS owns TRB\n", __func__); | ||
1029 | 1129 | ||
1030 | /* FIXME: Handle more event types. */ | 1130 | /* FIXME: Handle more event types. */ |
1031 | switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) { | 1131 | switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) { |
1032 | case TRB_TYPE(TRB_COMPLETION): | 1132 | case TRB_TYPE(TRB_COMPLETION): |
1133 | xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__); | ||
1033 | handle_cmd_completion(xhci, &event->event_cmd); | 1134 | handle_cmd_completion(xhci, &event->event_cmd); |
1135 | xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__); | ||
1034 | break; | 1136 | break; |
1035 | case TRB_TYPE(TRB_PORT_STATUS): | 1137 | case TRB_TYPE(TRB_PORT_STATUS): |
1138 | xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__); | ||
1036 | handle_port_status(xhci, event); | 1139 | handle_port_status(xhci, event); |
1140 | xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__); | ||
1037 | update_ptrs = 0; | 1141 | update_ptrs = 0; |
1038 | break; | 1142 | break; |
1039 | case TRB_TYPE(TRB_TRANSFER): | 1143 | case TRB_TYPE(TRB_TRANSFER): |
1144 | xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__); | ||
1040 | ret = handle_tx_event(xhci, &event->trans_event); | 1145 | ret = handle_tx_event(xhci, &event->trans_event); |
1146 | xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__); | ||
1041 | if (ret < 0) | 1147 | if (ret < 0) |
1042 | xhci->error_bitmask |= 1 << 9; | 1148 | xhci->error_bitmask |= 1 << 9; |
1043 | else | 1149 | else |
@@ -1093,13 +1199,13 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, | |||
1093 | */ | 1199 | */ |
1094 | xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); | 1200 | xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); |
1095 | return -ENOENT; | 1201 | return -ENOENT; |
1096 | case EP_STATE_HALTED: | ||
1097 | case EP_STATE_ERROR: | 1202 | case EP_STATE_ERROR: |
1098 | xhci_warn(xhci, "WARN waiting for halt or error on ep " | 1203 | xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n"); |
1099 | "to be cleared\n"); | ||
1100 | /* FIXME event handling code for error needs to clear it */ | 1204 | /* FIXME event handling code for error needs to clear it */ |
1101 | /* XXX not sure if this should be -ENOENT or not */ | 1205 | /* XXX not sure if this should be -ENOENT or not */ |
1102 | return -EINVAL; | 1206 | return -EINVAL; |
1207 | case EP_STATE_HALTED: | ||
1208 | xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n"); | ||
1103 | case EP_STATE_STOPPED: | 1209 | case EP_STATE_STOPPED: |
1104 | case EP_STATE_RUNNING: | 1210 | case EP_STATE_RUNNING: |
1105 | break; | 1211 | break; |
@@ -1128,9 +1234,9 @@ static int prepare_transfer(struct xhci_hcd *xhci, | |||
1128 | gfp_t mem_flags) | 1234 | gfp_t mem_flags) |
1129 | { | 1235 | { |
1130 | int ret; | 1236 | int ret; |
1131 | 1237 | struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); | |
1132 | ret = prepare_ring(xhci, xdev->ep_rings[ep_index], | 1238 | ret = prepare_ring(xhci, xdev->ep_rings[ep_index], |
1133 | xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK, | 1239 | ep_ctx->ep_info & EP_STATE_MASK, |
1134 | num_trbs, mem_flags); | 1240 | num_trbs, mem_flags); |
1135 | if (ret) | 1241 | if (ret) |
1136 | return ret; | 1242 | return ret; |
@@ -1285,6 +1391,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
1285 | /* Queue the first TRB, even if it's zero-length */ | 1391 | /* Queue the first TRB, even if it's zero-length */ |
1286 | do { | 1392 | do { |
1287 | u32 field = 0; | 1393 | u32 field = 0; |
1394 | u32 length_field = 0; | ||
1288 | 1395 | ||
1289 | /* Don't change the cycle bit of the first TRB until later */ | 1396 | /* Don't change the cycle bit of the first TRB until later */ |
1290 | if (first_trb) | 1397 | if (first_trb) |
@@ -1314,10 +1421,13 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
1314 | (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), | 1421 | (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), |
1315 | (unsigned int) addr + trb_buff_len); | 1422 | (unsigned int) addr + trb_buff_len); |
1316 | } | 1423 | } |
1424 | length_field = TRB_LEN(trb_buff_len) | | ||
1425 | TD_REMAINDER(urb->transfer_buffer_length - running_total) | | ||
1426 | TRB_INTR_TARGET(0); | ||
1317 | queue_trb(xhci, ep_ring, false, | 1427 | queue_trb(xhci, ep_ring, false, |
1318 | (u32) addr, | 1428 | lower_32_bits(addr), |
1319 | (u32) ((u64) addr >> 32), | 1429 | upper_32_bits(addr), |
1320 | TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0), | 1430 | length_field, |
1321 | /* We always want to know if the TRB was short, | 1431 | /* We always want to know if the TRB was short, |
1322 | * or we won't get an event when it completes. | 1432 | * or we won't get an event when it completes. |
1323 | * (Unless we use event data TRBs, which are a | 1433 | * (Unless we use event data TRBs, which are a |
@@ -1365,7 +1475,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
1365 | struct xhci_generic_trb *start_trb; | 1475 | struct xhci_generic_trb *start_trb; |
1366 | bool first_trb; | 1476 | bool first_trb; |
1367 | int start_cycle; | 1477 | int start_cycle; |
1368 | u32 field; | 1478 | u32 field, length_field; |
1369 | 1479 | ||
1370 | int running_total, trb_buff_len, ret; | 1480 | int running_total, trb_buff_len, ret; |
1371 | u64 addr; | 1481 | u64 addr; |
@@ -1443,10 +1553,13 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
1443 | td->last_trb = ep_ring->enqueue; | 1553 | td->last_trb = ep_ring->enqueue; |
1444 | field |= TRB_IOC; | 1554 | field |= TRB_IOC; |
1445 | } | 1555 | } |
1556 | length_field = TRB_LEN(trb_buff_len) | | ||
1557 | TD_REMAINDER(urb->transfer_buffer_length - running_total) | | ||
1558 | TRB_INTR_TARGET(0); | ||
1446 | queue_trb(xhci, ep_ring, false, | 1559 | queue_trb(xhci, ep_ring, false, |
1447 | (u32) addr, | 1560 | lower_32_bits(addr), |
1448 | (u32) ((u64) addr >> 32), | 1561 | upper_32_bits(addr), |
1449 | TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0), | 1562 | length_field, |
1450 | /* We always want to know if the TRB was short, | 1563 | /* We always want to know if the TRB was short, |
1451 | * or we won't get an event when it completes. | 1564 | * or we won't get an event when it completes. |
1452 | * (Unless we use event data TRBs, which are a | 1565 | * (Unless we use event data TRBs, which are a |
@@ -1478,7 +1591,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
1478 | struct usb_ctrlrequest *setup; | 1591 | struct usb_ctrlrequest *setup; |
1479 | struct xhci_generic_trb *start_trb; | 1592 | struct xhci_generic_trb *start_trb; |
1480 | int start_cycle; | 1593 | int start_cycle; |
1481 | u32 field; | 1594 | u32 field, length_field; |
1482 | struct xhci_td *td; | 1595 | struct xhci_td *td; |
1483 | 1596 | ||
1484 | ep_ring = xhci->devs[slot_id]->ep_rings[ep_index]; | 1597 | ep_ring = xhci->devs[slot_id]->ep_rings[ep_index]; |
@@ -1528,13 +1641,16 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
1528 | 1641 | ||
1529 | /* If there's data, queue data TRBs */ | 1642 | /* If there's data, queue data TRBs */ |
1530 | field = 0; | 1643 | field = 0; |
1644 | length_field = TRB_LEN(urb->transfer_buffer_length) | | ||
1645 | TD_REMAINDER(urb->transfer_buffer_length) | | ||
1646 | TRB_INTR_TARGET(0); | ||
1531 | if (urb->transfer_buffer_length > 0) { | 1647 | if (urb->transfer_buffer_length > 0) { |
1532 | if (setup->bRequestType & USB_DIR_IN) | 1648 | if (setup->bRequestType & USB_DIR_IN) |
1533 | field |= TRB_DIR_IN; | 1649 | field |= TRB_DIR_IN; |
1534 | queue_trb(xhci, ep_ring, false, | 1650 | queue_trb(xhci, ep_ring, false, |
1535 | lower_32_bits(urb->transfer_dma), | 1651 | lower_32_bits(urb->transfer_dma), |
1536 | upper_32_bits(urb->transfer_dma), | 1652 | upper_32_bits(urb->transfer_dma), |
1537 | TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0), | 1653 | length_field, |
1538 | /* Event on short tx */ | 1654 | /* Event on short tx */ |
1539 | field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state); | 1655 | field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state); |
1540 | } | 1656 | } |
@@ -1603,7 +1719,8 @@ int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id) | |||
1603 | int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, | 1719 | int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, |
1604 | u32 slot_id) | 1720 | u32 slot_id) |
1605 | { | 1721 | { |
1606 | return queue_command(xhci, in_ctx_ptr, 0, 0, | 1722 | return queue_command(xhci, lower_32_bits(in_ctx_ptr), |
1723 | upper_32_bits(in_ctx_ptr), 0, | ||
1607 | TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)); | 1724 | TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)); |
1608 | } | 1725 | } |
1609 | 1726 | ||
@@ -1611,7 +1728,8 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, | |||
1611 | int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, | 1728 | int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, |
1612 | u32 slot_id) | 1729 | u32 slot_id) |
1613 | { | 1730 | { |
1614 | return queue_command(xhci, in_ctx_ptr, 0, 0, | 1731 | return queue_command(xhci, lower_32_bits(in_ctx_ptr), |
1732 | upper_32_bits(in_ctx_ptr), 0, | ||
1615 | TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id)); | 1733 | TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id)); |
1616 | } | 1734 | } |
1617 | 1735 | ||
@@ -1639,10 +1757,23 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, | |||
1639 | u32 type = TRB_TYPE(TRB_SET_DEQ); | 1757 | u32 type = TRB_TYPE(TRB_SET_DEQ); |
1640 | 1758 | ||
1641 | addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); | 1759 | addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); |
1642 | if (addr == 0) | 1760 | if (addr == 0) { |
1643 | xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); | 1761 | xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); |
1644 | xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n", | 1762 | xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n", |
1645 | deq_seg, deq_ptr); | 1763 | deq_seg, deq_ptr); |
1646 | return queue_command(xhci, (u32) addr | cycle_state, 0, 0, | 1764 | return 0; |
1765 | } | ||
1766 | return queue_command(xhci, lower_32_bits(addr) | cycle_state, | ||
1767 | upper_32_bits(addr), 0, | ||
1647 | trb_slot_id | trb_ep_index | type); | 1768 | trb_slot_id | trb_ep_index | type); |
1648 | } | 1769 | } |
1770 | |||
1771 | int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, | ||
1772 | unsigned int ep_index) | ||
1773 | { | ||
1774 | u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); | ||
1775 | u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); | ||
1776 | u32 type = TRB_TYPE(TRB_RESET_EP); | ||
1777 | |||
1778 | return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type); | ||
1779 | } | ||
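Throughout the xhci-ring.c hunks, open-coded (u32) addr / (u32)((u64) addr >> 32) pairs are replaced by lower_32_bits()/upper_32_bits() wherever a 64-bit DMA address is split across two TRB dwords, low dword first. Here is a self-contained sketch of that split; struct generic_trb and the address are illustrative stand-ins, and the macro bodies mirror the kernel's definitions.

#include <stdio.h>
#include <stdint.h>

/* Same idea as the helpers in <linux/kernel.h>; the double 16-bit shift avoids
 * undefined behaviour when the argument happens to be a 32-bit type. */
#define lower_32_bits(n) ((uint32_t)(n))
#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))

/* A TRB is four dwords; a 64-bit buffer pointer occupies the first two. */
struct generic_trb {
	uint32_t field[4];
};

static void set_buffer_pointer(struct generic_trb *trb, uint64_t dma)
{
	trb->field[0] = lower_32_bits(dma);   /* low dword first */
	trb->field[1] = upper_32_bits(dma);
}

int main(void)
{
	struct generic_trb trb = { { 0 } };
	uint64_t dma = 0x123456789abcdef0ULL;  /* hypothetical DMA address */

	set_buffer_pointer(&trb, dma);
	printf("lo=0x%08x hi=0x%08x\n", trb.field[0], trb.field[1]);
	return 0;
}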
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 8936eeb5588b..d31d32206ba3 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -25,6 +25,7 @@ | |||
25 | 25 | ||
26 | #include <linux/usb.h> | 26 | #include <linux/usb.h> |
27 | #include <linux/timer.h> | 27 | #include <linux/timer.h> |
28 | #include <linux/kernel.h> | ||
28 | 29 | ||
29 | #include "../core/hcd.h" | 30 | #include "../core/hcd.h" |
30 | /* Code sharing between pci-quirks and xhci hcd */ | 31 | /* Code sharing between pci-quirks and xhci hcd */ |
@@ -42,14 +43,6 @@ | |||
42 | * xHCI register interface. | 43 | * xHCI register interface. |
43 | * This corresponds to the eXtensible Host Controller Interface (xHCI) | 44 | * This corresponds to the eXtensible Host Controller Interface (xHCI) |
44 | * Revision 0.95 specification | 45 | * Revision 0.95 specification |
45 | * | ||
46 | * Registers should always be accessed with double word or quad word accesses. | ||
47 | * | ||
48 | * Some xHCI implementations may support 64-bit address pointers. Registers | ||
49 | * with 64-bit address pointers should be written to with dword accesses by | ||
50 | * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second. | ||
51 | * xHCI implementations that do not support 64-bit address pointers will ignore | ||
52 | * the high dword, and write order is irrelevant. | ||
53 | */ | 46 | */ |
54 | 47 | ||
55 | /** | 48 | /** |
@@ -96,6 +89,7 @@ struct xhci_cap_regs { | |||
96 | #define HCS_ERST_MAX(p) (((p) >> 4) & 0xf) | 89 | #define HCS_ERST_MAX(p) (((p) >> 4) & 0xf) |
97 | /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */ | 90 | /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */ |
98 | /* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */ | 91 | /* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */ |
92 | #define HCS_MAX_SCRATCHPAD(p) (((p) >> 27) & 0x1f) | ||
99 | 93 | ||
100 | /* HCSPARAMS3 - hcs_params3 - bitmasks */ | 94 | /* HCSPARAMS3 - hcs_params3 - bitmasks */ |
101 | /* bits 0:7, Max U1 to U0 latency for the roothub ports */ | 95 | /* bits 0:7, Max U1 to U0 latency for the roothub ports */ |
@@ -166,10 +160,10 @@ struct xhci_op_regs { | |||
166 | u32 reserved1; | 160 | u32 reserved1; |
167 | u32 reserved2; | 161 | u32 reserved2; |
168 | u32 dev_notification; | 162 | u32 dev_notification; |
169 | u32 cmd_ring[2]; | 163 | u64 cmd_ring; |
170 | /* rsvd: offset 0x20-2F */ | 164 | /* rsvd: offset 0x20-2F */ |
171 | u32 reserved3[4]; | 165 | u32 reserved3[4]; |
172 | u32 dcbaa_ptr[2]; | 166 | u64 dcbaa_ptr; |
173 | u32 config_reg; | 167 | u32 config_reg; |
174 | /* rsvd: offset 0x3C-3FF */ | 168 | /* rsvd: offset 0x3C-3FF */ |
175 | u32 reserved4[241]; | 169 | u32 reserved4[241]; |
@@ -254,7 +248,7 @@ struct xhci_op_regs { | |||
254 | #define CMD_RING_RUNNING (1 << 3) | 248 | #define CMD_RING_RUNNING (1 << 3) |
255 | /* bits 4:5 reserved and should be preserved */ | 249 | /* bits 4:5 reserved and should be preserved */ |
256 | /* Command Ring pointer - bit mask for the lower 32 bits. */ | 250 | /* Command Ring pointer - bit mask for the lower 32 bits. */ |
257 | #define CMD_RING_ADDR_MASK (0xffffffc0) | 251 | #define CMD_RING_RSVD_BITS (0x3f) |
258 | 252 | ||
259 | /* CONFIG - Configure Register - config_reg bitmasks */ | 253 | /* CONFIG - Configure Register - config_reg bitmasks */ |
260 | /* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */ | 254 | /* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */ |
@@ -382,8 +376,8 @@ struct xhci_intr_reg { | |||
382 | u32 irq_control; | 376 | u32 irq_control; |
383 | u32 erst_size; | 377 | u32 erst_size; |
384 | u32 rsvd; | 378 | u32 rsvd; |
385 | u32 erst_base[2]; | 379 | u64 erst_base; |
386 | u32 erst_dequeue[2]; | 380 | u64 erst_dequeue; |
387 | }; | 381 | }; |
388 | 382 | ||
389 | /* irq_pending bitmasks */ | 383 | /* irq_pending bitmasks */ |
@@ -453,6 +447,27 @@ struct xhci_doorbell_array { | |||
453 | 447 | ||
454 | 448 | ||
455 | /** | 449 | /** |
450 | * struct xhci_container_ctx | ||
451 | * @type: Type of context. Used to calculate offsets to contained contexts. | ||
452 | * @size: Size of the context data | ||
453 | * @bytes: The raw context data given to HW | ||
454 | * @dma: dma address of the bytes | ||
455 | * | ||
456 | * Represents either a Device or Input context. Holds a pointer to the raw | ||
457 | * memory used for the context (bytes) and dma address of it (dma). | ||
458 | */ | ||
459 | struct xhci_container_ctx { | ||
460 | unsigned type; | ||
461 | #define XHCI_CTX_TYPE_DEVICE 0x1 | ||
462 | #define XHCI_CTX_TYPE_INPUT 0x2 | ||
463 | |||
464 | int size; | ||
465 | |||
466 | u8 *bytes; | ||
467 | dma_addr_t dma; | ||
468 | }; | ||
469 | |||
470 | /** | ||
456 | * struct xhci_slot_ctx | 471 | * struct xhci_slot_ctx |
457 | * @dev_info: Route string, device speed, hub info, and last valid endpoint | 472 | * @dev_info: Route string, device speed, hub info, and last valid endpoint |
458 | * @dev_info2: Max exit latency for device number, root hub port number | 473 | * @dev_info2: Max exit latency for device number, root hub port number |
@@ -538,7 +553,7 @@ struct xhci_slot_ctx { | |||
538 | struct xhci_ep_ctx { | 553 | struct xhci_ep_ctx { |
539 | u32 ep_info; | 554 | u32 ep_info; |
540 | u32 ep_info2; | 555 | u32 ep_info2; |
541 | u32 deq[2]; | 556 | u64 deq; |
542 | u32 tx_info; | 557 | u32 tx_info; |
543 | /* offset 0x14 - 0x1f reserved for HC internal use */ | 558 | /* offset 0x14 - 0x1f reserved for HC internal use */ |
544 | u32 reserved[3]; | 559 | u32 reserved[3]; |
@@ -589,18 +604,16 @@ struct xhci_ep_ctx { | |||
589 | 604 | ||
590 | 605 | ||
591 | /** | 606 | /** |
592 | * struct xhci_device_control | 607 | * struct xhci_input_control_context |
593 | * Input/Output context; see section 6.2.5. | 608 | * Input control context; see section 6.2.5. |
594 | * | 609 | * |
595 | * @drop_context: set the bit of the endpoint context you want to disable | 610 | * @drop_context: set the bit of the endpoint context you want to disable |
596 | * @add_context: set the bit of the endpoint context you want to enable | 611 | * @add_context: set the bit of the endpoint context you want to enable |
597 | */ | 612 | */ |
598 | struct xhci_device_control { | 613 | struct xhci_input_control_ctx { |
599 | u32 drop_flags; | 614 | u32 drop_flags; |
600 | u32 add_flags; | 615 | u32 add_flags; |
601 | u32 rsvd[6]; | 616 | u32 rsvd2[6]; |
602 | struct xhci_slot_ctx slot; | ||
603 | struct xhci_ep_ctx ep[31]; | ||
604 | }; | 617 | }; |
605 | 618 | ||
606 | /* drop context bitmasks */ | 619 | /* drop context bitmasks */ |
@@ -608,7 +621,6 @@ struct xhci_device_control { | |||
608 | /* add context bitmasks */ | 621 | /* add context bitmasks */ |
609 | #define ADD_EP(x) (0x1 << x) | 622 | #define ADD_EP(x) (0x1 << x) |
610 | 623 | ||
611 | |||
612 | struct xhci_virt_device { | 624 | struct xhci_virt_device { |
613 | /* | 625 | /* |
614 | * Commands to the hardware are passed an "input context" that | 626 | * Commands to the hardware are passed an "input context" that |
@@ -618,11 +630,10 @@ struct xhci_virt_device { | |||
618 | * track of input and output contexts separately because | 630 | * track of input and output contexts separately because |
619 | * these commands might fail and we don't trust the hardware. | 631 | * these commands might fail and we don't trust the hardware. |
620 | */ | 632 | */ |
621 | struct xhci_device_control *out_ctx; | 633 | struct xhci_container_ctx *out_ctx; |
622 | dma_addr_t out_ctx_dma; | ||
623 | /* Used for addressing devices and configuration changes */ | 634 | /* Used for addressing devices and configuration changes */ |
624 | struct xhci_device_control *in_ctx; | 635 | struct xhci_container_ctx *in_ctx; |
625 | dma_addr_t in_ctx_dma; | 636 | |
626 | /* FIXME when stream support is added */ | 637 | /* FIXME when stream support is added */ |
627 | struct xhci_ring *ep_rings[31]; | 638 | struct xhci_ring *ep_rings[31]; |
628 | /* Temporary storage in case the configure endpoint command fails and we | 639 | /* Temporary storage in case the configure endpoint command fails and we |
@@ -641,7 +652,7 @@ struct xhci_virt_device { | |||
641 | */ | 652 | */ |
642 | struct xhci_device_context_array { | 653 | struct xhci_device_context_array { |
643 | /* 64-bit device addresses; we only write 32-bit addresses */ | 654 | /* 64-bit device addresses; we only write 32-bit addresses */ |
644 | u32 dev_context_ptrs[2*MAX_HC_SLOTS]; | 655 | u64 dev_context_ptrs[MAX_HC_SLOTS]; |
645 | /* private xHCD pointers */ | 656 | /* private xHCD pointers */ |
646 | dma_addr_t dma; | 657 | dma_addr_t dma; |
647 | }; | 658 | }; |
@@ -654,7 +665,7 @@ struct xhci_device_context_array { | |||
654 | 665 | ||
655 | struct xhci_stream_ctx { | 666 | struct xhci_stream_ctx { |
656 | /* 64-bit stream ring address, cycle state, and stream type */ | 667 | /* 64-bit stream ring address, cycle state, and stream type */ |
657 | u32 stream_ring[2]; | 668 | u64 stream_ring; |
658 | /* offset 0x14 - 0x1f reserved for HC internal use */ | 669 | /* offset 0x14 - 0x1f reserved for HC internal use */ |
659 | u32 reserved[2]; | 670 | u32 reserved[2]; |
660 | }; | 671 | }; |
@@ -662,7 +673,7 @@ struct xhci_stream_ctx { | |||
662 | 673 | ||
663 | struct xhci_transfer_event { | 674 | struct xhci_transfer_event { |
664 | /* 64-bit buffer address, or immediate data */ | 675 | /* 64-bit buffer address, or immediate data */ |
665 | u32 buffer[2]; | 676 | u64 buffer; |
666 | u32 transfer_len; | 677 | u32 transfer_len; |
667 | /* This field is interpreted differently based on the type of TRB */ | 678 | /* This field is interpreted differently based on the type of TRB */ |
668 | u32 flags; | 679 | u32 flags; |
@@ -744,7 +755,7 @@ struct xhci_transfer_event { | |||
744 | 755 | ||
745 | struct xhci_link_trb { | 756 | struct xhci_link_trb { |
746 | /* 64-bit segment pointer*/ | 757 | /* 64-bit segment pointer*/ |
747 | u32 segment_ptr[2]; | 758 | u64 segment_ptr; |
748 | u32 intr_target; | 759 | u32 intr_target; |
749 | u32 control; | 760 | u32 control; |
750 | }; | 761 | }; |
@@ -755,7 +766,7 @@ struct xhci_link_trb { | |||
755 | /* Command completion event TRB */ | 766 | /* Command completion event TRB */ |
756 | struct xhci_event_cmd { | 767 | struct xhci_event_cmd { |
757 | /* Pointer to command TRB, or the value passed by the event data trb */ | 768 | /* Pointer to command TRB, or the value passed by the event data trb */ |
758 | u32 cmd_trb[2]; | 769 | u64 cmd_trb; |
759 | u32 status; | 770 | u32 status; |
760 | u32 flags; | 771 | u32 flags; |
761 | }; | 772 | }; |
@@ -848,8 +859,8 @@ union xhci_trb { | |||
848 | #define TRB_CONFIG_EP 12 | 859 | #define TRB_CONFIG_EP 12 |
849 | /* Evaluate Context Command */ | 860 | /* Evaluate Context Command */ |
850 | #define TRB_EVAL_CONTEXT 13 | 861 | #define TRB_EVAL_CONTEXT 13 |
851 | /* Reset Transfer Ring Command */ | 862 | /* Reset Endpoint Command */ |
852 | #define TRB_RESET_RING 14 | 863 | #define TRB_RESET_EP 14 |
853 | /* Stop Transfer Ring Command */ | 864 | /* Stop Transfer Ring Command */ |
854 | #define TRB_STOP_RING 15 | 865 | #define TRB_STOP_RING 15 |
855 | /* Set Transfer Ring Dequeue Pointer Command */ | 866 | /* Set Transfer Ring Dequeue Pointer Command */ |
@@ -929,6 +940,7 @@ struct xhci_ring { | |||
929 | unsigned int cancels_pending; | 940 | unsigned int cancels_pending; |
930 | unsigned int state; | 941 | unsigned int state; |
931 | #define SET_DEQ_PENDING (1 << 0) | 942 | #define SET_DEQ_PENDING (1 << 0) |
943 | #define EP_HALTED (1 << 1) | ||
932 | /* The TRB that was last reported in a stopped endpoint ring */ | 944 | /* The TRB that was last reported in a stopped endpoint ring */ |
933 | union xhci_trb *stopped_trb; | 945 | union xhci_trb *stopped_trb; |
934 | struct xhci_td *stopped_td; | 946 | struct xhci_td *stopped_td; |
@@ -940,9 +952,15 @@ struct xhci_ring { | |||
940 | u32 cycle_state; | 952 | u32 cycle_state; |
941 | }; | 953 | }; |
942 | 954 | ||
955 | struct xhci_dequeue_state { | ||
956 | struct xhci_segment *new_deq_seg; | ||
957 | union xhci_trb *new_deq_ptr; | ||
958 | int new_cycle_state; | ||
959 | }; | ||
960 | |||
943 | struct xhci_erst_entry { | 961 | struct xhci_erst_entry { |
944 | /* 64-bit event ring segment address */ | 962 | /* 64-bit event ring segment address */ |
945 | u32 seg_addr[2]; | 963 | u64 seg_addr; |
946 | u32 seg_size; | 964 | u32 seg_size; |
947 | /* Set to zero */ | 965 | /* Set to zero */ |
948 | u32 rsvd; | 966 | u32 rsvd; |
@@ -957,6 +975,13 @@ struct xhci_erst { | |||
957 | unsigned int erst_size; | 975 | unsigned int erst_size; |
958 | }; | 976 | }; |
959 | 977 | ||
978 | struct xhci_scratchpad { | ||
979 | u64 *sp_array; | ||
980 | dma_addr_t sp_dma; | ||
981 | void **sp_buffers; | ||
982 | dma_addr_t *sp_dma_buffers; | ||
983 | }; | ||
984 | |||
960 | /* | 985 | /* |
961 | * Each segment table entry is 4*32bits long. 1K seems like an ok size: | 986 | * Each segment table entry is 4*32bits long. 1K seems like an ok size: |
962 | * (1K bytes * 8 bits/byte) / (4*32 bits) = 64 segment entries in the table, | 987 | * (1K bytes * 8 bits/byte) / (4*32 bits) = 64 segment entries in the table, |
@@ -1011,6 +1036,9 @@ struct xhci_hcd { | |||
1011 | struct xhci_ring *cmd_ring; | 1036 | struct xhci_ring *cmd_ring; |
1012 | struct xhci_ring *event_ring; | 1037 | struct xhci_ring *event_ring; |
1013 | struct xhci_erst erst; | 1038 | struct xhci_erst erst; |
1039 | /* Scratchpad */ | ||
1040 | struct xhci_scratchpad *scratchpad; | ||
1041 | |||
1014 | /* slot enabling and address device helpers */ | 1042 | /* slot enabling and address device helpers */ |
1015 | struct completion addr_dev; | 1043 | struct completion addr_dev; |
1016 | int slot_id; | 1044 | int slot_id; |
@@ -1071,13 +1099,43 @@ static inline unsigned int xhci_readl(const struct xhci_hcd *xhci, | |||
1071 | static inline void xhci_writel(struct xhci_hcd *xhci, | 1099 | static inline void xhci_writel(struct xhci_hcd *xhci, |
1072 | const unsigned int val, __u32 __iomem *regs) | 1100 | const unsigned int val, __u32 __iomem *regs) |
1073 | { | 1101 | { |
1074 | if (!in_interrupt()) | 1102 | xhci_dbg(xhci, |
1075 | xhci_dbg(xhci, | 1103 | "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n", |
1076 | "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n", | 1104 | regs, val); |
1077 | regs, val); | ||
1078 | writel(val, regs); | 1105 | writel(val, regs); |
1079 | } | 1106 | } |
1080 | 1107 | ||
1108 | /* | ||
1109 | * Registers should always be accessed with double word or quad word accesses. | ||
1110 | * | ||
1111 | * Some xHCI implementations may support 64-bit address pointers. Registers | ||
1112 | * with 64-bit address pointers should be written to with dword accesses by | ||
1113 | * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second. | ||
1114 | * xHCI implementations that do not support 64-bit address pointers will ignore | ||
1115 | * the high dword, and write order is irrelevant. | ||
1116 | */ | ||
1117 | static inline u64 xhci_read_64(const struct xhci_hcd *xhci, | ||
1118 | __u64 __iomem *regs) | ||
1119 | { | ||
1120 | __u32 __iomem *ptr = (__u32 __iomem *) regs; | ||
1121 | u64 val_lo = readl(ptr); | ||
1122 | u64 val_hi = readl(ptr + 1); | ||
1123 | return val_lo + (val_hi << 32); | ||
1124 | } | ||
1125 | static inline void xhci_write_64(struct xhci_hcd *xhci, | ||
1126 | const u64 val, __u64 __iomem *regs) | ||
1127 | { | ||
1128 | __u32 __iomem *ptr = (__u32 __iomem *) regs; | ||
1129 | u32 val_lo = lower_32_bits(val); | ||
1130 | u32 val_hi = upper_32_bits(val); | ||
1131 | |||
1132 | xhci_dbg(xhci, | ||
1133 | "`MEM_WRITE_DWORD(3'b000, 64'h%p, 64'h%0lx, 4'hf);\n", | ||
1134 | regs, (long unsigned int) val); | ||
1135 | writel(val_lo, ptr); | ||
1136 | writel(val_hi, ptr + 1); | ||
1137 | } | ||
1138 | |||
1081 | /* xHCI debugging */ | 1139 | /* xHCI debugging */ |
1082 | void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num); | 1140 | void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num); |
1083 | void xhci_print_registers(struct xhci_hcd *xhci); | 1141 | void xhci_print_registers(struct xhci_hcd *xhci); |
@@ -1090,7 +1148,7 @@ void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring); | |||
1090 | void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst); | 1148 | void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst); |
1091 | void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci); | 1149 | void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci); |
1092 | void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring); | 1150 | void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring); |
1093 | void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep); | 1151 | void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep); |
1094 | 1152 | ||
1095 | /* xHCI memory management */ | 1153 | /* xHCI memory management */ |
1096 | void xhci_mem_cleanup(struct xhci_hcd *xhci); | 1154 | void xhci_mem_cleanup(struct xhci_hcd *xhci); |
@@ -1128,6 +1186,7 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags); | |||
1128 | int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status); | 1186 | int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status); |
1129 | int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); | 1187 | int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); |
1130 | int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); | 1188 | int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); |
1189 | void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep); | ||
1131 | int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); | 1190 | int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); |
1132 | void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); | 1191 | void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); |
1133 | 1192 | ||
@@ -1148,10 +1207,23 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, | |||
1148 | int slot_id, unsigned int ep_index); | 1207 | int slot_id, unsigned int ep_index); |
1149 | int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, | 1208 | int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, |
1150 | u32 slot_id); | 1209 | u32 slot_id); |
1210 | int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, | ||
1211 | unsigned int ep_index); | ||
1212 | void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, | ||
1213 | unsigned int slot_id, unsigned int ep_index, | ||
1214 | struct xhci_td *cur_td, struct xhci_dequeue_state *state); | ||
1215 | void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, | ||
1216 | struct xhci_ring *ep_ring, unsigned int slot_id, | ||
1217 | unsigned int ep_index, struct xhci_dequeue_state *deq_state); | ||
1151 | 1218 | ||
1152 | /* xHCI roothub code */ | 1219 | /* xHCI roothub code */ |
1153 | int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, | 1220 | int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, |
1154 | char *buf, u16 wLength); | 1221 | char *buf, u16 wLength); |
1155 | int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); | 1222 | int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); |
1156 | 1223 | ||
1224 | /* xHCI contexts */ | ||
1225 | struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); | ||
1226 | struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); | ||
1227 | struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index); | ||
1228 | |||
1157 | #endif /* __LINUX_XHCI_HCD_H */ | 1229 | #endif /* __LINUX_XHCI_HCD_H */ |
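The xhci_read_64()/xhci_write_64() helpers added above split every 64-bit register access into two 32-bit MMIO operations, low dword first, so a controller that only implements 32-bit address pointers can simply ignore the high half; the same motivation is behind the u32 field[2] to u64 field changes in the TRB and ERST structures earlier in this file. A small stand-alone C sketch of that split-access pattern follows; mmio_write_64()/mmio_read_64() and the fake register array are illustrative names, not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Fake "register file" standing in for a memory-mapped BAR. */
static uint32_t fake_regs[4];

/* Write a 64-bit value as two 32-bit accesses, low dword first. */
static void mmio_write_64(volatile uint32_t *reg, uint64_t val)
{
	reg[0] = (uint32_t)(val & 0xffffffffu);  /* low dword first */
	reg[1] = (uint32_t)(val >> 32);          /* high dword second */
}

/* Read the pair back and reassemble the 64-bit value. */
static uint64_t mmio_read_64(const volatile uint32_t *reg)
{
	uint64_t lo = reg[0];
	uint64_t hi = reg[1];

	return lo | (hi << 32);
}

int main(void)
{
	mmio_write_64(&fake_regs[0], 0x123456789abcdef0ULL);
	printf("read back: 0x%llx\n",
	       (unsigned long long)mmio_read_64(&fake_regs[0]));
	return 0;
}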
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig index a68d91a11bee..abe3aa67ed00 100644 --- a/drivers/usb/misc/Kconfig +++ b/drivers/usb/misc/Kconfig | |||
@@ -220,7 +220,7 @@ config USB_IOWARRIOR | |||
220 | 220 | ||
221 | config USB_TEST | 221 | config USB_TEST |
222 | tristate "USB testing driver" | 222 | tristate "USB testing driver" |
223 | depends on USB && USB_DEVICEFS | 223 | depends on USB |
224 | help | 224 | help |
225 | This driver is for testing host controller software. It is used | 225 | This driver is for testing host controller software. It is used |
226 | with specialized device firmware for regression and stress testing, | 226 | with specialized device firmware for regression and stress testing, |
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 554a414f65d1..c7c1ca0494cd 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -1326,7 +1326,6 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb) | |||
1326 | int i; | 1326 | int i; |
1327 | 1327 | ||
1328 | /* log core options (read using indexed model) */ | 1328 | /* log core options (read using indexed model) */ |
1329 | musb_ep_select(mbase, 0); | ||
1330 | reg = musb_read_configdata(mbase); | 1329 | reg = musb_read_configdata(mbase); |
1331 | 1330 | ||
1332 | strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8"); | 1331 | strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8"); |
@@ -1990,7 +1989,7 @@ bad_config: | |||
1990 | if (status < 0) | 1989 | if (status < 0) |
1991 | goto fail2; | 1990 | goto fail2; |
1992 | 1991 | ||
1993 | #ifdef CONFIG_USB_OTG | 1992 | #ifdef CONFIG_USB_MUSB_OTG |
1994 | setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb); | 1993 | setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb); |
1995 | #endif | 1994 | #endif |
1996 | 1995 | ||
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c index 40ed50ecedff..7a6778675ad3 100644 --- a/drivers/usb/musb/musb_gadget_ep0.c +++ b/drivers/usb/musb/musb_gadget_ep0.c | |||
@@ -407,7 +407,7 @@ stall: | |||
407 | csr |= MUSB_RXCSR_P_SENDSTALL | 407 | csr |= MUSB_RXCSR_P_SENDSTALL |
408 | | MUSB_RXCSR_FLUSHFIFO | 408 | | MUSB_RXCSR_FLUSHFIFO |
409 | | MUSB_RXCSR_CLRDATATOG | 409 | | MUSB_RXCSR_CLRDATATOG |
410 | | MUSB_TXCSR_P_WZC_BITS; | 410 | | MUSB_RXCSR_P_WZC_BITS; |
411 | musb_writew(regs, MUSB_RXCSR, | 411 | musb_writew(regs, MUSB_RXCSR, |
412 | csr); | 412 | csr); |
413 | } | 413 | } |
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h index de3b2f18db44..fbfd3fd9ce1f 100644 --- a/drivers/usb/musb/musb_regs.h +++ b/drivers/usb/musb/musb_regs.h | |||
@@ -323,6 +323,7 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off) | |||
323 | 323 | ||
324 | static inline u8 musb_read_configdata(void __iomem *mbase) | 324 | static inline u8 musb_read_configdata(void __iomem *mbase) |
325 | { | 325 | { |
326 | musb_writeb(mbase, MUSB_INDEX, 0); | ||
326 | return musb_readb(mbase, 0x10 + MUSB_CONFIGDATA); | 327 | return musb_readb(mbase, 0x10 + MUSB_CONFIGDATA); |
327 | } | 328 | } |
328 | 329 | ||
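The musb change above moves the index selection into the accessor itself: CONFIGDATA is read from the 0x10..0x1f window that the MUSB_INDEX register selects, so musb_read_configdata() now writes index 0 before reading instead of relying on every caller to have done a musb_ep_select(mbase, 0) first, which is why that call could be dropped from musb_core_init(). The stand-alone sketch below illustrates the general indexed-register idea with invented offsets and a simulated register file; none of these names come from the driver.

#include <stdint.h>
#include <stdio.h>

#define NUM_EPS        4
#define REG_INDEX      0x0e   /* hypothetical index register */
#define REG_WINDOW     0x10   /* indexed window at 0x10..0x1f */
#define REG_CONFIGDATA 0x0f   /* offset inside the window, valid at index 0 */

static uint8_t regs[0x20];
static uint8_t ep_windows[NUM_EPS][0x10];

static void reg_write(unsigned int off, uint8_t val)
{
	regs[off] = val;
}

static uint8_t reg_read(unsigned int off)
{
	/* Reads in the window come from whichever bank INDEX selects. */
	if (off >= REG_WINDOW && off < REG_WINDOW + 0x10)
		return ep_windows[regs[REG_INDEX] % NUM_EPS][off - REG_WINDOW];
	return regs[off];
}

/* Like the patched musb_read_configdata(): select index 0 ourselves
 * rather than trusting the caller to have done it. */
static uint8_t read_configdata(void)
{
	reg_write(REG_INDEX, 0);
	return reg_read(REG_WINDOW + REG_CONFIGDATA);
}

int main(void)
{
	ep_windows[0][REG_CONFIGDATA] = 0xde;  /* only visible at index 0 */
	reg_write(REG_INDEX, 3);               /* caller left another index */
	printf("CONFIGDATA = 0x%02x\n", read_configdata());
	return 0;
}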
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index e9a40b820fd4..985cbcf48bda 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c | |||
@@ -80,6 +80,7 @@ static struct usb_device_id id_table [] = { | |||
80 | { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ | 80 | { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ |
81 | { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */ | 81 | { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */ |
82 | { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */ | 82 | { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */ |
83 | { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */ | ||
83 | { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ | 84 | { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ |
84 | { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ | 85 | { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ |
85 | { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ | 86 | { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ |
@@ -96,7 +97,9 @@ static struct usb_device_id id_table [] = { | |||
96 | { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */ | 97 | { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */ |
97 | { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ | 98 | { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ |
98 | { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ | 99 | { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ |
100 | { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */ | ||
99 | { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ | 101 | { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ |
102 | { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ | ||
100 | { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ | 103 | { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ |
101 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ | 104 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ |
102 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ | 105 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 60c64cc5be2a..b574878c78b2 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -698,6 +698,7 @@ static struct usb_device_id id_table_combined [] = { | |||
698 | { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID), | 698 | { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID), |
699 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 699 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
700 | { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) }, | 700 | { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) }, |
701 | { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) }, | ||
701 | { }, /* Optional parameter entry */ | 702 | { }, /* Optional parameter entry */ |
702 | { } /* Terminating entry */ | 703 | { } /* Terminating entry */ |
703 | }; | 704 | }; |
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index c9fbd7415092..24dbd99e87d7 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h | |||
@@ -947,6 +947,13 @@ | |||
947 | #define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */ | 947 | #define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */ |
948 | 948 | ||
949 | /* | 949 | /* |
950 | * GN Otometrics (http://www.otometrics.com) | ||
951 | * Submitted by Ville Sundberg. | ||
952 | */ | ||
953 | #define GN_OTOMETRICS_VID 0x0c33 /* Vendor ID */ | ||
954 | #define AURICAL_USB_PID 0x0010 /* Aurical USB Audiometer */ | ||
955 | |||
956 | /* | ||
950 | * BmRequestType: 1100 0000b | 957 | * BmRequestType: 1100 0000b |
951 | * bRequest: FTDI_E2_READ | 958 | * bRequest: FTDI_E2_READ |
952 | * wValue: 0 | 959 | * wValue: 0 |
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index c31940a307f8..270009afdf77 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
@@ -124,10 +124,13 @@ | |||
124 | #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 | 124 | #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 |
125 | #define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 | 125 | #define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 |
126 | 126 | ||
127 | /* This driver also supports the ATEN UC2324 device since it is mos7840 based | 127 | /* This driver also supports |
128 | * - if I knew the device id it would also support the ATEN UC2322 */ | 128 | * ATEN UC2324 device using Moschip MCS7840 |
129 | * ATEN UC2322 device using Moschip MCS7820 | ||
130 | */ | ||
129 | #define USB_VENDOR_ID_ATENINTL 0x0557 | 131 | #define USB_VENDOR_ID_ATENINTL 0x0557 |
130 | #define ATENINTL_DEVICE_ID_UC2324 0x2011 | 132 | #define ATENINTL_DEVICE_ID_UC2324 0x2011 |
133 | #define ATENINTL_DEVICE_ID_UC2322 0x7820 | ||
131 | 134 | ||
132 | /* Interrupt Routine Defines */ | 135 | /* Interrupt Routine Defines */ |
133 | 136 | ||
@@ -177,6 +180,7 @@ static struct usb_device_id moschip_port_id_table[] = { | |||
177 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, | 180 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, |
178 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, | 181 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, |
179 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, | 182 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, |
183 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, | ||
180 | {} /* terminating entry */ | 184 | {} /* terminating entry */ |
181 | }; | 185 | }; |
182 | 186 | ||
@@ -186,6 +190,7 @@ static __devinitdata struct usb_device_id moschip_id_table_combined[] = { | |||
186 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, | 190 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, |
187 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, | 191 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, |
188 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, | 192 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, |
193 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, | ||
189 | {} /* terminating entry */ | 194 | {} /* terminating entry */ |
190 | }; | 195 | }; |
191 | 196 | ||
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 98262dd552bb..c784ddbe7b61 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -66,8 +66,10 @@ static int option_tiocmget(struct tty_struct *tty, struct file *file); | |||
66 | static int option_tiocmset(struct tty_struct *tty, struct file *file, | 66 | static int option_tiocmset(struct tty_struct *tty, struct file *file, |
67 | unsigned int set, unsigned int clear); | 67 | unsigned int set, unsigned int clear); |
68 | static int option_send_setup(struct usb_serial_port *port); | 68 | static int option_send_setup(struct usb_serial_port *port); |
69 | #ifdef CONFIG_PM | ||
69 | static int option_suspend(struct usb_serial *serial, pm_message_t message); | 70 | static int option_suspend(struct usb_serial *serial, pm_message_t message); |
70 | static int option_resume(struct usb_serial *serial); | 71 | static int option_resume(struct usb_serial *serial); |
72 | #endif | ||
71 | 73 | ||
72 | /* Vendor and product IDs */ | 74 | /* Vendor and product IDs */ |
73 | #define OPTION_VENDOR_ID 0x0AF0 | 75 | #define OPTION_VENDOR_ID 0x0AF0 |
@@ -205,6 +207,7 @@ static int option_resume(struct usb_serial *serial); | |||
205 | #define NOVATELWIRELESS_PRODUCT_MC727 0x4100 | 207 | #define NOVATELWIRELESS_PRODUCT_MC727 0x4100 |
206 | #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 | 208 | #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 |
207 | #define NOVATELWIRELESS_PRODUCT_U727 0x5010 | 209 | #define NOVATELWIRELESS_PRODUCT_U727 0x5010 |
210 | #define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100 | ||
208 | #define NOVATELWIRELESS_PRODUCT_MC760 0x6000 | 211 | #define NOVATELWIRELESS_PRODUCT_MC760 0x6000 |
209 | #define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002 | 212 | #define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002 |
210 | 213 | ||
@@ -259,11 +262,6 @@ static int option_resume(struct usb_serial *serial); | |||
259 | #define AXESSTEL_VENDOR_ID 0x1726 | 262 | #define AXESSTEL_VENDOR_ID 0x1726 |
260 | #define AXESSTEL_PRODUCT_MV110H 0x1000 | 263 | #define AXESSTEL_PRODUCT_MV110H 0x1000 |
261 | 264 | ||
262 | #define ONDA_VENDOR_ID 0x19d2 | ||
263 | #define ONDA_PRODUCT_MSA501HS 0x0001 | ||
264 | #define ONDA_PRODUCT_ET502HS 0x0002 | ||
265 | #define ONDA_PRODUCT_MT503HS 0x2000 | ||
266 | |||
267 | #define BANDRICH_VENDOR_ID 0x1A8D | 265 | #define BANDRICH_VENDOR_ID 0x1A8D |
268 | #define BANDRICH_PRODUCT_C100_1 0x1002 | 266 | #define BANDRICH_PRODUCT_C100_1 0x1002 |
269 | #define BANDRICH_PRODUCT_C100_2 0x1003 | 267 | #define BANDRICH_PRODUCT_C100_2 0x1003 |
@@ -301,6 +299,7 @@ static int option_resume(struct usb_serial *serial); | |||
301 | #define ZTE_PRODUCT_MF628 0x0015 | 299 | #define ZTE_PRODUCT_MF628 0x0015 |
302 | #define ZTE_PRODUCT_MF626 0x0031 | 300 | #define ZTE_PRODUCT_MF626 0x0031 |
303 | #define ZTE_PRODUCT_CDMA_TECH 0xfffe | 301 | #define ZTE_PRODUCT_CDMA_TECH 0xfffe |
302 | #define ZTE_PRODUCT_AC8710 0xfff1 | ||
304 | 303 | ||
305 | #define BENQ_VENDOR_ID 0x04a5 | 304 | #define BENQ_VENDOR_ID 0x04a5 |
306 | #define BENQ_PRODUCT_H10 0x4068 | 305 | #define BENQ_PRODUCT_H10 0x4068 |
@@ -322,6 +321,11 @@ static int option_resume(struct usb_serial *serial); | |||
322 | #define ALINK_VENDOR_ID 0x1e0e | 321 | #define ALINK_VENDOR_ID 0x1e0e |
323 | #define ALINK_PRODUCT_3GU 0x9200 | 322 | #define ALINK_PRODUCT_3GU 0x9200 |
324 | 323 | ||
324 | /* ALCATEL PRODUCTS */ | ||
325 | #define ALCATEL_VENDOR_ID 0x1bbb | ||
326 | #define ALCATEL_PRODUCT_X060S 0x0000 | ||
327 | |||
328 | |||
325 | static struct usb_device_id option_ids[] = { | 329 | static struct usb_device_id option_ids[] = { |
326 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, | 330 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, |
327 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, | 331 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, |
@@ -438,6 +442,7 @@ static struct usb_device_id option_ids[] = { | |||
438 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */ | 442 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */ |
439 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ | 443 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ |
440 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ | 444 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ |
445 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */ | ||
441 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */ | 446 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */ |
442 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */ | 447 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */ |
443 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */ | 448 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */ |
@@ -474,42 +479,6 @@ static struct usb_device_id option_ids[] = { | |||
474 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, | 479 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, |
475 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, | 480 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, |
476 | { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) }, | 481 | { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) }, |
477 | { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MSA501HS) }, | ||
478 | { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) }, | ||
479 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0003) }, | ||
480 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0004) }, | ||
481 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0005) }, | ||
482 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0006) }, | ||
483 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0007) }, | ||
484 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0008) }, | ||
485 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0009) }, | ||
486 | { USB_DEVICE(ONDA_VENDOR_ID, 0x000a) }, | ||
487 | { USB_DEVICE(ONDA_VENDOR_ID, 0x000b) }, | ||
488 | { USB_DEVICE(ONDA_VENDOR_ID, 0x000c) }, | ||
489 | { USB_DEVICE(ONDA_VENDOR_ID, 0x000d) }, | ||
490 | { USB_DEVICE(ONDA_VENDOR_ID, 0x000e) }, | ||
491 | { USB_DEVICE(ONDA_VENDOR_ID, 0x000f) }, | ||
492 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0010) }, | ||
493 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0011) }, | ||
494 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0012) }, | ||
495 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0013) }, | ||
496 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0014) }, | ||
497 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0015) }, | ||
498 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0016) }, | ||
499 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0017) }, | ||
500 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0018) }, | ||
501 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0019) }, | ||
502 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0020) }, | ||
503 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0021) }, | ||
504 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0022) }, | ||
505 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0023) }, | ||
506 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0024) }, | ||
507 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0025) }, | ||
508 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0026) }, | ||
509 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0027) }, | ||
510 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0028) }, | ||
511 | { USB_DEVICE(ONDA_VENDOR_ID, 0x0029) }, | ||
512 | { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MT503HS) }, | ||
513 | { USB_DEVICE(YISO_VENDOR_ID, YISO_PRODUCT_U893) }, | 482 | { USB_DEVICE(YISO_VENDOR_ID, YISO_PRODUCT_U893) }, |
514 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, | 483 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, |
515 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, | 484 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, |
@@ -534,10 +503,75 @@ static struct usb_device_id option_ids[] = { | |||
534 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ | 503 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ |
535 | { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ | 504 | { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ |
536 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, | 505 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, |
537 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622) }, | 506 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ |
538 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626) }, | 507 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff) }, |
539 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) }, | 508 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) }, |
540 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) }, | 509 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) }, |
510 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) }, | ||
511 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0006, 0xff, 0xff, 0xff) }, | ||
512 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0007, 0xff, 0xff, 0xff) }, | ||
513 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0008, 0xff, 0xff, 0xff) }, | ||
514 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0009, 0xff, 0xff, 0xff) }, | ||
515 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000a, 0xff, 0xff, 0xff) }, | ||
516 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000b, 0xff, 0xff, 0xff) }, | ||
517 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000c, 0xff, 0xff, 0xff) }, | ||
518 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000d, 0xff, 0xff, 0xff) }, | ||
519 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000e, 0xff, 0xff, 0xff) }, | ||
520 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000f, 0xff, 0xff, 0xff) }, | ||
521 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) }, | ||
522 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) }, | ||
523 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff) }, | ||
524 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) }, | ||
525 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) }, | ||
526 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) }, | ||
527 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff) }, | ||
528 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) }, | ||
529 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) }, | ||
530 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) }, | ||
531 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff) }, | ||
532 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) }, | ||
533 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) }, | ||
534 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) }, | ||
535 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff) }, | ||
536 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, | ||
537 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) }, | ||
538 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) }, | ||
539 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) }, | ||
540 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) }, | ||
541 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) }, | ||
542 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) }, | ||
543 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff) }, | ||
544 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) }, | ||
545 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff) }, | ||
546 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) }, | ||
547 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) }, | ||
548 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff) }, | ||
549 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) }, | ||
550 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff) }, | ||
551 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) }, | ||
552 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff) }, | ||
553 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) }, | ||
554 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff) }, | ||
555 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) }, | ||
556 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) }, | ||
557 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff) }, | ||
558 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) }, | ||
559 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) }, | ||
560 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) }, | ||
561 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) }, | ||
562 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) }, | ||
563 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0082, 0xff, 0xff, 0xff) }, | ||
564 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) }, | ||
565 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) }, | ||
566 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, | ||
567 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */ | ||
568 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) }, | ||
569 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) }, | ||
570 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) }, | ||
571 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) }, | ||
572 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, | ||
573 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, | ||
574 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, | ||
541 | { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, | 575 | { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, |
542 | { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, | 576 | { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, |
543 | { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) }, | 577 | { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) }, |
@@ -547,6 +581,7 @@ static struct usb_device_id option_ids[] = { | |||
547 | { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */ | 581 | { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */ |
548 | { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, | 582 | { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, |
549 | { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, | 583 | { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, |
584 | { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, | ||
550 | { } /* Terminating entry */ | 585 | { } /* Terminating entry */ |
551 | }; | 586 | }; |
552 | MODULE_DEVICE_TABLE(usb, option_ids); | 587 | MODULE_DEVICE_TABLE(usb, option_ids); |
@@ -555,8 +590,10 @@ static struct usb_driver option_driver = { | |||
555 | .name = "option", | 590 | .name = "option", |
556 | .probe = usb_serial_probe, | 591 | .probe = usb_serial_probe, |
557 | .disconnect = usb_serial_disconnect, | 592 | .disconnect = usb_serial_disconnect, |
593 | #ifdef CONFIG_PM | ||
558 | .suspend = usb_serial_suspend, | 594 | .suspend = usb_serial_suspend, |
559 | .resume = usb_serial_resume, | 595 | .resume = usb_serial_resume, |
596 | #endif | ||
560 | .id_table = option_ids, | 597 | .id_table = option_ids, |
561 | .no_dynamic_id = 1, | 598 | .no_dynamic_id = 1, |
562 | }; | 599 | }; |
@@ -588,8 +625,10 @@ static struct usb_serial_driver option_1port_device = { | |||
588 | .disconnect = option_disconnect, | 625 | .disconnect = option_disconnect, |
589 | .release = option_release, | 626 | .release = option_release, |
590 | .read_int_callback = option_instat_callback, | 627 | .read_int_callback = option_instat_callback, |
628 | #ifdef CONFIG_PM | ||
591 | .suspend = option_suspend, | 629 | .suspend = option_suspend, |
592 | .resume = option_resume, | 630 | .resume = option_resume, |
631 | #endif | ||
593 | }; | 632 | }; |
594 | 633 | ||
595 | static int debug; | 634 | static int debug; |
@@ -831,7 +870,6 @@ static void option_instat_callback(struct urb *urb) | |||
831 | int status = urb->status; | 870 | int status = urb->status; |
832 | struct usb_serial_port *port = urb->context; | 871 | struct usb_serial_port *port = urb->context; |
833 | struct option_port_private *portdata = usb_get_serial_port_data(port); | 872 | struct option_port_private *portdata = usb_get_serial_port_data(port); |
834 | struct usb_serial *serial = port->serial; | ||
835 | 873 | ||
836 | dbg("%s", __func__); | 874 | dbg("%s", __func__); |
837 | dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata); | 875 | dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata); |
@@ -927,7 +965,6 @@ static int option_open(struct tty_struct *tty, | |||
927 | struct usb_serial_port *port, struct file *filp) | 965 | struct usb_serial_port *port, struct file *filp) |
928 | { | 966 | { |
929 | struct option_port_private *portdata; | 967 | struct option_port_private *portdata; |
930 | struct usb_serial *serial = port->serial; | ||
931 | int i, err; | 968 | int i, err; |
932 | struct urb *urb; | 969 | struct urb *urb; |
933 | 970 | ||
@@ -1187,6 +1224,7 @@ static void option_release(struct usb_serial *serial) | |||
1187 | } | 1224 | } |
1188 | } | 1225 | } |
1189 | 1226 | ||
1227 | #ifdef CONFIG_PM | ||
1190 | static int option_suspend(struct usb_serial *serial, pm_message_t message) | 1228 | static int option_suspend(struct usb_serial *serial, pm_message_t message) |
1191 | { | 1229 | { |
1192 | dbg("%s entered", __func__); | 1230 | dbg("%s entered", __func__); |
@@ -1245,6 +1283,7 @@ static int option_resume(struct usb_serial *serial) | |||
1245 | } | 1283 | } |
1246 | return 0; | 1284 | return 0; |
1247 | } | 1285 | } |
1286 | #endif | ||
1248 | 1287 | ||
1249 | MODULE_AUTHOR(DRIVER_AUTHOR); | 1288 | MODULE_AUTHOR(DRIVER_AUTHOR); |
1250 | MODULE_DESCRIPTION(DRIVER_DESC); | 1289 | MODULE_DESCRIPTION(DRIVER_DESC); |
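Two patterns recur in the option.c hunks above. First, the ZTE entries switch from USB_DEVICE() to USB_DEVICE_AND_INTERFACE_INFO(vid, pid, class, subclass, protocol), which also matches on the interface class triple; with 0xff/0xff/0xff (vendor specific) the serial driver binds only to the modem interfaces of these composite devices, presumably so it does not also grab their other interfaces. Second, the suspend/resume handlers and the initializers that reference them are now wrapped in #ifdef CONFIG_PM, so both disappear together when power management is compiled out. A small compilable sketch of that second pattern, with invented names standing in for the driver's:

#include <stdio.h>

/* Build with -DHAVE_PM to mimic CONFIG_PM being enabled. */

struct driver_ops {
	void (*probe)(void);
	void (*suspend)(void);   /* stays NULL when PM support is out */
	void (*resume)(void);
};

static void my_probe(void)
{
	puts("probe");
}

#ifdef HAVE_PM
static void my_suspend(void)
{
	puts("suspend");
}

static void my_resume(void)
{
	puts("resume");
}
#endif

/* The function definitions and the initializers that name them are
 * guarded by the same symbol, so nothing refers to code that was
 * compiled out. */
static struct driver_ops ops = {
	.probe   = my_probe,
#ifdef HAVE_PM
	.suspend = my_suspend,
	.resume  = my_resume,
#endif
};

int main(void)
{
	ops.probe();
	return 0;
}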
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c index fcb320217218..e20dc525d177 100644 --- a/drivers/usb/storage/transport.c +++ b/drivers/usb/storage/transport.c | |||
@@ -961,7 +961,7 @@ int usb_stor_Bulk_max_lun(struct us_data *us) | |||
961 | US_BULK_GET_MAX_LUN, | 961 | US_BULK_GET_MAX_LUN, |
962 | USB_DIR_IN | USB_TYPE_CLASS | | 962 | USB_DIR_IN | USB_TYPE_CLASS | |
963 | USB_RECIP_INTERFACE, | 963 | USB_RECIP_INTERFACE, |
964 | 0, us->ifnum, us->iobuf, 1, HZ); | 964 | 0, us->ifnum, us->iobuf, 1, 10*HZ); |
965 | 965 | ||
966 | US_DEBUGP("GetMaxLUN command result is %d, data is %d\n", | 966 | US_DEBUGP("GetMaxLUN command result is %d, data is %d\n", |
967 | result, us->iobuf[0]); | 967 | result, us->iobuf[0]); |
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c index c3ebb6b41ce1..7aed2565c1bd 100644 --- a/drivers/video/backlight/jornada720_bl.c +++ b/drivers/video/backlight/jornada720_bl.c | |||
@@ -72,7 +72,7 @@ static int jornada_bl_update_status(struct backlight_device *bd) | |||
72 | if (jornada_ssp_byte(SETBRIGHTNESS) != TXDUMMY) { | 72 | if (jornada_ssp_byte(SETBRIGHTNESS) != TXDUMMY) { |
73 | printk(KERN_INFO "bl : failed to set brightness\n"); | 73 | printk(KERN_INFO "bl : failed to set brightness\n"); |
74 | ret = -ETIMEDOUT; | 74 | ret = -ETIMEDOUT; |
75 | goto out | 75 | goto out; |
76 | } | 76 | } |
77 | 77 | ||
78 | /* at this point we expect that the mcu has accepted | 78 | /* at this point we expect that the mcu has accepted |
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c index bb63c07e13de..5a72083dc67c 100644 --- a/drivers/video/s3c-fb.c +++ b/drivers/video/s3c-fb.c | |||
@@ -964,7 +964,7 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev) | |||
964 | struct s3c_fb *sfb = platform_get_drvdata(pdev); | 964 | struct s3c_fb *sfb = platform_get_drvdata(pdev); |
965 | int win; | 965 | int win; |
966 | 966 | ||
967 | for (win = 0; win <= S3C_FB_MAX_WIN; win++) | 967 | for (win = 0; win < S3C_FB_MAX_WIN; win++) |
968 | if (sfb->windows[win]) | 968 | if (sfb->windows[win]) |
969 | s3c_fb_release_win(sfb, sfb->windows[win]); | 969 | s3c_fb_release_win(sfb, sfb->windows[win]); |
970 | 970 | ||
@@ -988,7 +988,7 @@ static int s3c_fb_suspend(struct platform_device *pdev, pm_message_t state) | |||
988 | struct s3c_fb_win *win; | 988 | struct s3c_fb_win *win; |
989 | int win_no; | 989 | int win_no; |
990 | 990 | ||
991 | for (win_no = S3C_FB_MAX_WIN; win_no >= 0; win_no--) { | 991 | for (win_no = S3C_FB_MAX_WIN - 1; win_no >= 0; win_no--) { |
992 | win = sfb->windows[win_no]; | 992 | win = sfb->windows[win_no]; |
993 | if (!win) | 993 | if (!win) |
994 | continue; | 994 | continue; |
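The two s3c-fb loop fixes above are off-by-one corrections: sfb->windows[] evidently holds S3C_FB_MAX_WIN elements, so valid indices run from 0 to S3C_FB_MAX_WIN - 1, and both "win <= S3C_FB_MAX_WIN" and a countdown starting at S3C_FB_MAX_WIN touch one element past the end of the array. A trivial illustration of the corrected bounds, with a made-up array size:

#include <stdio.h>

#define MAX_WIN 5

static int windows[MAX_WIN];

int main(void)
{
	int i;

	/* Ascending walk: i < MAX_WIN, never i <= MAX_WIN. */
	for (i = 0; i < MAX_WIN; i++)
		windows[i] = i;

	/* Descending walk: start at MAX_WIN - 1, not MAX_WIN. */
	for (i = MAX_WIN - 1; i >= 0; i--)
		printf("windows[%d] = %d\n", i, windows[i]);

	return 0;
}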
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index bcec78ffc765..248e00ec4dc1 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c | |||
@@ -52,8 +52,10 @@ struct virtio_pci_device | |||
52 | char (*msix_names)[256]; | 52 | char (*msix_names)[256]; |
53 | /* Number of available vectors */ | 53 | /* Number of available vectors */ |
54 | unsigned msix_vectors; | 54 | unsigned msix_vectors; |
55 | /* Vectors allocated */ | 55 | /* Vectors allocated, excluding per-vq vectors if any */ |
56 | unsigned msix_used_vectors; | 56 | unsigned msix_used_vectors; |
57 | /* Whether we have vector per vq */ | ||
58 | bool per_vq_vectors; | ||
57 | }; | 59 | }; |
58 | 60 | ||
59 | /* Constants for MSI-X */ | 61 | /* Constants for MSI-X */ |
@@ -258,7 +260,6 @@ static void vp_free_vectors(struct virtio_device *vdev) | |||
258 | 260 | ||
259 | for (i = 0; i < vp_dev->msix_used_vectors; ++i) | 261 | for (i = 0; i < vp_dev->msix_used_vectors; ++i) |
260 | free_irq(vp_dev->msix_entries[i].vector, vp_dev); | 262 | free_irq(vp_dev->msix_entries[i].vector, vp_dev); |
261 | vp_dev->msix_used_vectors = 0; | ||
262 | 263 | ||
263 | if (vp_dev->msix_enabled) { | 264 | if (vp_dev->msix_enabled) { |
264 | /* Disable the vector used for configuration */ | 265 | /* Disable the vector used for configuration */ |
@@ -267,80 +268,77 @@ static void vp_free_vectors(struct virtio_device *vdev) | |||
267 | /* Flush the write out to device */ | 268 | /* Flush the write out to device */ |
268 | ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); | 269 | ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); |
269 | 270 | ||
270 | vp_dev->msix_enabled = 0; | ||
271 | pci_disable_msix(vp_dev->pci_dev); | 271 | pci_disable_msix(vp_dev->pci_dev); |
272 | vp_dev->msix_enabled = 0; | ||
273 | vp_dev->msix_vectors = 0; | ||
272 | } | 274 | } |
273 | } | ||
274 | 275 | ||
275 | static int vp_enable_msix(struct pci_dev *dev, struct msix_entry *entries, | 276 | vp_dev->msix_used_vectors = 0; |
276 | int *options, int noptions) | 277 | kfree(vp_dev->msix_names); |
277 | { | 278 | vp_dev->msix_names = NULL; |
278 | int i; | 279 | kfree(vp_dev->msix_entries); |
279 | for (i = 0; i < noptions; ++i) | 280 | vp_dev->msix_entries = NULL; |
280 | if (!pci_enable_msix(dev, entries, options[i])) | ||
281 | return options[i]; | ||
282 | return -EBUSY; | ||
283 | } | 281 | } |
284 | 282 | ||
285 | static int vp_request_vectors(struct virtio_device *vdev, unsigned max_vqs) | 283 | static int vp_request_vectors(struct virtio_device *vdev, int nvectors, |
284 | bool per_vq_vectors) | ||
286 | { | 285 | { |
287 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 286 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
288 | const char *name = dev_name(&vp_dev->vdev.dev); | 287 | const char *name = dev_name(&vp_dev->vdev.dev); |
289 | unsigned i, v; | 288 | unsigned i, v; |
290 | int err = -ENOMEM; | 289 | int err = -ENOMEM; |
291 | /* We want at most one vector per queue and one for config changes. | 290 | |
292 | * Fallback to separate vectors for config and a shared for queues. | 291 | if (!nvectors) { |
293 | * Finally fall back to regular interrupts. */ | 292 | /* Can't allocate MSI-X vectors, use regular interrupt */ |
294 | int options[] = { max_vqs + 1, 2 }; | 293 | vp_dev->msix_vectors = 0; |
295 | int nvectors = max(options[0], options[1]); | 294 | err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, |
295 | IRQF_SHARED, name, vp_dev); | ||
296 | if (err) | ||
297 | return err; | ||
298 | vp_dev->intx_enabled = 1; | ||
299 | return 0; | ||
300 | } | ||
296 | 301 | ||
297 | vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, | 302 | vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, |
298 | GFP_KERNEL); | 303 | GFP_KERNEL); |
299 | if (!vp_dev->msix_entries) | 304 | if (!vp_dev->msix_entries) |
300 | goto error_entries; | 305 | goto error; |
301 | vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, | 306 | vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, |
302 | GFP_KERNEL); | 307 | GFP_KERNEL); |
303 | if (!vp_dev->msix_names) | 308 | if (!vp_dev->msix_names) |
304 | goto error_names; | 309 | goto error; |
305 | 310 | ||
306 | for (i = 0; i < nvectors; ++i) | 311 | for (i = 0; i < nvectors; ++i) |
307 | vp_dev->msix_entries[i].entry = i; | 312 | vp_dev->msix_entries[i].entry = i; |
308 | 313 | ||
309 | err = vp_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, | 314 | err = pci_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, nvectors); |
310 | options, ARRAY_SIZE(options)); | 315 | if (err > 0) |
311 | if (err < 0) { | 316 | err = -ENOSPC; |
312 | /* Can't allocate enough MSI-X vectors, use regular interrupt */ | 317 | if (err) |
313 | vp_dev->msix_vectors = 0; | 318 | goto error; |
314 | err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, | 319 | vp_dev->msix_vectors = nvectors; |
315 | IRQF_SHARED, name, vp_dev); | 320 | vp_dev->msix_enabled = 1; |
316 | if (err) | 321 | |
317 | goto error_irq; | 322 | /* Set the vector used for configuration */ |
318 | vp_dev->intx_enabled = 1; | 323 | v = vp_dev->msix_used_vectors; |
319 | } else { | 324 | snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, |
320 | vp_dev->msix_vectors = err; | 325 | "%s-config", name); |
321 | vp_dev->msix_enabled = 1; | 326 | err = request_irq(vp_dev->msix_entries[v].vector, |
322 | 327 | vp_config_changed, 0, vp_dev->msix_names[v], | |
323 | /* Set the vector used for configuration */ | 328 | vp_dev); |
324 | v = vp_dev->msix_used_vectors; | 329 | if (err) |
325 | snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, | 330 | goto error; |
326 | "%s-config", name); | 331 | ++vp_dev->msix_used_vectors; |
327 | err = request_irq(vp_dev->msix_entries[v].vector, | 332 | |
328 | vp_config_changed, 0, vp_dev->msix_names[v], | 333 | iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); |
329 | vp_dev); | 334 | /* Verify we had enough resources to assign the vector */ |
330 | if (err) | 335 | v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); |
331 | goto error_irq; | 336 | if (v == VIRTIO_MSI_NO_VECTOR) { |
332 | ++vp_dev->msix_used_vectors; | 337 | err = -EBUSY; |
333 | 338 | goto error; | |
334 | iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); | ||
335 | /* Verify we had enough resources to assign the vector */ | ||
336 | v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); | ||
337 | if (v == VIRTIO_MSI_NO_VECTOR) { | ||
338 | err = -EBUSY; | ||
339 | goto error_irq; | ||
340 | } | ||
341 | } | 339 | } |
342 | 340 | ||
343 | if (vp_dev->msix_vectors && vp_dev->msix_vectors != max_vqs + 1) { | 341 | if (!per_vq_vectors) { |
344 | /* Shared vector for all VQs */ | 342 | /* Shared vector for all VQs */ |
345 | v = vp_dev->msix_used_vectors; | 343 | v = vp_dev->msix_used_vectors; |
346 | snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, | 344 | snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, |
@@ -349,28 +347,25 @@ static int vp_request_vectors(struct virtio_device *vdev, unsigned max_vqs) | |||
349 | vp_vring_interrupt, 0, vp_dev->msix_names[v], | 347 | vp_vring_interrupt, 0, vp_dev->msix_names[v], |
350 | vp_dev); | 348 | vp_dev); |
351 | if (err) | 349 | if (err) |
352 | goto error_irq; | 350 | goto error; |
353 | ++vp_dev->msix_used_vectors; | 351 | ++vp_dev->msix_used_vectors; |
354 | } | 352 | } |
355 | return 0; | 353 | return 0; |
356 | error_irq: | 354 | error: |
357 | vp_free_vectors(vdev); | 355 | vp_free_vectors(vdev); |
358 | kfree(vp_dev->msix_names); | ||
359 | error_names: | ||
360 | kfree(vp_dev->msix_entries); | ||
361 | error_entries: | ||
362 | return err; | 356 | return err; |
363 | } | 357 | } |
364 | 358 | ||
365 | static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, | 359 | static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, |
366 | void (*callback)(struct virtqueue *vq), | 360 | void (*callback)(struct virtqueue *vq), |
367 | const char *name) | 361 | const char *name, |
362 | u16 vector) | ||
368 | { | 363 | { |
369 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 364 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
370 | struct virtio_pci_vq_info *info; | 365 | struct virtio_pci_vq_info *info; |
371 | struct virtqueue *vq; | 366 | struct virtqueue *vq; |
372 | unsigned long flags, size; | 367 | unsigned long flags, size; |
373 | u16 num, vector; | 368 | u16 num; |
374 | int err; | 369 | int err; |
375 | 370 | ||
376 | /* Select the queue we're interested in */ | 371 | /* Select the queue we're interested in */ |
@@ -389,7 +384,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, | |||
389 | 384 | ||
390 | info->queue_index = index; | 385 | info->queue_index = index; |
391 | info->num = num; | 386 | info->num = num; |
392 | info->vector = VIRTIO_MSI_NO_VECTOR; | 387 | info->vector = vector; |
393 | 388 | ||
394 | size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); | 389 | size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); |
395 | info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); | 390 | info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); |
@@ -413,22 +408,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, | |||
413 | vq->priv = info; | 408 | vq->priv = info; |
414 | info->vq = vq; | 409 | info->vq = vq; |
415 | 410 | ||
416 | /* allocate per-vq vector if available and necessary */ | 411 | if (vector != VIRTIO_MSI_NO_VECTOR) { |
417 | if (callback && vp_dev->msix_used_vectors < vp_dev->msix_vectors) { | ||
418 | vector = vp_dev->msix_used_vectors; | ||
419 | snprintf(vp_dev->msix_names[vector], sizeof *vp_dev->msix_names, | ||
420 | "%s-%s", dev_name(&vp_dev->vdev.dev), name); | ||
421 | err = request_irq(vp_dev->msix_entries[vector].vector, | ||
422 | vring_interrupt, 0, | ||
423 | vp_dev->msix_names[vector], vq); | ||
424 | if (err) | ||
425 | goto out_request_irq; | ||
426 | info->vector = vector; | ||
427 | ++vp_dev->msix_used_vectors; | ||
428 | } else | ||
429 | vector = VP_MSIX_VQ_VECTOR; | ||
430 | |||
431 | if (callback && vp_dev->msix_enabled) { | ||
432 | iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); | 412 | iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); |
433 | vector = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); | 413 | vector = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); |
434 | if (vector == VIRTIO_MSI_NO_VECTOR) { | 414 | if (vector == VIRTIO_MSI_NO_VECTOR) { |
@@ -444,11 +424,6 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, | |||
444 | return vq; | 424 | return vq; |
445 | 425 | ||
446 | out_assign: | 426 | out_assign: |
447 | if (info->vector != VIRTIO_MSI_NO_VECTOR) { | ||
448 | free_irq(vp_dev->msix_entries[info->vector].vector, vq); | ||
449 | --vp_dev->msix_used_vectors; | ||
450 | } | ||
451 | out_request_irq: | ||
452 | vring_del_virtqueue(vq); | 427 | vring_del_virtqueue(vq); |
453 | out_activate_queue: | 428 | out_activate_queue: |
454 | iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); | 429 | iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); |
@@ -462,12 +437,13 @@ static void vp_del_vq(struct virtqueue *vq) | |||
462 | { | 437 | { |
463 | struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); | 438 | struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); |
464 | struct virtio_pci_vq_info *info = vq->priv; | 439 | struct virtio_pci_vq_info *info = vq->priv; |
465 | unsigned long size; | 440 | unsigned long flags, size; |
466 | 441 | ||
467 | iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); | 442 | spin_lock_irqsave(&vp_dev->lock, flags); |
443 | list_del(&info->node); | ||
444 | spin_unlock_irqrestore(&vp_dev->lock, flags); | ||
468 | 445 | ||
469 | if (info->vector != VIRTIO_MSI_NO_VECTOR) | 446 | iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); |
470 | free_irq(vp_dev->msix_entries[info->vector].vector, vq); | ||
471 | 447 | ||
472 | if (vp_dev->msix_enabled) { | 448 | if (vp_dev->msix_enabled) { |
473 | iowrite16(VIRTIO_MSI_NO_VECTOR, | 449 | iowrite16(VIRTIO_MSI_NO_VECTOR, |
@@ -489,36 +465,62 @@ static void vp_del_vq(struct virtqueue *vq) | |||
489 | /* the config->del_vqs() implementation */ | 465 | /* the config->del_vqs() implementation */ |
490 | static void vp_del_vqs(struct virtio_device *vdev) | 466 | static void vp_del_vqs(struct virtio_device *vdev) |
491 | { | 467 | { |
468 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | ||
492 | struct virtqueue *vq, *n; | 469 | struct virtqueue *vq, *n; |
470 | struct virtio_pci_vq_info *info; | ||
493 | 471 | ||
494 | list_for_each_entry_safe(vq, n, &vdev->vqs, list) | 472 | list_for_each_entry_safe(vq, n, &vdev->vqs, list) { |
473 | info = vq->priv; | ||
474 | if (vp_dev->per_vq_vectors) | ||
475 | free_irq(vp_dev->msix_entries[info->vector].vector, vq); | ||
495 | vp_del_vq(vq); | 476 | vp_del_vq(vq); |
477 | } | ||
478 | vp_dev->per_vq_vectors = false; | ||
496 | 479 | ||
497 | vp_free_vectors(vdev); | 480 | vp_free_vectors(vdev); |
498 | } | 481 | } |
499 | 482 | ||
500 | /* the config->find_vqs() implementation */ | 483 | static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, |
501 | static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, | 484 | struct virtqueue *vqs[], |
502 | struct virtqueue *vqs[], | 485 | vq_callback_t *callbacks[], |
503 | vq_callback_t *callbacks[], | 486 | const char *names[], |
504 | const char *names[]) | 487 | int nvectors, |
488 | bool per_vq_vectors) | ||
505 | { | 489 | { |
506 | int vectors = 0; | 490 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
507 | int i, err; | 491 | u16 vector; |
508 | 492 | int i, err, allocated_vectors; | |
509 | /* How many vectors would we like? */ | ||
510 | for (i = 0; i < nvqs; ++i) | ||
511 | if (callbacks[i]) | ||
512 | ++vectors; | ||
513 | 493 | ||
514 | err = vp_request_vectors(vdev, vectors); | 494 | err = vp_request_vectors(vdev, nvectors, per_vq_vectors); |
515 | if (err) | 495 | if (err) |
516 | goto error_request; | 496 | goto error_request; |
517 | 497 | ||
498 | vp_dev->per_vq_vectors = per_vq_vectors; | ||
499 | allocated_vectors = vp_dev->msix_used_vectors; | ||
518 | for (i = 0; i < nvqs; ++i) { | 500 | for (i = 0; i < nvqs; ++i) { |
519 | vqs[i] = vp_find_vq(vdev, i, callbacks[i], names[i]); | 501 | if (!callbacks[i] || !vp_dev->msix_enabled) |
520 | if (IS_ERR(vqs[i])) | 502 | vector = VIRTIO_MSI_NO_VECTOR; |
503 | else if (vp_dev->per_vq_vectors) | ||
504 | vector = allocated_vectors++; | ||
505 | else | ||
506 | vector = VP_MSIX_VQ_VECTOR; | ||
507 | vqs[i] = vp_find_vq(vdev, i, callbacks[i], names[i], vector); | ||
508 | if (IS_ERR(vqs[i])) { | ||
509 | err = PTR_ERR(vqs[i]); | ||
521 | goto error_find; | 510 | goto error_find; |
511 | } | ||
512 | /* allocate per-vq irq if available and necessary */ | ||
513 | if (vp_dev->per_vq_vectors && vector != VIRTIO_MSI_NO_VECTOR) { | ||
514 | snprintf(vp_dev->msix_names[vector], sizeof *vp_dev->msix_names, | ||
515 | "%s-%s", dev_name(&vp_dev->vdev.dev), names[i]); | ||
516 | err = request_irq(vp_dev->msix_entries[vector].vector, | ||
517 | vring_interrupt, 0, | ||
518 | vp_dev->msix_names[vector], vqs[i]); | ||
519 | if (err) { | ||
520 | vp_del_vq(vqs[i]); | ||
521 | goto error_find; | ||
522 | } | ||
523 | } | ||
522 | } | 524 | } |
523 | return 0; | 525 | return 0; |
524 | 526 | ||
@@ -526,7 +528,37 @@ error_find: | |||
526 | vp_del_vqs(vdev); | 528 | vp_del_vqs(vdev); |
527 | 529 | ||
528 | error_request: | 530 | error_request: |
529 | return PTR_ERR(vqs[i]); | 531 | return err; |
532 | } | ||
533 | |||
534 | /* the config->find_vqs() implementation */ | ||
535 | static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, | ||
536 | struct virtqueue *vqs[], | ||
537 | vq_callback_t *callbacks[], | ||
538 | const char *names[]) | ||
539 | { | ||
540 | int vectors = 0; | ||
541 | int i, uninitialized_var(err); | ||
542 | |||
543 | /* How many vectors would we like? */ | ||
544 | for (i = 0; i < nvqs; ++i) | ||
545 | if (callbacks[i]) | ||
546 | ++vectors; | ||
547 | |||
548 | /* We want at most one vector per queue and one for config changes. */ | ||
549 | err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, | ||
550 | vectors + 1, true); | ||
551 | if (!err) | ||
552 | return 0; | ||
553 | /* Fallback to separate vectors for config and a shared for queues. */ | ||
554 | err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, | ||
555 | 2, false); | ||
556 | if (!err) | ||
557 | return 0; | ||
558 | /* Finally fall back to regular interrupts. */ | ||
559 | err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, | ||
560 | 0, false); | ||
561 | return err; | ||
530 | } | 562 | } |
531 | 563 | ||
532 | static struct virtio_config_ops virtio_pci_config_ops = { | 564 | static struct virtio_config_ops virtio_pci_config_ops = { |
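The reworked virtio-pci code above factors vector setup into vp_try_to_find_vqs() so that vp_find_vqs() can try three configurations in order: one MSI-X vector per virtqueue plus one for configuration changes, then two vectors (config plus one shared by all queues), and finally no MSI-X at all, falling back to a regular shared interrupt. The stand-alone sketch below shows only that try-then-fall-back control flow; try_setup() and its "available" argument are stand-ins for the driver's real allocation path, not its API.

#include <stdio.h>

/* Stand-in for vp_try_to_find_vqs(): pretend the device grants at
 * most 'available' MSI-X vectors. */
static int try_setup(int nvectors, int available)
{
	if (nvectors > available)
		return -1;   /* not enough vectors, caller should fall back */
	printf("configured with %d vector(s)%s\n",
	       nvectors, nvectors ? "" : " (legacy interrupt)");
	return 0;
}

static int find_vqs(int vqs_with_callbacks, int available)
{
	/* Best case: one vector per queue plus one for config changes. */
	if (!try_setup(vqs_with_callbacks + 1, available))
		return 0;
	/* Fallback: one vector for config, one shared by all queues. */
	if (!try_setup(2, available))
		return 0;
	/* Last resort: no MSI-X, shared legacy interrupt. */
	return try_setup(0, available);
}

int main(void)
{
	find_vqs(3, 4);  /* enough vectors for per-queue mode */
	find_vqs(3, 2);  /* falls back to the shared-vector mode */
	find_vqs(3, 0);  /* falls all the way back to a legacy IRQ */
	return 0;
}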
diff --git a/fs/block_dev.c b/fs/block_dev.c index 3a6d4fb2a329..94dfda24c06e 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -564,6 +564,16 @@ struct block_device *bdget(dev_t dev) | |||
564 | 564 | ||
565 | EXPORT_SYMBOL(bdget); | 565 | EXPORT_SYMBOL(bdget); |
566 | 566 | ||
567 | /** | ||
568 | * bdgrab -- Grab a reference to an already referenced block device | ||
569 | * @bdev: Block device to grab a reference to. | ||
570 | */ | ||
571 | struct block_device *bdgrab(struct block_device *bdev) | ||
572 | { | ||
573 | atomic_inc(&bdev->bd_inode->i_count); | ||
574 | return bdev; | ||
575 | } | ||
576 | |||
567 | long nr_blockdev_pages(void) | 577 | long nr_blockdev_pages(void) |
568 | { | 578 | { |
569 | struct block_device *bdev; | 579 | struct block_device *bdev; |
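The bdgrab() helper added above differs from bdget(): it only bumps the reference count of a block device the caller already references, with no inode lookup and no sleeping. A hedged user-space model of the reference semantics (a plain counter stands in for bdev->bd_inode->i_count):

/* User-space model of bdget()/bdgrab()/bdput() reference semantics.
 * The plain counter is an assumption for illustration only. */
#include <stdio.h>

struct block_device { int i_count; };

static struct block_device *bdgrab(struct block_device *bdev)
{
	/* caller must already hold a reference; no lookup, no sleeping */
	bdev->i_count++;
	return bdev;
}

static void bdput(struct block_device *bdev)
{
	bdev->i_count--;
}

int main(void)
{
	struct block_device bdev = { .i_count = 1 };	/* ref from bdget() */
	struct block_device *extra = bdgrab(&bdev);	/* cheap extra ref */

	printf("refs after bdgrab: %d\n", bdev.i_count);	/* prints 2 */
	bdput(extra);
	bdput(&bdev);
	return 0;
}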
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 6e4f6c50a120..019e8af449ab 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c | |||
@@ -424,11 +424,11 @@ int btrfs_requeue_work(struct btrfs_work *work) | |||
424 | * list | 424 | * list |
425 | */ | 425 | */ |
426 | if (worker->idle) { | 426 | if (worker->idle) { |
427 | spin_lock_irqsave(&worker->workers->lock, flags); | 427 | spin_lock(&worker->workers->lock); |
428 | worker->idle = 0; | 428 | worker->idle = 0; |
429 | list_move_tail(&worker->worker_list, | 429 | list_move_tail(&worker->worker_list, |
430 | &worker->workers->worker_list); | 430 | &worker->workers->worker_list); |
431 | spin_unlock_irqrestore(&worker->workers->lock, flags); | 431 | spin_unlock(&worker->workers->lock); |
432 | } | 432 | } |
433 | if (!worker->working) { | 433 | if (!worker->working) { |
434 | wake = 1; | 434 | wake = 1; |
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 60a45f3a4e91..3fdcc0512d3a 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c | |||
@@ -557,19 +557,7 @@ static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2) | |||
557 | 557 | ||
558 | btrfs_disk_key_to_cpu(&k1, disk); | 558 | btrfs_disk_key_to_cpu(&k1, disk); |
559 | 559 | ||
560 | if (k1.objectid > k2->objectid) | 560 | return btrfs_comp_cpu_keys(&k1, k2); |
561 | return 1; | ||
562 | if (k1.objectid < k2->objectid) | ||
563 | return -1; | ||
564 | if (k1.type > k2->type) | ||
565 | return 1; | ||
566 | if (k1.type < k2->type) | ||
567 | return -1; | ||
568 | if (k1.offset > k2->offset) | ||
569 | return 1; | ||
570 | if (k1.offset < k2->offset) | ||
571 | return -1; | ||
572 | return 0; | ||
573 | } | 561 | } |
574 | 562 | ||
575 | /* | 563 | /* |
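The comp_keys() change above delegates to btrfs_comp_cpu_keys(), which performs the same lexicographic comparison the deleted lines spelled out: objectid first, then type, then offset. A standalone model of that ordering, with the key structure simplified:

/* Standalone model of the (objectid, type, offset) key ordering that
 * btrfs_comp_cpu_keys() implements; the struct fields are simplified. */
#include <stdio.h>
#include <stdint.h>

struct cpu_key { uint64_t objectid; uint8_t type; uint64_t offset; };

static int comp_cpu_keys(const struct cpu_key *k1, const struct cpu_key *k2)
{
	if (k1->objectid > k2->objectid) return 1;
	if (k1->objectid < k2->objectid) return -1;
	if (k1->type > k2->type) return 1;
	if (k1->type < k2->type) return -1;
	if (k1->offset > k2->offset) return 1;
	if (k1->offset < k2->offset) return -1;
	return 0;
}

int main(void)
{
	struct cpu_key a = { 256, 1, 0 };
	struct cpu_key b = { 256, 1, 4096 };

	printf("%d\n", comp_cpu_keys(&a, &b));	/* -1: same item, lower offset */
	return 0;
}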
@@ -1052,9 +1040,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, | |||
1052 | BTRFS_NODEPTRS_PER_BLOCK(root) / 4) | 1040 | BTRFS_NODEPTRS_PER_BLOCK(root) / 4) |
1053 | return 0; | 1041 | return 0; |
1054 | 1042 | ||
1055 | if (btrfs_header_nritems(mid) > 2) | ||
1056 | return 0; | ||
1057 | |||
1058 | if (btrfs_header_nritems(mid) < 2) | 1043 | if (btrfs_header_nritems(mid) < 2) |
1059 | err_on_enospc = 1; | 1044 | err_on_enospc = 1; |
1060 | 1045 | ||
@@ -1701,6 +1686,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root | |||
1701 | struct extent_buffer *b; | 1686 | struct extent_buffer *b; |
1702 | int slot; | 1687 | int slot; |
1703 | int ret; | 1688 | int ret; |
1689 | int err; | ||
1704 | int level; | 1690 | int level; |
1705 | int lowest_unlock = 1; | 1691 | int lowest_unlock = 1; |
1706 | u8 lowest_level = 0; | 1692 | u8 lowest_level = 0; |
@@ -1737,8 +1723,6 @@ again: | |||
1737 | p->locks[level] = 1; | 1723 | p->locks[level] = 1; |
1738 | 1724 | ||
1739 | if (cow) { | 1725 | if (cow) { |
1740 | int wret; | ||
1741 | |||
1742 | /* | 1726 | /* |
1743 | * if we don't really need to cow this block | 1727 | * if we don't really need to cow this block |
1744 | * then we don't want to set the path blocking, | 1728 | * then we don't want to set the path blocking, |
@@ -1749,12 +1733,12 @@ again: | |||
1749 | 1733 | ||
1750 | btrfs_set_path_blocking(p); | 1734 | btrfs_set_path_blocking(p); |
1751 | 1735 | ||
1752 | wret = btrfs_cow_block(trans, root, b, | 1736 | err = btrfs_cow_block(trans, root, b, |
1753 | p->nodes[level + 1], | 1737 | p->nodes[level + 1], |
1754 | p->slots[level + 1], &b); | 1738 | p->slots[level + 1], &b); |
1755 | if (wret) { | 1739 | if (err) { |
1756 | free_extent_buffer(b); | 1740 | free_extent_buffer(b); |
1757 | ret = wret; | 1741 | ret = err; |
1758 | goto done; | 1742 | goto done; |
1759 | } | 1743 | } |
1760 | } | 1744 | } |
@@ -1793,41 +1777,45 @@ cow_done: | |||
1793 | ret = bin_search(b, key, level, &slot); | 1777 | ret = bin_search(b, key, level, &slot); |
1794 | 1778 | ||
1795 | if (level != 0) { | 1779 | if (level != 0) { |
1796 | if (ret && slot > 0) | 1780 | int dec = 0; |
1781 | if (ret && slot > 0) { | ||
1782 | dec = 1; | ||
1797 | slot -= 1; | 1783 | slot -= 1; |
1784 | } | ||
1798 | p->slots[level] = slot; | 1785 | p->slots[level] = slot; |
1799 | ret = setup_nodes_for_search(trans, root, p, b, level, | 1786 | err = setup_nodes_for_search(trans, root, p, b, level, |
1800 | ins_len); | 1787 | ins_len); |
1801 | if (ret == -EAGAIN) | 1788 | if (err == -EAGAIN) |
1802 | goto again; | 1789 | goto again; |
1803 | else if (ret) | 1790 | if (err) { |
1791 | ret = err; | ||
1804 | goto done; | 1792 | goto done; |
1793 | } | ||
1805 | b = p->nodes[level]; | 1794 | b = p->nodes[level]; |
1806 | slot = p->slots[level]; | 1795 | slot = p->slots[level]; |
1807 | 1796 | ||
1808 | unlock_up(p, level, lowest_unlock); | 1797 | unlock_up(p, level, lowest_unlock); |
1809 | 1798 | ||
1810 | /* this is only true while dropping a snapshot */ | ||
1811 | if (level == lowest_level) { | 1799 | if (level == lowest_level) { |
1812 | ret = 0; | 1800 | if (dec) |
1801 | p->slots[level]++; | ||
1813 | goto done; | 1802 | goto done; |
1814 | } | 1803 | } |
1815 | 1804 | ||
1816 | ret = read_block_for_search(trans, root, p, | 1805 | err = read_block_for_search(trans, root, p, |
1817 | &b, level, slot, key); | 1806 | &b, level, slot, key); |
1818 | if (ret == -EAGAIN) | 1807 | if (err == -EAGAIN) |
1819 | goto again; | 1808 | goto again; |
1820 | 1809 | if (err) { | |
1821 | if (ret == -EIO) | 1810 | ret = err; |
1822 | goto done; | 1811 | goto done; |
1812 | } | ||
1823 | 1813 | ||
1824 | if (!p->skip_locking) { | 1814 | if (!p->skip_locking) { |
1825 | int lret; | ||
1826 | |||
1827 | btrfs_clear_path_blocking(p, NULL); | 1815 | btrfs_clear_path_blocking(p, NULL); |
1828 | lret = btrfs_try_spin_lock(b); | 1816 | err = btrfs_try_spin_lock(b); |
1829 | 1817 | ||
1830 | if (!lret) { | 1818 | if (!err) { |
1831 | btrfs_set_path_blocking(p); | 1819 | btrfs_set_path_blocking(p); |
1832 | btrfs_tree_lock(b); | 1820 | btrfs_tree_lock(b); |
1833 | btrfs_clear_path_blocking(p, b); | 1821 | btrfs_clear_path_blocking(p, b); |
@@ -1837,16 +1825,14 @@ cow_done: | |||
1837 | p->slots[level] = slot; | 1825 | p->slots[level] = slot; |
1838 | if (ins_len > 0 && | 1826 | if (ins_len > 0 && |
1839 | btrfs_leaf_free_space(root, b) < ins_len) { | 1827 | btrfs_leaf_free_space(root, b) < ins_len) { |
1840 | int sret; | ||
1841 | |||
1842 | btrfs_set_path_blocking(p); | 1828 | btrfs_set_path_blocking(p); |
1843 | sret = split_leaf(trans, root, key, | 1829 | err = split_leaf(trans, root, key, |
1844 | p, ins_len, ret == 0); | 1830 | p, ins_len, ret == 0); |
1845 | btrfs_clear_path_blocking(p, NULL); | 1831 | btrfs_clear_path_blocking(p, NULL); |
1846 | 1832 | ||
1847 | BUG_ON(sret > 0); | 1833 | BUG_ON(err > 0); |
1848 | if (sret) { | 1834 | if (err) { |
1849 | ret = sret; | 1835 | ret = err; |
1850 | goto done; | 1836 | goto done; |
1851 | } | 1837 | } |
1852 | } | 1838 | } |
@@ -3807,7 +3793,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, | |||
3807 | } | 3793 | } |
3808 | 3794 | ||
3809 | /* delete the leaf if it is mostly empty */ | 3795 | /* delete the leaf if it is mostly empty */ |
3810 | if (used < BTRFS_LEAF_DATA_SIZE(root) / 2) { | 3796 | if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) { |
3811 | /* push_leaf_left fixes the path. | 3797 | /* push_leaf_left fixes the path. |
3812 | * make sure the path still points to our leaf | 3798 | * make sure the path still points to our leaf |
3813 | * for possible call to del_ptr below | 3799 | * for possible call to del_ptr below |
@@ -4042,10 +4028,9 @@ out: | |||
4042 | * calling this function. | 4028 | * calling this function. |
4043 | */ | 4029 | */ |
4044 | int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, | 4030 | int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, |
4045 | struct btrfs_key *key, int lowest_level, | 4031 | struct btrfs_key *key, int level, |
4046 | int cache_only, u64 min_trans) | 4032 | int cache_only, u64 min_trans) |
4047 | { | 4033 | { |
4048 | int level = lowest_level; | ||
4049 | int slot; | 4034 | int slot; |
4050 | struct extent_buffer *c; | 4035 | struct extent_buffer *c; |
4051 | 4036 | ||
@@ -4058,11 +4043,40 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, | |||
4058 | c = path->nodes[level]; | 4043 | c = path->nodes[level]; |
4059 | next: | 4044 | next: |
4060 | if (slot >= btrfs_header_nritems(c)) { | 4045 | if (slot >= btrfs_header_nritems(c)) { |
4061 | level++; | 4046 | int ret; |
4062 | if (level == BTRFS_MAX_LEVEL) | 4047 | int orig_lowest; |
4048 | struct btrfs_key cur_key; | ||
4049 | if (level + 1 >= BTRFS_MAX_LEVEL || | ||
4050 | !path->nodes[level + 1]) | ||
4063 | return 1; | 4051 | return 1; |
4064 | continue; | 4052 | |
4053 | if (path->locks[level + 1]) { | ||
4054 | level++; | ||
4055 | continue; | ||
4056 | } | ||
4057 | |||
4058 | slot = btrfs_header_nritems(c) - 1; | ||
4059 | if (level == 0) | ||
4060 | btrfs_item_key_to_cpu(c, &cur_key, slot); | ||
4061 | else | ||
4062 | btrfs_node_key_to_cpu(c, &cur_key, slot); | ||
4063 | |||
4064 | orig_lowest = path->lowest_level; | ||
4065 | btrfs_release_path(root, path); | ||
4066 | path->lowest_level = level; | ||
4067 | ret = btrfs_search_slot(NULL, root, &cur_key, path, | ||
4068 | 0, 0); | ||
4069 | path->lowest_level = orig_lowest; | ||
4070 | if (ret < 0) | ||
4071 | return ret; | ||
4072 | |||
4073 | c = path->nodes[level]; | ||
4074 | slot = path->slots[level]; | ||
4075 | if (ret == 0) | ||
4076 | slot++; | ||
4077 | goto next; | ||
4065 | } | 4078 | } |
4079 | |||
4066 | if (level == 0) | 4080 | if (level == 0) |
4067 | btrfs_item_key_to_cpu(c, key, slot); | 4081 | btrfs_item_key_to_cpu(c, key, slot); |
4068 | else { | 4082 | else { |
@@ -4146,7 +4160,8 @@ again: | |||
4146 | * advance the path if there are now more items available. | 4160 | * advance the path if there are now more items available. |
4147 | */ | 4161 | */ |
4148 | if (nritems > 0 && path->slots[0] < nritems - 1) { | 4162 | if (nritems > 0 && path->slots[0] < nritems - 1) { |
4149 | path->slots[0]++; | 4163 | if (ret == 0) |
4164 | path->slots[0]++; | ||
4150 | ret = 0; | 4165 | ret = 0; |
4151 | goto done; | 4166 | goto done; |
4152 | } | 4167 | } |
@@ -4278,10 +4293,10 @@ int btrfs_previous_item(struct btrfs_root *root, | |||
4278 | path->slots[0]--; | 4293 | path->slots[0]--; |
4279 | 4294 | ||
4280 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | 4295 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
4281 | if (found_key.type == type) | ||
4282 | return 0; | ||
4283 | if (found_key.objectid < min_objectid) | 4296 | if (found_key.objectid < min_objectid) |
4284 | break; | 4297 | break; |
4298 | if (found_key.type == type) | ||
4299 | return 0; | ||
4285 | if (found_key.objectid == min_objectid && | 4300 | if (found_key.objectid == min_objectid && |
4286 | found_key.type < type) | 4301 | found_key.type < type) |
4287 | break; | 4302 | break; |
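The reordering in btrfs_previous_item() above checks the objectid lower bound before the type match; otherwise an item of the requested type but with an objectid below min_objectid could be reported as a hit. A small standalone model of the corrected order, with the tree walk reduced to a single key check:

/* Model of why the objectid bound must be tested before the type match
 * in btrfs_previous_item(); the key and the walk are simplified. */
#include <stdio.h>
#include <stdint.h>

struct key { uint64_t objectid; uint8_t type; };

static int previous_item(const struct key *k, uint64_t min_objectid, uint8_t type)
{
	/* new order: range bound first, then type */
	if (k->objectid < min_objectid)
		return 1;		/* walked past the caller's range */
	if (k->type == type)
		return 0;		/* genuine hit */
	return 1;
}

int main(void)
{
	struct key stale = { .objectid = 100, .type = 5 };

	/* right type, but below min_objectid 200: must not be reported */
	printf("%d\n", previous_item(&stale, 200, 5));	/* prints 1 */
	return 0;
}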
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 98a873838717..215ef8cae823 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -481,7 +481,7 @@ struct btrfs_shared_data_ref { | |||
481 | 481 | ||
482 | struct btrfs_extent_inline_ref { | 482 | struct btrfs_extent_inline_ref { |
483 | u8 type; | 483 | u8 type; |
484 | u64 offset; | 484 | __le64 offset; |
485 | } __attribute__ ((__packed__)); | 485 | } __attribute__ ((__packed__)); |
486 | 486 | ||
487 | /* old style backrefs item */ | 487 | /* old style backrefs item */ |
@@ -689,6 +689,7 @@ struct btrfs_space_info { | |||
689 | struct list_head block_groups; | 689 | struct list_head block_groups; |
690 | spinlock_t lock; | 690 | spinlock_t lock; |
691 | struct rw_semaphore groups_sem; | 691 | struct rw_semaphore groups_sem; |
692 | atomic_t caching_threads; | ||
692 | }; | 693 | }; |
693 | 694 | ||
694 | /* | 695 | /* |
@@ -707,6 +708,9 @@ struct btrfs_free_cluster { | |||
707 | /* first extent starting offset */ | 708 | /* first extent starting offset */ |
708 | u64 window_start; | 709 | u64 window_start; |
709 | 710 | ||
711 | /* if this cluster simply points at a bitmap in the block group */ | ||
712 | bool points_to_bitmap; | ||
713 | |||
710 | struct btrfs_block_group_cache *block_group; | 714 | struct btrfs_block_group_cache *block_group; |
711 | /* | 715 | /* |
712 | * when a cluster is allocated from a block group, we put the | 716 | * when a cluster is allocated from a block group, we put the |
@@ -716,24 +720,37 @@ struct btrfs_free_cluster { | |||
716 | struct list_head block_group_list; | 720 | struct list_head block_group_list; |
717 | }; | 721 | }; |
718 | 722 | ||
723 | enum btrfs_caching_type { | ||
724 | BTRFS_CACHE_NO = 0, | ||
725 | BTRFS_CACHE_STARTED = 1, | ||
726 | BTRFS_CACHE_FINISHED = 2, | ||
727 | }; | ||
728 | |||
719 | struct btrfs_block_group_cache { | 729 | struct btrfs_block_group_cache { |
720 | struct btrfs_key key; | 730 | struct btrfs_key key; |
721 | struct btrfs_block_group_item item; | 731 | struct btrfs_block_group_item item; |
732 | struct btrfs_fs_info *fs_info; | ||
722 | spinlock_t lock; | 733 | spinlock_t lock; |
723 | struct mutex cache_mutex; | ||
724 | u64 pinned; | 734 | u64 pinned; |
725 | u64 reserved; | 735 | u64 reserved; |
726 | u64 flags; | 736 | u64 flags; |
727 | int cached; | 737 | u64 sectorsize; |
738 | int extents_thresh; | ||
739 | int free_extents; | ||
740 | int total_bitmaps; | ||
728 | int ro; | 741 | int ro; |
729 | int dirty; | 742 | int dirty; |
730 | 743 | ||
744 | /* cache tracking stuff */ | ||
745 | wait_queue_head_t caching_q; | ||
746 | int cached; | ||
747 | |||
731 | struct btrfs_space_info *space_info; | 748 | struct btrfs_space_info *space_info; |
732 | 749 | ||
733 | /* free space cache stuff */ | 750 | /* free space cache stuff */ |
734 | spinlock_t tree_lock; | 751 | spinlock_t tree_lock; |
735 | struct rb_root free_space_bytes; | ||
736 | struct rb_root free_space_offset; | 752 | struct rb_root free_space_offset; |
753 | u64 free_space; | ||
737 | 754 | ||
738 | /* block group cache stuff */ | 755 | /* block group cache stuff */ |
739 | struct rb_node cache_node; | 756 | struct rb_node cache_node; |
@@ -942,6 +959,9 @@ struct btrfs_root { | |||
942 | /* the node lock is held while changing the node pointer */ | 959 | /* the node lock is held while changing the node pointer */ |
943 | spinlock_t node_lock; | 960 | spinlock_t node_lock; |
944 | 961 | ||
962 | /* taken when updating the commit root */ | ||
963 | struct rw_semaphore commit_root_sem; | ||
964 | |||
945 | struct extent_buffer *commit_root; | 965 | struct extent_buffer *commit_root; |
946 | struct btrfs_root *log_root; | 966 | struct btrfs_root *log_root; |
947 | struct btrfs_root *reloc_root; | 967 | struct btrfs_root *reloc_root; |
@@ -1988,6 +2008,7 @@ void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode, | |||
1988 | u64 bytes); | 2008 | u64 bytes); |
1989 | void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode, | 2009 | void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode, |
1990 | u64 bytes); | 2010 | u64 bytes); |
2011 | void btrfs_free_pinned_extents(struct btrfs_fs_info *info); | ||
1991 | /* ctree.c */ | 2012 | /* ctree.c */ |
1992 | int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, | 2013 | int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, |
1993 | int level, int *slot); | 2014 | int level, int *slot); |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index d28d29c95f7c..7dcaa8138864 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -909,6 +909,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, | |||
909 | spin_lock_init(&root->inode_lock); | 909 | spin_lock_init(&root->inode_lock); |
910 | mutex_init(&root->objectid_mutex); | 910 | mutex_init(&root->objectid_mutex); |
911 | mutex_init(&root->log_mutex); | 911 | mutex_init(&root->log_mutex); |
912 | init_rwsem(&root->commit_root_sem); | ||
912 | init_waitqueue_head(&root->log_writer_wait); | 913 | init_waitqueue_head(&root->log_writer_wait); |
913 | init_waitqueue_head(&root->log_commit_wait[0]); | 914 | init_waitqueue_head(&root->log_commit_wait[0]); |
914 | init_waitqueue_head(&root->log_commit_wait[1]); | 915 | init_waitqueue_head(&root->log_commit_wait[1]); |
@@ -1799,6 +1800,11 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1799 | btrfs_super_chunk_root(disk_super), | 1800 | btrfs_super_chunk_root(disk_super), |
1800 | blocksize, generation); | 1801 | blocksize, generation); |
1801 | BUG_ON(!chunk_root->node); | 1802 | BUG_ON(!chunk_root->node); |
1803 | if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) { | ||
1804 | printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n", | ||
1805 | sb->s_id); | ||
1806 | goto fail_chunk_root; | ||
1807 | } | ||
1802 | btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); | 1808 | btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); |
1803 | chunk_root->commit_root = btrfs_root_node(chunk_root); | 1809 | chunk_root->commit_root = btrfs_root_node(chunk_root); |
1804 | 1810 | ||
@@ -1826,6 +1832,11 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1826 | blocksize, generation); | 1832 | blocksize, generation); |
1827 | if (!tree_root->node) | 1833 | if (!tree_root->node) |
1828 | goto fail_chunk_root; | 1834 | goto fail_chunk_root; |
1835 | if (!test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) { | ||
1836 | printk(KERN_WARNING "btrfs: failed to read tree root on %s\n", | ||
1837 | sb->s_id); | ||
1838 | goto fail_tree_root; | ||
1839 | } | ||
1829 | btrfs_set_root_node(&tree_root->root_item, tree_root->node); | 1840 | btrfs_set_root_node(&tree_root->root_item, tree_root->node); |
1830 | tree_root->commit_root = btrfs_root_node(tree_root); | 1841 | tree_root->commit_root = btrfs_root_node(tree_root); |
1831 | 1842 | ||
@@ -2322,6 +2333,9 @@ int close_ctree(struct btrfs_root *root) | |||
2322 | printk(KERN_ERR "btrfs: commit super ret %d\n", ret); | 2333 | printk(KERN_ERR "btrfs: commit super ret %d\n", ret); |
2323 | } | 2334 | } |
2324 | 2335 | ||
2336 | fs_info->closing = 2; | ||
2337 | smp_mb(); | ||
2338 | |||
2325 | if (fs_info->delalloc_bytes) { | 2339 | if (fs_info->delalloc_bytes) { |
2326 | printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n", | 2340 | printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n", |
2327 | (unsigned long long)fs_info->delalloc_bytes); | 2341 | (unsigned long long)fs_info->delalloc_bytes); |
@@ -2343,6 +2357,7 @@ int close_ctree(struct btrfs_root *root) | |||
2343 | free_extent_buffer(root->fs_info->csum_root->commit_root); | 2357 | free_extent_buffer(root->fs_info->csum_root->commit_root); |
2344 | 2358 | ||
2345 | btrfs_free_block_groups(root->fs_info); | 2359 | btrfs_free_block_groups(root->fs_info); |
2360 | btrfs_free_pinned_extents(root->fs_info); | ||
2346 | 2361 | ||
2347 | del_fs_roots(fs_info); | 2362 | del_fs_roots(fs_info); |
2348 | 2363 | ||
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a5aca3997d42..fadf69a2764b 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/blkdev.h> | 21 | #include <linux/blkdev.h> |
22 | #include <linux/sort.h> | 22 | #include <linux/sort.h> |
23 | #include <linux/rcupdate.h> | 23 | #include <linux/rcupdate.h> |
24 | #include <linux/kthread.h> | ||
24 | #include "compat.h" | 25 | #include "compat.h" |
25 | #include "hash.h" | 26 | #include "hash.h" |
26 | #include "ctree.h" | 27 | #include "ctree.h" |
@@ -61,6 +62,13 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, | |||
61 | struct btrfs_root *extent_root, u64 alloc_bytes, | 62 | struct btrfs_root *extent_root, u64 alloc_bytes, |
62 | u64 flags, int force); | 63 | u64 flags, int force); |
63 | 64 | ||
65 | static noinline int | ||
66 | block_group_cache_done(struct btrfs_block_group_cache *cache) | ||
67 | { | ||
68 | smp_mb(); | ||
69 | return cache->cached == BTRFS_CACHE_FINISHED; | ||
70 | } | ||
71 | |||
64 | static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) | 72 | static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) |
65 | { | 73 | { |
66 | return (cache->flags & bits) == bits; | 74 | return (cache->flags & bits) == bits; |
@@ -146,20 +154,70 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr, | |||
146 | } | 154 | } |
147 | 155 | ||
148 | /* | 156 | /* |
157 | * We always set EXTENT_LOCKED for the super mirror extents so we don't | ||
158 | * overwrite them, so those bits need to be unset. Also, if we are unmounting | ||
159 | * with pinned extents still sitting there because a block group was still caching, | ||
160 | * we need to clear those now, since we are done. | ||
161 | */ | ||
162 | void btrfs_free_pinned_extents(struct btrfs_fs_info *info) | ||
163 | { | ||
164 | u64 start, end, last = 0; | ||
165 | int ret; | ||
166 | |||
167 | while (1) { | ||
168 | ret = find_first_extent_bit(&info->pinned_extents, last, | ||
169 | &start, &end, | ||
170 | EXTENT_LOCKED|EXTENT_DIRTY); | ||
171 | if (ret) | ||
172 | break; | ||
173 | |||
174 | clear_extent_bits(&info->pinned_extents, start, end, | ||
175 | EXTENT_LOCKED|EXTENT_DIRTY, GFP_NOFS); | ||
176 | last = end+1; | ||
177 | } | ||
178 | } | ||
179 | |||
180 | static int remove_sb_from_cache(struct btrfs_root *root, | ||
181 | struct btrfs_block_group_cache *cache) | ||
182 | { | ||
183 | struct btrfs_fs_info *fs_info = root->fs_info; | ||
184 | u64 bytenr; | ||
185 | u64 *logical; | ||
186 | int stripe_len; | ||
187 | int i, nr, ret; | ||
188 | |||
189 | for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { | ||
190 | bytenr = btrfs_sb_offset(i); | ||
191 | ret = btrfs_rmap_block(&root->fs_info->mapping_tree, | ||
192 | cache->key.objectid, bytenr, | ||
193 | 0, &logical, &nr, &stripe_len); | ||
194 | BUG_ON(ret); | ||
195 | while (nr--) { | ||
196 | try_lock_extent(&fs_info->pinned_extents, | ||
197 | logical[nr], | ||
198 | logical[nr] + stripe_len - 1, GFP_NOFS); | ||
199 | } | ||
200 | kfree(logical); | ||
201 | } | ||
202 | |||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | /* | ||
149 | * this is only called by cache_block_group, since we could have freed extents | 207 | * this is only called by cache_block_group, since we could have freed extents |
150 | * we need to check the pinned_extents for any extents that can't be used yet | 208 | * we need to check the pinned_extents for any extents that can't be used yet |
151 | * since their free space will be released as soon as the transaction commits. | 209 | * since their free space will be released as soon as the transaction commits. |
152 | */ | 210 | */ |
153 | static int add_new_free_space(struct btrfs_block_group_cache *block_group, | 211 | static u64 add_new_free_space(struct btrfs_block_group_cache *block_group, |
154 | struct btrfs_fs_info *info, u64 start, u64 end) | 212 | struct btrfs_fs_info *info, u64 start, u64 end) |
155 | { | 213 | { |
156 | u64 extent_start, extent_end, size; | 214 | u64 extent_start, extent_end, size, total_added = 0; |
157 | int ret; | 215 | int ret; |
158 | 216 | ||
159 | while (start < end) { | 217 | while (start < end) { |
160 | ret = find_first_extent_bit(&info->pinned_extents, start, | 218 | ret = find_first_extent_bit(&info->pinned_extents, start, |
161 | &extent_start, &extent_end, | 219 | &extent_start, &extent_end, |
162 | EXTENT_DIRTY); | 220 | EXTENT_DIRTY|EXTENT_LOCKED); |
163 | if (ret) | 221 | if (ret) |
164 | break; | 222 | break; |
165 | 223 | ||
@@ -167,6 +225,7 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group, | |||
167 | start = extent_end + 1; | 225 | start = extent_end + 1; |
168 | } else if (extent_start > start && extent_start < end) { | 226 | } else if (extent_start > start && extent_start < end) { |
169 | size = extent_start - start; | 227 | size = extent_start - start; |
228 | total_added += size; | ||
170 | ret = btrfs_add_free_space(block_group, start, | 229 | ret = btrfs_add_free_space(block_group, start, |
171 | size); | 230 | size); |
172 | BUG_ON(ret); | 231 | BUG_ON(ret); |
@@ -178,84 +237,79 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group, | |||
178 | 237 | ||
179 | if (start < end) { | 238 | if (start < end) { |
180 | size = end - start; | 239 | size = end - start; |
240 | total_added += size; | ||
181 | ret = btrfs_add_free_space(block_group, start, size); | 241 | ret = btrfs_add_free_space(block_group, start, size); |
182 | BUG_ON(ret); | 242 | BUG_ON(ret); |
183 | } | 243 | } |
184 | 244 | ||
185 | return 0; | 245 | return total_added; |
186 | } | 246 | } |
187 | 247 | ||
188 | static int remove_sb_from_cache(struct btrfs_root *root, | 248 | static int caching_kthread(void *data) |
189 | struct btrfs_block_group_cache *cache) | ||
190 | { | ||
191 | u64 bytenr; | ||
192 | u64 *logical; | ||
193 | int stripe_len; | ||
194 | int i, nr, ret; | ||
195 | |||
196 | for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { | ||
197 | bytenr = btrfs_sb_offset(i); | ||
198 | ret = btrfs_rmap_block(&root->fs_info->mapping_tree, | ||
199 | cache->key.objectid, bytenr, 0, | ||
200 | &logical, &nr, &stripe_len); | ||
201 | BUG_ON(ret); | ||
202 | while (nr--) { | ||
203 | btrfs_remove_free_space(cache, logical[nr], | ||
204 | stripe_len); | ||
205 | } | ||
206 | kfree(logical); | ||
207 | } | ||
208 | return 0; | ||
209 | } | ||
210 | |||
211 | static int cache_block_group(struct btrfs_root *root, | ||
212 | struct btrfs_block_group_cache *block_group) | ||
213 | { | 249 | { |
250 | struct btrfs_block_group_cache *block_group = data; | ||
251 | struct btrfs_fs_info *fs_info = block_group->fs_info; | ||
252 | u64 last = 0; | ||
214 | struct btrfs_path *path; | 253 | struct btrfs_path *path; |
215 | int ret = 0; | 254 | int ret = 0; |
216 | struct btrfs_key key; | 255 | struct btrfs_key key; |
217 | struct extent_buffer *leaf; | 256 | struct extent_buffer *leaf; |
218 | int slot; | 257 | int slot; |
219 | u64 last; | 258 | u64 total_found = 0; |
220 | |||
221 | if (!block_group) | ||
222 | return 0; | ||
223 | 259 | ||
224 | root = root->fs_info->extent_root; | 260 | BUG_ON(!fs_info); |
225 | |||
226 | if (block_group->cached) | ||
227 | return 0; | ||
228 | 261 | ||
229 | path = btrfs_alloc_path(); | 262 | path = btrfs_alloc_path(); |
230 | if (!path) | 263 | if (!path) |
231 | return -ENOMEM; | 264 | return -ENOMEM; |
232 | 265 | ||
233 | path->reada = 2; | 266 | atomic_inc(&block_group->space_info->caching_threads); |
267 | last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); | ||
268 | again: | ||
269 | /* need to make sure the commit_root doesn't disappear */ | ||
270 | down_read(&fs_info->extent_root->commit_root_sem); | ||
271 | |||
234 | /* | 272 | /* |
235 | * we get into deadlocks with paths held by callers of this function. | 273 | * We don't want to deadlock with somebody trying to allocate a new |
236 | * since the alloc_mutex is protecting things right now, just | 274 | * extent for the extent root while also trying to search the extent |
237 | * skip the locking here | 275 | * root to add free space. So we skip locking and search the commit |
276 | * root, since it's read-only | ||
238 | */ | 277 | */ |
239 | path->skip_locking = 1; | 278 | path->skip_locking = 1; |
240 | last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); | 279 | path->search_commit_root = 1; |
280 | path->reada = 2; | ||
281 | |||
241 | key.objectid = last; | 282 | key.objectid = last; |
242 | key.offset = 0; | 283 | key.offset = 0; |
243 | btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY); | 284 | btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY); |
244 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 285 | ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0); |
245 | if (ret < 0) | 286 | if (ret < 0) |
246 | goto err; | 287 | goto err; |
247 | 288 | ||
248 | while (1) { | 289 | while (1) { |
290 | smp_mb(); | ||
291 | if (block_group->fs_info->closing > 1) { | ||
292 | last = (u64)-1; | ||
293 | break; | ||
294 | } | ||
295 | |||
249 | leaf = path->nodes[0]; | 296 | leaf = path->nodes[0]; |
250 | slot = path->slots[0]; | 297 | slot = path->slots[0]; |
251 | if (slot >= btrfs_header_nritems(leaf)) { | 298 | if (slot >= btrfs_header_nritems(leaf)) { |
252 | ret = btrfs_next_leaf(root, path); | 299 | ret = btrfs_next_leaf(fs_info->extent_root, path); |
253 | if (ret < 0) | 300 | if (ret < 0) |
254 | goto err; | 301 | goto err; |
255 | if (ret == 0) | 302 | else if (ret) |
256 | continue; | ||
257 | else | ||
258 | break; | 303 | break; |
304 | |||
305 | if (need_resched()) { | ||
306 | btrfs_release_path(fs_info->extent_root, path); | ||
307 | up_read(&fs_info->extent_root->commit_root_sem); | ||
308 | cond_resched(); | ||
309 | goto again; | ||
310 | } | ||
311 | |||
312 | continue; | ||
259 | } | 313 | } |
260 | btrfs_item_key_to_cpu(leaf, &key, slot); | 314 | btrfs_item_key_to_cpu(leaf, &key, slot); |
261 | if (key.objectid < block_group->key.objectid) | 315 | if (key.objectid < block_group->key.objectid) |
@@ -266,24 +320,59 @@ static int cache_block_group(struct btrfs_root *root, | |||
266 | break; | 320 | break; |
267 | 321 | ||
268 | if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) { | 322 | if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) { |
269 | add_new_free_space(block_group, root->fs_info, last, | 323 | total_found += add_new_free_space(block_group, |
270 | key.objectid); | 324 | fs_info, last, |
271 | 325 | key.objectid); | |
272 | last = key.objectid + key.offset; | 326 | last = key.objectid + key.offset; |
273 | } | 327 | } |
328 | |||
329 | if (total_found > (1024 * 1024 * 2)) { | ||
330 | total_found = 0; | ||
331 | wake_up(&block_group->caching_q); | ||
332 | } | ||
274 | next: | 333 | next: |
275 | path->slots[0]++; | 334 | path->slots[0]++; |
276 | } | 335 | } |
336 | ret = 0; | ||
277 | 337 | ||
278 | add_new_free_space(block_group, root->fs_info, last, | 338 | total_found += add_new_free_space(block_group, fs_info, last, |
279 | block_group->key.objectid + | 339 | block_group->key.objectid + |
280 | block_group->key.offset); | 340 | block_group->key.offset); |
341 | |||
342 | spin_lock(&block_group->lock); | ||
343 | block_group->cached = BTRFS_CACHE_FINISHED; | ||
344 | spin_unlock(&block_group->lock); | ||
281 | 345 | ||
282 | block_group->cached = 1; | ||
283 | remove_sb_from_cache(root, block_group); | ||
284 | ret = 0; | ||
285 | err: | 346 | err: |
286 | btrfs_free_path(path); | 347 | btrfs_free_path(path); |
348 | up_read(&fs_info->extent_root->commit_root_sem); | ||
349 | atomic_dec(&block_group->space_info->caching_threads); | ||
350 | wake_up(&block_group->caching_q); | ||
351 | |||
352 | return 0; | ||
353 | } | ||
354 | |||
355 | static int cache_block_group(struct btrfs_block_group_cache *cache) | ||
356 | { | ||
357 | struct task_struct *tsk; | ||
358 | int ret = 0; | ||
359 | |||
360 | spin_lock(&cache->lock); | ||
361 | if (cache->cached != BTRFS_CACHE_NO) { | ||
362 | spin_unlock(&cache->lock); | ||
363 | return ret; | ||
364 | } | ||
365 | cache->cached = BTRFS_CACHE_STARTED; | ||
366 | spin_unlock(&cache->lock); | ||
367 | |||
368 | tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n", | ||
369 | cache->key.objectid); | ||
370 | if (IS_ERR(tsk)) { | ||
371 | ret = PTR_ERR(tsk); | ||
372 | printk(KERN_ERR "error running thread %d\n", ret); | ||
373 | BUG(); | ||
374 | } | ||
375 | |||
287 | return ret; | 376 | return ret; |
288 | } | 377 | } |
289 | 378 | ||
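cache_block_group() and caching_kthread() above implement an asynchronous fill: mark the group BTRFS_CACHE_STARTED, hand the scan to a kthread, and have the kthread flip the state to BTRFS_CACHE_FINISHED and wake anyone sleeping on caching_q. A condensed user-space model of that handshake, with pthreads standing in for kthread_run() and the wait queue:

/* User-space model of the STARTED -> FINISHED caching handshake;
 * pthreads stand in for kthread_run() and wait_event(). */
#include <pthread.h>
#include <stdio.h>

enum { CACHE_NO, CACHE_STARTED, CACHE_FINISHED };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t caching_q = PTHREAD_COND_INITIALIZER;
static int cached = CACHE_NO;

static void *caching_thread(void *arg)
{
	/* ... walk the commit root and add free space here ... */
	pthread_mutex_lock(&lock);
	cached = CACHE_FINISHED;
	pthread_cond_broadcast(&caching_q);	/* wake_up(&caching_q) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tsk;

	pthread_mutex_lock(&lock);
	if (cached == CACHE_NO) {
		cached = CACHE_STARTED;
		pthread_create(&tsk, NULL, caching_thread, NULL);
	}
	/* wait_event(caching_q, block_group_cache_done(cache)) */
	while (cached != CACHE_FINISHED)
		pthread_cond_wait(&caching_q, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(tsk, NULL);
	printf("cache done\n");
	return 0;
}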
@@ -2387,13 +2476,29 @@ fail: | |||
2387 | 2476 | ||
2388 | } | 2477 | } |
2389 | 2478 | ||
2479 | static struct btrfs_block_group_cache * | ||
2480 | next_block_group(struct btrfs_root *root, | ||
2481 | struct btrfs_block_group_cache *cache) | ||
2482 | { | ||
2483 | struct rb_node *node; | ||
2484 | spin_lock(&root->fs_info->block_group_cache_lock); | ||
2485 | node = rb_next(&cache->cache_node); | ||
2486 | btrfs_put_block_group(cache); | ||
2487 | if (node) { | ||
2488 | cache = rb_entry(node, struct btrfs_block_group_cache, | ||
2489 | cache_node); | ||
2490 | atomic_inc(&cache->count); | ||
2491 | } else | ||
2492 | cache = NULL; | ||
2493 | spin_unlock(&root->fs_info->block_group_cache_lock); | ||
2494 | return cache; | ||
2495 | } | ||
2496 | |||
2390 | int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, | 2497 | int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, |
2391 | struct btrfs_root *root) | 2498 | struct btrfs_root *root) |
2392 | { | 2499 | { |
2393 | struct btrfs_block_group_cache *cache, *entry; | 2500 | struct btrfs_block_group_cache *cache; |
2394 | struct rb_node *n; | ||
2395 | int err = 0; | 2501 | int err = 0; |
2396 | int werr = 0; | ||
2397 | struct btrfs_path *path; | 2502 | struct btrfs_path *path; |
2398 | u64 last = 0; | 2503 | u64 last = 0; |
2399 | 2504 | ||
@@ -2402,39 +2507,35 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, | |||
2402 | return -ENOMEM; | 2507 | return -ENOMEM; |
2403 | 2508 | ||
2404 | while (1) { | 2509 | while (1) { |
2405 | cache = NULL; | 2510 | if (last == 0) { |
2406 | spin_lock(&root->fs_info->block_group_cache_lock); | 2511 | err = btrfs_run_delayed_refs(trans, root, |
2407 | for (n = rb_first(&root->fs_info->block_group_cache_tree); | 2512 | (unsigned long)-1); |
2408 | n; n = rb_next(n)) { | 2513 | BUG_ON(err); |
2409 | entry = rb_entry(n, struct btrfs_block_group_cache, | ||
2410 | cache_node); | ||
2411 | if (entry->dirty) { | ||
2412 | cache = entry; | ||
2413 | break; | ||
2414 | } | ||
2415 | } | 2514 | } |
2416 | spin_unlock(&root->fs_info->block_group_cache_lock); | ||
2417 | 2515 | ||
2418 | if (!cache) | 2516 | cache = btrfs_lookup_first_block_group(root->fs_info, last); |
2419 | break; | 2517 | while (cache) { |
2518 | if (cache->dirty) | ||
2519 | break; | ||
2520 | cache = next_block_group(root, cache); | ||
2521 | } | ||
2522 | if (!cache) { | ||
2523 | if (last == 0) | ||
2524 | break; | ||
2525 | last = 0; | ||
2526 | continue; | ||
2527 | } | ||
2420 | 2528 | ||
2421 | cache->dirty = 0; | 2529 | cache->dirty = 0; |
2422 | last += cache->key.offset; | 2530 | last = cache->key.objectid + cache->key.offset; |
2423 | 2531 | ||
2424 | err = write_one_cache_group(trans, root, | 2532 | err = write_one_cache_group(trans, root, path, cache); |
2425 | path, cache); | 2533 | BUG_ON(err); |
2426 | /* | 2534 | btrfs_put_block_group(cache); |
2427 | * if we fail to write the cache group, we want | ||
2428 | * to keep it marked dirty in hopes that a later | ||
2429 | * write will work | ||
2430 | */ | ||
2431 | if (err) { | ||
2432 | werr = err; | ||
2433 | continue; | ||
2434 | } | ||
2435 | } | 2535 | } |
2536 | |||
2436 | btrfs_free_path(path); | 2537 | btrfs_free_path(path); |
2437 | return werr; | 2538 | return 0; |
2438 | } | 2539 | } |
2439 | 2540 | ||
2440 | int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr) | 2541 | int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr) |
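The rewritten btrfs_write_dirty_block_groups() above no longer scans the rbtree while holding block_group_cache_lock; it remembers the key just past the last group it wrote and looks the next dirty group up fresh on every iteration. A simplified model of that resume-from-last-key loop over a plain array (the delayed-ref pass and the restart from zero are omitted):

/* Simplified model of the "remember last key, look up again" scan used
 * by the rewritten btrfs_write_dirty_block_groups(); block groups are
 * modelled as an array of (objectid, offset, dirty) records. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct bg { uint64_t objectid, offset; int dirty; };

static struct bg groups[] = {
	{ 0,    1024, 0 },
	{ 1024, 1024, 1 },
	{ 2048, 1024, 1 },
};

/* first dirty group whose range starts at or after 'last' */
static struct bg *next_dirty(uint64_t last)
{
	for (size_t i = 0; i < sizeof(groups) / sizeof(groups[0]); i++)
		if (groups[i].objectid >= last && groups[i].dirty)
			return &groups[i];
	return NULL;
}

int main(void)
{
	uint64_t last = 0;
	struct bg *cache;

	while ((cache = next_dirty(last)) != NULL) {
		cache->dirty = 0;
		last = cache->objectid + cache->offset;	/* resume point */
		printf("wrote group at %llu\n",
		       (unsigned long long)cache->objectid);
	}
	return 0;
}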
@@ -2484,6 +2585,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, | |||
2484 | found->force_alloc = 0; | 2585 | found->force_alloc = 0; |
2485 | *space_info = found; | 2586 | *space_info = found; |
2486 | list_add_rcu(&found->list, &info->space_info); | 2587 | list_add_rcu(&found->list, &info->space_info); |
2588 | atomic_set(&found->caching_threads, 0); | ||
2487 | return 0; | 2589 | return 0; |
2488 | } | 2590 | } |
2489 | 2591 | ||
@@ -2947,13 +3049,9 @@ int btrfs_update_pinned_extents(struct btrfs_root *root, | |||
2947 | struct btrfs_block_group_cache *cache; | 3049 | struct btrfs_block_group_cache *cache; |
2948 | struct btrfs_fs_info *fs_info = root->fs_info; | 3050 | struct btrfs_fs_info *fs_info = root->fs_info; |
2949 | 3051 | ||
2950 | if (pin) { | 3052 | if (pin) |
2951 | set_extent_dirty(&fs_info->pinned_extents, | 3053 | set_extent_dirty(&fs_info->pinned_extents, |
2952 | bytenr, bytenr + num - 1, GFP_NOFS); | 3054 | bytenr, bytenr + num - 1, GFP_NOFS); |
2953 | } else { | ||
2954 | clear_extent_dirty(&fs_info->pinned_extents, | ||
2955 | bytenr, bytenr + num - 1, GFP_NOFS); | ||
2956 | } | ||
2957 | 3055 | ||
2958 | while (num > 0) { | 3056 | while (num > 0) { |
2959 | cache = btrfs_lookup_block_group(fs_info, bytenr); | 3057 | cache = btrfs_lookup_block_group(fs_info, bytenr); |
@@ -2969,14 +3067,34 @@ int btrfs_update_pinned_extents(struct btrfs_root *root, | |||
2969 | spin_unlock(&cache->space_info->lock); | 3067 | spin_unlock(&cache->space_info->lock); |
2970 | fs_info->total_pinned += len; | 3068 | fs_info->total_pinned += len; |
2971 | } else { | 3069 | } else { |
3070 | int unpin = 0; | ||
3071 | |||
3072 | /* | ||
3073 | * in order to not race with the block group caching, we | ||
3074 | * only want to unpin the extent if we are cached. If | ||
3075 | * we aren't cached, we want to start async caching this | ||
3076 | * block group so we can free the extent the next time | ||
3077 | * around. | ||
3078 | */ | ||
2972 | spin_lock(&cache->space_info->lock); | 3079 | spin_lock(&cache->space_info->lock); |
2973 | spin_lock(&cache->lock); | 3080 | spin_lock(&cache->lock); |
2974 | cache->pinned -= len; | 3081 | unpin = (cache->cached == BTRFS_CACHE_FINISHED); |
2975 | cache->space_info->bytes_pinned -= len; | 3082 | if (likely(unpin)) { |
3083 | cache->pinned -= len; | ||
3084 | cache->space_info->bytes_pinned -= len; | ||
3085 | fs_info->total_pinned -= len; | ||
3086 | } | ||
2976 | spin_unlock(&cache->lock); | 3087 | spin_unlock(&cache->lock); |
2977 | spin_unlock(&cache->space_info->lock); | 3088 | spin_unlock(&cache->space_info->lock); |
2978 | fs_info->total_pinned -= len; | 3089 | |
2979 | if (cache->cached) | 3090 | if (likely(unpin)) |
3091 | clear_extent_dirty(&fs_info->pinned_extents, | ||
3092 | bytenr, bytenr + len -1, | ||
3093 | GFP_NOFS); | ||
3094 | else | ||
3095 | cache_block_group(cache); | ||
3096 | |||
3097 | if (unpin) | ||
2980 | btrfs_add_free_space(cache, bytenr, len); | 3098 | btrfs_add_free_space(cache, bytenr, len); |
2981 | } | 3099 | } |
2982 | btrfs_put_block_group(cache); | 3100 | btrfs_put_block_group(cache); |
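The unpin path above now releases pinned accounting only when the owning block group has finished caching; otherwise the extent stays marked and caching is kicked off so a later commit can free it. A condensed model of that decision, with the state and the counters reduced to plain integers:

/* Condensed model of the "unpin only if the block group is cached"
 * decision; state and accounting are reduced to plain fields. */
#include <stdio.h>
#include <stdbool.h>

enum { CACHE_NO, CACHE_STARTED, CACHE_FINISHED };

struct group { int cached; long long pinned; };

static void start_caching(struct group *g)
{
	if (g->cached == CACHE_NO)
		g->cached = CACHE_STARTED;	/* async fill happens elsewhere */
}

static void unpin(struct group *g, long long len)
{
	bool can_unpin = (g->cached == CACHE_FINISHED);

	if (can_unpin)
		g->pinned -= len;	/* and clear dirty bits, add free space */
	else
		start_caching(g);	/* leave it pinned, retry next commit */
}

int main(void)
{
	struct group g = { .cached = CACHE_NO, .pinned = 4096 };

	unpin(&g, 4096);
	printf("cached=%d pinned=%lld\n", g.cached, g.pinned);
	return 0;
}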
@@ -3030,6 +3148,7 @@ int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy) | |||
3030 | &start, &end, EXTENT_DIRTY); | 3148 | &start, &end, EXTENT_DIRTY); |
3031 | if (ret) | 3149 | if (ret) |
3032 | break; | 3150 | break; |
3151 | |||
3033 | set_extent_dirty(copy, start, end, GFP_NOFS); | 3152 | set_extent_dirty(copy, start, end, GFP_NOFS); |
3034 | last = end + 1; | 3153 | last = end + 1; |
3035 | } | 3154 | } |
@@ -3058,6 +3177,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, | |||
3058 | 3177 | ||
3059 | cond_resched(); | 3178 | cond_resched(); |
3060 | } | 3179 | } |
3180 | |||
3061 | return ret; | 3181 | return ret; |
3062 | } | 3182 | } |
3063 | 3183 | ||
@@ -3436,6 +3556,45 @@ static u64 stripe_align(struct btrfs_root *root, u64 val) | |||
3436 | } | 3556 | } |
3437 | 3557 | ||
3438 | /* | 3558 | /* |
3559 | * when we wait for progress in the block group caching, it's because | ||
3560 | * our allocation attempt failed at least once. So, we must sleep | ||
3561 | * and let some progress happen before we try again. | ||
3562 | * | ||
3563 | * This function will sleep at least once waiting for new free space to | ||
3564 | * show up, and then it will check the block group free space numbers | ||
3565 | * for our min num_bytes. Another option is to have it go ahead | ||
3566 | * and look in the rbtree for a free extent of a given size, but this | ||
3567 | * is a good start. | ||
3568 | */ | ||
3569 | static noinline int | ||
3570 | wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, | ||
3571 | u64 num_bytes) | ||
3572 | { | ||
3573 | DEFINE_WAIT(wait); | ||
3574 | |||
3575 | prepare_to_wait(&cache->caching_q, &wait, TASK_UNINTERRUPTIBLE); | ||
3576 | |||
3577 | if (block_group_cache_done(cache)) { | ||
3578 | finish_wait(&cache->caching_q, &wait); | ||
3579 | return 0; | ||
3580 | } | ||
3581 | schedule(); | ||
3582 | finish_wait(&cache->caching_q, &wait); | ||
3583 | |||
3584 | wait_event(cache->caching_q, block_group_cache_done(cache) || | ||
3585 | (cache->free_space >= num_bytes)); | ||
3586 | return 0; | ||
3587 | } | ||
3588 | |||
3589 | enum btrfs_loop_type { | ||
3590 | LOOP_CACHED_ONLY = 0, | ||
3591 | LOOP_CACHING_NOWAIT = 1, | ||
3592 | LOOP_CACHING_WAIT = 2, | ||
3593 | LOOP_ALLOC_CHUNK = 3, | ||
3594 | LOOP_NO_EMPTY_SIZE = 4, | ||
3595 | }; | ||
3596 | |||
3597 | /* | ||
3439 | * walks the btree of allocated extents and find a hole of a given size. | 3598 | * walks the btree of allocated extents and find a hole of a given size. |
3440 | * The key ins is changed to record the hole: | 3599 | * The key ins is changed to record the hole: |
3441 | * ins->objectid == block start | 3600 | * ins->objectid == block start |
@@ -3460,6 +3619,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, | |||
3460 | struct btrfs_space_info *space_info; | 3619 | struct btrfs_space_info *space_info; |
3461 | int last_ptr_loop = 0; | 3620 | int last_ptr_loop = 0; |
3462 | int loop = 0; | 3621 | int loop = 0; |
3622 | bool found_uncached_bg = false; | ||
3463 | 3623 | ||
3464 | WARN_ON(num_bytes < root->sectorsize); | 3624 | WARN_ON(num_bytes < root->sectorsize); |
3465 | btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY); | 3625 | btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY); |
@@ -3491,15 +3651,18 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, | |||
3491 | search_start = max(search_start, first_logical_byte(root, 0)); | 3651 | search_start = max(search_start, first_logical_byte(root, 0)); |
3492 | search_start = max(search_start, hint_byte); | 3652 | search_start = max(search_start, hint_byte); |
3493 | 3653 | ||
3494 | if (!last_ptr) { | 3654 | if (!last_ptr) |
3495 | empty_cluster = 0; | 3655 | empty_cluster = 0; |
3496 | loop = 1; | ||
3497 | } | ||
3498 | 3656 | ||
3499 | if (search_start == hint_byte) { | 3657 | if (search_start == hint_byte) { |
3500 | block_group = btrfs_lookup_block_group(root->fs_info, | 3658 | block_group = btrfs_lookup_block_group(root->fs_info, |
3501 | search_start); | 3659 | search_start); |
3502 | if (block_group && block_group_bits(block_group, data)) { | 3660 | /* |
3661 | * we don't want to use the block group if it doesn't match our | ||
3662 | * allocation bits, or if it's not cached. | ||
3663 | */ | ||
3664 | if (block_group && block_group_bits(block_group, data) && | ||
3665 | block_group_cache_done(block_group)) { | ||
3503 | down_read(&space_info->groups_sem); | 3666 | down_read(&space_info->groups_sem); |
3504 | if (list_empty(&block_group->list) || | 3667 | if (list_empty(&block_group->list) || |
3505 | block_group->ro) { | 3668 | block_group->ro) { |
@@ -3522,21 +3685,35 @@ search: | |||
3522 | down_read(&space_info->groups_sem); | 3685 | down_read(&space_info->groups_sem); |
3523 | list_for_each_entry(block_group, &space_info->block_groups, list) { | 3686 | list_for_each_entry(block_group, &space_info->block_groups, list) { |
3524 | u64 offset; | 3687 | u64 offset; |
3688 | int cached; | ||
3525 | 3689 | ||
3526 | atomic_inc(&block_group->count); | 3690 | atomic_inc(&block_group->count); |
3527 | search_start = block_group->key.objectid; | 3691 | search_start = block_group->key.objectid; |
3528 | 3692 | ||
3529 | have_block_group: | 3693 | have_block_group: |
3530 | if (unlikely(!block_group->cached)) { | 3694 | if (unlikely(block_group->cached == BTRFS_CACHE_NO)) { |
3531 | mutex_lock(&block_group->cache_mutex); | 3695 | /* |
3532 | ret = cache_block_group(root, block_group); | 3696 | * we want to start caching kthreads, but not too many |
3533 | mutex_unlock(&block_group->cache_mutex); | 3697 | * right off the bat so we don't overwhelm the system, |
3534 | if (ret) { | 3698 | * so only start them if there are less than 2 and we're |
3535 | btrfs_put_block_group(block_group); | 3699 | * in the initial allocation phase. |
3536 | break; | 3700 | */ |
3701 | if (loop > LOOP_CACHING_NOWAIT || | ||
3702 | atomic_read(&space_info->caching_threads) < 2) { | ||
3703 | ret = cache_block_group(block_group); | ||
3704 | BUG_ON(ret); | ||
3537 | } | 3705 | } |
3538 | } | 3706 | } |
3539 | 3707 | ||
3708 | cached = block_group_cache_done(block_group); | ||
3709 | if (unlikely(!cached)) { | ||
3710 | found_uncached_bg = true; | ||
3711 | |||
3712 | /* if we only want cached bgs, loop */ | ||
3713 | if (loop == LOOP_CACHED_ONLY) | ||
3714 | goto loop; | ||
3715 | } | ||
3716 | |||
3540 | if (unlikely(block_group->ro)) | 3717 | if (unlikely(block_group->ro)) |
3541 | goto loop; | 3718 | goto loop; |
3542 | 3719 | ||
@@ -3615,14 +3792,21 @@ refill_cluster: | |||
3615 | spin_unlock(&last_ptr->refill_lock); | 3792 | spin_unlock(&last_ptr->refill_lock); |
3616 | goto checks; | 3793 | goto checks; |
3617 | } | 3794 | } |
3795 | } else if (!cached && loop > LOOP_CACHING_NOWAIT) { | ||
3796 | spin_unlock(&last_ptr->refill_lock); | ||
3797 | |||
3798 | wait_block_group_cache_progress(block_group, | ||
3799 | num_bytes + empty_cluster + empty_size); | ||
3800 | goto have_block_group; | ||
3618 | } | 3801 | } |
3802 | |||
3619 | /* | 3803 | /* |
3620 | * at this point we either didn't find a cluster | 3804 | * at this point we either didn't find a cluster |
3621 | * or we weren't able to allocate a block from our | 3805 | * or we weren't able to allocate a block from our |
3622 | * cluster. Free the cluster we've been trying | 3806 | * cluster. Free the cluster we've been trying |
3623 | * to use, and go to the next block group | 3807 | * to use, and go to the next block group |
3624 | */ | 3808 | */ |
3625 | if (loop < 2) { | 3809 | if (loop < LOOP_NO_EMPTY_SIZE) { |
3626 | btrfs_return_cluster_to_free_space(NULL, | 3810 | btrfs_return_cluster_to_free_space(NULL, |
3627 | last_ptr); | 3811 | last_ptr); |
3628 | spin_unlock(&last_ptr->refill_lock); | 3812 | spin_unlock(&last_ptr->refill_lock); |
@@ -3633,11 +3817,17 @@ refill_cluster: | |||
3633 | 3817 | ||
3634 | offset = btrfs_find_space_for_alloc(block_group, search_start, | 3818 | offset = btrfs_find_space_for_alloc(block_group, search_start, |
3635 | num_bytes, empty_size); | 3819 | num_bytes, empty_size); |
3636 | if (!offset) | 3820 | if (!offset && (cached || (!cached && |
3821 | loop == LOOP_CACHING_NOWAIT))) { | ||
3637 | goto loop; | 3822 | goto loop; |
3823 | } else if (!offset && (!cached && | ||
3824 | loop > LOOP_CACHING_NOWAIT)) { | ||
3825 | wait_block_group_cache_progress(block_group, | ||
3826 | num_bytes + empty_size); | ||
3827 | goto have_block_group; | ||
3828 | } | ||
3638 | checks: | 3829 | checks: |
3639 | search_start = stripe_align(root, offset); | 3830 | search_start = stripe_align(root, offset); |
3640 | |||
3641 | /* move on to the next group */ | 3831 | /* move on to the next group */ |
3642 | if (search_start + num_bytes >= search_end) { | 3832 | if (search_start + num_bytes >= search_end) { |
3643 | btrfs_add_free_space(block_group, offset, num_bytes); | 3833 | btrfs_add_free_space(block_group, offset, num_bytes); |
@@ -3683,13 +3873,26 @@ loop: | |||
3683 | } | 3873 | } |
3684 | up_read(&space_info->groups_sem); | 3874 | up_read(&space_info->groups_sem); |
3685 | 3875 | ||
3686 | /* loop == 0, try to find a clustered alloc in every block group | 3876 | /* LOOP_CACHED_ONLY, only search fully cached block groups |
3687 | * loop == 1, try again after forcing a chunk allocation | 3877 | * LOOP_CACHING_NOWAIT, search partially cached block groups, but |
3688 | * loop == 2, set empty_size and empty_cluster to 0 and try again | 3878 | * don't wait for them to finish caching |
3879 | * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching | ||
3880 | * LOOP_ALLOC_CHUNK, force a chunk allocation and try again | ||
3881 | * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try | ||
3882 | * again | ||
3689 | */ | 3883 | */ |
3690 | if (!ins->objectid && loop < 3 && | 3884 | if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE && |
3691 | (empty_size || empty_cluster || allowed_chunk_alloc)) { | 3885 | (found_uncached_bg || empty_size || empty_cluster || |
3692 | if (loop >= 2) { | 3886 | allowed_chunk_alloc)) { |
3887 | if (found_uncached_bg) { | ||
3888 | found_uncached_bg = false; | ||
3889 | if (loop < LOOP_CACHING_WAIT) { | ||
3890 | loop++; | ||
3891 | goto search; | ||
3892 | } | ||
3893 | } | ||
3894 | |||
3895 | if (loop == LOOP_ALLOC_CHUNK) { | ||
3693 | empty_size = 0; | 3896 | empty_size = 0; |
3694 | empty_cluster = 0; | 3897 | empty_cluster = 0; |
3695 | } | 3898 | } |
@@ -3702,7 +3905,7 @@ loop: | |||
3702 | space_info->force_alloc = 1; | 3905 | space_info->force_alloc = 1; |
3703 | } | 3906 | } |
3704 | 3907 | ||
3705 | if (loop < 3) { | 3908 | if (loop < LOOP_NO_EMPTY_SIZE) { |
3706 | loop++; | 3909 | loop++; |
3707 | goto search; | 3910 | goto search; |
3708 | } | 3911 | } |
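The find_free_extent() changes above replace the bare loop counter with the btrfs_loop_type stages described in the comment, relaxing the search one step at a time until an allocation succeeds or LOOP_NO_EMPTY_SIZE is exhausted. A standalone model of just that escalation; try_alloc() is a fake that only succeeds once caching has been waited for:

/* Standalone model of the allocator's loop escalation; the allocation
 * itself is faked by try_alloc(). */
#include <stdio.h>

enum loop_type {
	LOOP_CACHED_ONLY = 0,
	LOOP_CACHING_NOWAIT = 1,
	LOOP_CACHING_WAIT = 2,
	LOOP_ALLOC_CHUNK = 3,
	LOOP_NO_EMPTY_SIZE = 4,
};

/* pretend space only turns up once we are willing to wait for caching */
static int try_alloc(enum loop_type loop)
{
	return loop >= LOOP_CACHING_WAIT;
}

int main(void)
{
	enum loop_type loop = LOOP_CACHED_ONLY;

	while (!try_alloc(loop)) {
		if (loop == LOOP_NO_EMPTY_SIZE) {
			printf("ENOSPC\n");
			return 1;
		}
		loop++;		/* relax the search constraints one step */
	}
	printf("allocated at stage %d\n", loop);
	return 0;
}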
@@ -3798,7 +4001,7 @@ again: | |||
3798 | num_bytes, data, 1); | 4001 | num_bytes, data, 1); |
3799 | goto again; | 4002 | goto again; |
3800 | } | 4003 | } |
3801 | if (ret) { | 4004 | if (ret == -ENOSPC) { |
3802 | struct btrfs_space_info *sinfo; | 4005 | struct btrfs_space_info *sinfo; |
3803 | 4006 | ||
3804 | sinfo = __find_space_info(root->fs_info, data); | 4007 | sinfo = __find_space_info(root->fs_info, data); |
@@ -3806,7 +4009,6 @@ again: | |||
3806 | "wanted %llu\n", (unsigned long long)data, | 4009 | "wanted %llu\n", (unsigned long long)data, |
3807 | (unsigned long long)num_bytes); | 4010 | (unsigned long long)num_bytes); |
3808 | dump_space_info(sinfo, num_bytes); | 4011 | dump_space_info(sinfo, num_bytes); |
3809 | BUG(); | ||
3810 | } | 4012 | } |
3811 | 4013 | ||
3812 | return ret; | 4014 | return ret; |
@@ -3844,7 +4046,9 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans, | |||
3844 | ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size, | 4046 | ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size, |
3845 | empty_size, hint_byte, search_end, ins, | 4047 | empty_size, hint_byte, search_end, ins, |
3846 | data); | 4048 | data); |
3847 | update_reserved_extents(root, ins->objectid, ins->offset, 1); | 4049 | if (!ret) |
4050 | update_reserved_extents(root, ins->objectid, ins->offset, 1); | ||
4051 | |||
3848 | return ret; | 4052 | return ret; |
3849 | } | 4053 | } |
3850 | 4054 | ||
@@ -4006,9 +4210,9 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, | |||
4006 | struct btrfs_block_group_cache *block_group; | 4210 | struct btrfs_block_group_cache *block_group; |
4007 | 4211 | ||
4008 | block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid); | 4212 | block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid); |
4009 | mutex_lock(&block_group->cache_mutex); | 4213 | cache_block_group(block_group); |
4010 | cache_block_group(root, block_group); | 4214 | wait_event(block_group->caching_q, |
4011 | mutex_unlock(&block_group->cache_mutex); | 4215 | block_group_cache_done(block_group)); |
4012 | 4216 | ||
4013 | ret = btrfs_remove_free_space(block_group, ins->objectid, | 4217 | ret = btrfs_remove_free_space(block_group, ins->objectid, |
4014 | ins->offset); | 4218 | ins->offset); |
@@ -4039,7 +4243,8 @@ static int alloc_tree_block(struct btrfs_trans_handle *trans, | |||
4039 | ret = __btrfs_reserve_extent(trans, root, num_bytes, num_bytes, | 4243 | ret = __btrfs_reserve_extent(trans, root, num_bytes, num_bytes, |
4040 | empty_size, hint_byte, search_end, | 4244 | empty_size, hint_byte, search_end, |
4041 | ins, 0); | 4245 | ins, 0); |
4042 | BUG_ON(ret); | 4246 | if (ret) |
4247 | return ret; | ||
4043 | 4248 | ||
4044 | if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { | 4249 | if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { |
4045 | if (parent == 0) | 4250 | if (parent == 0) |
@@ -6955,11 +7160,16 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) | |||
6955 | &info->block_group_cache_tree); | 7160 | &info->block_group_cache_tree); |
6956 | spin_unlock(&info->block_group_cache_lock); | 7161 | spin_unlock(&info->block_group_cache_lock); |
6957 | 7162 | ||
6958 | btrfs_remove_free_space_cache(block_group); | ||
6959 | down_write(&block_group->space_info->groups_sem); | 7163 | down_write(&block_group->space_info->groups_sem); |
6960 | list_del(&block_group->list); | 7164 | list_del(&block_group->list); |
6961 | up_write(&block_group->space_info->groups_sem); | 7165 | up_write(&block_group->space_info->groups_sem); |
6962 | 7166 | ||
7167 | if (block_group->cached == BTRFS_CACHE_STARTED) | ||
7168 | wait_event(block_group->caching_q, | ||
7169 | block_group_cache_done(block_group)); | ||
7170 | |||
7171 | btrfs_remove_free_space_cache(block_group); | ||
7172 | |||
6963 | WARN_ON(atomic_read(&block_group->count) != 1); | 7173 | WARN_ON(atomic_read(&block_group->count) != 1); |
6964 | kfree(block_group); | 7174 | kfree(block_group); |
6965 | 7175 | ||
@@ -7025,9 +7235,19 @@ int btrfs_read_block_groups(struct btrfs_root *root) | |||
7025 | atomic_set(&cache->count, 1); | 7235 | atomic_set(&cache->count, 1); |
7026 | spin_lock_init(&cache->lock); | 7236 | spin_lock_init(&cache->lock); |
7027 | spin_lock_init(&cache->tree_lock); | 7237 | spin_lock_init(&cache->tree_lock); |
7028 | mutex_init(&cache->cache_mutex); | 7238 | cache->fs_info = info; |
7239 | init_waitqueue_head(&cache->caching_q); | ||
7029 | INIT_LIST_HEAD(&cache->list); | 7240 | INIT_LIST_HEAD(&cache->list); |
7030 | INIT_LIST_HEAD(&cache->cluster_list); | 7241 | INIT_LIST_HEAD(&cache->cluster_list); |
7242 | |||
7243 | /* | ||
7244 | * we only want to have 32k of ram per block group for keeping | ||
7245 | * track of free space, and if we pass 1/2 of that we want to | ||
7246 | * start converting things over to using bitmaps | ||
7247 | */ | ||
7248 | cache->extents_thresh = ((1024 * 32) / 2) / | ||
7249 | sizeof(struct btrfs_free_space); | ||
7250 | |||
7031 | read_extent_buffer(leaf, &cache->item, | 7251 | read_extent_buffer(leaf, &cache->item, |
7032 | btrfs_item_ptr_offset(leaf, path->slots[0]), | 7252 | btrfs_item_ptr_offset(leaf, path->slots[0]), |
7033 | sizeof(cache->item)); | 7253 | sizeof(cache->item)); |
@@ -7036,6 +7256,26 @@ int btrfs_read_block_groups(struct btrfs_root *root) | |||
7036 | key.objectid = found_key.objectid + found_key.offset; | 7256 | key.objectid = found_key.objectid + found_key.offset; |
7037 | btrfs_release_path(root, path); | 7257 | btrfs_release_path(root, path); |
7038 | cache->flags = btrfs_block_group_flags(&cache->item); | 7258 | cache->flags = btrfs_block_group_flags(&cache->item); |
7259 | cache->sectorsize = root->sectorsize; | ||
7260 | |||
7261 | remove_sb_from_cache(root, cache); | ||
7262 | |||
7263 | /* | ||
7264 | * check for two cases, either we are full, and therefore | ||
7265 | * don't need to bother with the caching work since we won't | ||
7266 | * find any space, or we are empty, and we can just add all | ||
7267 | * the space in and be done with it. This saves us _a lot_ of | ||
7268 | * time, particularly in the full case. | ||
7269 | */ | ||
7270 | if (found_key.offset == btrfs_block_group_used(&cache->item)) { | ||
7271 | cache->cached = BTRFS_CACHE_FINISHED; | ||
7272 | } else if (btrfs_block_group_used(&cache->item) == 0) { | ||
7273 | cache->cached = BTRFS_CACHE_FINISHED; | ||
7274 | add_new_free_space(cache, root->fs_info, | ||
7275 | found_key.objectid, | ||
7276 | found_key.objectid + | ||
7277 | found_key.offset); | ||
7278 | } | ||
7039 | 7279 | ||
7040 | ret = update_space_info(info, cache->flags, found_key.offset, | 7280 | ret = update_space_info(info, cache->flags, found_key.offset, |
7041 | btrfs_block_group_used(&cache->item), | 7281 | btrfs_block_group_used(&cache->item), |
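The extents_thresh initialisation above budgets 32KB of RAM per block group for free-space tracking and switches to bitmaps once half of that would be consumed by extent entries. The short program below only evaluates that expression; the structure used for sizeof is an assumed stand-in, since the real sizeof(struct btrfs_free_space) depends on the kernel build:

/* Evaluates the extents_thresh expression from the patch. The structure
 * size here is an assumed stand-in for sizeof(struct btrfs_free_space). */
#include <stdio.h>
#include <stddef.h>

struct fake_free_space {	/* assumption: roughly offset + bytes + tree node */
	unsigned long long offset;
	unsigned long long bytes;
	void *node[3];
};

int main(void)
{
	size_t thresh = ((1024 * 32) / 2) / sizeof(struct fake_free_space);

	printf("convert to bitmaps after ~%zu extent entries\n", thresh);
	return 0;
}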
@@ -7079,10 +7319,19 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, | |||
7079 | cache->key.objectid = chunk_offset; | 7319 | cache->key.objectid = chunk_offset; |
7080 | cache->key.offset = size; | 7320 | cache->key.offset = size; |
7081 | cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; | 7321 | cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; |
7322 | cache->sectorsize = root->sectorsize; | ||
7323 | |||
7324 | /* | ||
7325 | * we only want to have 32k of ram per block group for keeping track | ||
7326 | * of free space, and if we pass 1/2 of that we want to start | ||
7327 | * converting things over to using bitmaps | ||
7328 | */ | ||
7329 | cache->extents_thresh = ((1024 * 32) / 2) / | ||
7330 | sizeof(struct btrfs_free_space); | ||
7082 | atomic_set(&cache->count, 1); | 7331 | atomic_set(&cache->count, 1); |
7083 | spin_lock_init(&cache->lock); | 7332 | spin_lock_init(&cache->lock); |
7084 | spin_lock_init(&cache->tree_lock); | 7333 | spin_lock_init(&cache->tree_lock); |
7085 | mutex_init(&cache->cache_mutex); | 7334 | init_waitqueue_head(&cache->caching_q); |
7086 | INIT_LIST_HEAD(&cache->list); | 7335 | INIT_LIST_HEAD(&cache->list); |
7087 | INIT_LIST_HEAD(&cache->cluster_list); | 7336 | INIT_LIST_HEAD(&cache->cluster_list); |
7088 | 7337 | ||
@@ -7091,6 +7340,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, | |||
7091 | cache->flags = type; | 7340 | cache->flags = type; |
7092 | btrfs_set_block_group_flags(&cache->item, type); | 7341 | btrfs_set_block_group_flags(&cache->item, type); |
7093 | 7342 | ||
7343 | cache->cached = BTRFS_CACHE_FINISHED; | ||
7344 | remove_sb_from_cache(root, cache); | ||
7345 | |||
7346 | add_new_free_space(cache, root->fs_info, chunk_offset, | ||
7347 | chunk_offset + size); | ||
7348 | |||
7094 | ret = update_space_info(root->fs_info, cache->flags, size, bytes_used, | 7349 | ret = update_space_info(root->fs_info, cache->flags, size, bytes_used, |
7095 | &cache->space_info); | 7350 | &cache->space_info); |
7096 | BUG_ON(ret); | 7351 | BUG_ON(ret); |
@@ -7149,7 +7404,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, | |||
7149 | rb_erase(&block_group->cache_node, | 7404 | rb_erase(&block_group->cache_node, |
7150 | &root->fs_info->block_group_cache_tree); | 7405 | &root->fs_info->block_group_cache_tree); |
7151 | spin_unlock(&root->fs_info->block_group_cache_lock); | 7406 | spin_unlock(&root->fs_info->block_group_cache_lock); |
7152 | btrfs_remove_free_space_cache(block_group); | 7407 | |
7153 | down_write(&block_group->space_info->groups_sem); | 7408 | down_write(&block_group->space_info->groups_sem); |
7154 | /* | 7409 | /* |
7155 | * we must use list_del_init so people can check to see if they | 7410 | * we must use list_del_init so people can check to see if they |
@@ -7158,11 +7413,18 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, | |||
7158 | list_del_init(&block_group->list); | 7413 | list_del_init(&block_group->list); |
7159 | up_write(&block_group->space_info->groups_sem); | 7414 | up_write(&block_group->space_info->groups_sem); |
7160 | 7415 | ||
7416 | if (block_group->cached == BTRFS_CACHE_STARTED) | ||
7417 | wait_event(block_group->caching_q, | ||
7418 | block_group_cache_done(block_group)); | ||
7419 | |||
7420 | btrfs_remove_free_space_cache(block_group); | ||
7421 | |||
7161 | spin_lock(&block_group->space_info->lock); | 7422 | spin_lock(&block_group->space_info->lock); |
7162 | block_group->space_info->total_bytes -= block_group->key.offset; | 7423 | block_group->space_info->total_bytes -= block_group->key.offset; |
7163 | block_group->space_info->bytes_readonly -= block_group->key.offset; | 7424 | block_group->space_info->bytes_readonly -= block_group->key.offset; |
7164 | spin_unlock(&block_group->space_info->lock); | 7425 | spin_unlock(&block_group->space_info->lock); |
7165 | block_group->space_info->full = 0; | 7426 | |
7427 | btrfs_clear_space_info_full(root->fs_info); | ||
7166 | 7428 | ||
7167 | btrfs_put_block_group(block_group); | 7429 | btrfs_put_block_group(block_group); |
7168 | btrfs_put_block_group(block_group); | 7430 | btrfs_put_block_group(block_group); |
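A quick illustration of the sizing logic introduced in the extent-tree.c hunks above: each block group budgets roughly 32KB of RAM for free-space tracking, and extents_thresh allows half of that for extent entries before the cache starts converting runs over to bitmaps. The standalone C sketch below mirrors that arithmetic; it is not part of the patch, and the stand-in struct layout, page size and sector size are assumptions chosen only to make the numbers concrete.

#include <stdio.h>
#include <stdint.h>

struct free_space_stub {                      /* rough stand-in for struct btrfs_free_space */
	void *rb_left, *rb_right, *rb_parent; /* rb_node, approximately */
	uint64_t offset;
	uint64_t bytes;
	unsigned long *bitmap;
	void *list_next, *list_prev;          /* list_head, approximately */
};

int main(void)
{
	const uint64_t page_size = 4096;      /* assumed PAGE_CACHE_SIZE */
	const uint64_t sectorsize = 4096;     /* assumed btrfs sector size */

	/* half of the 32KB per-block-group budget may go to extent entries */
	uint64_t extents_thresh = ((1024 * 32) / 2) /
				  sizeof(struct free_space_stub);

	/* one bitmap page tracks PAGE_SIZE * 8 sectors worth of space */
	uint64_t bytes_per_bitmap = page_size * 8 * sectorsize;

	printf("extent entries before bitmaps kick in: %llu\n",
	       (unsigned long long)extents_thresh);
	printf("space covered by a single bitmap page: %llu MiB\n",
	       (unsigned long long)(bytes_per_bitmap >> 20));
	return 0;
}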
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 4538e48581a5..af99b78b288e 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
@@ -16,45 +16,46 @@ | |||
16 | * Boston, MA 021110-1307, USA. | 16 | * Boston, MA 021110-1307, USA. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/pagemap.h> | ||
19 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
21 | #include <linux/math64.h> | ||
20 | #include "ctree.h" | 22 | #include "ctree.h" |
21 | #include "free-space-cache.h" | 23 | #include "free-space-cache.h" |
22 | #include "transaction.h" | 24 | #include "transaction.h" |
23 | 25 | ||
24 | struct btrfs_free_space { | 26 | #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) |
25 | struct rb_node bytes_index; | 27 | #define MAX_CACHE_BYTES_PER_GIG (32 * 1024) |
26 | struct rb_node offset_index; | ||
27 | u64 offset; | ||
28 | u64 bytes; | ||
29 | }; | ||
30 | 28 | ||
31 | static int tree_insert_offset(struct rb_root *root, u64 offset, | 29 | static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize, |
32 | struct rb_node *node) | 30 | u64 offset) |
33 | { | 31 | { |
34 | struct rb_node **p = &root->rb_node; | 32 | BUG_ON(offset < bitmap_start); |
35 | struct rb_node *parent = NULL; | 33 | offset -= bitmap_start; |
36 | struct btrfs_free_space *info; | 34 | return (unsigned long)(div64_u64(offset, sectorsize)); |
35 | } | ||
37 | 36 | ||
38 | while (*p) { | 37 | static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize) |
39 | parent = *p; | 38 | { |
40 | info = rb_entry(parent, struct btrfs_free_space, offset_index); | 39 | return (unsigned long)(div64_u64(bytes, sectorsize)); |
40 | } | ||
41 | 41 | ||
42 | if (offset < info->offset) | 42 | static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group, |
43 | p = &(*p)->rb_left; | 43 | u64 offset) |
44 | else if (offset > info->offset) | 44 | { |
45 | p = &(*p)->rb_right; | 45 | u64 bitmap_start; |
46 | else | 46 | u64 bytes_per_bitmap; |
47 | return -EEXIST; | ||
48 | } | ||
49 | 47 | ||
50 | rb_link_node(node, parent, p); | 48 | bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize; |
51 | rb_insert_color(node, root); | 49 | bitmap_start = offset - block_group->key.objectid; |
50 | bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap); | ||
51 | bitmap_start *= bytes_per_bitmap; | ||
52 | bitmap_start += block_group->key.objectid; | ||
52 | 53 | ||
53 | return 0; | 54 | return bitmap_start; |
54 | } | 55 | } |
55 | 56 | ||
56 | static int tree_insert_bytes(struct rb_root *root, u64 bytes, | 57 | static int tree_insert_offset(struct rb_root *root, u64 offset, |
57 | struct rb_node *node) | 58 | struct rb_node *node, int bitmap) |
58 | { | 59 | { |
59 | struct rb_node **p = &root->rb_node; | 60 | struct rb_node **p = &root->rb_node; |
60 | struct rb_node *parent = NULL; | 61 | struct rb_node *parent = NULL; |
@@ -62,12 +63,34 @@ static int tree_insert_bytes(struct rb_root *root, u64 bytes, | |||
62 | 63 | ||
63 | while (*p) { | 64 | while (*p) { |
64 | parent = *p; | 65 | parent = *p; |
65 | info = rb_entry(parent, struct btrfs_free_space, bytes_index); | 66 | info = rb_entry(parent, struct btrfs_free_space, offset_index); |
66 | 67 | ||
67 | if (bytes < info->bytes) | 68 | if (offset < info->offset) { |
68 | p = &(*p)->rb_left; | 69 | p = &(*p)->rb_left; |
69 | else | 70 | } else if (offset > info->offset) { |
70 | p = &(*p)->rb_right; | 71 | p = &(*p)->rb_right; |
72 | } else { | ||
73 | /* | ||
74 | * we could have a bitmap entry and an extent entry | ||
75 | * share the same offset. If this is the case, we want | ||
76 | * the extent entry to always be found first if we do a | ||
77 | * linear search through the tree, since we want to have | ||
78 | * the quickest allocation time, and allocating from an | ||
79 | * extent is faster than allocating from a bitmap. So | ||
80 | * if we're inserting a bitmap and we find an entry at | ||
81 | * this offset, we want to go right, or after this entry | ||
82 | * logically. If we are inserting an extent and we've | ||
83 | * found a bitmap, we want to go left, or before | ||
84 | * logically. | ||
85 | */ | ||
86 | if (bitmap) { | ||
87 | WARN_ON(info->bitmap); | ||
88 | p = &(*p)->rb_right; | ||
89 | } else { | ||
90 | WARN_ON(!info->bitmap); | ||
91 | p = &(*p)->rb_left; | ||
92 | } | ||
93 | } | ||
71 | } | 94 | } |
72 | 95 | ||
73 | rb_link_node(node, parent, p); | 96 | rb_link_node(node, parent, p); |
@@ -79,110 +102,143 @@ static int tree_insert_bytes(struct rb_root *root, u64 bytes, | |||
79 | /* | 102 | /* |
80 | * searches the tree for the given offset. | 103 | * searches the tree for the given offset. |
81 | * | 104 | * |
82 | * fuzzy == 1: this is used for allocations where we are given a hint of where | 105 | * fuzzy - If this is set, then we are trying to make an allocation, and we just |
83 | * to look for free space. Because the hint may not be completely on an offset | 106 | * want a section that has at least bytes size and comes at or after the given |
84 | * mark, or the hint may no longer point to free space we need to fudge our | 107 | * offset. |
85 | * results a bit. So we look for free space starting at or after offset with at | ||
86 | * least bytes size. We prefer to find as close to the given offset as we can. | ||
87 | * Also if the offset is within a free space range, then we will return the free | ||
88 | * space that contains the given offset, which means we can return a free space | ||
89 | * chunk with an offset before the provided offset. | ||
90 | * | ||
91 | * fuzzy == 0: this is just a normal tree search. Give us the free space that | ||
92 | * starts at the given offset which is at least bytes size, and if its not there | ||
93 | * return NULL. | ||
94 | */ | 108 | */ |
95 | static struct btrfs_free_space *tree_search_offset(struct rb_root *root, | 109 | static struct btrfs_free_space * |
96 | u64 offset, u64 bytes, | 110 | tree_search_offset(struct btrfs_block_group_cache *block_group, |
97 | int fuzzy) | 111 | u64 offset, int bitmap_only, int fuzzy) |
98 | { | 112 | { |
99 | struct rb_node *n = root->rb_node; | 113 | struct rb_node *n = block_group->free_space_offset.rb_node; |
100 | struct btrfs_free_space *entry, *ret = NULL; | 114 | struct btrfs_free_space *entry, *prev = NULL; |
115 | |||
116 | /* find entry that is closest to the 'offset' */ | ||
117 | while (1) { | ||
118 | if (!n) { | ||
119 | entry = NULL; | ||
120 | break; | ||
121 | } | ||
101 | 122 | ||
102 | while (n) { | ||
103 | entry = rb_entry(n, struct btrfs_free_space, offset_index); | 123 | entry = rb_entry(n, struct btrfs_free_space, offset_index); |
124 | prev = entry; | ||
104 | 125 | ||
105 | if (offset < entry->offset) { | 126 | if (offset < entry->offset) |
106 | if (fuzzy && | ||
107 | (!ret || entry->offset < ret->offset) && | ||
108 | (bytes <= entry->bytes)) | ||
109 | ret = entry; | ||
110 | n = n->rb_left; | 127 | n = n->rb_left; |
111 | } else if (offset > entry->offset) { | 128 | else if (offset > entry->offset) |
112 | if (fuzzy && | ||
113 | (entry->offset + entry->bytes - 1) >= offset && | ||
114 | bytes <= entry->bytes) { | ||
115 | ret = entry; | ||
116 | break; | ||
117 | } | ||
118 | n = n->rb_right; | 129 | n = n->rb_right; |
119 | } else { | 130 | else |
120 | if (bytes > entry->bytes) { | ||
121 | n = n->rb_right; | ||
122 | continue; | ||
123 | } | ||
124 | ret = entry; | ||
125 | break; | 131 | break; |
126 | } | ||
127 | } | 132 | } |
128 | 133 | ||
129 | return ret; | 134 | if (bitmap_only) { |
130 | } | 135 | if (!entry) |
131 | 136 | return NULL; | |
132 | /* | 137 | if (entry->bitmap) |
133 | * return a chunk at least bytes size, as close to offset that we can get. | 138 | return entry; |
134 | */ | ||
135 | static struct btrfs_free_space *tree_search_bytes(struct rb_root *root, | ||
136 | u64 offset, u64 bytes) | ||
137 | { | ||
138 | struct rb_node *n = root->rb_node; | ||
139 | struct btrfs_free_space *entry, *ret = NULL; | ||
140 | 139 | ||
141 | while (n) { | 140 | /* |
142 | entry = rb_entry(n, struct btrfs_free_space, bytes_index); | 141 | * bitmap entry and extent entry may share same offset, |
142 | * in that case, bitmap entry comes after extent entry. | ||
143 | */ | ||
144 | n = rb_next(n); | ||
145 | if (!n) | ||
146 | return NULL; | ||
147 | entry = rb_entry(n, struct btrfs_free_space, offset_index); | ||
148 | if (entry->offset != offset) | ||
149 | return NULL; | ||
143 | 150 | ||
144 | if (bytes < entry->bytes) { | 151 | WARN_ON(!entry->bitmap); |
152 | return entry; | ||
153 | } else if (entry) { | ||
154 | if (entry->bitmap) { | ||
145 | /* | 155 | /* |
146 | * We prefer to get a hole size as close to the size we | 156 | * if previous extent entry covers the offset, |
147 | * are asking for so we don't take small slivers out of | 157 | * we should return it instead of the bitmap entry |
148 | * huge holes, but we also want to get as close to the | ||
149 | * offset as possible so we don't have a whole lot of | ||
150 | * fragmentation. | ||
151 | */ | 158 | */ |
152 | if (offset <= entry->offset) { | 159 | n = &entry->offset_index; |
153 | if (!ret) | 160 | while (1) { |
154 | ret = entry; | 161 | n = rb_prev(n); |
155 | else if (entry->bytes < ret->bytes) | 162 | if (!n) |
156 | ret = entry; | 163 | break; |
157 | else if (entry->offset < ret->offset) | 164 | prev = rb_entry(n, struct btrfs_free_space, |
158 | ret = entry; | 165 | offset_index); |
166 | if (!prev->bitmap) { | ||
167 | if (prev->offset + prev->bytes > offset) | ||
168 | entry = prev; | ||
169 | break; | ||
170 | } | ||
159 | } | 171 | } |
160 | n = n->rb_left; | 172 | } |
161 | } else if (bytes > entry->bytes) { | 173 | return entry; |
162 | n = n->rb_right; | 174 | } |
175 | |||
176 | if (!prev) | ||
177 | return NULL; | ||
178 | |||
179 | /* find last entry before the 'offset' */ | ||
180 | entry = prev; | ||
181 | if (entry->offset > offset) { | ||
182 | n = rb_prev(&entry->offset_index); | ||
183 | if (n) { | ||
184 | entry = rb_entry(n, struct btrfs_free_space, | ||
185 | offset_index); | ||
186 | BUG_ON(entry->offset > offset); | ||
163 | } else { | 187 | } else { |
164 | /* | 188 | if (fuzzy) |
165 | * Ok we may have multiple chunks of the wanted size, | 189 | return entry; |
166 | * so we don't want to take the first one we find, we | 190 | else |
167 | * want to take the one closest to our given offset, so | 191 | return NULL; |
168 | * keep searching just in case there's a better match. | ||
169 | */ | ||
170 | n = n->rb_right; | ||
171 | if (offset > entry->offset) | ||
172 | continue; | ||
173 | else if (!ret || entry->offset < ret->offset) | ||
174 | ret = entry; | ||
175 | } | 192 | } |
176 | } | 193 | } |
177 | 194 | ||
178 | return ret; | 195 | if (entry->bitmap) { |
196 | n = &entry->offset_index; | ||
197 | while (1) { | ||
198 | n = rb_prev(n); | ||
199 | if (!n) | ||
200 | break; | ||
201 | prev = rb_entry(n, struct btrfs_free_space, | ||
202 | offset_index); | ||
203 | if (!prev->bitmap) { | ||
204 | if (prev->offset + prev->bytes > offset) | ||
205 | return prev; | ||
206 | break; | ||
207 | } | ||
208 | } | ||
209 | if (entry->offset + BITS_PER_BITMAP * | ||
210 | block_group->sectorsize > offset) | ||
211 | return entry; | ||
212 | } else if (entry->offset + entry->bytes > offset) | ||
213 | return entry; | ||
214 | |||
215 | if (!fuzzy) | ||
216 | return NULL; | ||
217 | |||
218 | while (1) { | ||
219 | if (entry->bitmap) { | ||
220 | if (entry->offset + BITS_PER_BITMAP * | ||
221 | block_group->sectorsize > offset) | ||
222 | break; | ||
223 | } else { | ||
224 | if (entry->offset + entry->bytes > offset) | ||
225 | break; | ||
226 | } | ||
227 | |||
228 | n = rb_next(&entry->offset_index); | ||
229 | if (!n) | ||
230 | return NULL; | ||
231 | entry = rb_entry(n, struct btrfs_free_space, offset_index); | ||
232 | } | ||
233 | return entry; | ||
179 | } | 234 | } |
180 | 235 | ||
181 | static void unlink_free_space(struct btrfs_block_group_cache *block_group, | 236 | static void unlink_free_space(struct btrfs_block_group_cache *block_group, |
182 | struct btrfs_free_space *info) | 237 | struct btrfs_free_space *info) |
183 | { | 238 | { |
184 | rb_erase(&info->offset_index, &block_group->free_space_offset); | 239 | rb_erase(&info->offset_index, &block_group->free_space_offset); |
185 | rb_erase(&info->bytes_index, &block_group->free_space_bytes); | 240 | block_group->free_extents--; |
241 | block_group->free_space -= info->bytes; | ||
186 | } | 242 | } |
187 | 243 | ||
188 | static int link_free_space(struct btrfs_block_group_cache *block_group, | 244 | static int link_free_space(struct btrfs_block_group_cache *block_group, |
@@ -190,17 +246,314 @@ static int link_free_space(struct btrfs_block_group_cache *block_group, | |||
190 | { | 246 | { |
191 | int ret = 0; | 247 | int ret = 0; |
192 | 248 | ||
193 | 249 | BUG_ON(!info->bitmap && !info->bytes); | |
194 | BUG_ON(!info->bytes); | ||
195 | ret = tree_insert_offset(&block_group->free_space_offset, info->offset, | 250 | ret = tree_insert_offset(&block_group->free_space_offset, info->offset, |
196 | &info->offset_index); | 251 | &info->offset_index, (info->bitmap != NULL)); |
197 | if (ret) | 252 | if (ret) |
198 | return ret; | 253 | return ret; |
199 | 254 | ||
200 | ret = tree_insert_bytes(&block_group->free_space_bytes, info->bytes, | 255 | block_group->free_space += info->bytes; |
201 | &info->bytes_index); | 256 | block_group->free_extents++; |
202 | if (ret) | 257 | return ret; |
203 | return ret; | 258 | } |
259 | |||
260 | static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) | ||
261 | { | ||
262 | u64 max_bytes, possible_bytes; | ||
263 | |||
264 | /* | ||
265 | * The goal is to keep the total amount of memory used per 1gb of space | ||
266 | * at or below 32k, so we need to adjust how much memory we allow to be | ||
267 | * used by extent based free space tracking | ||
268 | */ | ||
269 | max_bytes = MAX_CACHE_BYTES_PER_GIG * | ||
270 | (div64_u64(block_group->key.offset, 1024 * 1024 * 1024)); | ||
271 | |||
272 | possible_bytes = (block_group->total_bitmaps * PAGE_CACHE_SIZE) + | ||
273 | (sizeof(struct btrfs_free_space) * | ||
274 | block_group->extents_thresh); | ||
275 | |||
276 | if (possible_bytes > max_bytes) { | ||
277 | int extent_bytes = max_bytes - | ||
278 | (block_group->total_bitmaps * PAGE_CACHE_SIZE); | ||
279 | |||
280 | if (extent_bytes <= 0) { | ||
281 | block_group->extents_thresh = 0; | ||
282 | return; | ||
283 | } | ||
284 | |||
285 | block_group->extents_thresh = extent_bytes / | ||
286 | (sizeof(struct btrfs_free_space)); | ||
287 | } | ||
288 | } | ||
289 | |||
290 | static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group, | ||
291 | struct btrfs_free_space *info, u64 offset, | ||
292 | u64 bytes) | ||
293 | { | ||
294 | unsigned long start, end; | ||
295 | unsigned long i; | ||
296 | |||
297 | start = offset_to_bit(info->offset, block_group->sectorsize, offset); | ||
298 | end = start + bytes_to_bits(bytes, block_group->sectorsize); | ||
299 | BUG_ON(end > BITS_PER_BITMAP); | ||
300 | |||
301 | for (i = start; i < end; i++) | ||
302 | clear_bit(i, info->bitmap); | ||
303 | |||
304 | info->bytes -= bytes; | ||
305 | block_group->free_space -= bytes; | ||
306 | } | ||
307 | |||
308 | static void bitmap_set_bits(struct btrfs_block_group_cache *block_group, | ||
309 | struct btrfs_free_space *info, u64 offset, | ||
310 | u64 bytes) | ||
311 | { | ||
312 | unsigned long start, end; | ||
313 | unsigned long i; | ||
314 | |||
315 | start = offset_to_bit(info->offset, block_group->sectorsize, offset); | ||
316 | end = start + bytes_to_bits(bytes, block_group->sectorsize); | ||
317 | BUG_ON(end > BITS_PER_BITMAP); | ||
318 | |||
319 | for (i = start; i < end; i++) | ||
320 | set_bit(i, info->bitmap); | ||
321 | |||
322 | info->bytes += bytes; | ||
323 | block_group->free_space += bytes; | ||
324 | } | ||
325 | |||
326 | static int search_bitmap(struct btrfs_block_group_cache *block_group, | ||
327 | struct btrfs_free_space *bitmap_info, u64 *offset, | ||
328 | u64 *bytes) | ||
329 | { | ||
330 | unsigned long found_bits = 0; | ||
331 | unsigned long bits, i; | ||
332 | unsigned long next_zero; | ||
333 | |||
334 | i = offset_to_bit(bitmap_info->offset, block_group->sectorsize, | ||
335 | max_t(u64, *offset, bitmap_info->offset)); | ||
336 | bits = bytes_to_bits(*bytes, block_group->sectorsize); | ||
337 | |||
338 | for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i); | ||
339 | i < BITS_PER_BITMAP; | ||
340 | i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) { | ||
341 | next_zero = find_next_zero_bit(bitmap_info->bitmap, | ||
342 | BITS_PER_BITMAP, i); | ||
343 | if ((next_zero - i) >= bits) { | ||
344 | found_bits = next_zero - i; | ||
345 | break; | ||
346 | } | ||
347 | i = next_zero; | ||
348 | } | ||
349 | |||
350 | if (found_bits) { | ||
351 | *offset = (u64)(i * block_group->sectorsize) + | ||
352 | bitmap_info->offset; | ||
353 | *bytes = (u64)(found_bits) * block_group->sectorsize; | ||
354 | return 0; | ||
355 | } | ||
356 | |||
357 | return -1; | ||
358 | } | ||
359 | |||
360 | static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache | ||
361 | *block_group, u64 *offset, | ||
362 | u64 *bytes, int debug) | ||
363 | { | ||
364 | struct btrfs_free_space *entry; | ||
365 | struct rb_node *node; | ||
366 | int ret; | ||
367 | |||
368 | if (!block_group->free_space_offset.rb_node) | ||
369 | return NULL; | ||
370 | |||
371 | entry = tree_search_offset(block_group, | ||
372 | offset_to_bitmap(block_group, *offset), | ||
373 | 0, 1); | ||
374 | if (!entry) | ||
375 | return NULL; | ||
376 | |||
377 | for (node = &entry->offset_index; node; node = rb_next(node)) { | ||
378 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | ||
379 | if (entry->bytes < *bytes) | ||
380 | continue; | ||
381 | |||
382 | if (entry->bitmap) { | ||
383 | ret = search_bitmap(block_group, entry, offset, bytes); | ||
384 | if (!ret) | ||
385 | return entry; | ||
386 | continue; | ||
387 | } | ||
388 | |||
389 | *offset = entry->offset; | ||
390 | *bytes = entry->bytes; | ||
391 | return entry; | ||
392 | } | ||
393 | |||
394 | return NULL; | ||
395 | } | ||
396 | |||
397 | static void add_new_bitmap(struct btrfs_block_group_cache *block_group, | ||
398 | struct btrfs_free_space *info, u64 offset) | ||
399 | { | ||
400 | u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize; | ||
401 | int max_bitmaps = (int)div64_u64(block_group->key.offset + | ||
402 | bytes_per_bg - 1, bytes_per_bg); | ||
403 | BUG_ON(block_group->total_bitmaps >= max_bitmaps); | ||
404 | |||
405 | info->offset = offset_to_bitmap(block_group, offset); | ||
406 | link_free_space(block_group, info); | ||
407 | block_group->total_bitmaps++; | ||
408 | |||
409 | recalculate_thresholds(block_group); | ||
410 | } | ||
411 | |||
412 | static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group, | ||
413 | struct btrfs_free_space *bitmap_info, | ||
414 | u64 *offset, u64 *bytes) | ||
415 | { | ||
416 | u64 end; | ||
417 | |||
418 | again: | ||
419 | end = bitmap_info->offset + | ||
420 | (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1; | ||
421 | |||
422 | if (*offset > bitmap_info->offset && *offset + *bytes > end) { | ||
423 | bitmap_clear_bits(block_group, bitmap_info, *offset, | ||
424 | end - *offset + 1); | ||
425 | *bytes -= end - *offset + 1; | ||
426 | *offset = end + 1; | ||
427 | } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) { | ||
428 | bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes); | ||
429 | *bytes = 0; | ||
430 | } | ||
431 | |||
432 | if (*bytes) { | ||
433 | if (!bitmap_info->bytes) { | ||
434 | unlink_free_space(block_group, bitmap_info); | ||
435 | kfree(bitmap_info->bitmap); | ||
436 | kfree(bitmap_info); | ||
437 | block_group->total_bitmaps--; | ||
438 | recalculate_thresholds(block_group); | ||
439 | } | ||
440 | |||
441 | bitmap_info = tree_search_offset(block_group, | ||
442 | offset_to_bitmap(block_group, | ||
443 | *offset), | ||
444 | 1, 0); | ||
445 | if (!bitmap_info) | ||
446 | return -EINVAL; | ||
447 | |||
448 | if (!bitmap_info->bitmap) | ||
449 | return -EAGAIN; | ||
450 | |||
451 | goto again; | ||
452 | } else if (!bitmap_info->bytes) { | ||
453 | unlink_free_space(block_group, bitmap_info); | ||
454 | kfree(bitmap_info->bitmap); | ||
455 | kfree(bitmap_info); | ||
456 | block_group->total_bitmaps--; | ||
457 | recalculate_thresholds(block_group); | ||
458 | } | ||
459 | |||
460 | return 0; | ||
461 | } | ||
462 | |||
463 | static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, | ||
464 | struct btrfs_free_space *info) | ||
465 | { | ||
466 | struct btrfs_free_space *bitmap_info; | ||
467 | int added = 0; | ||
468 | u64 bytes, offset, end; | ||
469 | int ret; | ||
470 | |||
471 | /* | ||
472 | * If we are below the extents threshold then we can add this as an | ||
473 | * extent, and don't have to deal with the bitmap | ||
474 | */ | ||
475 | if (block_group->free_extents < block_group->extents_thresh && | ||
476 | info->bytes > block_group->sectorsize * 4) | ||
477 | return 0; | ||
478 | |||
479 | /* | ||
480 | * some block groups are so tiny they can't be enveloped by a bitmap, so | ||
481 | * don't even bother to create a bitmap for this | ||
482 | */ | ||
483 | if (BITS_PER_BITMAP * block_group->sectorsize > | ||
484 | block_group->key.offset) | ||
485 | return 0; | ||
486 | |||
487 | bytes = info->bytes; | ||
488 | offset = info->offset; | ||
489 | |||
490 | again: | ||
491 | bitmap_info = tree_search_offset(block_group, | ||
492 | offset_to_bitmap(block_group, offset), | ||
493 | 1, 0); | ||
494 | if (!bitmap_info) { | ||
495 | BUG_ON(added); | ||
496 | goto new_bitmap; | ||
497 | } | ||
498 | |||
499 | end = bitmap_info->offset + | ||
500 | (u64)(BITS_PER_BITMAP * block_group->sectorsize); | ||
501 | |||
502 | if (offset >= bitmap_info->offset && offset + bytes > end) { | ||
503 | bitmap_set_bits(block_group, bitmap_info, offset, | ||
504 | end - offset); | ||
505 | bytes -= end - offset; | ||
506 | offset = end; | ||
507 | added = 0; | ||
508 | } else if (offset >= bitmap_info->offset && offset + bytes <= end) { | ||
509 | bitmap_set_bits(block_group, bitmap_info, offset, bytes); | ||
510 | bytes = 0; | ||
511 | } else { | ||
512 | BUG(); | ||
513 | } | ||
514 | |||
515 | if (!bytes) { | ||
516 | ret = 1; | ||
517 | goto out; | ||
518 | } else | ||
519 | goto again; | ||
520 | |||
521 | new_bitmap: | ||
522 | if (info && info->bitmap) { | ||
523 | add_new_bitmap(block_group, info, offset); | ||
524 | added = 1; | ||
525 | info = NULL; | ||
526 | goto again; | ||
527 | } else { | ||
528 | spin_unlock(&block_group->tree_lock); | ||
529 | |||
530 | /* no pre-allocated info, allocate a new one */ | ||
531 | if (!info) { | ||
532 | info = kzalloc(sizeof(struct btrfs_free_space), | ||
533 | GFP_NOFS); | ||
534 | if (!info) { | ||
535 | spin_lock(&block_group->tree_lock); | ||
536 | ret = -ENOMEM; | ||
537 | goto out; | ||
538 | } | ||
539 | } | ||
540 | |||
541 | /* allocate the bitmap */ | ||
542 | info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); | ||
543 | spin_lock(&block_group->tree_lock); | ||
544 | if (!info->bitmap) { | ||
545 | ret = -ENOMEM; | ||
546 | goto out; | ||
547 | } | ||
548 | goto again; | ||
549 | } | ||
550 | |||
551 | out: | ||
552 | if (info) { | ||
553 | if (info->bitmap) | ||
554 | kfree(info->bitmap); | ||
555 | kfree(info); | ||
556 | } | ||
204 | 557 | ||
205 | return ret; | 558 | return ret; |
206 | } | 559 | } |
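The search_bitmap() helper added in the hunk above looks for a run of set bits long enough to satisfy the request, hopping between runs with find_next_bit() and find_next_zero_bit(). The userspace sketch below shows the same run-finding idea with a plain scan; it only approximates the kernel helpers, and the bitmap contents are made up for the example.

#include <stdio.h>
#include <stdint.h>

#define NBITS 64

static int test_bit64(const uint64_t *map, unsigned int i)
{
	return (map[i / 64] >> (i % 64)) & 1;
}

/*
 * Return the first bit index starting a run of at least 'want' set bits,
 * or -1 if no run is long enough. This is the idea behind search_bitmap(),
 * minus the kernel's optimized bit-search primitives.
 */
static long find_run(const uint64_t *map, unsigned int nbits, unsigned int want)
{
	unsigned int i = 0;

	while (i < nbits) {
		unsigned int start, len;

		while (i < nbits && !test_bit64(map, i))  /* skip used sectors */
			i++;
		start = i;
		while (i < nbits && test_bit64(map, i))   /* measure the free run */
			i++;
		len = i - start;
		if (len >= want)
			return start;
	}
	return -1;
}

int main(void)
{
	/* bits 3..5 and 10..17 set: a 3-sector run and an 8-sector run free */
	uint64_t map[1] = { (0x7ULL << 3) | (0xffULL << 10) };

	printf("first run of >= 4 free sectors starts at bit %ld\n",
	       find_run(map, NBITS, 4));
	return 0;
}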
@@ -208,8 +561,8 @@ static int link_free_space(struct btrfs_block_group_cache *block_group, | |||
208 | int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | 561 | int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, |
209 | u64 offset, u64 bytes) | 562 | u64 offset, u64 bytes) |
210 | { | 563 | { |
211 | struct btrfs_free_space *right_info; | 564 | struct btrfs_free_space *right_info = NULL; |
212 | struct btrfs_free_space *left_info; | 565 | struct btrfs_free_space *left_info = NULL; |
213 | struct btrfs_free_space *info = NULL; | 566 | struct btrfs_free_space *info = NULL; |
214 | int ret = 0; | 567 | int ret = 0; |
215 | 568 | ||
@@ -227,18 +580,38 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | |||
227 | * are adding, if there is remove that struct and add a new one to | 580 | * are adding, if there is remove that struct and add a new one to |
228 | * cover the entire range | 581 | * cover the entire range |
229 | */ | 582 | */ |
230 | right_info = tree_search_offset(&block_group->free_space_offset, | 583 | right_info = tree_search_offset(block_group, offset + bytes, 0, 0); |
231 | offset+bytes, 0, 0); | 584 | if (right_info && rb_prev(&right_info->offset_index)) |
232 | left_info = tree_search_offset(&block_group->free_space_offset, | 585 | left_info = rb_entry(rb_prev(&right_info->offset_index), |
233 | offset-1, 0, 1); | 586 | struct btrfs_free_space, offset_index); |
587 | else | ||
588 | left_info = tree_search_offset(block_group, offset - 1, 0, 0); | ||
234 | 589 | ||
235 | if (right_info) { | 590 | /* |
591 | * If there was no extent directly to the left or right of this new | ||
592 | * extent then we know we're going to have to allocate a new extent, so | ||
593 | * before we do that see if we need to drop this into a bitmap | ||
594 | */ | ||
595 | if ((!left_info || left_info->bitmap) && | ||
596 | (!right_info || right_info->bitmap)) { | ||
597 | ret = insert_into_bitmap(block_group, info); | ||
598 | |||
599 | if (ret < 0) { | ||
600 | goto out; | ||
601 | } else if (ret) { | ||
602 | ret = 0; | ||
603 | goto out; | ||
604 | } | ||
605 | } | ||
606 | |||
607 | if (right_info && !right_info->bitmap) { | ||
236 | unlink_free_space(block_group, right_info); | 608 | unlink_free_space(block_group, right_info); |
237 | info->bytes += right_info->bytes; | 609 | info->bytes += right_info->bytes; |
238 | kfree(right_info); | 610 | kfree(right_info); |
239 | } | 611 | } |
240 | 612 | ||
241 | if (left_info && left_info->offset + left_info->bytes == offset) { | 613 | if (left_info && !left_info->bitmap && |
614 | left_info->offset + left_info->bytes == offset) { | ||
242 | unlink_free_space(block_group, left_info); | 615 | unlink_free_space(block_group, left_info); |
243 | info->offset = left_info->offset; | 616 | info->offset = left_info->offset; |
244 | info->bytes += left_info->bytes; | 617 | info->bytes += left_info->bytes; |
@@ -248,11 +621,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | |||
248 | ret = link_free_space(block_group, info); | 621 | ret = link_free_space(block_group, info); |
249 | if (ret) | 622 | if (ret) |
250 | kfree(info); | 623 | kfree(info); |
251 | 624 | out: | |
252 | spin_unlock(&block_group->tree_lock); | 625 | spin_unlock(&block_group->tree_lock); |
253 | 626 | ||
254 | if (ret) { | 627 | if (ret) { |
255 | printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret); | 628 | printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret); |
256 | BUG_ON(ret == -EEXIST); | 629 | BUG_ON(ret == -EEXIST); |
257 | } | 630 | } |
258 | 631 | ||
@@ -263,40 +636,65 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, | |||
263 | u64 offset, u64 bytes) | 636 | u64 offset, u64 bytes) |
264 | { | 637 | { |
265 | struct btrfs_free_space *info; | 638 | struct btrfs_free_space *info; |
639 | struct btrfs_free_space *next_info = NULL; | ||
266 | int ret = 0; | 640 | int ret = 0; |
267 | 641 | ||
268 | spin_lock(&block_group->tree_lock); | 642 | spin_lock(&block_group->tree_lock); |
269 | 643 | ||
270 | info = tree_search_offset(&block_group->free_space_offset, offset, 0, | 644 | again: |
271 | 1); | 645 | info = tree_search_offset(block_group, offset, 0, 0); |
272 | if (info && info->offset == offset) { | 646 | if (!info) { |
273 | if (info->bytes < bytes) { | 647 | WARN_ON(1); |
274 | printk(KERN_ERR "Found free space at %llu, size %llu," | 648 | goto out_lock; |
275 | "trying to use %llu\n", | 649 | } |
276 | (unsigned long long)info->offset, | 650 | |
277 | (unsigned long long)info->bytes, | 651 | if (info->bytes < bytes && rb_next(&info->offset_index)) { |
278 | (unsigned long long)bytes); | 652 | u64 end; |
653 | next_info = rb_entry(rb_next(&info->offset_index), | ||
654 | struct btrfs_free_space, | ||
655 | offset_index); | ||
656 | |||
657 | if (next_info->bitmap) | ||
658 | end = next_info->offset + BITS_PER_BITMAP * | ||
659 | block_group->sectorsize - 1; | ||
660 | else | ||
661 | end = next_info->offset + next_info->bytes; | ||
662 | |||
663 | if (next_info->bytes < bytes || | ||
664 | next_info->offset > offset || offset > end) { | ||
665 | printk(KERN_CRIT "Found free space at %llu, size %llu," | ||
666 | " trying to use %llu\n", | ||
667 | (unsigned long long)info->offset, | ||
668 | (unsigned long long)info->bytes, | ||
669 | (unsigned long long)bytes); | ||
279 | WARN_ON(1); | 670 | WARN_ON(1); |
280 | ret = -EINVAL; | 671 | ret = -EINVAL; |
281 | spin_unlock(&block_group->tree_lock); | 672 | goto out_lock; |
282 | goto out; | ||
283 | } | 673 | } |
284 | unlink_free_space(block_group, info); | ||
285 | 674 | ||
286 | if (info->bytes == bytes) { | 675 | info = next_info; |
287 | kfree(info); | 676 | } |
288 | spin_unlock(&block_group->tree_lock); | 677 | |
289 | goto out; | 678 | if (info->bytes == bytes) { |
679 | unlink_free_space(block_group, info); | ||
680 | if (info->bitmap) { | ||
681 | kfree(info->bitmap); | ||
682 | block_group->total_bitmaps--; | ||
290 | } | 683 | } |
684 | kfree(info); | ||
685 | goto out_lock; | ||
686 | } | ||
291 | 687 | ||
688 | if (!info->bitmap && info->offset == offset) { | ||
689 | unlink_free_space(block_group, info); | ||
292 | info->offset += bytes; | 690 | info->offset += bytes; |
293 | info->bytes -= bytes; | 691 | info->bytes -= bytes; |
692 | link_free_space(block_group, info); | ||
693 | goto out_lock; | ||
694 | } | ||
294 | 695 | ||
295 | ret = link_free_space(block_group, info); | 696 | if (!info->bitmap && info->offset <= offset && |
296 | spin_unlock(&block_group->tree_lock); | 697 | info->offset + info->bytes >= offset + bytes) { |
297 | BUG_ON(ret); | ||
298 | } else if (info && info->offset < offset && | ||
299 | info->offset + info->bytes >= offset + bytes) { | ||
300 | u64 old_start = info->offset; | 698 | u64 old_start = info->offset; |
301 | /* | 699 | /* |
302 | * we're freeing space in the middle of the info, | 700 | * we're freeing space in the middle of the info, |
@@ -312,7 +710,9 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, | |||
312 | info->offset = offset + bytes; | 710 | info->offset = offset + bytes; |
313 | info->bytes = old_end - info->offset; | 711 | info->bytes = old_end - info->offset; |
314 | ret = link_free_space(block_group, info); | 712 | ret = link_free_space(block_group, info); |
315 | BUG_ON(ret); | 713 | WARN_ON(ret); |
714 | if (ret) | ||
715 | goto out_lock; | ||
316 | } else { | 716 | } else { |
317 | /* the hole we're creating ends at the end | 717 | /* the hole we're creating ends at the end |
318 | * of the info struct, just free the info | 718 | * of the info struct, just free the info |
@@ -320,32 +720,22 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, | |||
320 | kfree(info); | 720 | kfree(info); |
321 | } | 721 | } |
322 | spin_unlock(&block_group->tree_lock); | 722 | spin_unlock(&block_group->tree_lock); |
323 | /* step two, insert a new info struct to cover anything | 723 | |
324 | * before the hole | 724 | /* step two, insert a new info struct to cover |
725 | * anything before the hole | ||
325 | */ | 726 | */ |
326 | ret = btrfs_add_free_space(block_group, old_start, | 727 | ret = btrfs_add_free_space(block_group, old_start, |
327 | offset - old_start); | 728 | offset - old_start); |
328 | BUG_ON(ret); | 729 | WARN_ON(ret); |
329 | } else { | 730 | goto out; |
330 | spin_unlock(&block_group->tree_lock); | ||
331 | if (!info) { | ||
332 | printk(KERN_ERR "couldn't find space %llu to free\n", | ||
333 | (unsigned long long)offset); | ||
334 | printk(KERN_ERR "cached is %d, offset %llu bytes %llu\n", | ||
335 | block_group->cached, | ||
336 | (unsigned long long)block_group->key.objectid, | ||
337 | (unsigned long long)block_group->key.offset); | ||
338 | btrfs_dump_free_space(block_group, bytes); | ||
339 | } else if (info) { | ||
340 | printk(KERN_ERR "hmm, found offset=%llu bytes=%llu, " | ||
341 | "but wanted offset=%llu bytes=%llu\n", | ||
342 | (unsigned long long)info->offset, | ||
343 | (unsigned long long)info->bytes, | ||
344 | (unsigned long long)offset, | ||
345 | (unsigned long long)bytes); | ||
346 | } | ||
347 | WARN_ON(1); | ||
348 | } | 731 | } |
732 | |||
733 | ret = remove_from_bitmap(block_group, info, &offset, &bytes); | ||
734 | if (ret == -EAGAIN) | ||
735 | goto again; | ||
736 | BUG_ON(ret); | ||
737 | out_lock: | ||
738 | spin_unlock(&block_group->tree_lock); | ||
349 | out: | 739 | out: |
350 | return ret; | 740 | return ret; |
351 | } | 741 | } |
@@ -361,10 +751,13 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, | |||
361 | info = rb_entry(n, struct btrfs_free_space, offset_index); | 751 | info = rb_entry(n, struct btrfs_free_space, offset_index); |
362 | if (info->bytes >= bytes) | 752 | if (info->bytes >= bytes) |
363 | count++; | 753 | count++; |
364 | printk(KERN_ERR "entry offset %llu, bytes %llu\n", | 754 | printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n", |
365 | (unsigned long long)info->offset, | 755 | (unsigned long long)info->offset, |
366 | (unsigned long long)info->bytes); | 756 | (unsigned long long)info->bytes, |
757 | (info->bitmap) ? "yes" : "no"); | ||
367 | } | 758 | } |
759 | printk(KERN_INFO "block group has cluster?: %s\n", | ||
760 | list_empty(&block_group->cluster_list) ? "no" : "yes"); | ||
368 | printk(KERN_INFO "%d blocks of free space at or bigger than bytes is" | 761 | printk(KERN_INFO "%d blocks of free space at or bigger than bytes is" |
369 | "\n", count); | 762 | "\n", count); |
370 | } | 763 | } |
@@ -397,26 +790,35 @@ __btrfs_return_cluster_to_free_space( | |||
397 | { | 790 | { |
398 | struct btrfs_free_space *entry; | 791 | struct btrfs_free_space *entry; |
399 | struct rb_node *node; | 792 | struct rb_node *node; |
793 | bool bitmap; | ||
400 | 794 | ||
401 | spin_lock(&cluster->lock); | 795 | spin_lock(&cluster->lock); |
402 | if (cluster->block_group != block_group) | 796 | if (cluster->block_group != block_group) |
403 | goto out; | 797 | goto out; |
404 | 798 | ||
799 | bitmap = cluster->points_to_bitmap; | ||
800 | cluster->block_group = NULL; | ||
405 | cluster->window_start = 0; | 801 | cluster->window_start = 0; |
802 | list_del_init(&cluster->block_group_list); | ||
803 | cluster->points_to_bitmap = false; | ||
804 | |||
805 | if (bitmap) | ||
806 | goto out; | ||
807 | |||
406 | node = rb_first(&cluster->root); | 808 | node = rb_first(&cluster->root); |
407 | while(node) { | 809 | while (node) { |
408 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | 810 | entry = rb_entry(node, struct btrfs_free_space, offset_index); |
409 | node = rb_next(&entry->offset_index); | 811 | node = rb_next(&entry->offset_index); |
410 | rb_erase(&entry->offset_index, &cluster->root); | 812 | rb_erase(&entry->offset_index, &cluster->root); |
411 | link_free_space(block_group, entry); | 813 | BUG_ON(entry->bitmap); |
814 | tree_insert_offset(&block_group->free_space_offset, | ||
815 | entry->offset, &entry->offset_index, 0); | ||
412 | } | 816 | } |
413 | list_del_init(&cluster->block_group_list); | ||
414 | |||
415 | btrfs_put_block_group(cluster->block_group); | ||
416 | cluster->block_group = NULL; | ||
417 | cluster->root.rb_node = NULL; | 817 | cluster->root.rb_node = NULL; |
818 | |||
418 | out: | 819 | out: |
419 | spin_unlock(&cluster->lock); | 820 | spin_unlock(&cluster->lock); |
821 | btrfs_put_block_group(block_group); | ||
420 | return 0; | 822 | return 0; |
421 | } | 823 | } |
422 | 824 | ||
@@ -425,20 +827,28 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) | |||
425 | struct btrfs_free_space *info; | 827 | struct btrfs_free_space *info; |
426 | struct rb_node *node; | 828 | struct rb_node *node; |
427 | struct btrfs_free_cluster *cluster; | 829 | struct btrfs_free_cluster *cluster; |
428 | struct btrfs_free_cluster *safe; | 830 | struct list_head *head; |
429 | 831 | ||
430 | spin_lock(&block_group->tree_lock); | 832 | spin_lock(&block_group->tree_lock); |
431 | 833 | while ((head = block_group->cluster_list.next) != | |
432 | list_for_each_entry_safe(cluster, safe, &block_group->cluster_list, | 834 | &block_group->cluster_list) { |
433 | block_group_list) { | 835 | cluster = list_entry(head, struct btrfs_free_cluster, |
836 | block_group_list); | ||
434 | 837 | ||
435 | WARN_ON(cluster->block_group != block_group); | 838 | WARN_ON(cluster->block_group != block_group); |
436 | __btrfs_return_cluster_to_free_space(block_group, cluster); | 839 | __btrfs_return_cluster_to_free_space(block_group, cluster); |
840 | if (need_resched()) { | ||
841 | spin_unlock(&block_group->tree_lock); | ||
842 | cond_resched(); | ||
843 | spin_lock(&block_group->tree_lock); | ||
844 | } | ||
437 | } | 845 | } |
438 | 846 | ||
439 | while ((node = rb_last(&block_group->free_space_bytes)) != NULL) { | 847 | while ((node = rb_last(&block_group->free_space_offset)) != NULL) { |
440 | info = rb_entry(node, struct btrfs_free_space, bytes_index); | 848 | info = rb_entry(node, struct btrfs_free_space, offset_index); |
441 | unlink_free_space(block_group, info); | 849 | unlink_free_space(block_group, info); |
850 | if (info->bitmap) | ||
851 | kfree(info->bitmap); | ||
442 | kfree(info); | 852 | kfree(info); |
443 | if (need_resched()) { | 853 | if (need_resched()) { |
444 | spin_unlock(&block_group->tree_lock); | 854 | spin_unlock(&block_group->tree_lock); |
@@ -446,6 +856,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) | |||
446 | spin_lock(&block_group->tree_lock); | 856 | spin_lock(&block_group->tree_lock); |
447 | } | 857 | } |
448 | } | 858 | } |
859 | |||
449 | spin_unlock(&block_group->tree_lock); | 860 | spin_unlock(&block_group->tree_lock); |
450 | } | 861 | } |
451 | 862 | ||
@@ -453,25 +864,35 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, | |||
453 | u64 offset, u64 bytes, u64 empty_size) | 864 | u64 offset, u64 bytes, u64 empty_size) |
454 | { | 865 | { |
455 | struct btrfs_free_space *entry = NULL; | 866 | struct btrfs_free_space *entry = NULL; |
867 | u64 bytes_search = bytes + empty_size; | ||
456 | u64 ret = 0; | 868 | u64 ret = 0; |
457 | 869 | ||
458 | spin_lock(&block_group->tree_lock); | 870 | spin_lock(&block_group->tree_lock); |
459 | entry = tree_search_offset(&block_group->free_space_offset, offset, | 871 | entry = find_free_space(block_group, &offset, &bytes_search, 0); |
460 | bytes + empty_size, 1); | ||
461 | if (!entry) | 872 | if (!entry) |
462 | entry = tree_search_bytes(&block_group->free_space_bytes, | 873 | goto out; |
463 | offset, bytes + empty_size); | 874 | |
464 | if (entry) { | 875 | ret = offset; |
876 | if (entry->bitmap) { | ||
877 | bitmap_clear_bits(block_group, entry, offset, bytes); | ||
878 | if (!entry->bytes) { | ||
879 | unlink_free_space(block_group, entry); | ||
880 | kfree(entry->bitmap); | ||
881 | kfree(entry); | ||
882 | block_group->total_bitmaps--; | ||
883 | recalculate_thresholds(block_group); | ||
884 | } | ||
885 | } else { | ||
465 | unlink_free_space(block_group, entry); | 886 | unlink_free_space(block_group, entry); |
466 | ret = entry->offset; | ||
467 | entry->offset += bytes; | 887 | entry->offset += bytes; |
468 | entry->bytes -= bytes; | 888 | entry->bytes -= bytes; |
469 | |||
470 | if (!entry->bytes) | 889 | if (!entry->bytes) |
471 | kfree(entry); | 890 | kfree(entry); |
472 | else | 891 | else |
473 | link_free_space(block_group, entry); | 892 | link_free_space(block_group, entry); |
474 | } | 893 | } |
894 | |||
895 | out: | ||
475 | spin_unlock(&block_group->tree_lock); | 896 | spin_unlock(&block_group->tree_lock); |
476 | 897 | ||
477 | return ret; | 898 | return ret; |
@@ -517,6 +938,47 @@ int btrfs_return_cluster_to_free_space( | |||
517 | return ret; | 938 | return ret; |
518 | } | 939 | } |
519 | 940 | ||
941 | static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, | ||
942 | struct btrfs_free_cluster *cluster, | ||
943 | u64 bytes, u64 min_start) | ||
944 | { | ||
945 | struct btrfs_free_space *entry; | ||
946 | int err; | ||
947 | u64 search_start = cluster->window_start; | ||
948 | u64 search_bytes = bytes; | ||
949 | u64 ret = 0; | ||
950 | |||
951 | spin_lock(&block_group->tree_lock); | ||
952 | spin_lock(&cluster->lock); | ||
953 | |||
954 | if (!cluster->points_to_bitmap) | ||
955 | goto out; | ||
956 | |||
957 | if (cluster->block_group != block_group) | ||
958 | goto out; | ||
959 | |||
960 | entry = tree_search_offset(block_group, search_start, 0, 0); | ||
961 | |||
962 | if (!entry || !entry->bitmap) | ||
963 | goto out; | ||
964 | |||
965 | search_start = min_start; | ||
966 | search_bytes = bytes; | ||
967 | |||
968 | err = search_bitmap(block_group, entry, &search_start, | ||
969 | &search_bytes); | ||
970 | if (err) | ||
971 | goto out; | ||
972 | |||
973 | ret = search_start; | ||
974 | bitmap_clear_bits(block_group, entry, ret, bytes); | ||
975 | out: | ||
976 | spin_unlock(&cluster->lock); | ||
977 | spin_unlock(&block_group->tree_lock); | ||
978 | |||
979 | return ret; | ||
980 | } | ||
981 | |||
520 | /* | 982 | /* |
521 | * given a cluster, try to allocate 'bytes' from it, returns 0 | 983 | * given a cluster, try to allocate 'bytes' from it, returns 0 |
522 | * if it couldn't find anything suitably large, or a logical disk offset | 984 | * if it couldn't find anything suitably large, or a logical disk offset |
@@ -530,6 +992,10 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, | |||
530 | struct rb_node *node; | 992 | struct rb_node *node; |
531 | u64 ret = 0; | 993 | u64 ret = 0; |
532 | 994 | ||
995 | if (cluster->points_to_bitmap) | ||
996 | return btrfs_alloc_from_bitmap(block_group, cluster, bytes, | ||
997 | min_start); | ||
998 | |||
533 | spin_lock(&cluster->lock); | 999 | spin_lock(&cluster->lock); |
534 | if (bytes > cluster->max_size) | 1000 | if (bytes > cluster->max_size) |
535 | goto out; | 1001 | goto out; |
@@ -567,9 +1033,73 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, | |||
567 | } | 1033 | } |
568 | out: | 1034 | out: |
569 | spin_unlock(&cluster->lock); | 1035 | spin_unlock(&cluster->lock); |
1036 | |||
570 | return ret; | 1037 | return ret; |
571 | } | 1038 | } |
572 | 1039 | ||
1040 | static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, | ||
1041 | struct btrfs_free_space *entry, | ||
1042 | struct btrfs_free_cluster *cluster, | ||
1043 | u64 offset, u64 bytes, u64 min_bytes) | ||
1044 | { | ||
1045 | unsigned long next_zero; | ||
1046 | unsigned long i; | ||
1047 | unsigned long search_bits; | ||
1048 | unsigned long total_bits; | ||
1049 | unsigned long found_bits; | ||
1050 | unsigned long start = 0; | ||
1051 | unsigned long total_found = 0; | ||
1052 | bool found = false; | ||
1053 | |||
1054 | i = offset_to_bit(entry->offset, block_group->sectorsize, | ||
1055 | max_t(u64, offset, entry->offset)); | ||
1056 | search_bits = bytes_to_bits(min_bytes, block_group->sectorsize); | ||
1057 | total_bits = bytes_to_bits(bytes, block_group->sectorsize); | ||
1058 | |||
1059 | again: | ||
1060 | found_bits = 0; | ||
1061 | for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i); | ||
1062 | i < BITS_PER_BITMAP; | ||
1063 | i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) { | ||
1064 | next_zero = find_next_zero_bit(entry->bitmap, | ||
1065 | BITS_PER_BITMAP, i); | ||
1066 | if (next_zero - i >= search_bits) { | ||
1067 | found_bits = next_zero - i; | ||
1068 | break; | ||
1069 | } | ||
1070 | i = next_zero; | ||
1071 | } | ||
1072 | |||
1073 | if (!found_bits) | ||
1074 | return -1; | ||
1075 | |||
1076 | if (!found) { | ||
1077 | start = i; | ||
1078 | found = true; | ||
1079 | } | ||
1080 | |||
1081 | total_found += found_bits; | ||
1082 | |||
1083 | if (cluster->max_size < found_bits * block_group->sectorsize) | ||
1084 | cluster->max_size = found_bits * block_group->sectorsize; | ||
1085 | |||
1086 | if (total_found < total_bits) { | ||
1087 | i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero); | ||
1088 | if (i - start > total_bits * 2) { | ||
1089 | total_found = 0; | ||
1090 | cluster->max_size = 0; | ||
1091 | found = false; | ||
1092 | } | ||
1093 | goto again; | ||
1094 | } | ||
1095 | |||
1096 | cluster->window_start = start * block_group->sectorsize + | ||
1097 | entry->offset; | ||
1098 | cluster->points_to_bitmap = true; | ||
1099 | |||
1100 | return 0; | ||
1101 | } | ||
1102 | |||
573 | /* | 1103 | /* |
574 | * here we try to find a cluster of blocks in a block group. The goal | 1104 | * here we try to find a cluster of blocks in a block group. The goal |
575 | * is to find at least bytes free and up to empty_size + bytes free. | 1105 | * is to find at least bytes free and up to empty_size + bytes free. |
@@ -587,12 +1117,12 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | |||
587 | struct btrfs_free_space *entry = NULL; | 1117 | struct btrfs_free_space *entry = NULL; |
588 | struct rb_node *node; | 1118 | struct rb_node *node; |
589 | struct btrfs_free_space *next; | 1119 | struct btrfs_free_space *next; |
590 | struct btrfs_free_space *last; | 1120 | struct btrfs_free_space *last = NULL; |
591 | u64 min_bytes; | 1121 | u64 min_bytes; |
592 | u64 window_start; | 1122 | u64 window_start; |
593 | u64 window_free; | 1123 | u64 window_free; |
594 | u64 max_extent = 0; | 1124 | u64 max_extent = 0; |
595 | int total_retries = 0; | 1125 | bool found_bitmap = false; |
596 | int ret; | 1126 | int ret; |
597 | 1127 | ||
598 | /* for metadata, allow allocates with more holes */ | 1128 | /* for metadata, allow allocates with more holes */ |
@@ -620,31 +1150,80 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | |||
620 | goto out; | 1150 | goto out; |
621 | } | 1151 | } |
622 | again: | 1152 | again: |
623 | min_bytes = min(min_bytes, bytes + empty_size); | 1153 | entry = tree_search_offset(block_group, offset, found_bitmap, 1); |
624 | entry = tree_search_bytes(&block_group->free_space_bytes, | ||
625 | offset, min_bytes); | ||
626 | if (!entry) { | 1154 | if (!entry) { |
627 | ret = -ENOSPC; | 1155 | ret = -ENOSPC; |
628 | goto out; | 1156 | goto out; |
629 | } | 1157 | } |
1158 | |||
1159 | /* | ||
1160 | * If found_bitmap is true, we exhausted our search for extent entries, | ||
1161 | * and we just want to search all of the bitmaps that we can find, and | ||
1162 | * ignore any extent entries we find. | ||
1163 | */ | ||
1164 | while (entry->bitmap || found_bitmap || | ||
1165 | (!entry->bitmap && entry->bytes < min_bytes)) { | ||
1166 | struct rb_node *node = rb_next(&entry->offset_index); | ||
1167 | |||
1168 | if (entry->bitmap && entry->bytes > bytes + empty_size) { | ||
1169 | ret = btrfs_bitmap_cluster(block_group, entry, cluster, | ||
1170 | offset, bytes + empty_size, | ||
1171 | min_bytes); | ||
1172 | if (!ret) | ||
1173 | goto got_it; | ||
1174 | } | ||
1175 | |||
1176 | if (!node) { | ||
1177 | ret = -ENOSPC; | ||
1178 | goto out; | ||
1179 | } | ||
1180 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | ||
1181 | } | ||
1182 | |||
1183 | /* | ||
1184 | * We already searched all the extent entries from the passed in offset | ||
1185 | * to the end and didn't find enough space for the cluster, and we also | ||
1186 | * didn't find any bitmaps that met our criteria, so just go ahead and exit | ||
1187 | */ | ||
1188 | if (found_bitmap) { | ||
1189 | ret = -ENOSPC; | ||
1190 | goto out; | ||
1191 | } | ||
1192 | |||
1193 | cluster->points_to_bitmap = false; | ||
630 | window_start = entry->offset; | 1194 | window_start = entry->offset; |
631 | window_free = entry->bytes; | 1195 | window_free = entry->bytes; |
632 | last = entry; | 1196 | last = entry; |
633 | max_extent = entry->bytes; | 1197 | max_extent = entry->bytes; |
634 | 1198 | ||
635 | while(1) { | 1199 | while (1) { |
636 | /* our window is just right, let's fill it */ | 1200 | /* our window is just right, let's fill it */ |
637 | if (window_free >= bytes + empty_size) | 1201 | if (window_free >= bytes + empty_size) |
638 | break; | 1202 | break; |
639 | 1203 | ||
640 | node = rb_next(&last->offset_index); | 1204 | node = rb_next(&last->offset_index); |
641 | if (!node) { | 1205 | if (!node) { |
1206 | if (found_bitmap) | ||
1207 | goto again; | ||
642 | ret = -ENOSPC; | 1208 | ret = -ENOSPC; |
643 | goto out; | 1209 | goto out; |
644 | } | 1210 | } |
645 | next = rb_entry(node, struct btrfs_free_space, offset_index); | 1211 | next = rb_entry(node, struct btrfs_free_space, offset_index); |
646 | 1212 | ||
647 | /* | 1213 | /* |
1214 | * we found a bitmap, so if this search doesn't result in a | ||
1215 | * cluster, we know to go and search again for the bitmaps and | ||
1216 | * start looking for space there | ||
1217 | */ | ||
1218 | if (next->bitmap) { | ||
1219 | if (!found_bitmap) | ||
1220 | offset = next->offset; | ||
1221 | found_bitmap = true; | ||
1222 | last = next; | ||
1223 | continue; | ||
1224 | } | ||
1225 | |||
1226 | /* | ||
648 | * we haven't filled the empty size and the window is | 1227 | * we haven't filled the empty size and the window is |
649 | * very large. reset and try again | 1228 | * very large. reset and try again |
650 | */ | 1229 | */ |
@@ -655,19 +1234,6 @@ again: | |||
655 | window_free = entry->bytes; | 1234 | window_free = entry->bytes; |
656 | last = entry; | 1235 | last = entry; |
657 | max_extent = 0; | 1236 | max_extent = 0; |
658 | total_retries++; | ||
659 | if (total_retries % 64 == 0) { | ||
660 | if (min_bytes >= (bytes + empty_size)) { | ||
661 | ret = -ENOSPC; | ||
662 | goto out; | ||
663 | } | ||
664 | /* | ||
665 | * grow our allocation a bit, we're not having | ||
666 | * much luck | ||
667 | */ | ||
668 | min_bytes *= 2; | ||
669 | goto again; | ||
670 | } | ||
671 | } else { | 1237 | } else { |
672 | last = next; | 1238 | last = next; |
673 | window_free += next->bytes; | 1239 | window_free += next->bytes; |
@@ -685,11 +1251,19 @@ again: | |||
685 | * The cluster includes an rbtree, but only uses the offset index | 1251 | * The cluster includes an rbtree, but only uses the offset index |
686 | * of each free space cache entry. | 1252 | * of each free space cache entry. |
687 | */ | 1253 | */ |
688 | while(1) { | 1254 | while (1) { |
689 | node = rb_next(&entry->offset_index); | 1255 | node = rb_next(&entry->offset_index); |
690 | unlink_free_space(block_group, entry); | 1256 | if (entry->bitmap && node) { |
1257 | entry = rb_entry(node, struct btrfs_free_space, | ||
1258 | offset_index); | ||
1259 | continue; | ||
1260 | } else if (entry->bitmap && !node) { | ||
1261 | break; | ||
1262 | } | ||
1263 | |||
1264 | rb_erase(&entry->offset_index, &block_group->free_space_offset); | ||
691 | ret = tree_insert_offset(&cluster->root, entry->offset, | 1265 | ret = tree_insert_offset(&cluster->root, entry->offset, |
692 | &entry->offset_index); | 1266 | &entry->offset_index, 0); |
693 | BUG_ON(ret); | 1267 | BUG_ON(ret); |
694 | 1268 | ||
695 | if (!node || entry == last) | 1269 | if (!node || entry == last) |
@@ -697,8 +1271,10 @@ again: | |||
697 | 1271 | ||
698 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | 1272 | entry = rb_entry(node, struct btrfs_free_space, offset_index); |
699 | } | 1273 | } |
700 | ret = 0; | 1274 | |
701 | cluster->max_size = max_extent; | 1275 | cluster->max_size = max_extent; |
1276 | got_it: | ||
1277 | ret = 0; | ||
702 | atomic_inc(&block_group->count); | 1278 | atomic_inc(&block_group->count); |
703 | list_add_tail(&cluster->block_group_list, &block_group->cluster_list); | 1279 | list_add_tail(&cluster->block_group_list, &block_group->cluster_list); |
704 | cluster->block_group = block_group; | 1280 | cluster->block_group = block_group; |
@@ -718,6 +1294,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) | |||
718 | spin_lock_init(&cluster->refill_lock); | 1294 | spin_lock_init(&cluster->refill_lock); |
719 | cluster->root.rb_node = NULL; | 1295 | cluster->root.rb_node = NULL; |
720 | cluster->max_size = 0; | 1296 | cluster->max_size = 0; |
1297 | cluster->points_to_bitmap = false; | ||
721 | INIT_LIST_HEAD(&cluster->block_group_list); | 1298 | INIT_LIST_HEAD(&cluster->block_group_list); |
722 | cluster->block_group = NULL; | 1299 | cluster->block_group = NULL; |
723 | } | 1300 | } |
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 266fb8764054..890a8e79011b 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h | |||
@@ -19,6 +19,14 @@ | |||
19 | #ifndef __BTRFS_FREE_SPACE_CACHE | 19 | #ifndef __BTRFS_FREE_SPACE_CACHE |
20 | #define __BTRFS_FREE_SPACE_CACHE | 20 | #define __BTRFS_FREE_SPACE_CACHE |
21 | 21 | ||
22 | struct btrfs_free_space { | ||
23 | struct rb_node offset_index; | ||
24 | u64 offset; | ||
25 | u64 bytes; | ||
26 | unsigned long *bitmap; | ||
27 | struct list_head list; | ||
28 | }; | ||
29 | |||
22 | int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | 30 | int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, |
23 | u64 bytenr, u64 size); | 31 | u64 bytenr, u64 size); |
24 | int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, | 32 | int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, |
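With struct btrfs_free_space now visible in the header, an entry is a plain extent when ->bitmap is NULL and a bitmap entry otherwise; that is the convention the cluster-setup loop above relies on when it skips bitmap entries. A minimal sketch of the test (the helper name is invented for illustration):

        static inline bool btrfs_free_space_is_bitmap(const struct btrfs_free_space *e)
        {
                /* bitmap == NULL means a plain [offset, offset + bytes) extent */
                return e->bitmap != NULL;
        }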
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 791eab19e330..56fe83fa60c4 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -2603,8 +2603,8 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, | |||
2603 | if (root->ref_cows) | 2603 | if (root->ref_cows) |
2604 | btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); | 2604 | btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); |
2605 | path = btrfs_alloc_path(); | 2605 | path = btrfs_alloc_path(); |
2606 | path->reada = -1; | ||
2607 | BUG_ON(!path); | 2606 | BUG_ON(!path); |
2607 | path->reada = -1; | ||
2608 | 2608 | ||
2609 | /* FIXME, add redo link to tree so we don't leak on crash */ | 2609 | /* FIXME, add redo link to tree so we don't leak on crash */ |
2610 | key.objectid = inode->i_ino; | 2610 | key.objectid = inode->i_ino; |
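The inode.c hunk is a pure ordering fix: the allocation has to be checked before any field of the returned path is touched. In short (taken directly from the hunk):

        path = btrfs_alloc_path();
        BUG_ON(!path);          /* verify the allocation first */
        path->reada = -1;       /* only then dereference it */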
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index 6d6523da0a30..0d126be22b63 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c | |||
@@ -309,7 +309,7 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c) | |||
309 | } | 309 | } |
310 | printk(KERN_INFO "node %llu level %d total ptrs %d free spc %u\n", | 310 | printk(KERN_INFO "node %llu level %d total ptrs %d free spc %u\n", |
311 | (unsigned long long)btrfs_header_bytenr(c), | 311 | (unsigned long long)btrfs_header_bytenr(c), |
312 | btrfs_header_level(c), nr, | 312 | level, nr, |
313 | (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr); | 313 | (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr); |
314 | for (i = 0; i < nr; i++) { | 314 | for (i = 0; i < nr; i++) { |
315 | btrfs_node_key_to_cpu(c, &key, i); | 315 | btrfs_node_key_to_cpu(c, &key, i); |
@@ -326,10 +326,10 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c) | |||
326 | btrfs_level_size(root, level - 1), | 326 | btrfs_level_size(root, level - 1), |
327 | btrfs_node_ptr_generation(c, i)); | 327 | btrfs_node_ptr_generation(c, i)); |
328 | if (btrfs_is_leaf(next) && | 328 | if (btrfs_is_leaf(next) && |
329 | btrfs_header_level(c) != 1) | 329 | level != 1) |
330 | BUG(); | 330 | BUG(); |
331 | if (btrfs_header_level(next) != | 331 | if (btrfs_header_level(next) != |
332 | btrfs_header_level(c) - 1) | 332 | level - 1) |
333 | BUG(); | 333 | BUG(); |
334 | btrfs_print_tree(root, next); | 334 | btrfs_print_tree(root, next); |
335 | free_extent_buffer(next); | 335 | free_extent_buffer(next); |
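The print-tree.c changes simply reuse the level value the function already read for this buffer instead of calling btrfs_header_level(c) again; a condensed sketch of the pattern, using only names from the hunk:

        level = btrfs_header_level(c);          /* read once */
        /* ... later, children are validated against the cached value ... */
        if (btrfs_is_leaf(next) && level != 1)
                BUG();
        if (btrfs_header_level(next) != level - 1)
                BUG();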
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 008397934778..e71264d1c2c9 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
@@ -670,6 +670,8 @@ again: | |||
670 | err = ret; | 670 | err = ret; |
671 | goto out; | 671 | goto out; |
672 | } | 672 | } |
673 | if (ret > 0 && path2->slots[level] > 0) | ||
674 | path2->slots[level]--; | ||
673 | 675 | ||
674 | eb = path2->nodes[level]; | 676 | eb = path2->nodes[level]; |
675 | WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) != | 677 | WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) != |
@@ -1609,6 +1611,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, | |||
1609 | BUG_ON(level == 0); | 1611 | BUG_ON(level == 0); |
1610 | path->lowest_level = level; | 1612 | path->lowest_level = level; |
1611 | ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0); | 1613 | ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0); |
1614 | path->lowest_level = 0; | ||
1612 | if (ret < 0) { | 1615 | if (ret < 0) { |
1613 | btrfs_free_path(path); | 1616 | btrfs_free_path(path); |
1614 | return ret; | 1617 | return ret; |
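path->lowest_level tells btrfs_search_slot() how deep to descend, so the added line restores the default right after the search; otherwise the same path could carry the non-zero setting into later lookups. The pattern as applied above:

        path->lowest_level = level;
        ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
        path->lowest_level = 0;         /* reset before the path is reused */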
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 2dbf1c1f56ee..e51d2bc532f8 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -40,6 +40,14 @@ static noinline void put_transaction(struct btrfs_transaction *transaction) | |||
40 | } | 40 | } |
41 | } | 41 | } |
42 | 42 | ||
43 | static noinline void switch_commit_root(struct btrfs_root *root) | ||
44 | { | ||
45 | down_write(&root->commit_root_sem); | ||
46 | free_extent_buffer(root->commit_root); | ||
47 | root->commit_root = btrfs_root_node(root); | ||
48 | up_write(&root->commit_root_sem); | ||
49 | } | ||
50 | |||
43 | /* | 51 | /* |
44 | * either allocate a new transaction or hop into the existing one | 52 | * either allocate a new transaction or hop into the existing one |
45 | */ | 53 | */ |
@@ -444,9 +452,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, | |||
444 | 452 | ||
445 | btrfs_write_dirty_block_groups(trans, root); | 453 | btrfs_write_dirty_block_groups(trans, root); |
446 | 454 | ||
447 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); | ||
448 | BUG_ON(ret); | ||
449 | |||
450 | while (1) { | 455 | while (1) { |
451 | old_root_bytenr = btrfs_root_bytenr(&root->root_item); | 456 | old_root_bytenr = btrfs_root_bytenr(&root->root_item); |
452 | if (old_root_bytenr == root->node->start) | 457 | if (old_root_bytenr == root->node->start) |
@@ -457,13 +462,11 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, | |||
457 | &root->root_key, | 462 | &root->root_key, |
458 | &root->root_item); | 463 | &root->root_item); |
459 | BUG_ON(ret); | 464 | BUG_ON(ret); |
460 | btrfs_write_dirty_block_groups(trans, root); | ||
461 | 465 | ||
462 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); | 466 | ret = btrfs_write_dirty_block_groups(trans, root); |
463 | BUG_ON(ret); | 467 | BUG_ON(ret); |
464 | } | 468 | } |
465 | free_extent_buffer(root->commit_root); | 469 | switch_commit_root(root); |
466 | root->commit_root = btrfs_root_node(root); | ||
467 | return 0; | 470 | return 0; |
468 | } | 471 | } |
469 | 472 | ||
@@ -495,9 +498,6 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, | |||
495 | root = list_entry(next, struct btrfs_root, dirty_list); | 498 | root = list_entry(next, struct btrfs_root, dirty_list); |
496 | 499 | ||
497 | update_cowonly_root(trans, root); | 500 | update_cowonly_root(trans, root); |
498 | |||
499 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); | ||
500 | BUG_ON(ret); | ||
501 | } | 501 | } |
502 | return 0; | 502 | return 0; |
503 | } | 503 | } |
@@ -544,8 +544,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, | |||
544 | btrfs_update_reloc_root(trans, root); | 544 | btrfs_update_reloc_root(trans, root); |
545 | 545 | ||
546 | if (root->commit_root != root->node) { | 546 | if (root->commit_root != root->node) { |
547 | free_extent_buffer(root->commit_root); | 547 | switch_commit_root(root); |
548 | root->commit_root = btrfs_root_node(root); | ||
549 | btrfs_set_root_node(&root->root_item, | 548 | btrfs_set_root_node(&root->root_item, |
550 | root->node); | 549 | root->node); |
551 | } | 550 | } |
@@ -943,9 +942,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
943 | 942 | ||
944 | mutex_unlock(&root->fs_info->trans_mutex); | 943 | mutex_unlock(&root->fs_info->trans_mutex); |
945 | 944 | ||
946 | if (flush_on_commit || snap_pending) { | 945 | if (flush_on_commit) { |
947 | if (flush_on_commit) | 946 | btrfs_start_delalloc_inodes(root); |
948 | btrfs_start_delalloc_inodes(root); | 947 | ret = btrfs_wait_ordered_extents(root, 0); |
948 | BUG_ON(ret); | ||
949 | } else if (snap_pending) { | ||
949 | ret = btrfs_wait_ordered_extents(root, 1); | 950 | ret = btrfs_wait_ordered_extents(root, 1); |
950 | BUG_ON(ret); | 951 | BUG_ON(ret); |
951 | } | 952 | } |
@@ -1009,15 +1010,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
1009 | 1010 | ||
1010 | btrfs_set_root_node(&root->fs_info->tree_root->root_item, | 1011 | btrfs_set_root_node(&root->fs_info->tree_root->root_item, |
1011 | root->fs_info->tree_root->node); | 1012 | root->fs_info->tree_root->node); |
1012 | free_extent_buffer(root->fs_info->tree_root->commit_root); | 1013 | switch_commit_root(root->fs_info->tree_root); |
1013 | root->fs_info->tree_root->commit_root = | ||
1014 | btrfs_root_node(root->fs_info->tree_root); | ||
1015 | 1014 | ||
1016 | btrfs_set_root_node(&root->fs_info->chunk_root->root_item, | 1015 | btrfs_set_root_node(&root->fs_info->chunk_root->root_item, |
1017 | root->fs_info->chunk_root->node); | 1016 | root->fs_info->chunk_root->node); |
1018 | free_extent_buffer(root->fs_info->chunk_root->commit_root); | 1017 | switch_commit_root(root->fs_info->chunk_root); |
1019 | root->fs_info->chunk_root->commit_root = | ||
1020 | btrfs_root_node(root->fs_info->chunk_root); | ||
1021 | 1018 | ||
1022 | update_super_roots(root); | 1019 | update_super_roots(root); |
1023 | 1020 | ||
@@ -1057,6 +1054,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
1057 | cur_trans->commit_done = 1; | 1054 | cur_trans->commit_done = 1; |
1058 | 1055 | ||
1059 | root->fs_info->last_trans_committed = cur_trans->transid; | 1056 | root->fs_info->last_trans_committed = cur_trans->transid; |
1057 | |||
1060 | wake_up(&cur_trans->commit_wait); | 1058 | wake_up(&cur_trans->commit_wait); |
1061 | 1059 | ||
1062 | put_transaction(cur_trans); | 1060 | put_transaction(cur_trans); |
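Every transaction.c hunk above replaces the same open-coded two lines with the new switch_commit_root() helper, which also takes commit_root_sem so readers of the commit root are excluded while it is swapped. The refactor in a nutshell:

        /* before: open-coded at each call site */
        free_extent_buffer(root->commit_root);
        root->commit_root = btrfs_root_node(root);

        /* after: one helper, done under the commit_root_sem write lock */
        switch_commit_root(root);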
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index c13922206d1b..d91b0de7c502 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
@@ -797,7 +797,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, | |||
797 | return -ENOENT; | 797 | return -ENOENT; |
798 | 798 | ||
799 | inode = read_one_inode(root, key->objectid); | 799 | inode = read_one_inode(root, key->objectid); |
800 | BUG_ON(!dir); | 800 | BUG_ON(!inode); |
801 | 801 | ||
802 | ref_ptr = btrfs_item_ptr_offset(eb, slot); | 802 | ref_ptr = btrfs_item_ptr_offset(eb, slot); |
803 | ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); | 803 | ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 3ab80e9cd767..5dbefd11b4af 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -721,7 +721,8 @@ error: | |||
721 | */ | 721 | */ |
722 | static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans, | 722 | static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans, |
723 | struct btrfs_device *device, | 723 | struct btrfs_device *device, |
724 | u64 num_bytes, u64 *start) | 724 | u64 num_bytes, u64 *start, |
725 | u64 *max_avail) | ||
725 | { | 726 | { |
726 | struct btrfs_key key; | 727 | struct btrfs_key key; |
727 | struct btrfs_root *root = device->dev_root; | 728 | struct btrfs_root *root = device->dev_root; |
@@ -758,9 +759,13 @@ static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans, | |||
758 | ret = btrfs_search_slot(trans, root, &key, path, 0, 0); | 759 | ret = btrfs_search_slot(trans, root, &key, path, 0, 0); |
759 | if (ret < 0) | 760 | if (ret < 0) |
760 | goto error; | 761 | goto error; |
761 | ret = btrfs_previous_item(root, path, 0, key.type); | 762 | if (ret > 0) { |
762 | if (ret < 0) | 763 | ret = btrfs_previous_item(root, path, key.objectid, key.type); |
763 | goto error; | 764 | if (ret < 0) |
765 | goto error; | ||
766 | if (ret > 0) | ||
767 | start_found = 1; | ||
768 | } | ||
764 | l = path->nodes[0]; | 769 | l = path->nodes[0]; |
765 | btrfs_item_key_to_cpu(l, &key, path->slots[0]); | 770 | btrfs_item_key_to_cpu(l, &key, path->slots[0]); |
766 | while (1) { | 771 | while (1) { |
@@ -803,6 +808,10 @@ no_more_items: | |||
803 | if (last_byte < search_start) | 808 | if (last_byte < search_start) |
804 | last_byte = search_start; | 809 | last_byte = search_start; |
805 | hole_size = key.offset - last_byte; | 810 | hole_size = key.offset - last_byte; |
811 | |||
812 | if (hole_size > *max_avail) | ||
813 | *max_avail = hole_size; | ||
814 | |||
806 | if (key.offset > last_byte && | 815 | if (key.offset > last_byte && |
807 | hole_size >= num_bytes) { | 816 | hole_size >= num_bytes) { |
808 | *start = last_byte; | 817 | *start = last_byte; |
@@ -1621,6 +1630,7 @@ static int __btrfs_grow_device(struct btrfs_trans_handle *trans, | |||
1621 | device->fs_devices->total_rw_bytes += diff; | 1630 | device->fs_devices->total_rw_bytes += diff; |
1622 | 1631 | ||
1623 | device->total_bytes = new_size; | 1632 | device->total_bytes = new_size; |
1633 | device->disk_total_bytes = new_size; | ||
1624 | btrfs_clear_space_info_full(device->dev_root->fs_info); | 1634 | btrfs_clear_space_info_full(device->dev_root->fs_info); |
1625 | 1635 | ||
1626 | return btrfs_update_device(trans, device); | 1636 | return btrfs_update_device(trans, device); |
@@ -2007,7 +2017,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) | |||
2007 | goto done; | 2017 | goto done; |
2008 | if (ret) { | 2018 | if (ret) { |
2009 | ret = 0; | 2019 | ret = 0; |
2010 | goto done; | 2020 | break; |
2011 | } | 2021 | } |
2012 | 2022 | ||
2013 | l = path->nodes[0]; | 2023 | l = path->nodes[0]; |
@@ -2015,7 +2025,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) | |||
2015 | btrfs_item_key_to_cpu(l, &key, path->slots[0]); | 2025 | btrfs_item_key_to_cpu(l, &key, path->slots[0]); |
2016 | 2026 | ||
2017 | if (key.objectid != device->devid) | 2027 | if (key.objectid != device->devid) |
2018 | goto done; | 2028 | break; |
2019 | 2029 | ||
2020 | dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); | 2030 | dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); |
2021 | length = btrfs_dev_extent_length(l, dev_extent); | 2031 | length = btrfs_dev_extent_length(l, dev_extent); |
@@ -2171,6 +2181,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, | |||
2171 | max_chunk_size); | 2181 | max_chunk_size); |
2172 | 2182 | ||
2173 | again: | 2183 | again: |
2184 | max_avail = 0; | ||
2174 | if (!map || map->num_stripes != num_stripes) { | 2185 | if (!map || map->num_stripes != num_stripes) { |
2175 | kfree(map); | 2186 | kfree(map); |
2176 | map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); | 2187 | map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); |
@@ -2219,7 +2230,8 @@ again: | |||
2219 | 2230 | ||
2220 | if (device->in_fs_metadata && avail >= min_free) { | 2231 | if (device->in_fs_metadata && avail >= min_free) { |
2221 | ret = find_free_dev_extent(trans, device, | 2232 | ret = find_free_dev_extent(trans, device, |
2222 | min_free, &dev_offset); | 2233 | min_free, &dev_offset, |
2234 | &max_avail); | ||
2223 | if (ret == 0) { | 2235 | if (ret == 0) { |
2224 | list_move_tail(&device->dev_alloc_list, | 2236 | list_move_tail(&device->dev_alloc_list, |
2225 | &private_devs); | 2237 | &private_devs); |
@@ -2795,26 +2807,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, | |||
2795 | } | 2807 | } |
2796 | } | 2808 | } |
2797 | 2809 | ||
2798 | for (i = 0; i > nr; i++) { | ||
2799 | struct btrfs_multi_bio *multi; | ||
2800 | struct btrfs_bio_stripe *stripe; | ||
2801 | int ret; | ||
2802 | |||
2803 | length = 1; | ||
2804 | ret = btrfs_map_block(map_tree, WRITE, buf[i], | ||
2805 | &length, &multi, 0); | ||
2806 | BUG_ON(ret); | ||
2807 | |||
2808 | stripe = multi->stripes; | ||
2809 | for (j = 0; j < multi->num_stripes; j++) { | ||
2810 | if (stripe->physical >= physical && | ||
2811 | physical < stripe->physical + length) | ||
2812 | break; | ||
2813 | } | ||
2814 | BUG_ON(j >= multi->num_stripes); | ||
2815 | kfree(multi); | ||
2816 | } | ||
2817 | |||
2818 | *logical = buf; | 2810 | *logical = buf; |
2819 | *naddrs = nr; | 2811 | *naddrs = nr; |
2820 | *stripe_len = map->stripe_len; | 2812 | *stripe_len = map->stripe_len; |
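find_free_dev_extent() grew a max_avail out-parameter that records the largest hole it saw even when nothing satisfied num_bytes, and __btrfs_alloc_chunk() zeroes it at the top of every "again:" pass before probing devices. A sketch of the updated call built only from the hunks above (how max_avail is consumed afterwards is outside this diff):

        u64 dev_offset;
        u64 max_avail = 0;      /* reset on every allocation retry */

        ret = find_free_dev_extent(trans, device, min_free,
                                   &dev_offset, &max_avail);
        /* even on failure, max_avail reports the biggest free hole seen */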
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index af737bb56cb7..259525c9abb8 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c | |||
@@ -1303,6 +1303,13 @@ parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat, | |||
1303 | } | 1303 | } |
1304 | (*new_auth_tok)->session_key.encrypted_key_size = | 1304 | (*new_auth_tok)->session_key.encrypted_key_size = |
1305 | (body_size - (ECRYPTFS_SALT_SIZE + 5)); | 1305 | (body_size - (ECRYPTFS_SALT_SIZE + 5)); |
1306 | if ((*new_auth_tok)->session_key.encrypted_key_size | ||
1307 | > ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES) { | ||
1308 | printk(KERN_WARNING "Tag 3 packet contains key larger " | ||
1309 | "than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES\n"); | ||
1310 | rc = -EINVAL; | ||
1311 | goto out_free; | ||
1312 | } | ||
1306 | if (unlikely(data[(*packet_size)++] != 0x04)) { | 1313 | if (unlikely(data[(*packet_size)++] != 0x04)) { |
1307 | printk(KERN_WARNING "Unknown version number [%d]\n", | 1314 | printk(KERN_WARNING "Unknown version number [%d]\n", |
1308 | data[(*packet_size) - 1]); | 1315 | data[(*packet_size) - 1]); |
@@ -1449,6 +1456,12 @@ parse_tag_11_packet(unsigned char *data, unsigned char *contents, | |||
1449 | rc = -EINVAL; | 1456 | rc = -EINVAL; |
1450 | goto out; | 1457 | goto out; |
1451 | } | 1458 | } |
1459 | if (unlikely((*tag_11_contents_size) > max_contents_bytes)) { | ||
1460 | printk(KERN_ERR "Literal data section in tag 11 packet exceeds " | ||
1461 | "expected size\n"); | ||
1462 | rc = -EINVAL; | ||
1463 | goto out; | ||
1464 | } | ||
1452 | if (data[(*packet_size)++] != 0x62) { | 1465 | if (data[(*packet_size)++] != 0x62) { |
1453 | printk(KERN_WARNING "Unrecognizable packet\n"); | 1466 | printk(KERN_WARNING "Unrecognizable packet\n"); |
1454 | rc = -EINVAL; | 1467 | rc = -EINVAL; |
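Both eCryptfs checks follow one rule: a size derived from untrusted packet data is validated against the fixed destination buffer before anything is copied. Illustrative sketch of the tag-3 case (key_size is just a local introduced for the sketch; the constants come from the hunk):

        size_t key_size = body_size - (ECRYPTFS_SALT_SIZE + 5);

        if (key_size > ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES)
                return -EINVAL;         /* never trust on-disk/on-wire sizes */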
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index ebb2c417912c..11f0c06316de 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/ramfs.h> | 20 | #include <linux/ramfs.h> |
21 | #include <linux/pagevec.h> | 21 | #include <linux/pagevec.h> |
22 | #include <linux/mman.h> | 22 | #include <linux/mman.h> |
23 | #include <linux/sched.h> | ||
23 | 24 | ||
24 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
25 | #include "internal.h" | 26 | #include "internal.h" |
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index d88d0fac9fa5..14f2d71ea3ce 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c | |||
@@ -939,8 +939,10 @@ again: | |||
939 | /* Remove from old parent's list and insert into new parent's list. */ | 939 | /* Remove from old parent's list and insert into new parent's list. */ |
940 | sysfs_unlink_sibling(sd); | 940 | sysfs_unlink_sibling(sd); |
941 | sysfs_get(new_parent_sd); | 941 | sysfs_get(new_parent_sd); |
942 | drop_nlink(old_parent->d_inode); | ||
942 | sysfs_put(sd->s_parent); | 943 | sysfs_put(sd->s_parent); |
943 | sd->s_parent = new_parent_sd; | 944 | sd->s_parent = new_parent_sd; |
945 | inc_nlink(new_parent->d_inode); | ||
944 | sysfs_link_sibling(sd); | 946 | sysfs_link_sibling(sd); |
945 | 947 | ||
946 | out_unlock: | 948 | out_unlock: |
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h index 41862e9a4c20..af4b4826997e 100644 --- a/include/drm/radeon_drm.h +++ b/include/drm/radeon_drm.h | |||
@@ -506,6 +506,8 @@ typedef struct { | |||
506 | #define DRM_RADEON_GEM_WAIT_IDLE 0x24 | 506 | #define DRM_RADEON_GEM_WAIT_IDLE 0x24 |
507 | #define DRM_RADEON_CS 0x26 | 507 | #define DRM_RADEON_CS 0x26 |
508 | #define DRM_RADEON_INFO 0x27 | 508 | #define DRM_RADEON_INFO 0x27 |
509 | #define DRM_RADEON_GEM_SET_TILING 0x28 | ||
510 | #define DRM_RADEON_GEM_GET_TILING 0x29 | ||
509 | 511 | ||
510 | #define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t) | 512 | #define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t) |
511 | #define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START) | 513 | #define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START) |
@@ -544,7 +546,8 @@ typedef struct { | |||
544 | #define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle) | 546 | #define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle) |
545 | #define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs) | 547 | #define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs) |
546 | #define DRM_IOCTL_RADEON_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info) | 548 | #define DRM_IOCTL_RADEON_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info) |
547 | 549 | #define DRM_IOCTL_RADEON_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling) | |
550 | #define DRM_IOCTL_RADEON_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling) | ||
548 | 551 | ||
549 | typedef struct drm_radeon_init { | 552 | typedef struct drm_radeon_init { |
550 | enum { | 553 | enum { |
@@ -796,6 +799,24 @@ struct drm_radeon_gem_create { | |||
796 | uint32_t flags; | 799 | uint32_t flags; |
797 | }; | 800 | }; |
798 | 801 | ||
802 | #define RADEON_TILING_MACRO 0x1 | ||
803 | #define RADEON_TILING_MICRO 0x2 | ||
804 | #define RADEON_TILING_SWAP 0x4 | ||
805 | #define RADEON_TILING_SURFACE 0x8 /* this object requires a surface | ||
806 | * when mapped - i.e. front buffer */ | ||
807 | |||
808 | struct drm_radeon_gem_set_tiling { | ||
809 | uint32_t handle; | ||
810 | uint32_t tiling_flags; | ||
811 | uint32_t pitch; | ||
812 | }; | ||
813 | |||
814 | struct drm_radeon_gem_get_tiling { | ||
815 | uint32_t handle; | ||
816 | uint32_t tiling_flags; | ||
817 | uint32_t pitch; | ||
818 | }; | ||
819 | |||
799 | struct drm_radeon_gem_mmap { | 820 | struct drm_radeon_gem_mmap { |
800 | uint32_t handle; | 821 | uint32_t handle; |
801 | uint32_t pad; | 822 | uint32_t pad; |
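The two new tiling ioctls are ordinary write/read GEM requests, so user space fills the struct defined above and issues the ioctl on the DRM device. A hedged user-space sketch; the fd/handle plumbing and the helper name are assumptions, only the struct, flag, and ioctl names come from this header:

        #include <stdint.h>
        #include <sys/ioctl.h>
        #include "radeon_drm.h"         /* the definitions added above */

        /* assumed: fd is an open DRM node, handle a valid GEM object */
        static int set_macro_tiling(int fd, uint32_t handle, uint32_t pitch)
        {
                struct drm_radeon_gem_set_tiling args = {
                        .handle       = handle,
                        .tiling_flags = RADEON_TILING_MACRO,
                        .pitch        = pitch,
                };

                return ioctl(fd, DRM_IOCTL_RADEON_SET_TILING, &args);
        }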
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 62ed733c52a2..a68829db381a 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h | |||
@@ -121,6 +121,7 @@ struct ttm_backend { | |||
121 | #define TTM_PAGE_FLAG_SWAPPED (1 << 4) | 121 | #define TTM_PAGE_FLAG_SWAPPED (1 << 4) |
122 | #define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5) | 122 | #define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5) |
123 | #define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6) | 123 | #define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6) |
124 | #define TTM_PAGE_FLAG_DMA32 (1 << 7) | ||
124 | 125 | ||
125 | enum ttm_caching_state { | 126 | enum ttm_caching_state { |
126 | tt_uncached, | 127 | tt_uncached, |
@@ -353,6 +354,14 @@ struct ttm_bo_driver { | |||
353 | int (*sync_obj_flush) (void *sync_obj, void *sync_arg); | 354 | int (*sync_obj_flush) (void *sync_obj, void *sync_arg); |
354 | void (*sync_obj_unref) (void **sync_obj); | 355 | void (*sync_obj_unref) (void **sync_obj); |
355 | void *(*sync_obj_ref) (void *sync_obj); | 356 | void *(*sync_obj_ref) (void *sync_obj); |
357 | |||
358 | /* hook to notify driver about a driver move so it | ||
359 | * can do tiling things */ | ||
360 | void (*move_notify)(struct ttm_buffer_object *bo, | ||
361 | struct ttm_mem_reg *new_mem); | ||
362 | /* notify the driver we are taking a fault on this BO | ||
363 | * and have reserved it */ | ||
364 | void (*fault_reserve_notify)(struct ttm_buffer_object *bo); | ||
356 | }; | 365 | }; |
357 | 366 | ||
358 | #define TTM_NUM_MEM_TYPES 8 | 367 | #define TTM_NUM_MEM_TYPES 8 |
@@ -429,6 +438,8 @@ struct ttm_bo_device { | |||
429 | */ | 438 | */ |
430 | 439 | ||
431 | struct delayed_work wq; | 440 | struct delayed_work wq; |
441 | |||
442 | bool need_dma32; | ||
432 | }; | 443 | }; |
433 | 444 | ||
434 | /** | 445 | /** |
@@ -648,7 +659,14 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev); | |||
648 | extern int ttm_bo_device_init(struct ttm_bo_device *bdev, | 659 | extern int ttm_bo_device_init(struct ttm_bo_device *bdev, |
649 | struct ttm_mem_global *mem_glob, | 660 | struct ttm_mem_global *mem_glob, |
650 | struct ttm_bo_driver *driver, | 661 | struct ttm_bo_driver *driver, |
651 | uint64_t file_page_offset); | 662 | uint64_t file_page_offset, bool need_dma32); |
663 | |||
664 | /** | ||
665 | * ttm_bo_unmap_virtual | ||
666 | * | ||
667 | * @bo: tear down the virtual mappings for this BO | ||
668 | */ | ||
669 | extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); | ||
652 | 670 | ||
653 | /** | 671 | /** |
654 | * ttm_bo_reserve: | 672 | * ttm_bo_reserve: |
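The two new hooks are optional driver callbacks: move_notify fires when a buffer changes placement (so tiling/aperture state can follow it) and fault_reserve_notify fires once a BO has been reserved for a CPU fault. A hedged sketch of how a driver might wire them up; the mydrv_* names are invented, only the signatures come from the struct above:

        static void mydrv_move_notify(struct ttm_buffer_object *bo,
                                      struct ttm_mem_reg *new_mem)
        {
                /* placement changed: update per-BO tiling/aperture bookkeeping */
        }

        static void mydrv_fault_reserve_notify(struct ttm_buffer_object *bo)
        {
                /* BO is reserved for the fault: make it CPU-mappable if needed */
        }

        static struct ttm_bo_driver mydrv_bo_driver = {
                /* ... existing mandatory hooks ... */
                .move_notify          = mydrv_move_notify,
                .fault_reserve_notify = mydrv_fault_reserve_notify,
        };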
diff --git a/include/drm/ttm/ttm_module.h b/include/drm/ttm/ttm_module.h index 889a4c7958ae..d1d433834e4f 100644 --- a/include/drm/ttm/ttm_module.h +++ b/include/drm/ttm/ttm_module.h | |||
@@ -33,7 +33,7 @@ | |||
33 | 33 | ||
34 | #include <linux/kernel.h> | 34 | #include <linux/kernel.h> |
35 | 35 | ||
36 | #define TTM_PFX "[TTM]" | 36 | #define TTM_PFX "[TTM] " |
37 | 37 | ||
38 | enum ttm_global_types { | 38 | enum ttm_global_types { |
39 | TTM_GLOBAL_TTM_MEM = 0, | 39 | TTM_GLOBAL_TTM_MEM = 0, |
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 665fa70e4094..90bba9e62286 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h | |||
@@ -179,14 +179,11 @@ struct cgroup { | |||
179 | */ | 179 | */ |
180 | struct list_head release_list; | 180 | struct list_head release_list; |
181 | 181 | ||
182 | /* pids_mutex protects the fields below */ | 182 | /* pids_mutex protects pids_list and cached pid arrays. */ |
183 | struct rw_semaphore pids_mutex; | 183 | struct rw_semaphore pids_mutex; |
184 | /* Array of process ids in the cgroup */ | 184 | |
185 | pid_t *tasks_pids; | 185 | /* Linked list of struct cgroup_pids */ |
186 | /* How many files are using the current tasks_pids array */ | 186 | struct list_head pids_list; |
187 | int pids_use_count; | ||
188 | /* Length of the current tasks_pids array */ | ||
189 | int pids_length; | ||
190 | 187 | ||
191 | /* For RCU-protected deletion */ | 188 | /* For RCU-protected deletion */ |
192 | struct rcu_head rcu_head; | 189 | struct rcu_head rcu_head; |
@@ -366,6 +363,23 @@ int cgroup_task_count(const struct cgroup *cgrp); | |||
366 | int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task); | 363 | int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task); |
367 | 364 | ||
368 | /* | 365 | /* |
366 | * When the subsys has to access css and may add permanent refcnt to css, | ||
367 | * it should take care of racy conditions with rmdir(). Following set of | ||
368 | * functions, is for stop/restart rmdir if necessary. | ||
369 | * Because these will call css_get/put, "css" should be alive css. | ||
370 | * | ||
371 | * cgroup_exclude_rmdir(); | ||
372 | * ...do some jobs which may access arbitrary empty cgroup | ||
373 | * cgroup_release_and_wakeup_rmdir(); | ||
374 | * | ||
375 | * When someone removes a cgroup while cgroup_exclude_rmdir() holds it, | ||
376 | * it sleeps and cgroup_release_and_wakeup_rmdir() will wake it up. | ||
377 | */ | ||
378 | |||
379 | void cgroup_exclude_rmdir(struct cgroup_subsys_state *css); | ||
380 | void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css); | ||
381 | |||
382 | /* | ||
369 | * Control Group subsystem type. | 383 | * Control Group subsystem type. |
370 | * See Documentation/cgroups/cgroups.txt for details | 384 | * See Documentation/cgroups/cgroups.txt for details |
371 | */ | 385 | */ |
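The comment block above spells out the calling convention; as a compact sketch, a subsystem that must touch a possibly-empty cgroup without racing rmdir() brackets the work like this (css is assumed to be a live cgroup_subsys_state, as the comment requires):

        cgroup_exclude_rmdir(css);              /* pins css; rmdir() will wait */

        /* ... work that may access an otherwise-empty cgroup ... */

        cgroup_release_and_wakeup_rmdir(css);   /* drop the ref, wake rmdir() */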
diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h new file mode 100644 index 000000000000..23c1ec79a31b --- /dev/null +++ b/include/linux/flex_array.h | |||
@@ -0,0 +1,47 @@ | |||
1 | #ifndef _FLEX_ARRAY_H | ||
2 | #define _FLEX_ARRAY_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <asm/page.h> | ||
6 | |||
7 | #define FLEX_ARRAY_PART_SIZE PAGE_SIZE | ||
8 | #define FLEX_ARRAY_BASE_SIZE PAGE_SIZE | ||
9 | |||
10 | struct flex_array_part; | ||
11 | |||
12 | /* | ||
13 | * This is meant to replace cases where an array-like | ||
14 | * structure has gotten too big to fit into kmalloc() | ||
15 | * and the developer is getting tempted to use | ||
16 | * vmalloc(). | ||
17 | */ | ||
18 | |||
19 | struct flex_array { | ||
20 | union { | ||
21 | struct { | ||
22 | int element_size; | ||
23 | int total_nr_elements; | ||
24 | struct flex_array_part *parts[0]; | ||
25 | }; | ||
26 | /* | ||
27 | * This little trick makes sure that | ||
28 | * sizeof(flex_array) == PAGE_SIZE | ||
29 | */ | ||
30 | char padding[FLEX_ARRAY_BASE_SIZE]; | ||
31 | }; | ||
32 | }; | ||
33 | |||
34 | #define FLEX_ARRAY_INIT(size, total) { { {\ | ||
35 | .element_size = (size), \ | ||
36 | .total_nr_elements = (total), \ | ||
37 | } } } | ||
38 | |||
39 | struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags); | ||
40 | int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags); | ||
41 | void flex_array_free(struct flex_array *fa); | ||
42 | void flex_array_free_parts(struct flex_array *fa); | ||
43 | int flex_array_put(struct flex_array *fa, int element_nr, void *src, | ||
44 | gfp_t flags); | ||
45 | void *flex_array_get(struct flex_array *fa, int element_nr); | ||
46 | |||
47 | #endif /* _FLEX_ARRAY_H */ | ||
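A hedged usage sketch of the new API, built only from the prototypes above; the element type, index, and element count are arbitrary and the demo function is invented:

        #include <linux/flex_array.h>
        #include <linux/gfp.h>

        struct item { u64 key; u64 val; };

        static u64 flex_array_demo(void)
        {
                struct flex_array *fa;
                struct item tmp = { .key = 1, .val = 2 };
                struct item *p = NULL;

                fa = flex_array_alloc(sizeof(struct item), 1000, GFP_KERNEL);
                if (!fa)
                        return 0;

                if (!flex_array_put(fa, 123, &tmp, GFP_KERNEL)) /* copies tmp in */
                        p = flex_array_get(fa, 123);    /* points into a part page */

                tmp.val = p ? p->val : 0;
                flex_array_free(fa);    /* frees the parts and the base page */
                return tmp.val;
        }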
diff --git a/include/linux/fs.h b/include/linux/fs.h index 0872372184fe..a36ffa5a77a4 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -1946,6 +1946,7 @@ extern void putname(const char *name); | |||
1946 | extern int register_blkdev(unsigned int, const char *); | 1946 | extern int register_blkdev(unsigned int, const char *); |
1947 | extern void unregister_blkdev(unsigned int, const char *); | 1947 | extern void unregister_blkdev(unsigned int, const char *); |
1948 | extern struct block_device *bdget(dev_t); | 1948 | extern struct block_device *bdget(dev_t); |
1949 | extern struct block_device *bdgrab(struct block_device *bdev); | ||
1949 | extern void bd_set_size(struct block_device *, loff_t size); | 1950 | extern void bd_set_size(struct block_device *, loff_t size); |
1950 | extern void bd_forget(struct inode *inode); | 1951 | extern void bd_forget(struct inode *inode); |
1951 | extern void bdput(struct block_device *); | 1952 | extern void bdput(struct block_device *); |
diff --git a/include/linux/lguest.h b/include/linux/lguest.h index dbf2479e808e..2fb1dcbcb5aa 100644 --- a/include/linux/lguest.h +++ b/include/linux/lguest.h | |||
@@ -1,5 +1,7 @@ | |||
1 | /* Things the lguest guest needs to know. Note: like all lguest interfaces, | 1 | /* |
2 | * this is subject to wild and random change between versions. */ | 2 | * Things the lguest guest needs to know. Note: like all lguest interfaces, |
3 | * this is subject to wild and random change between versions. | ||
4 | */ | ||
3 | #ifndef _LINUX_LGUEST_H | 5 | #ifndef _LINUX_LGUEST_H |
4 | #define _LINUX_LGUEST_H | 6 | #define _LINUX_LGUEST_H |
5 | 7 | ||
@@ -11,32 +13,41 @@ | |||
11 | #define LG_CLOCK_MIN_DELTA 100UL | 13 | #define LG_CLOCK_MIN_DELTA 100UL |
12 | #define LG_CLOCK_MAX_DELTA ULONG_MAX | 14 | #define LG_CLOCK_MAX_DELTA ULONG_MAX |
13 | 15 | ||
14 | /*G:031 The second method of communicating with the Host is to via "struct | 16 | /*G:031 |
17 | * The second method of communicating with the Host is to via "struct | ||
15 | * lguest_data". Once the Guest's initialization hypercall tells the Host where | 18 | * lguest_data". Once the Guest's initialization hypercall tells the Host where |
16 | * this is, the Guest and Host both publish information in it. :*/ | 19 | * this is, the Guest and Host both publish information in it. |
17 | struct lguest_data | 20 | :*/ |
18 | { | 21 | struct lguest_data { |
19 | /* 512 == enabled (same as eflags in normal hardware). The Guest | 22 | /* |
20 | * changes interrupts so often that a hypercall is too slow. */ | 23 | * 512 == enabled (same as eflags in normal hardware). The Guest |
24 | * changes interrupts so often that a hypercall is too slow. | ||
25 | */ | ||
21 | unsigned int irq_enabled; | 26 | unsigned int irq_enabled; |
22 | /* Fine-grained interrupt disabling by the Guest */ | 27 | /* Fine-grained interrupt disabling by the Guest */ |
23 | DECLARE_BITMAP(blocked_interrupts, LGUEST_IRQS); | 28 | DECLARE_BITMAP(blocked_interrupts, LGUEST_IRQS); |
24 | 29 | ||
25 | /* The Host writes the virtual address of the last page fault here, | 30 | /* |
31 | * The Host writes the virtual address of the last page fault here, | ||
26 | * which saves the Guest a hypercall. CR2 is the native register where | 32 | * which saves the Guest a hypercall. CR2 is the native register where |
27 | * this address would normally be found. */ | 33 | * this address would normally be found. |
34 | */ | ||
28 | unsigned long cr2; | 35 | unsigned long cr2; |
29 | 36 | ||
30 | /* Wallclock time set by the Host. */ | 37 | /* Wallclock time set by the Host. */ |
31 | struct timespec time; | 38 | struct timespec time; |
32 | 39 | ||
33 | /* Interrupt pending set by the Host. The Guest should do a hypercall | 40 | /* |
34 | * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF). */ | 41 | * Interrupt pending set by the Host. The Guest should do a hypercall |
42 | * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF). | ||
43 | */ | ||
35 | int irq_pending; | 44 | int irq_pending; |
36 | 45 | ||
37 | /* Async hypercall ring. Instead of directly making hypercalls, we can | 46 | /* |
47 | * Async hypercall ring. Instead of directly making hypercalls, we can | ||
38 | * place them in here for processing the next time the Host wants. | 48 | * place them in here for processing the next time the Host wants. |
39 | * This batching can be quite efficient. */ | 49 | * This batching can be quite efficient. |
50 | */ | ||
40 | 51 | ||
41 | /* 0xFF == done (set by Host), 0 == pending (set by Guest). */ | 52 | /* 0xFF == done (set by Host), 0 == pending (set by Guest). */ |
42 | u8 hcall_status[LHCALL_RING_SIZE]; | 53 | u8 hcall_status[LHCALL_RING_SIZE]; |
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h index bfefbdf7498a..495203ff221c 100644 --- a/include/linux/lguest_launcher.h +++ b/include/linux/lguest_launcher.h | |||
@@ -29,8 +29,10 @@ struct lguest_device_desc { | |||
29 | __u8 type; | 29 | __u8 type; |
30 | /* The number of virtqueues (first in config array) */ | 30 | /* The number of virtqueues (first in config array) */ |
31 | __u8 num_vq; | 31 | __u8 num_vq; |
32 | /* The number of bytes of feature bits. Multiply by 2: one for host | 32 | /* |
33 | * features and one for Guest acknowledgements. */ | 33 | * The number of bytes of feature bits. Multiply by 2: one for host |
34 | * features and one for Guest acknowledgements. | ||
35 | */ | ||
34 | __u8 feature_len; | 36 | __u8 feature_len; |
35 | /* The number of bytes of the config array after virtqueues. */ | 37 | /* The number of bytes of the config array after virtqueues. */ |
36 | __u8 config_len; | 38 | __u8 config_len; |
@@ -39,8 +41,10 @@ struct lguest_device_desc { | |||
39 | __u8 config[0]; | 41 | __u8 config[0]; |
40 | }; | 42 | }; |
41 | 43 | ||
42 | /*D:135 This is how we expect the device configuration field for a virtqueue | 44 | /*D:135 |
43 | * to be laid out in config space. */ | 45 | * This is how we expect the device configuration field for a virtqueue |
46 | * to be laid out in config space. | ||
47 | */ | ||
44 | struct lguest_vqconfig { | 48 | struct lguest_vqconfig { |
45 | /* The number of entries in the virtio_ring */ | 49 | /* The number of entries in the virtio_ring */ |
46 | __u16 num; | 50 | __u16 num; |
@@ -61,7 +65,9 @@ enum lguest_req | |||
61 | LHREQ_EVENTFD, /* + address, fd. */ | 65 | LHREQ_EVENTFD, /* + address, fd. */ |
62 | }; | 66 | }; |
63 | 67 | ||
64 | /* The alignment to use between consumer and producer parts of vring. | 68 | /* |
65 | * x86 pagesize for historical reasons. */ | 69 | * The alignment to use between consumer and producer parts of vring. |
70 | * x86 pagesize for historical reasons. | ||
71 | */ | ||
66 | #define LGUEST_VRING_ALIGN 4096 | 72 | #define LGUEST_VRING_ALIGN 4096 |
67 | #endif /* _LINUX_LGUEST_LAUNCHER */ | 73 | #endif /* _LINUX_LGUEST_LAUNCHER */ |
diff --git a/include/linux/libata.h b/include/linux/libata.h index 79b6d7fd4ac2..e5b6e33c6571 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -589,6 +589,7 @@ struct ata_device { | |||
589 | #endif | 589 | #endif |
590 | /* n_sector is CLEAR_BEGIN, read comment above CLEAR_BEGIN */ | 590 | /* n_sector is CLEAR_BEGIN, read comment above CLEAR_BEGIN */ |
591 | u64 n_sectors; /* size of device, if ATA */ | 591 | u64 n_sectors; /* size of device, if ATA */ |
592 | u64 n_native_sectors; /* native size, if ATA */ | ||
592 | unsigned int class; /* ATA_DEV_xxx */ | 593 | unsigned int class; /* ATA_DEV_xxx */ |
593 | unsigned long unpark_deadline; | 594 | unsigned long unpark_deadline; |
594 | 595 | ||
diff --git a/include/linux/pps.h b/include/linux/pps.h index cfe5c7214ec6..0194ab06177b 100644 --- a/include/linux/pps.h +++ b/include/linux/pps.h | |||
@@ -22,6 +22,8 @@ | |||
22 | #ifndef _PPS_H_ | 22 | #ifndef _PPS_H_ |
23 | #define _PPS_H_ | 23 | #define _PPS_H_ |
24 | 24 | ||
25 | #include <linux/types.h> | ||
26 | |||
25 | #define PPS_VERSION "5.3.6" | 27 | #define PPS_VERSION "5.3.6" |
26 | #define PPS_MAX_SOURCES 16 /* should be enough... */ | 28 | #define PPS_MAX_SOURCES 16 /* should be enough... */ |
27 | 29 | ||
diff --git a/include/linux/tty.h b/include/linux/tty.h index 1488d8c81aac..e8c6c9136c97 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
@@ -394,6 +394,7 @@ extern void __do_SAK(struct tty_struct *tty); | |||
394 | extern void disassociate_ctty(int priv); | 394 | extern void disassociate_ctty(int priv); |
395 | extern void no_tty(void); | 395 | extern void no_tty(void); |
396 | extern void tty_flip_buffer_push(struct tty_struct *tty); | 396 | extern void tty_flip_buffer_push(struct tty_struct *tty); |
397 | extern void tty_flush_to_ldisc(struct tty_struct *tty); | ||
397 | extern void tty_buffer_free_all(struct tty_struct *tty); | 398 | extern void tty_buffer_free_all(struct tty_struct *tty); |
398 | extern void tty_buffer_flush(struct tty_struct *tty); | 399 | extern void tty_buffer_flush(struct tty_struct *tty); |
399 | extern void tty_buffer_init(struct tty_struct *tty); | 400 | extern void tty_buffer_init(struct tty_struct *tty); |
diff --git a/include/linux/uio.h b/include/linux/uio.h index b7fe13883bdb..98c114323a8b 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h | |||
@@ -19,15 +19,6 @@ struct iovec | |||
19 | __kernel_size_t iov_len; /* Must be size_t (1003.1g) */ | 19 | __kernel_size_t iov_len; /* Must be size_t (1003.1g) */ |
20 | }; | 20 | }; |
21 | 21 | ||
22 | #ifdef __KERNEL__ | ||
23 | |||
24 | struct kvec { | ||
25 | void *iov_base; /* and that should *never* hold a userland pointer */ | ||
26 | size_t iov_len; | ||
27 | }; | ||
28 | |||
29 | #endif | ||
30 | |||
31 | /* | 22 | /* |
32 | * UIO_MAXIOV shall be at least 16 1003.1g (5.4.1.1) | 23 | * UIO_MAXIOV shall be at least 16 1003.1g (5.4.1.1) |
33 | */ | 24 | */ |
@@ -35,6 +26,13 @@ struct kvec { | |||
35 | #define UIO_FASTIOV 8 | 26 | #define UIO_FASTIOV 8 |
36 | #define UIO_MAXIOV 1024 | 27 | #define UIO_MAXIOV 1024 |
37 | 28 | ||
29 | #ifdef __KERNEL__ | ||
30 | |||
31 | struct kvec { | ||
32 | void *iov_base; /* and that should *never* hold a userland pointer */ | ||
33 | size_t iov_len; | ||
34 | }; | ||
35 | |||
38 | /* | 36 | /* |
39 | * Total number of bytes covered by an iovec. | 37 | * Total number of bytes covered by an iovec. |
40 | * | 38 | * |
@@ -53,5 +51,6 @@ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs) | |||
53 | } | 51 | } |
54 | 52 | ||
55 | unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to); | 53 | unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to); |
54 | #endif | ||
56 | 55 | ||
57 | #endif | 56 | #endif |
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h index be7d255fc7cf..8dab9f2b8832 100644 --- a/include/linux/virtio_blk.h +++ b/include/linux/virtio_blk.h | |||
@@ -20,8 +20,7 @@ | |||
20 | 20 | ||
21 | #define VIRTIO_BLK_ID_BYTES (sizeof(__u16[256])) /* IDENTIFY DATA */ | 21 | #define VIRTIO_BLK_ID_BYTES (sizeof(__u16[256])) /* IDENTIFY DATA */ |
22 | 22 | ||
23 | struct virtio_blk_config | 23 | struct virtio_blk_config { |
24 | { | ||
25 | /* The capacity (in 512-byte sectors). */ | 24 | /* The capacity (in 512-byte sectors). */ |
26 | __u64 capacity; | 25 | __u64 capacity; |
27 | /* The maximum segment size (if VIRTIO_BLK_F_SIZE_MAX) */ | 26 | /* The maximum segment size (if VIRTIO_BLK_F_SIZE_MAX) */ |
@@ -50,8 +49,7 @@ struct virtio_blk_config | |||
50 | #define VIRTIO_BLK_T_BARRIER 0x80000000 | 49 | #define VIRTIO_BLK_T_BARRIER 0x80000000 |
51 | 50 | ||
52 | /* This is the first element of the read scatter-gather list. */ | 51 | /* This is the first element of the read scatter-gather list. */ |
53 | struct virtio_blk_outhdr | 52 | struct virtio_blk_outhdr { |
54 | { | ||
55 | /* VIRTIO_BLK_T* */ | 53 | /* VIRTIO_BLK_T* */ |
56 | __u32 type; | 54 | __u32 type; |
57 | /* io priority. */ | 55 | /* io priority. */ |
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 99f514575f6a..e547e3c8ee9a 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h | |||
@@ -79,8 +79,7 @@ | |||
79 | * the dev->feature bits if it wants. | 79 | * the dev->feature bits if it wants. |
80 | */ | 80 | */ |
81 | typedef void vq_callback_t(struct virtqueue *); | 81 | typedef void vq_callback_t(struct virtqueue *); |
82 | struct virtio_config_ops | 82 | struct virtio_config_ops { |
83 | { | ||
84 | void (*get)(struct virtio_device *vdev, unsigned offset, | 83 | void (*get)(struct virtio_device *vdev, unsigned offset, |
85 | void *buf, unsigned len); | 84 | void *buf, unsigned len); |
86 | void (*set)(struct virtio_device *vdev, unsigned offset, | 85 | void (*set)(struct virtio_device *vdev, unsigned offset, |
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 9c543d6ac535..d8dd539c9f48 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h | |||
@@ -31,8 +31,7 @@ | |||
31 | 31 | ||
32 | #define VIRTIO_NET_S_LINK_UP 1 /* Link is up */ | 32 | #define VIRTIO_NET_S_LINK_UP 1 /* Link is up */ |
33 | 33 | ||
34 | struct virtio_net_config | 34 | struct virtio_net_config { |
35 | { | ||
36 | /* The config defining mac address (if VIRTIO_NET_F_MAC) */ | 35 | /* The config defining mac address (if VIRTIO_NET_F_MAC) */ |
37 | __u8 mac[6]; | 36 | __u8 mac[6]; |
38 | /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */ | 37 | /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */ |
@@ -41,8 +40,7 @@ struct virtio_net_config | |||
41 | 40 | ||
42 | /* This is the first element of the scatter-gather list. If you don't | 41 | /* This is the first element of the scatter-gather list. If you don't |
43 | * specify GSO or CSUM features, you can simply ignore the header. */ | 42 | * specify GSO or CSUM features, you can simply ignore the header. */ |
44 | struct virtio_net_hdr | 43 | struct virtio_net_hdr { |
45 | { | ||
46 | #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset | 44 | #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset |
47 | __u8 flags; | 45 | __u8 flags; |
48 | #define VIRTIO_NET_HDR_GSO_NONE 0 // Not a GSO frame | 46 | #define VIRTIO_NET_HDR_GSO_NONE 0 // Not a GSO frame |
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index 693e0ec5afa6..e4d144b132b5 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h | |||
@@ -30,8 +30,7 @@ | |||
30 | #define VIRTIO_RING_F_INDIRECT_DESC 28 | 30 | #define VIRTIO_RING_F_INDIRECT_DESC 28 |
31 | 31 | ||
32 | /* Virtio ring descriptors: 16 bytes. These can chain together via "next". */ | 32 | /* Virtio ring descriptors: 16 bytes. These can chain together via "next". */ |
33 | struct vring_desc | 33 | struct vring_desc { |
34 | { | ||
35 | /* Address (guest-physical). */ | 34 | /* Address (guest-physical). */ |
36 | __u64 addr; | 35 | __u64 addr; |
37 | /* Length. */ | 36 | /* Length. */ |
@@ -42,24 +41,21 @@ struct vring_desc | |||
42 | __u16 next; | 41 | __u16 next; |
43 | }; | 42 | }; |
44 | 43 | ||
45 | struct vring_avail | 44 | struct vring_avail { |
46 | { | ||
47 | __u16 flags; | 45 | __u16 flags; |
48 | __u16 idx; | 46 | __u16 idx; |
49 | __u16 ring[]; | 47 | __u16 ring[]; |
50 | }; | 48 | }; |
51 | 49 | ||
52 | /* u32 is used here for ids for padding reasons. */ | 50 | /* u32 is used here for ids for padding reasons. */ |
53 | struct vring_used_elem | 51 | struct vring_used_elem { |
54 | { | ||
55 | /* Index of start of used descriptor chain. */ | 52 | /* Index of start of used descriptor chain. */ |
56 | __u32 id; | 53 | __u32 id; |
57 | /* Total length of the descriptor chain which was used (written to) */ | 54 | /* Total length of the descriptor chain which was used (written to) */ |
58 | __u32 len; | 55 | __u32 len; |
59 | }; | 56 | }; |
60 | 57 | ||
61 | struct vring_used | 58 | struct vring_used { |
62 | { | ||
63 | __u16 flags; | 59 | __u16 flags; |
64 | __u16 idx; | 60 | __u16 idx; |
65 | struct vring_used_elem ring[]; | 61 | struct vring_used_elem ring[]; |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 3737a682cdf5..b6eadfe30e7b 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #include <linux/hash.h> | 47 | #include <linux/hash.h> |
48 | #include <linux/namei.h> | 48 | #include <linux/namei.h> |
49 | #include <linux/smp_lock.h> | 49 | #include <linux/smp_lock.h> |
50 | #include <linux/pid_namespace.h> | ||
50 | 51 | ||
51 | #include <asm/atomic.h> | 52 | #include <asm/atomic.h> |
52 | 53 | ||
@@ -734,16 +735,28 @@ static void cgroup_d_remove_dir(struct dentry *dentry) | |||
734 | * reference to css->refcnt. In general, this refcnt is expected to goes down | 735 | * reference to css->refcnt. In general, this refcnt is expected to goes down |
735 | * to zero, soon. | 736 | * to zero, soon. |
736 | * | 737 | * |
737 | * CGRP_WAIT_ON_RMDIR flag is modified under cgroup's inode->i_mutex; | 738 | * CGRP_WAIT_ON_RMDIR flag is set under cgroup's inode->i_mutex; |
738 | */ | 739 | */ |
739 | DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq); | 740 | DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq); |
740 | 741 | ||
741 | static void cgroup_wakeup_rmdir_waiters(const struct cgroup *cgrp) | 742 | static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp) |
742 | { | 743 | { |
743 | if (unlikely(test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags))) | 744 | if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags))) |
744 | wake_up_all(&cgroup_rmdir_waitq); | 745 | wake_up_all(&cgroup_rmdir_waitq); |
745 | } | 746 | } |
746 | 747 | ||
748 | void cgroup_exclude_rmdir(struct cgroup_subsys_state *css) | ||
749 | { | ||
750 | css_get(css); | ||
751 | } | ||
752 | |||
753 | void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css) | ||
754 | { | ||
755 | cgroup_wakeup_rmdir_waiter(css->cgroup); | ||
756 | css_put(css); | ||
757 | } | ||
758 | |||
759 | |||
747 | static int rebind_subsystems(struct cgroupfs_root *root, | 760 | static int rebind_subsystems(struct cgroupfs_root *root, |
748 | unsigned long final_bits) | 761 | unsigned long final_bits) |
749 | { | 762 | { |
@@ -960,6 +973,7 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp) | |||
960 | INIT_LIST_HEAD(&cgrp->children); | 973 | INIT_LIST_HEAD(&cgrp->children); |
961 | INIT_LIST_HEAD(&cgrp->css_sets); | 974 | INIT_LIST_HEAD(&cgrp->css_sets); |
962 | INIT_LIST_HEAD(&cgrp->release_list); | 975 | INIT_LIST_HEAD(&cgrp->release_list); |
976 | INIT_LIST_HEAD(&cgrp->pids_list); | ||
963 | init_rwsem(&cgrp->pids_mutex); | 977 | init_rwsem(&cgrp->pids_mutex); |
964 | } | 978 | } |
965 | static void init_cgroup_root(struct cgroupfs_root *root) | 979 | static void init_cgroup_root(struct cgroupfs_root *root) |
@@ -1357,7 +1371,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) | |||
1357 | * wake up rmdir() waiter. the rmdir should fail since the cgroup | 1371 | * wake up rmdir() waiter. the rmdir should fail since the cgroup |
1358 | * is no longer empty. | 1372 | * is no longer empty. |
1359 | */ | 1373 | */ |
1360 | cgroup_wakeup_rmdir_waiters(cgrp); | 1374 | cgroup_wakeup_rmdir_waiter(cgrp); |
1361 | return 0; | 1375 | return 0; |
1362 | } | 1376 | } |
1363 | 1377 | ||
@@ -2201,12 +2215,30 @@ err: | |||
2201 | return ret; | 2215 | return ret; |
2202 | } | 2216 | } |
2203 | 2217 | ||
2218 | /* | ||
2219 | * Cache pids for all threads in the same pid namespace that are | ||
2220 | * opening the same "tasks" file. | ||
2221 | */ | ||
2222 | struct cgroup_pids { | ||
2223 | /* The node in cgrp->pids_list */ | ||
2224 | struct list_head list; | ||
2225 | /* The cgroup those pids belong to */ | ||
2226 | struct cgroup *cgrp; | ||
2227 | /* The namespace those pids belong to */ | ||
2228 | struct pid_namespace *ns; | ||
2229 | /* Array of process ids in the cgroup */ | ||
2230 | pid_t *tasks_pids; | ||
2231 | /* How many files are using this tasks_pids array */ | ||
2232 | int use_count; | ||
2233 | /* Length of the current tasks_pids array */ | ||
2234 | int length; | ||
2235 | }; | ||
2236 | |||
2204 | static int cmppid(const void *a, const void *b) | 2237 | static int cmppid(const void *a, const void *b) |
2205 | { | 2238 | { |
2206 | return *(pid_t *)a - *(pid_t *)b; | 2239 | return *(pid_t *)a - *(pid_t *)b; |
2207 | } | 2240 | } |
2208 | 2241 | ||
2209 | |||
2210 | /* | 2242 | /* |
2211 | * seq_file methods for the "tasks" file. The seq_file position is the | 2243 | * seq_file methods for the "tasks" file. The seq_file position is the |
2212 | * next pid to display; the seq_file iterator is a pointer to the pid | 2244 | * next pid to display; the seq_file iterator is a pointer to the pid |
@@ -2221,45 +2253,47 @@ static void *cgroup_tasks_start(struct seq_file *s, loff_t *pos) | |||
2221 | * after a seek to the start). Use a binary-search to find the | 2253 | * after a seek to the start). Use a binary-search to find the |
2222 | * next pid to display, if any | 2254 | * next pid to display, if any |
2223 | */ | 2255 | */ |
2224 | struct cgroup *cgrp = s->private; | 2256 | struct cgroup_pids *cp = s->private; |
2257 | struct cgroup *cgrp = cp->cgrp; | ||
2225 | int index = 0, pid = *pos; | 2258 | int index = 0, pid = *pos; |
2226 | int *iter; | 2259 | int *iter; |
2227 | 2260 | ||
2228 | down_read(&cgrp->pids_mutex); | 2261 | down_read(&cgrp->pids_mutex); |
2229 | if (pid) { | 2262 | if (pid) { |
2230 | int end = cgrp->pids_length; | 2263 | int end = cp->length; |
2231 | 2264 | ||
2232 | while (index < end) { | 2265 | while (index < end) { |
2233 | int mid = (index + end) / 2; | 2266 | int mid = (index + end) / 2; |
2234 | if (cgrp->tasks_pids[mid] == pid) { | 2267 | if (cp->tasks_pids[mid] == pid) { |
2235 | index = mid; | 2268 | index = mid; |
2236 | break; | 2269 | break; |
2237 | } else if (cgrp->tasks_pids[mid] <= pid) | 2270 | } else if (cp->tasks_pids[mid] <= pid) |
2238 | index = mid + 1; | 2271 | index = mid + 1; |
2239 | else | 2272 | else |
2240 | end = mid; | 2273 | end = mid; |
2241 | } | 2274 | } |
2242 | } | 2275 | } |
2243 | /* If we're off the end of the array, we're done */ | 2276 | /* If we're off the end of the array, we're done */ |
2244 | if (index >= cgrp->pids_length) | 2277 | if (index >= cp->length) |
2245 | return NULL; | 2278 | return NULL; |
2246 | /* Update the abstract position to be the actual pid that we found */ | 2279 | /* Update the abstract position to be the actual pid that we found */ |
2247 | iter = cgrp->tasks_pids + index; | 2280 | iter = cp->tasks_pids + index; |
2248 | *pos = *iter; | 2281 | *pos = *iter; |
2249 | return iter; | 2282 | return iter; |
2250 | } | 2283 | } |
2251 | 2284 | ||
2252 | static void cgroup_tasks_stop(struct seq_file *s, void *v) | 2285 | static void cgroup_tasks_stop(struct seq_file *s, void *v) |
2253 | { | 2286 | { |
2254 | struct cgroup *cgrp = s->private; | 2287 | struct cgroup_pids *cp = s->private; |
2288 | struct cgroup *cgrp = cp->cgrp; | ||
2255 | up_read(&cgrp->pids_mutex); | 2289 | up_read(&cgrp->pids_mutex); |
2256 | } | 2290 | } |
2257 | 2291 | ||
2258 | static void *cgroup_tasks_next(struct seq_file *s, void *v, loff_t *pos) | 2292 | static void *cgroup_tasks_next(struct seq_file *s, void *v, loff_t *pos) |
2259 | { | 2293 | { |
2260 | struct cgroup *cgrp = s->private; | 2294 | struct cgroup_pids *cp = s->private; |
2261 | int *p = v; | 2295 | int *p = v; |
2262 | int *end = cgrp->tasks_pids + cgrp->pids_length; | 2296 | int *end = cp->tasks_pids + cp->length; |
2263 | 2297 | ||
2264 | /* | 2298 | /* |
2265 | * Advance to the next pid in the array. If this goes off the | 2299 | * Advance to the next pid in the array. If this goes off the |
@@ -2286,26 +2320,33 @@ static struct seq_operations cgroup_tasks_seq_operations = { | |||
2286 | .show = cgroup_tasks_show, | 2320 | .show = cgroup_tasks_show, |
2287 | }; | 2321 | }; |
2288 | 2322 | ||
2289 | static void release_cgroup_pid_array(struct cgroup *cgrp) | 2323 | static void release_cgroup_pid_array(struct cgroup_pids *cp) |
2290 | { | 2324 | { |
2325 | struct cgroup *cgrp = cp->cgrp; | ||
2326 | |||
2291 | down_write(&cgrp->pids_mutex); | 2327 | down_write(&cgrp->pids_mutex); |
2292 | BUG_ON(!cgrp->pids_use_count); | 2328 | BUG_ON(!cp->use_count); |
2293 | if (!--cgrp->pids_use_count) { | 2329 | if (!--cp->use_count) { |
2294 | kfree(cgrp->tasks_pids); | 2330 | list_del(&cp->list); |
2295 | cgrp->tasks_pids = NULL; | 2331 | put_pid_ns(cp->ns); |
2296 | cgrp->pids_length = 0; | 2332 | kfree(cp->tasks_pids); |
2333 | kfree(cp); | ||
2297 | } | 2334 | } |
2298 | up_write(&cgrp->pids_mutex); | 2335 | up_write(&cgrp->pids_mutex); |
2299 | } | 2336 | } |
2300 | 2337 | ||
2301 | static int cgroup_tasks_release(struct inode *inode, struct file *file) | 2338 | static int cgroup_tasks_release(struct inode *inode, struct file *file) |
2302 | { | 2339 | { |
2303 | struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); | 2340 | struct seq_file *seq; |
2341 | struct cgroup_pids *cp; | ||
2304 | 2342 | ||
2305 | if (!(file->f_mode & FMODE_READ)) | 2343 | if (!(file->f_mode & FMODE_READ)) |
2306 | return 0; | 2344 | return 0; |
2307 | 2345 | ||
2308 | release_cgroup_pid_array(cgrp); | 2346 | seq = file->private_data; |
2347 | cp = seq->private; | ||
2348 | |||
2349 | release_cgroup_pid_array(cp); | ||
2309 | return seq_release(inode, file); | 2350 | return seq_release(inode, file); |
2310 | } | 2351 | } |
2311 | 2352 | ||
@@ -2324,6 +2365,8 @@ static struct file_operations cgroup_tasks_operations = { | |||
2324 | static int cgroup_tasks_open(struct inode *unused, struct file *file) | 2365 | static int cgroup_tasks_open(struct inode *unused, struct file *file) |
2325 | { | 2366 | { |
2326 | struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); | 2367 | struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); |
2368 | struct pid_namespace *ns = current->nsproxy->pid_ns; | ||
2369 | struct cgroup_pids *cp; | ||
2327 | pid_t *pidarray; | 2370 | pid_t *pidarray; |
2328 | int npids; | 2371 | int npids; |
2329 | int retval; | 2372 | int retval; |
@@ -2350,20 +2393,37 @@ static int cgroup_tasks_open(struct inode *unused, struct file *file) | |||
2350 | * array if necessary | 2393 | * array if necessary |
2351 | */ | 2394 | */ |
2352 | down_write(&cgrp->pids_mutex); | 2395 | down_write(&cgrp->pids_mutex); |
2353 | kfree(cgrp->tasks_pids); | 2396 | |
2354 | cgrp->tasks_pids = pidarray; | 2397 | list_for_each_entry(cp, &cgrp->pids_list, list) { |
2355 | cgrp->pids_length = npids; | 2398 | if (ns == cp->ns) |
2356 | cgrp->pids_use_count++; | 2399 | goto found; |
2400 | } | ||
2401 | |||
2402 | cp = kzalloc(sizeof(*cp), GFP_KERNEL); | ||
2403 | if (!cp) { | ||
2404 | up_write(&cgrp->pids_mutex); | ||
2405 | kfree(pidarray); | ||
2406 | return -ENOMEM; | ||
2407 | } | ||
2408 | cp->cgrp = cgrp; | ||
2409 | cp->ns = ns; | ||
2410 | get_pid_ns(ns); | ||
2411 | list_add(&cp->list, &cgrp->pids_list); | ||
2412 | found: | ||
2413 | kfree(cp->tasks_pids); | ||
2414 | cp->tasks_pids = pidarray; | ||
2415 | cp->length = npids; | ||
2416 | cp->use_count++; | ||
2357 | up_write(&cgrp->pids_mutex); | 2417 | up_write(&cgrp->pids_mutex); |
2358 | 2418 | ||
2359 | file->f_op = &cgroup_tasks_operations; | 2419 | file->f_op = &cgroup_tasks_operations; |
2360 | 2420 | ||
2361 | retval = seq_open(file, &cgroup_tasks_seq_operations); | 2421 | retval = seq_open(file, &cgroup_tasks_seq_operations); |
2362 | if (retval) { | 2422 | if (retval) { |
2363 | release_cgroup_pid_array(cgrp); | 2423 | release_cgroup_pid_array(cp); |
2364 | return retval; | 2424 | return retval; |
2365 | } | 2425 | } |
2366 | ((struct seq_file *)file->private_data)->private = cgrp; | 2426 | ((struct seq_file *)file->private_data)->private = cp; |
2367 | return 0; | 2427 | return 0; |
2368 | } | 2428 | } |
2369 | 2429 | ||
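
The open path above no longer keeps a single pid array per cgroup: it walks cgrp->pids_list for a struct cgroup_pids that matches the opener's pid namespace and only allocates a new entry when none exists, all under cgrp->pids_mutex. A minimal userspace model of that lookup-or-allocate pattern follows; the names (pid_cache, cache_open) and the pthread lock are illustrative stand-ins, not kernel code.

/* Find an existing cache entry for @key under the lock, otherwise
 * allocate one and link it in; either way install the new pid array.
 * Userspace model only -- the kernel code uses a list_head and an rwsem. */
#include <pthread.h>
#include <stdlib.h>

struct pid_cache {
        struct pid_cache *next;
        int key;                /* stands in for the pid namespace */
        int *pids;              /* cached, sorted pid array */
        int length;
        int use_count;
};

static struct pid_cache *cache_head;
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/* Takes ownership of @pids (frees it on allocation failure). */
static struct pid_cache *cache_open(int key, int *pids, int length)
{
        struct pid_cache *cp;

        pthread_mutex_lock(&cache_lock);
        for (cp = cache_head; cp; cp = cp->next)
                if (cp->key == key)
                        goto found;

        cp = calloc(1, sizeof(*cp));
        if (!cp) {
                pthread_mutex_unlock(&cache_lock);
                free(pids);
                return NULL;
        }
        cp->key = key;
        cp->next = cache_head;
        cache_head = cp;
found:
        free(cp->pids);         /* drop any stale array for this key */
        cp->pids = pids;
        cp->length = length;
        cp->use_count++;
        pthread_mutex_unlock(&cache_lock);
        return cp;
}
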
@@ -2696,33 +2756,42 @@ again: | |||
2696 | mutex_unlock(&cgroup_mutex); | 2756 | mutex_unlock(&cgroup_mutex); |
2697 | 2757 | ||
2698 | /* | 2758 | /* |
2759 | * In general, a subsystem holds no css->refcnt after pre_destroy(). But | ||
2760 | * in racy cases a subsystem may still take a css->refcnt after | ||
2761 | * pre_destroy(), which makes rmdir() return -EBUSY more often than | ||
2762 | * necessary. To avoid that, we use a waitqueue for cgroup's rmdir. | ||
2763 | * CGRP_WAIT_ON_RMDIR synchronizes rmdir and the subsystems' | ||
2764 | * reference count handling. Please see css_get/put, css_tryget() and | ||
2765 | * the cgroup_wakeup_rmdir_waiter() implementation. | ||
2766 | */ | ||
2767 | set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); | ||
2768 | |||
2769 | /* | ||
2699 | * Call pre_destroy handlers of subsys. Notify subsystems | 2770 | * Call pre_destroy handlers of subsys. Notify subsystems |
2700 | * that rmdir() request comes. | 2771 | * that rmdir() request comes. |
2701 | */ | 2772 | */ |
2702 | ret = cgroup_call_pre_destroy(cgrp); | 2773 | ret = cgroup_call_pre_destroy(cgrp); |
2703 | if (ret) | 2774 | if (ret) { |
2775 | clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); | ||
2704 | return ret; | 2776 | return ret; |
2777 | } | ||
2705 | 2778 | ||
2706 | mutex_lock(&cgroup_mutex); | 2779 | mutex_lock(&cgroup_mutex); |
2707 | parent = cgrp->parent; | 2780 | parent = cgrp->parent; |
2708 | if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) { | 2781 | if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) { |
2782 | clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); | ||
2709 | mutex_unlock(&cgroup_mutex); | 2783 | mutex_unlock(&cgroup_mutex); |
2710 | return -EBUSY; | 2784 | return -EBUSY; |
2711 | } | 2785 | } |
2712 | /* | ||
2713 | * css_put/get is provided for subsys to grab refcnt to css. In typical | ||
2714 | * case, subsystem has no reference after pre_destroy(). But, under | ||
2715 | * hierarchy management, some *temporal* refcnt can be hold. | ||
2716 | * To avoid returning -EBUSY to a user, waitqueue is used. If subsys | ||
2717 | * is really busy, it should return -EBUSY at pre_destroy(). wake_up | ||
2718 | * is called when css_put() is called and refcnt goes down to 0. | ||
2719 | */ | ||
2720 | set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); | ||
2721 | prepare_to_wait(&cgroup_rmdir_waitq, &wait, TASK_INTERRUPTIBLE); | 2786 | prepare_to_wait(&cgroup_rmdir_waitq, &wait, TASK_INTERRUPTIBLE); |
2722 | |||
2723 | if (!cgroup_clear_css_refs(cgrp)) { | 2787 | if (!cgroup_clear_css_refs(cgrp)) { |
2724 | mutex_unlock(&cgroup_mutex); | 2788 | mutex_unlock(&cgroup_mutex); |
2725 | schedule(); | 2789 | /* |
2790 | * Because someone may call cgroup_wakeup_rmdir_waiter() before | ||
2791 | * prepare_to_wait(), we need to check this flag. | ||
2792 | */ | ||
2793 | if (test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)) | ||
2794 | schedule(); | ||
2726 | finish_wait(&cgroup_rmdir_waitq, &wait); | 2795 | finish_wait(&cgroup_rmdir_waitq, &wait); |
2727 | clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); | 2796 | clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); |
2728 | if (signal_pending(current)) | 2797 | if (signal_pending(current)) |
@@ -3294,7 +3363,7 @@ void __css_put(struct cgroup_subsys_state *css) | |||
3294 | set_bit(CGRP_RELEASABLE, &cgrp->flags); | 3363 | set_bit(CGRP_RELEASABLE, &cgrp->flags); |
3295 | check_for_release(cgrp); | 3364 | check_for_release(cgrp); |
3296 | } | 3365 | } |
3297 | cgroup_wakeup_rmdir_waiters(cgrp); | 3366 | cgroup_wakeup_rmdir_waiter(cgrp); |
3298 | } | 3367 | } |
3299 | rcu_read_unlock(); | 3368 | rcu_read_unlock(); |
3300 | } | 3369 | } |
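
Taken together, the cgroup.c hunks above set CGRP_WAIT_ON_RMDIR before pre_destroy() runs and re-check the bit after prepare_to_wait(), so a wakeup issued by cgroup_wakeup_rmdir_waiter() before the task actually reached the waitqueue is not lost. A kernel-style sketch of that lost-wakeup-safe shape follows; the example_* names are illustrative, not the cgroup symbols.

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/bitops.h>

static DECLARE_WAIT_QUEUE_HEAD(example_waitq);
static unsigned long example_flags;
#define EXAMPLE_WAITING 0

static void example_wait_for_completion(void)
{
        DEFINE_WAIT(wait);

        /* Announce the intent to wait before starting the racy work. */
        set_bit(EXAMPLE_WAITING, &example_flags);
        /* ... kick off the work whose completion will call the waker ... */

        prepare_to_wait(&example_waitq, &wait, TASK_INTERRUPTIBLE);
        /* The waker may already have cleared the bit; only sleep if not. */
        if (test_bit(EXAMPLE_WAITING, &example_flags))
                schedule();
        finish_wait(&example_waitq, &wait);
        clear_bit(EXAMPLE_WAITING, &example_flags);
}

static void example_waker(void)
{
        if (test_and_clear_bit(EXAMPLE_WAITING, &example_flags))
                wake_up_all(&example_waitq);
}
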
diff --git a/kernel/fork.c b/kernel/fork.c index 9b42695f0d14..29b532e718f7 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -426,6 +426,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) | |||
426 | init_rwsem(&mm->mmap_sem); | 426 | init_rwsem(&mm->mmap_sem); |
427 | INIT_LIST_HEAD(&mm->mmlist); | 427 | INIT_LIST_HEAD(&mm->mmlist); |
428 | mm->flags = (current->mm) ? current->mm->flags : default_dump_filter; | 428 | mm->flags = (current->mm) ? current->mm->flags : default_dump_filter; |
429 | mm->oom_adj = (current->mm) ? current->mm->oom_adj : 0; | ||
429 | mm->core_state = NULL; | 430 | mm->core_state = NULL; |
430 | mm->nr_ptes = 0; | 431 | mm->nr_ptes = 0; |
431 | set_mm_counter(mm, file_rss, 0); | 432 | set_mm_counter(mm, file_rss, 0); |
diff --git a/kernel/kexec.c b/kernel/kexec.c index ae1c35201cc8..f336e2107f98 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
@@ -1228,7 +1228,7 @@ static int __init parse_crashkernel_mem(char *cmdline, | |||
1228 | } while (*cur++ == ','); | 1228 | } while (*cur++ == ','); |
1229 | 1229 | ||
1230 | if (*crash_size > 0) { | 1230 | if (*crash_size > 0) { |
1231 | while (*cur != ' ' && *cur != '@') | 1231 | while (*cur && *cur != ' ' && *cur != '@') |
1232 | cur++; | 1232 | cur++; |
1233 | if (*cur == '@') { | 1233 | if (*cur == '@') { |
1234 | cur++; | 1234 | cur++; |
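
The one-character kexec change adds a NUL check so that a crashkernel= size not followed by a space or '@' stops the scan at the end of the command line instead of running past it. A standalone illustration of the corrected loop (not the kernel function itself):

#include <stdio.h>

/* Advance over the size token, stopping at NUL, ' ' or '@'. */
static const char *skip_to_at_or_space(const char *cur)
{
        while (*cur && *cur != ' ' && *cur != '@')
                cur++;
        return cur;
}

int main(void)
{
        printf("[%s]\n", skip_to_at_or_space("512M-2G:64M"));  /* "[]": hit the NUL */
        printf("[%s]\n", skip_to_at_or_space("64M@16M"));      /* "[@16M]" */
        return 0;
}
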
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 16b5739c516a..0540948e29ab 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
@@ -694,7 +694,7 @@ int __kprobes register_kprobe(struct kprobe *p) | |||
694 | p->addr = addr; | 694 | p->addr = addr; |
695 | 695 | ||
696 | preempt_disable(); | 696 | preempt_disable(); |
697 | if (!__kernel_text_address((unsigned long) p->addr) || | 697 | if (!kernel_text_address((unsigned long) p->addr) || |
698 | in_kprobes_functions((unsigned long) p->addr)) { | 698 | in_kprobes_functions((unsigned long) p->addr)) { |
699 | preempt_enable(); | 699 | preempt_enable(); |
700 | return -EINVAL; | 700 | return -EINVAL; |
diff --git a/kernel/profile.c b/kernel/profile.c index 69911b5745eb..419250ebec4d 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
@@ -117,11 +117,12 @@ int __ref profile_init(void) | |||
117 | 117 | ||
118 | cpumask_copy(prof_cpu_mask, cpu_possible_mask); | 118 | cpumask_copy(prof_cpu_mask, cpu_possible_mask); |
119 | 119 | ||
120 | prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL); | 120 | prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN); |
121 | if (prof_buffer) | 121 | if (prof_buffer) |
122 | return 0; | 122 | return 0; |
123 | 123 | ||
124 | prof_buffer = alloc_pages_exact(buffer_bytes, GFP_KERNEL|__GFP_ZERO); | 124 | prof_buffer = alloc_pages_exact(buffer_bytes, |
125 | GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN); | ||
125 | if (prof_buffer) | 126 | if (prof_buffer) |
126 | return 0; | 127 | return 0; |
127 | 128 | ||
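
profile_init() already falls back from kzalloc() to alloc_pages_exact(), so a failed first attempt is expected and handled; __GFP_NOWARN keeps those expected failures from spamming the log with allocation warnings. A sketch of the pattern; alloc_profile_buffer() is an illustrative name, not a function in kernel/profile.c.

#include <linux/slab.h>
#include <linux/gfp.h>

static void *alloc_profile_buffer(size_t bytes)
{
        void *buf;

        /* Preferred: physically contiguous and zeroed; do not warn on failure. */
        buf = kzalloc(bytes, GFP_KERNEL | __GFP_NOWARN);
        if (buf)
                return buf;

        /* Fallback: whole pages; still suppress the expected-failure warning. */
        return alloc_pages_exact(bytes, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
}
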
diff --git a/lib/Makefile b/lib/Makefile index b6d1857bbf08..2e78277eff9d 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -12,7 +12,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ | |||
12 | idr.o int_sqrt.o extable.o prio_tree.o \ | 12 | idr.o int_sqrt.o extable.o prio_tree.o \ |
13 | sha1.o irq_regs.o reciprocal_div.o argv_split.o \ | 13 | sha1.o irq_regs.o reciprocal_div.o argv_split.o \ |
14 | proportions.o prio_heap.o ratelimit.o show_mem.o \ | 14 | proportions.o prio_heap.o ratelimit.o show_mem.o \ |
15 | is_single_threaded.o plist.o decompress.o | 15 | is_single_threaded.o plist.o decompress.o flex_array.o |
16 | 16 | ||
17 | lib-$(CONFIG_MMU) += ioremap.o | 17 | lib-$(CONFIG_MMU) += ioremap.o |
18 | lib-$(CONFIG_SMP) += cpumask.o | 18 | lib-$(CONFIG_SMP) += cpumask.o |
diff --git a/lib/atomic64.c b/lib/atomic64.c index c5e725562416..8bee16ec7524 100644 --- a/lib/atomic64.c +++ b/lib/atomic64.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/cache.h> | 13 | #include <linux/cache.h> |
14 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/module.h> | ||
16 | #include <asm/atomic.h> | 17 | #include <asm/atomic.h> |
17 | 18 | ||
18 | /* | 19 | /* |
@@ -52,6 +53,7 @@ long long atomic64_read(const atomic64_t *v) | |||
52 | spin_unlock_irqrestore(lock, flags); | 53 | spin_unlock_irqrestore(lock, flags); |
53 | return val; | 54 | return val; |
54 | } | 55 | } |
56 | EXPORT_SYMBOL(atomic64_read); | ||
55 | 57 | ||
56 | void atomic64_set(atomic64_t *v, long long i) | 58 | void atomic64_set(atomic64_t *v, long long i) |
57 | { | 59 | { |
@@ -62,6 +64,7 @@ void atomic64_set(atomic64_t *v, long long i) | |||
62 | v->counter = i; | 64 | v->counter = i; |
63 | spin_unlock_irqrestore(lock, flags); | 65 | spin_unlock_irqrestore(lock, flags); |
64 | } | 66 | } |
67 | EXPORT_SYMBOL(atomic64_set); | ||
65 | 68 | ||
66 | void atomic64_add(long long a, atomic64_t *v) | 69 | void atomic64_add(long long a, atomic64_t *v) |
67 | { | 70 | { |
@@ -72,6 +75,7 @@ void atomic64_add(long long a, atomic64_t *v) | |||
72 | v->counter += a; | 75 | v->counter += a; |
73 | spin_unlock_irqrestore(lock, flags); | 76 | spin_unlock_irqrestore(lock, flags); |
74 | } | 77 | } |
78 | EXPORT_SYMBOL(atomic64_add); | ||
75 | 79 | ||
76 | long long atomic64_add_return(long long a, atomic64_t *v) | 80 | long long atomic64_add_return(long long a, atomic64_t *v) |
77 | { | 81 | { |
@@ -84,6 +88,7 @@ long long atomic64_add_return(long long a, atomic64_t *v) | |||
84 | spin_unlock_irqrestore(lock, flags); | 88 | spin_unlock_irqrestore(lock, flags); |
85 | return val; | 89 | return val; |
86 | } | 90 | } |
91 | EXPORT_SYMBOL(atomic64_add_return); | ||
87 | 92 | ||
88 | void atomic64_sub(long long a, atomic64_t *v) | 93 | void atomic64_sub(long long a, atomic64_t *v) |
89 | { | 94 | { |
@@ -94,6 +99,7 @@ void atomic64_sub(long long a, atomic64_t *v) | |||
94 | v->counter -= a; | 99 | v->counter -= a; |
95 | spin_unlock_irqrestore(lock, flags); | 100 | spin_unlock_irqrestore(lock, flags); |
96 | } | 101 | } |
102 | EXPORT_SYMBOL(atomic64_sub); | ||
97 | 103 | ||
98 | long long atomic64_sub_return(long long a, atomic64_t *v) | 104 | long long atomic64_sub_return(long long a, atomic64_t *v) |
99 | { | 105 | { |
@@ -106,6 +112,7 @@ long long atomic64_sub_return(long long a, atomic64_t *v) | |||
106 | spin_unlock_irqrestore(lock, flags); | 112 | spin_unlock_irqrestore(lock, flags); |
107 | return val; | 113 | return val; |
108 | } | 114 | } |
115 | EXPORT_SYMBOL(atomic64_sub_return); | ||
109 | 116 | ||
110 | long long atomic64_dec_if_positive(atomic64_t *v) | 117 | long long atomic64_dec_if_positive(atomic64_t *v) |
111 | { | 118 | { |
@@ -120,6 +127,7 @@ long long atomic64_dec_if_positive(atomic64_t *v) | |||
120 | spin_unlock_irqrestore(lock, flags); | 127 | spin_unlock_irqrestore(lock, flags); |
121 | return val; | 128 | return val; |
122 | } | 129 | } |
130 | EXPORT_SYMBOL(atomic64_dec_if_positive); | ||
123 | 131 | ||
124 | long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n) | 132 | long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n) |
125 | { | 133 | { |
@@ -134,6 +142,7 @@ long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n) | |||
134 | spin_unlock_irqrestore(lock, flags); | 142 | spin_unlock_irqrestore(lock, flags); |
135 | return val; | 143 | return val; |
136 | } | 144 | } |
145 | EXPORT_SYMBOL(atomic64_cmpxchg); | ||
137 | 146 | ||
138 | long long atomic64_xchg(atomic64_t *v, long long new) | 147 | long long atomic64_xchg(atomic64_t *v, long long new) |
139 | { | 148 | { |
@@ -147,6 +156,7 @@ long long atomic64_xchg(atomic64_t *v, long long new) | |||
147 | spin_unlock_irqrestore(lock, flags); | 156 | spin_unlock_irqrestore(lock, flags); |
148 | return val; | 157 | return val; |
149 | } | 158 | } |
159 | EXPORT_SYMBOL(atomic64_xchg); | ||
150 | 160 | ||
151 | int atomic64_add_unless(atomic64_t *v, long long a, long long u) | 161 | int atomic64_add_unless(atomic64_t *v, long long a, long long u) |
152 | { | 162 | { |
@@ -162,6 +172,7 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u) | |||
162 | spin_unlock_irqrestore(lock, flags); | 172 | spin_unlock_irqrestore(lock, flags); |
163 | return ret; | 173 | return ret; |
164 | } | 174 | } |
175 | EXPORT_SYMBOL(atomic64_add_unless); | ||
165 | 176 | ||
166 | static int init_atomic64_lock(void) | 177 | static int init_atomic64_lock(void) |
167 | { | 178 | { |
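
On architectures that rely on this generic lib/atomic64.c, the atomic64 operations are out-of-line functions rather than inline assembly, so a loadable module that touches an atomic64_t links only once the EXPORT_SYMBOL() lines above are present. A kernel-style sketch of such a module (illustrative only):

#include <linux/module.h>
#include <asm/atomic.h>

static atomic64_t example_counter = ATOMIC64_INIT(0);

static int __init example_init(void)
{
        /* Resolves to the exported lib/atomic64.c function on architectures
         * that use the generic implementation. */
        long long v = atomic64_add_return(1, &example_counter);

        pr_info("example counter is now %lld\n", v);
        return 0;
}

static void __exit example_exit(void)
{
        atomic64_sub(1, &example_counter);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
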
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 833139ce1e22..e22c148e4b7f 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c | |||
@@ -164,7 +164,7 @@ static void ddebug_change(const struct ddebug_query *query, | |||
164 | 164 | ||
165 | if (!newflags) | 165 | if (!newflags) |
166 | dt->num_enabled--; | 166 | dt->num_enabled--; |
167 | else if (!dp-flags) | 167 | else if (!dp->flags) |
168 | dt->num_enabled++; | 168 | dt->num_enabled++; |
169 | dp->flags = newflags; | 169 | dp->flags = newflags; |
170 | if (newflags) { | 170 | if (newflags) { |
diff --git a/lib/flex_array.c b/lib/flex_array.c new file mode 100644 index 000000000000..0e7894ce8882 --- /dev/null +++ b/lib/flex_array.c | |||
@@ -0,0 +1,269 @@ | |||
1 | /* | ||
2 | * Flexible array managed in PAGE_SIZE parts | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright IBM Corporation, 2009 | ||
19 | * | ||
20 | * Author: Dave Hansen <dave@linux.vnet.ibm.com> | ||
21 | */ | ||
22 | |||
23 | #include <linux/flex_array.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/stddef.h> | ||
26 | |||
27 | struct flex_array_part { | ||
28 | char elements[FLEX_ARRAY_PART_SIZE]; | ||
29 | }; | ||
30 | |||
31 | static inline int __elements_per_part(int element_size) | ||
32 | { | ||
33 | return FLEX_ARRAY_PART_SIZE / element_size; | ||
34 | } | ||
35 | |||
36 | static inline int bytes_left_in_base(void) | ||
37 | { | ||
38 | int element_offset = offsetof(struct flex_array, parts); | ||
39 | int bytes_left = FLEX_ARRAY_BASE_SIZE - element_offset; | ||
40 | return bytes_left; | ||
41 | } | ||
42 | |||
43 | static inline int nr_base_part_ptrs(void) | ||
44 | { | ||
45 | return bytes_left_in_base() / sizeof(struct flex_array_part *); | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | * If a user requests an allocation which is small | ||
50 | * enough, we may simply use the space in the | ||
51 | * flex_array->parts[] array to store the user | ||
52 | * data. | ||
53 | */ | ||
54 | static inline int elements_fit_in_base(struct flex_array *fa) | ||
55 | { | ||
56 | int data_size = fa->element_size * fa->total_nr_elements; | ||
57 | if (data_size <= bytes_left_in_base()) | ||
58 | return 1; | ||
59 | return 0; | ||
60 | } | ||
61 | |||
62 | /** | ||
63 | * flex_array_alloc - allocate a new flexible array | ||
64 | * @element_size: the size of individual elements in the array | ||
65 | * @total: total number of elements that this should hold | ||
66 | * | ||
67 | * Note: all locking must be provided by the caller. | ||
68 | * | ||
69 | * @total is used to size internal structures. If the user ever | ||
70 | * accesses any array indexes >=@total, it will produce errors. | ||
71 | * | ||
72 | * The maximum number of elements is defined as: the number of | ||
73 | * elements that can be stored in a page times the number of | ||
74 | * page pointers that we can fit in the base structure or (using | ||
75 | * integer math): | ||
76 | * | ||
77 | * (PAGE_SIZE/element_size) * (PAGE_SIZE-8)/sizeof(void *) | ||
78 | * | ||
79 | * Here's a table showing example capacities. Note that the maximum | ||
80 | * index that the get/put() functions accept is just nr_objects-1. This | ||
81 | * basically means that you get 4MB of storage on 32-bit and 2MB on | ||
82 | * 64-bit. | ||
83 | * | ||
84 | * | ||
85 | * Element size | Objects | Objects | | ||
86 | * PAGE_SIZE=4k | 32-bit | 64-bit | | ||
87 | * ---------------------------------| | ||
88 | * 1 bytes | 4186112 | 2093056 | | ||
89 | * 2 bytes | 2093056 | 1046528 | | ||
90 | * 3 bytes | 1395030 | 697515 | | ||
91 | * 4 bytes | 1046528 | 523264 | | ||
92 | * 32 bytes | 130816 | 65408 | | ||
93 | * 33 bytes | 126728 | 63364 | | ||
94 | * 2048 bytes | 2044 | 1022 | | ||
95 | * 2049 bytes | 1022 | 511 | | ||
96 | * void * | 1046528 | 261632 | | ||
97 | * | ||
98 | * Since 64-bit pointers are twice the size, we lose half the | ||
99 | * capacity in the base structure. Also note that no effort is made | ||
100 | * to efficiently pack objects across page boundaries. | ||
101 | */ | ||
102 | struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags) | ||
103 | { | ||
104 | struct flex_array *ret; | ||
105 | int max_size = nr_base_part_ptrs() * __elements_per_part(element_size); | ||
106 | |||
107 | /* max_size will end up 0 if element_size > PAGE_SIZE */ | ||
108 | if (total > max_size) | ||
109 | return NULL; | ||
110 | ret = kzalloc(sizeof(struct flex_array), flags); | ||
111 | if (!ret) | ||
112 | return NULL; | ||
113 | ret->element_size = element_size; | ||
114 | ret->total_nr_elements = total; | ||
115 | return ret; | ||
116 | } | ||
117 | |||
118 | static int fa_element_to_part_nr(struct flex_array *fa, int element_nr) | ||
119 | { | ||
120 | return element_nr / __elements_per_part(fa->element_size); | ||
121 | } | ||
122 | |||
123 | /** | ||
124 | * flex_array_free_parts - just free the second-level pages | ||
125 | * @fa: the flex array whose second-level pages are to be freed | ||
126 | * | ||
127 | * | ||
128 | * | ||
129 | * This is to be used in cases where the base 'struct flex_array' | ||
130 | * has been statically allocated and should not be freed. | ||
131 | */ | ||
132 | void flex_array_free_parts(struct flex_array *fa) | ||
133 | { | ||
134 | int part_nr; | ||
135 | int max_part = nr_base_part_ptrs(); | ||
136 | |||
137 | if (elements_fit_in_base(fa)) | ||
138 | return; | ||
139 | for (part_nr = 0; part_nr < max_part; part_nr++) | ||
140 | kfree(fa->parts[part_nr]); | ||
141 | } | ||
142 | |||
143 | void flex_array_free(struct flex_array *fa) | ||
144 | { | ||
145 | flex_array_free_parts(fa); | ||
146 | kfree(fa); | ||
147 | } | ||
148 | |||
149 | static int fa_index_inside_part(struct flex_array *fa, int element_nr) | ||
150 | { | ||
151 | return element_nr % __elements_per_part(fa->element_size); | ||
152 | } | ||
153 | |||
154 | static int index_inside_part(struct flex_array *fa, int element_nr) | ||
155 | { | ||
156 | int part_offset = fa_index_inside_part(fa, element_nr); | ||
157 | return part_offset * fa->element_size; | ||
158 | } | ||
159 | |||
160 | static struct flex_array_part * | ||
161 | __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags) | ||
162 | { | ||
163 | struct flex_array_part *part = fa->parts[part_nr]; | ||
164 | if (!part) { | ||
165 | /* | ||
166 | * This leaves the part pages uninitialized | ||
167 | * and with potentially random data, just | ||
168 | * as if the user had kmalloc()'d the whole array. | ||
169 | * __GFP_ZERO can be used to zero it. | ||
170 | */ | ||
171 | part = kmalloc(FLEX_ARRAY_PART_SIZE, flags); | ||
172 | if (!part) | ||
173 | return NULL; | ||
174 | fa->parts[part_nr] = part; | ||
175 | } | ||
176 | return part; | ||
177 | } | ||
178 | |||
179 | /** | ||
180 | * flex_array_put - copy data into the array at @element_nr | ||
181 | * @src: address of data to copy into the array | ||
182 | * @element_nr: index of the position in which to insert | ||
183 | * the new element. | ||
184 | * | ||
185 | * Note that this *copies* the contents of @src into | ||
186 | * the array. If you are trying to store an array of | ||
187 | * pointers, make sure to pass in &ptr instead of ptr. | ||
188 | * | ||
189 | * Locking must be provided by the caller. | ||
190 | */ | ||
191 | int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags) | ||
192 | { | ||
193 | int part_nr = fa_element_to_part_nr(fa, element_nr); | ||
194 | struct flex_array_part *part; | ||
195 | void *dst; | ||
196 | |||
197 | if (element_nr >= fa->total_nr_elements) | ||
198 | return -ENOSPC; | ||
199 | if (elements_fit_in_base(fa)) | ||
200 | part = (struct flex_array_part *)&fa->parts[0]; | ||
201 | else | ||
202 | part = __fa_get_part(fa, part_nr, flags); | ||
203 | if (!part) | ||
204 | return -ENOMEM; | ||
205 | dst = &part->elements[index_inside_part(fa, element_nr)]; | ||
206 | memcpy(dst, src, fa->element_size); | ||
207 | return 0; | ||
208 | } | ||
209 | |||
210 | /** | ||
211 | * flex_array_prealloc - guarantee that array space exists | ||
212 | * @start: index of first array element for which space is allocated | ||
213 | * @end: index of last (inclusive) element for which space is allocated | ||
214 | * | ||
215 | * This will guarantee that no future calls to flex_array_put() | ||
216 | * will allocate memory. It can be used if you are expecting to | ||
217 | * be holding a lock or in some atomic context while writing | ||
218 | * data into the array. | ||
219 | * | ||
220 | * Locking must be provided by the caller. | ||
221 | */ | ||
222 | int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags) | ||
223 | { | ||
224 | int start_part; | ||
225 | int end_part; | ||
226 | int part_nr; | ||
227 | struct flex_array_part *part; | ||
228 | |||
229 | if (start >= fa->total_nr_elements || end >= fa->total_nr_elements) | ||
230 | return -ENOSPC; | ||
231 | if (elements_fit_in_base(fa)) | ||
232 | return 0; | ||
233 | start_part = fa_element_to_part_nr(fa, start); | ||
234 | end_part = fa_element_to_part_nr(fa, end); | ||
235 | for (part_nr = start_part; part_nr <= end_part; part_nr++) { | ||
236 | part = __fa_get_part(fa, part_nr, flags); | ||
237 | if (!part) | ||
238 | return -ENOMEM; | ||
239 | } | ||
240 | return 0; | ||
241 | } | ||
242 | |||
243 | /** | ||
244 | * flex_array_get - pull data back out of the array | ||
245 | * @element_nr: index of the element to fetch from the array | ||
246 | * | ||
247 | * Returns a pointer to the data at index @element_nr. Note | ||
248 | * that this points to a copy of the data that was passed in. If you | ||
249 | * are using this to store pointers, you'll get back &ptr. | ||
250 | * | ||
251 | * Locking must be provided by the caller. | ||
252 | */ | ||
253 | void *flex_array_get(struct flex_array *fa, int element_nr) | ||
254 | { | ||
255 | int part_nr = fa_element_to_part_nr(fa, element_nr); | ||
256 | struct flex_array_part *part; | ||
257 | int index; | ||
258 | |||
259 | if (element_nr >= fa->total_nr_elements) | ||
260 | return NULL; | ||
261 | if (!fa->parts[part_nr]) | ||
262 | return NULL; | ||
263 | if (elements_fit_in_base(fa)) | ||
264 | part = (struct flex_array_part *)&fa->parts[0]; | ||
265 | else | ||
266 | part = fa->parts[part_nr]; | ||
267 | index = index_inside_part(fa, element_nr); | ||
268 | return &part->elements[index_inside_part(fa, element_nr)]; | ||
269 | } | ||
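
For reference, a short usage sketch of the API introduced above (flex_array_alloc/prealloc/put/get/free). The caller provides all locking; 'struct sample' and flex_array_demo() are illustrative names, not part of the patch.

#include <linux/flex_array.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct sample {
        int a;
        int b;
};

static int flex_array_demo(void)
{
        struct flex_array *fa;
        struct sample s = { .a = 1, .b = 2 };
        struct sample *got;
        int err;

        fa = flex_array_alloc(sizeof(struct sample), 1000, GFP_KERNEL);
        if (!fa)
                return -ENOMEM;

        /* Optional: allocate the parts up front so later puts cannot fail. */
        err = flex_array_prealloc(fa, 0, 999, GFP_KERNEL);
        if (err)
                goto out;

        err = flex_array_put(fa, 42, &s, GFP_KERNEL);   /* copies s in */
        if (err)
                goto out;

        got = flex_array_get(fa, 42);   /* pointer to the stored copy */
        if (!got || got->a != 1 || got->b != 2)
                err = -EINVAL;
out:
        flex_array_free(fa);    /* frees the parts and the base structure */
        return err;
}
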
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index d0351e31f474..cafdcee154e8 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -2370,7 +2370,7 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) | |||
2370 | long chg = region_truncate(&inode->i_mapping->private_list, offset); | 2370 | long chg = region_truncate(&inode->i_mapping->private_list, offset); |
2371 | 2371 | ||
2372 | spin_lock(&inode->i_lock); | 2372 | spin_lock(&inode->i_lock); |
2373 | inode->i_blocks -= blocks_per_huge_page(h); | 2373 | inode->i_blocks -= (blocks_per_huge_page(h) * freed); |
2374 | spin_unlock(&inode->i_lock); | 2374 | spin_unlock(&inode->i_lock); |
2375 | 2375 | ||
2376 | hugetlb_put_quota(inode->i_mapping, (chg - freed)); | 2376 | hugetlb_put_quota(inode->i_mapping, (chg - freed)); |
diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 5aabd41ffb8f..487267310a84 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c | |||
@@ -1217,7 +1217,6 @@ static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos) | |||
1217 | } | 1217 | } |
1218 | object = NULL; | 1218 | object = NULL; |
1219 | out: | 1219 | out: |
1220 | rcu_read_unlock(); | ||
1221 | return object; | 1220 | return object; |
1222 | } | 1221 | } |
1223 | 1222 | ||
@@ -1233,13 +1232,11 @@ static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1233 | 1232 | ||
1234 | ++(*pos); | 1233 | ++(*pos); |
1235 | 1234 | ||
1236 | rcu_read_lock(); | ||
1237 | list_for_each_continue_rcu(n, &object_list) { | 1235 | list_for_each_continue_rcu(n, &object_list) { |
1238 | next_obj = list_entry(n, struct kmemleak_object, object_list); | 1236 | next_obj = list_entry(n, struct kmemleak_object, object_list); |
1239 | if (get_object(next_obj)) | 1237 | if (get_object(next_obj)) |
1240 | break; | 1238 | break; |
1241 | } | 1239 | } |
1242 | rcu_read_unlock(); | ||
1243 | 1240 | ||
1244 | put_object(prev_obj); | 1241 | put_object(prev_obj); |
1245 | return next_obj; | 1242 | return next_obj; |
@@ -1255,6 +1252,7 @@ static void kmemleak_seq_stop(struct seq_file *seq, void *v) | |||
1255 | * kmemleak_seq_start may return ERR_PTR if the scan_mutex | 1252 | * kmemleak_seq_start may return ERR_PTR if the scan_mutex |
1256 | * waiting was interrupted, so only release it if !IS_ERR. | 1253 | * waiting was interrupted, so only release it if !IS_ERR. |
1257 | */ | 1254 | */ |
1255 | rcu_read_unlock(); | ||
1258 | mutex_unlock(&scan_mutex); | 1256 | mutex_unlock(&scan_mutex); |
1259 | if (v) | 1257 | if (v) |
1260 | put_object(v); | 1258 | put_object(v); |
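
The kmemleak change moves rcu_read_unlock() out of ->start()/->next() and into ->stop(), so the RCU read-side critical section now covers the whole seq_file iteration instead of being dropped and re-taken between elements. A kernel-style sketch of that shape follows; example_find_nth_rcu() and example_next_rcu() are hypothetical helpers, and in this simplified form ->start() always takes the lock so ->stop() always drops it, whereas kmemleak keeps the !IS_ERR handling described in the comment above.

#include <linux/seq_file.h>
#include <linux/rcupdate.h>

static void *example_seq_start(struct seq_file *seq, loff_t *pos)
{
        rcu_read_lock();                        /* held until ->stop() */
        return example_find_nth_rcu(*pos);      /* hypothetical lookup helper */
}

static void *example_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return example_next_rcu(v);             /* hypothetical; still under RCU */
}

static void example_seq_stop(struct seq_file *seq, void *v)
{
        rcu_read_unlock();                      /* balances ->start() */
}
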
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e717964cb5a0..fd4529d86de5 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -1207,6 +1207,12 @@ static int mem_cgroup_move_account(struct page_cgroup *pc, | |||
1207 | ret = 0; | 1207 | ret = 0; |
1208 | out: | 1208 | out: |
1209 | unlock_page_cgroup(pc); | 1209 | unlock_page_cgroup(pc); |
1210 | /* | ||
1211 | * We charge against "to", which may not have any tasks. Then "to" | ||
1212 | * can be under rmdir(). But in the current implementation the only | ||
1213 | * caller of this function is force_empty(), and it is guaranteed that | ||
1214 | * "to" is never removed. So we don't check the rmdir status here. | ||
1215 | */ | ||
1210 | return ret; | 1216 | return ret; |
1211 | } | 1217 | } |
1212 | 1218 | ||
@@ -1428,6 +1434,7 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr, | |||
1428 | return; | 1434 | return; |
1429 | if (!ptr) | 1435 | if (!ptr) |
1430 | return; | 1436 | return; |
1437 | cgroup_exclude_rmdir(&ptr->css); | ||
1431 | pc = lookup_page_cgroup(page); | 1438 | pc = lookup_page_cgroup(page); |
1432 | mem_cgroup_lru_del_before_commit_swapcache(page); | 1439 | mem_cgroup_lru_del_before_commit_swapcache(page); |
1433 | __mem_cgroup_commit_charge(ptr, pc, ctype); | 1440 | __mem_cgroup_commit_charge(ptr, pc, ctype); |
@@ -1457,8 +1464,12 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr, | |||
1457 | } | 1464 | } |
1458 | rcu_read_unlock(); | 1465 | rcu_read_unlock(); |
1459 | } | 1466 | } |
1460 | /* add this page(page_cgroup) to the LRU we want. */ | 1467 | /* |
1461 | 1468 | * At swapin, we may charge a cgroup which has no tasks. |
1469 | * So, rmdir()->pre_destroy() can be called while we do this charge. | ||
1470 | * In that case, we need to call pre_destroy() again. Check it here. | ||
1471 | */ | ||
1472 | cgroup_release_and_wakeup_rmdir(&ptr->css); | ||
1462 | } | 1473 | } |
1463 | 1474 | ||
1464 | void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr) | 1475 | void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr) |
@@ -1664,7 +1675,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem, | |||
1664 | 1675 | ||
1665 | if (!mem) | 1676 | if (!mem) |
1666 | return; | 1677 | return; |
1667 | 1678 | cgroup_exclude_rmdir(&mem->css); | |
1668 | /* at migration success, oldpage->mapping is NULL. */ | 1679 | /* at migration success, oldpage->mapping is NULL. */ |
1669 | if (oldpage->mapping) { | 1680 | if (oldpage->mapping) { |
1670 | target = oldpage; | 1681 | target = oldpage; |
@@ -1704,6 +1715,12 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem, | |||
1704 | */ | 1715 | */ |
1705 | if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED) | 1716 | if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED) |
1706 | mem_cgroup_uncharge_page(target); | 1717 | mem_cgroup_uncharge_page(target); |
1718 | /* | ||
1719 | * At migration, we may charge a cgroup which has no tasks. | ||
1720 | * So, rmdir()->pre_destroy() can be called while we do this charge. | ||
1721 | * In that case, we need to call pre_destroy() again. Check it here. | ||
1722 | */ | ||
1723 | cgroup_release_and_wakeup_rmdir(&mem->css); | ||
1707 | } | 1724 | } |
1708 | 1725 | ||
1709 | /* | 1726 | /* |
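
Both memcontrol call sites follow the same bracket: cgroup_exclude_rmdir() is taken before committing a charge that may land in a task-less cgroup, and cgroup_release_and_wakeup_rmdir() drops the exclusion and wakes any task sleeping in cgroup_rmdir() so it can retry pre_destroy(). A condensed sketch of the bracket; example_commit_charge() is a stand-in, not a memcontrol function.

static void charge_with_rmdir_excluded(struct mem_cgroup *mem,
                                       struct page_cgroup *pc)
{
        cgroup_exclude_rmdir(&mem->css);        /* a racing rmdir() will wait */

        example_commit_charge(mem, pc);         /* stand-in for the real work */

        /* Let the rmdir() waiter run again (it re-does pre_destroy()). */
        cgroup_release_and_wakeup_rmdir(&mem->css);
}
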
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index caa92689aac9..d052abbe3063 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -882,7 +882,7 @@ retry_reserve: | |||
882 | */ | 882 | */ |
883 | static int rmqueue_bulk(struct zone *zone, unsigned int order, | 883 | static int rmqueue_bulk(struct zone *zone, unsigned int order, |
884 | unsigned long count, struct list_head *list, | 884 | unsigned long count, struct list_head *list, |
885 | int migratetype) | 885 | int migratetype, int cold) |
886 | { | 886 | { |
887 | int i; | 887 | int i; |
888 | 888 | ||
@@ -901,7 +901,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, | |||
901 | * merge IO requests if the physical pages are ordered | 901 | * merge IO requests if the physical pages are ordered |
902 | * properly. | 902 | * properly. |
903 | */ | 903 | */ |
904 | list_add(&page->lru, list); | 904 | if (likely(cold == 0)) |
905 | list_add(&page->lru, list); | ||
906 | else | ||
907 | list_add_tail(&page->lru, list); | ||
905 | set_page_private(page, migratetype); | 908 | set_page_private(page, migratetype); |
906 | list = &page->lru; | 909 | list = &page->lru; |
907 | } | 910 | } |
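
With the new 'cold' argument, a batch refilled on behalf of a cold request is queued at the tail of the per-cpu list while hot refills keep going to the head, so the head of the list stays the most cache-warm. A small sketch of that head/tail discipline; the helper names are illustrative, not the allocator's.

#include <linux/list.h>
#include <linux/mm.h>

/* Queue a freshly refilled page on a per-cpu free list. */
static void pcp_queue_page(struct list_head *pcp_list, struct page *page,
                           int cold)
{
        if (!cold)
                list_add(&page->lru, pcp_list);         /* hot: head */
        else
                list_add_tail(&page->lru, pcp_list);    /* cold: tail */
}

/* Hand a page back out, keeping warm pages for hot requests. */
static struct page *pcp_take_page(struct list_head *pcp_list, int cold)
{
        struct page *page;

        if (cold)
                page = list_entry(pcp_list->prev, struct page, lru);
        else
                page = list_entry(pcp_list->next, struct page, lru);
        list_del(&page->lru);
        return page;
}
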
@@ -1119,7 +1122,8 @@ again: | |||
1119 | local_irq_save(flags); | 1122 | local_irq_save(flags); |
1120 | if (!pcp->count) { | 1123 | if (!pcp->count) { |
1121 | pcp->count = rmqueue_bulk(zone, 0, | 1124 | pcp->count = rmqueue_bulk(zone, 0, |
1122 | pcp->batch, &pcp->list, migratetype); | 1125 | pcp->batch, &pcp->list, |
1126 | migratetype, cold); | ||
1123 | if (unlikely(!pcp->count)) | 1127 | if (unlikely(!pcp->count)) |
1124 | goto failed; | 1128 | goto failed; |
1125 | } | 1129 | } |
@@ -1138,7 +1142,8 @@ again: | |||
1138 | /* Allocate more to the pcp list if necessary */ | 1142 | /* Allocate more to the pcp list if necessary */ |
1139 | if (unlikely(&page->lru == &pcp->list)) { | 1143 | if (unlikely(&page->lru == &pcp->list)) { |
1140 | pcp->count += rmqueue_bulk(zone, 0, | 1144 | pcp->count += rmqueue_bulk(zone, 0, |
1141 | pcp->batch, &pcp->list, migratetype); | 1145 | pcp->batch, &pcp->list, |
1146 | migratetype, cold); | ||
1142 | page = list_entry(pcp->list.next, struct page, lru); | 1147 | page = list_entry(pcp->list.next, struct page, lru); |
1143 | } | 1148 | } |
1144 | 1149 | ||
@@ -1740,8 +1745,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, | |||
1740 | * be using allocators in order of preference for an area that is | 1745 | * be using allocators in order of preference for an area that is |
1741 | * too large. | 1746 | * too large. |
1742 | */ | 1747 | */ |
1743 | if (WARN_ON_ONCE(order >= MAX_ORDER)) | 1748 | if (order >= MAX_ORDER) { |
1749 | WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN)); | ||
1744 | return NULL; | 1750 | return NULL; |
1751 | } | ||
1745 | 1752 | ||
1746 | /* | 1753 | /* |
1747 | * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and | 1754 | * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and |
@@ -1789,6 +1796,10 @@ rebalance: | |||
1789 | if (p->flags & PF_MEMALLOC) | 1796 | if (p->flags & PF_MEMALLOC) |
1790 | goto nopage; | 1797 | goto nopage; |
1791 | 1798 | ||
1799 | /* Avoid allocations with no watermarks from looping endlessly */ | ||
1800 | if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL)) | ||
1801 | goto nopage; | ||
1802 | |||
1792 | /* Try direct reclaim and then allocating */ | 1803 | /* Try direct reclaim and then allocating */ |
1793 | page = __alloc_pages_direct_reclaim(gfp_mask, order, | 1804 | page = __alloc_pages_direct_reclaim(gfp_mask, order, |
1794 | zonelist, high_zoneidx, | 1805 | zonelist, high_zoneidx, |
diff --git a/mm/swapfile.c b/mm/swapfile.c index d1ade1a48ee7..8ffdc0d23c53 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -753,7 +753,7 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p) | |||
753 | 753 | ||
754 | if (!bdev) { | 754 | if (!bdev) { |
755 | if (bdev_p) | 755 | if (bdev_p) |
756 | *bdev_p = bdget(sis->bdev->bd_dev); | 756 | *bdev_p = bdgrab(sis->bdev); |
757 | 757 | ||
758 | spin_unlock(&swap_lock); | 758 | spin_unlock(&swap_lock); |
759 | return i; | 759 | return i; |
@@ -765,7 +765,7 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p) | |||
765 | struct swap_extent, list); | 765 | struct swap_extent, list); |
766 | if (se->start_block == offset) { | 766 | if (se->start_block == offset) { |
767 | if (bdev_p) | 767 | if (bdev_p) |
768 | *bdev_p = bdget(sis->bdev->bd_dev); | 768 | *bdev_p = bdgrab(sis->bdev); |
769 | 769 | ||
770 | spin_unlock(&swap_lock); | 770 | spin_unlock(&swap_lock); |
771 | bdput(bdev); | 771 | bdput(bdev); |
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 94ca8eaace7d..3281013ce038 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -1066,7 +1066,7 @@ static int __init dccp_init(void) | |||
1066 | (dccp_hashinfo.ehash_size - 1)) | 1066 | (dccp_hashinfo.ehash_size - 1)) |
1067 | dccp_hashinfo.ehash_size--; | 1067 | dccp_hashinfo.ehash_size--; |
1068 | dccp_hashinfo.ehash = (struct inet_ehash_bucket *) | 1068 | dccp_hashinfo.ehash = (struct inet_ehash_bucket *) |
1069 | __get_free_pages(GFP_ATOMIC, ehash_order); | 1069 | __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order); |
1070 | } while (!dccp_hashinfo.ehash && --ehash_order > 0); | 1070 | } while (!dccp_hashinfo.ehash && --ehash_order > 0); |
1071 | 1071 | ||
1072 | if (!dccp_hashinfo.ehash) { | 1072 | if (!dccp_hashinfo.ehash) { |
@@ -1091,7 +1091,7 @@ static int __init dccp_init(void) | |||
1091 | bhash_order > 0) | 1091 | bhash_order > 0) |
1092 | continue; | 1092 | continue; |
1093 | dccp_hashinfo.bhash = (struct inet_bind_hashbucket *) | 1093 | dccp_hashinfo.bhash = (struct inet_bind_hashbucket *) |
1094 | __get_free_pages(GFP_ATOMIC, bhash_order); | 1094 | __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, bhash_order); |
1095 | } while (!dccp_hashinfo.bhash && --bhash_order >= 0); | 1095 | } while (!dccp_hashinfo.bhash && --bhash_order >= 0); |
1096 | 1096 | ||
1097 | if (!dccp_hashinfo.bhash) { | 1097 | if (!dccp_hashinfo.bhash) { |
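
Both hash tables in dccp_init() are sized opportunistically: the loop asks for the largest order first and shrinks the request until __get_free_pages() succeeds, so the initial failures are expected and __GFP_NOWARN keeps them out of the log. A sketch of that shrink-until-it-fits loop; alloc_hash_pages() is an illustrative helper, not dccp code.

#include <linux/gfp.h>

/* Try progressively smaller allocations; returns 0 when even order 0 fails,
 * in which case *order ends up at -1. */
static unsigned long alloc_hash_pages(int *order)
{
        unsigned long table;

        do {
                table = __get_free_pages(GFP_ATOMIC | __GFP_NOWARN, *order);
        } while (!table && --(*order) >= 0);

        return table;
}
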
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl index 3e733146cd51..278a45bd45a5 100755 --- a/scripts/get_maintainer.pl +++ b/scripts/get_maintainer.pl | |||
@@ -13,7 +13,7 @@ | |||
13 | use strict; | 13 | use strict; |
14 | 14 | ||
15 | my $P = $0; | 15 | my $P = $0; |
16 | my $V = '0.16'; | 16 | my $V = '0.17'; |
17 | 17 | ||
18 | use Getopt::Long qw(:config no_auto_abbrev); | 18 | use Getopt::Long qw(:config no_auto_abbrev); |
19 | 19 | ||
@@ -27,6 +27,7 @@ my $email_git = 1; | |||
27 | my $email_git_penguin_chiefs = 0; | 27 | my $email_git_penguin_chiefs = 0; |
28 | my $email_git_min_signatures = 1; | 28 | my $email_git_min_signatures = 1; |
29 | my $email_git_max_maintainers = 5; | 29 | my $email_git_max_maintainers = 5; |
30 | my $email_git_min_percent = 5; | ||
30 | my $email_git_since = "1-year-ago"; | 31 | my $email_git_since = "1-year-ago"; |
31 | my $output_multiline = 1; | 32 | my $output_multiline = 1; |
32 | my $output_separator = ", "; | 33 | my $output_separator = ", "; |
@@ -65,6 +66,7 @@ if (!GetOptions( | |||
65 | 'git-chief-penguins!' => \$email_git_penguin_chiefs, | 66 | 'git-chief-penguins!' => \$email_git_penguin_chiefs, |
66 | 'git-min-signatures=i' => \$email_git_min_signatures, | 67 | 'git-min-signatures=i' => \$email_git_min_signatures, |
67 | 'git-max-maintainers=i' => \$email_git_max_maintainers, | 68 | 'git-max-maintainers=i' => \$email_git_max_maintainers, |
69 | 'git-min-percent=i' => \$email_git_min_percent, | ||
68 | 'git-since=s' => \$email_git_since, | 70 | 'git-since=s' => \$email_git_since, |
69 | 'm!' => \$email_maintainer, | 71 | 'm!' => \$email_maintainer, |
70 | 'n!' => \$email_usename, | 72 | 'n!' => \$email_usename, |
@@ -132,6 +134,10 @@ while (<MAINT>) { | |||
132 | $value =~ s@\.@\\\.@g; ##Convert . to \. | 134 | $value =~ s@\.@\\\.@g; ##Convert . to \. |
133 | $value =~ s/\*/\.\*/g; ##Convert * to .* | 135 | $value =~ s/\*/\.\*/g; ##Convert * to .* |
134 | $value =~ s/\?/\./g; ##Convert ? to . | 136 | $value =~ s/\?/\./g; ##Convert ? to . |
137 | ##if pattern is a directory and it lacks a trailing slash, add one | ||
138 | if ((-d $value)) { | ||
139 | $value =~ s@([^/])$@$1/@; | ||
140 | } | ||
135 | } | 141 | } |
136 | push(@typevalue, "$type:$value"); | 142 | push(@typevalue, "$type:$value"); |
137 | } elsif (!/^(\s)*$/) { | 143 | } elsif (!/^(\s)*$/) { |
@@ -146,8 +152,10 @@ close(MAINT); | |||
146 | my @files = (); | 152 | my @files = (); |
147 | 153 | ||
148 | foreach my $file (@ARGV) { | 154 | foreach my $file (@ARGV) { |
149 | next if ((-d $file)); | 155 | ##if $file is a directory and it lacks a trailing slash, add one |
150 | if (!(-f $file)) { | 156 | if ((-d $file)) { |
157 | $file =~ s@([^/])$@$1/@; | ||
158 | } elsif (!(-f $file)) { | ||
151 | die "$P: file '${file}' not found\n"; | 159 | die "$P: file '${file}' not found\n"; |
152 | } | 160 | } |
153 | if ($from_filename) { | 161 | if ($from_filename) { |
@@ -292,7 +300,7 @@ sub file_match_pattern { | |||
292 | sub usage { | 300 | sub usage { |
293 | print <<EOT; | 301 | print <<EOT; |
294 | usage: $P [options] patchfile | 302 | usage: $P [options] patchfile |
295 | $P [options] -f file | 303 | $P [options] -f file|directory |
296 | version: $V | 304 | version: $V |
297 | 305 | ||
298 | MAINTAINER field selection options: | 306 | MAINTAINER field selection options: |
@@ -301,6 +309,7 @@ MAINTAINER field selection options: | |||
301 | --git-chief-penguins => include ${penguin_chiefs} | 309 | --git-chief-penguins => include ${penguin_chiefs} |
302 | --git-min-signatures => number of signatures required (default: 1) | 310 | --git-min-signatures => number of signatures required (default: 1) |
303 | --git-max-maintainers => maximum maintainers to add (default: 5) | 311 | --git-max-maintainers => maximum maintainers to add (default: 5) |
312 | --git-min-percent => minimum percentage of commits required (default: 5) | ||
304 | --git-since => git history to use (default: 1-year-ago) | 313 | --git-since => git history to use (default: 1-year-ago) |
305 | --m => include maintainer(s) if any | 314 | --m => include maintainer(s) if any |
306 | --n => include name 'Full Name <addr\@domain.tld>' | 315 | --n => include name 'Full Name <addr\@domain.tld>' |
@@ -322,6 +331,15 @@ Other options: | |||
322 | --version => show version | 331 | --version => show version |
323 | --help => show this help information | 332 | --help => show this help information |
324 | 333 | ||
334 | Notes: | ||
335 | Using "-f directory" may give unexpected results: | ||
336 | |||
337 | Used with "--git", git signators for _all_ files in and below | ||
338 | directory are examined as git recurses directories. | ||
339 | Any specified X: (exclude) pattern matches are _not_ ignored. | ||
340 | Used with "--nogit", directory is used as a pattern match, | ||
341 | no individual file within the directory or subdirectory | ||
342 | is matched. | ||
325 | EOT | 343 | EOT |
326 | } | 344 | } |
327 | 345 | ||
@@ -482,6 +500,7 @@ sub recent_git_signoffs { | |||
482 | my $output = ""; | 500 | my $output = ""; |
483 | my $count = 0; | 501 | my $count = 0; |
484 | my @lines = (); | 502 | my @lines = (); |
503 | my $total_sign_offs; | ||
485 | 504 | ||
486 | if (which("git") eq "") { | 505 | if (which("git") eq "") { |
487 | warn("$P: git not found. Add --nogit to options?\n"); | 506 | warn("$P: git not found. Add --nogit to options?\n"); |
@@ -505,17 +524,26 @@ sub recent_git_signoffs { | |||
505 | $output =~ s/^\s*//gm; | 524 | $output =~ s/^\s*//gm; |
506 | 525 | ||
507 | @lines = split("\n", $output); | 526 | @lines = split("\n", $output); |
527 | |||
528 | $total_sign_offs = 0; | ||
529 | foreach my $line (@lines) { | ||
530 | if ($line =~ m/([0-9]+)\s+(.*)/) { | ||
531 | $total_sign_offs += $1; | ||
532 | } else { | ||
533 | die("$P: Unexpected git output: ${line}\n"); | ||
534 | } | ||
535 | } | ||
536 | |||
508 | foreach my $line (@lines) { | 537 | foreach my $line (@lines) { |
509 | if ($line =~ m/([0-9]+)\s+(.*)/) { | 538 | if ($line =~ m/([0-9]+)\s+(.*)/) { |
510 | my $sign_offs = $1; | 539 | my $sign_offs = $1; |
511 | $line = $2; | 540 | $line = $2; |
512 | $count++; | 541 | $count++; |
513 | if ($sign_offs < $email_git_min_signatures || | 542 | if ($sign_offs < $email_git_min_signatures || |
514 | $count > $email_git_max_maintainers) { | 543 | $count > $email_git_max_maintainers || |
544 | $sign_offs * 100 / $total_sign_offs < $email_git_min_percent) { | ||
515 | last; | 545 | last; |
516 | } | 546 | } |
517 | } else { | ||
518 | die("$P: Unexpected git output: ${line}\n"); | ||
519 | } | 547 | } |
520 | if ($line =~ m/(.+)<(.+)>/) { | 548 | if ($line =~ m/(.+)<(.+)>/) { |
521 | my $git_name = $1; | 549 | my $git_name = $1; |
diff --git a/scripts/markup_oops.pl b/scripts/markup_oops.pl index 528492bcba5b..89774011965d 100644 --- a/scripts/markup_oops.pl +++ b/scripts/markup_oops.pl | |||
@@ -1,6 +1,7 @@ | |||
1 | #!/usr/bin/perl | 1 | #!/usr/bin/perl |
2 | 2 | ||
3 | use File::Basename; | 3 | use File::Basename; |
4 | use Math::BigInt; | ||
4 | 5 | ||
5 | # Copyright 2008, Intel Corporation | 6 | # Copyright 2008, Intel Corporation |
6 | # | 7 | # |
@@ -172,8 +173,8 @@ while (<STDIN>) { | |||
172 | parse_x86_regs($line); | 173 | parse_x86_regs($line); |
173 | } | 174 | } |
174 | 175 | ||
175 | my $decodestart = hex($target) - hex($func_offset); | 176 | my $decodestart = Math::BigInt->from_hex("0x$target") - Math::BigInt->from_hex("0x$func_offset"); |
176 | my $decodestop = hex($target) + 8192; | 177 | my $decodestop = Math::BigInt->from_hex("0x$target") + 8192; |
177 | if ($target eq "0") { | 178 | if ($target eq "0") { |
178 | print "No oops found!\n"; | 179 | print "No oops found!\n"; |
179 | print "Usage: \n"; | 180 | print "Usage: \n"; |