diff options
343 files changed, 5912 insertions, 4278 deletions
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl index 084f6ad7b7a0..0b1a3f97f285 100644 --- a/Documentation/DocBook/kernel-locking.tmpl +++ b/Documentation/DocBook/kernel-locking.tmpl | |||
@@ -1922,9 +1922,12 @@ machines due to caching. | |||
1922 | <function>mutex_lock()</function> | 1922 | <function>mutex_lock()</function> |
1923 | </para> | 1923 | </para> |
1924 | <para> | 1924 | <para> |
1925 | There is a <function>mutex_trylock()</function> which can be | 1925 | There is a <function>mutex_trylock()</function> which does not |
1926 | used inside interrupt context, as it will not sleep. | 1926 | sleep. Still, it must not be used inside interrupt context since |
1927 | its implementation is not safe for that. | ||
1927 | <function>mutex_unlock()</function> will also never sleep. | 1928 | <function>mutex_unlock()</function> will also never sleep. |
1929 | It cannot be used in interrupt context either since a mutex | ||
1930 | must be released by the same task that acquired it. | ||
1928 | </para> | 1931 | </para> |
1929 | </listitem> | 1932 | </listitem> |
1930 | </itemizedlist> | 1933 | </itemizedlist> |
diff --git a/Documentation/hwmon/f71882fg b/Documentation/hwmon/f71882fg index 1a07fd674cd0..a7952c2bd959 100644 --- a/Documentation/hwmon/f71882fg +++ b/Documentation/hwmon/f71882fg | |||
@@ -2,10 +2,6 @@ Kernel driver f71882fg | |||
2 | ====================== | 2 | ====================== |
3 | 3 | ||
4 | Supported chips: | 4 | Supported chips: |
5 | * Fintek F71808E | ||
6 | Prefix: 'f71808fg' | ||
7 | Addresses scanned: none, address read from Super I/O config space | ||
8 | Datasheet: Not public | ||
9 | * Fintek F71858FG | 5 | * Fintek F71858FG |
10 | Prefix: 'f71858fg' | 6 | Prefix: 'f71858fg' |
11 | Addresses scanned: none, address read from Super I/O config space | 7 | Addresses scanned: none, address read from Super I/O config space |
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt index f6f80257addb..1565eefd6fd5 100644 --- a/Documentation/laptops/thinkpad-acpi.txt +++ b/Documentation/laptops/thinkpad-acpi.txt | |||
@@ -1024,6 +1024,10 @@ ThinkPad-specific interface. The driver will disable its native | |||
1024 | backlight brightness control interface if it detects that the standard | 1024 | backlight brightness control interface if it detects that the standard |
1025 | ACPI interface is available in the ThinkPad. | 1025 | ACPI interface is available in the ThinkPad. |
1026 | 1026 | ||
1027 | If you want to use the thinkpad-acpi backlight brightness control | ||
1028 | instead of the generic ACPI video backlight brightness control for some | ||
1029 | reason, you should use the acpi_backlight=vendor kernel parameter. | ||
1030 | |||
1027 | The brightness_enable module parameter can be used to control whether | 1031 | The brightness_enable module parameter can be used to control whether |
1028 | the LCD brightness control feature will be enabled when available. | 1032 | the LCD brightness control feature will be enabled when available. |
1029 | brightness_enable=0 forces it to be disabled. brightness_enable=1 | 1033 | brightness_enable=0 forces it to be disabled. brightness_enable=1 |
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt index 568fa08e82e5..302db5da49b3 100644 --- a/Documentation/powerpc/booting-without-of.txt +++ b/Documentation/powerpc/booting-without-of.txt | |||
@@ -49,40 +49,13 @@ Table of Contents | |||
49 | f) MDIO on GPIOs | 49 | f) MDIO on GPIOs |
50 | g) SPI busses | 50 | g) SPI busses |
51 | 51 | ||
52 | VII - Marvell Discovery mv64[345]6x System Controller chips | 52 | VII - Specifying interrupt information for devices |
53 | 1) The /system-controller node | ||
54 | 2) Child nodes of /system-controller | ||
55 | a) Marvell Discovery MDIO bus | ||
56 | b) Marvell Discovery ethernet controller | ||
57 | c) Marvell Discovery PHY nodes | ||
58 | d) Marvell Discovery SDMA nodes | ||
59 | e) Marvell Discovery BRG nodes | ||
60 | f) Marvell Discovery CUNIT nodes | ||
61 | g) Marvell Discovery MPSCROUTING nodes | ||
62 | h) Marvell Discovery MPSCINTR nodes | ||
63 | i) Marvell Discovery MPSC nodes | ||
64 | j) Marvell Discovery Watch Dog Timer nodes | ||
65 | k) Marvell Discovery I2C nodes | ||
66 | l) Marvell Discovery PIC (Programmable Interrupt Controller) nodes | ||
67 | m) Marvell Discovery MPP (Multipurpose Pins) multiplexing nodes | ||
68 | n) Marvell Discovery GPP (General Purpose Pins) nodes | ||
69 | o) Marvell Discovery PCI host bridge node | ||
70 | p) Marvell Discovery CPU Error nodes | ||
71 | q) Marvell Discovery SRAM Controller nodes | ||
72 | r) Marvell Discovery PCI Error Handler nodes | ||
73 | s) Marvell Discovery Memory Controller nodes | ||
74 | |||
75 | VIII - Specifying interrupt information for devices | ||
76 | 1) interrupts property | 53 | 1) interrupts property |
77 | 2) interrupt-parent property | 54 | 2) interrupt-parent property |
78 | 3) OpenPIC Interrupt Controllers | 55 | 3) OpenPIC Interrupt Controllers |
79 | 4) ISA Interrupt Controllers | 56 | 4) ISA Interrupt Controllers |
80 | 57 | ||
81 | IX - Specifying GPIO information for devices | 58 | VIII - Specifying device power management information (sleep property) |
82 | 1) gpios property | ||
83 | 2) gpio-controller nodes | ||
84 | |||
85 | X - Specifying device power management information (sleep property) | ||
86 | 59 | ||
87 | Appendix A - Sample SOC node for MPC8540 | 60 | Appendix A - Sample SOC node for MPC8540 |
88 | 61 | ||
diff --git a/Documentation/powerpc/hvcs.txt b/Documentation/powerpc/hvcs.txt index f93462c5db25..6d8be3468d7d 100644 --- a/Documentation/powerpc/hvcs.txt +++ b/Documentation/powerpc/hvcs.txt | |||
@@ -560,7 +560,7 @@ The proper channel for reporting bugs is either through the Linux OS | |||
560 | distribution company that provided your OS or by posting issues to the | 560 | distribution company that provided your OS or by posting issues to the |
561 | PowerPC development mailing list at: | 561 | PowerPC development mailing list at: |
562 | 562 | ||
563 | linuxppc-dev@ozlabs.org | 563 | linuxppc-dev@lists.ozlabs.org |
564 | 564 | ||
565 | This request is to provide a documented and searchable public exchange | 565 | This request is to provide a documented and searchable public exchange |
566 | of the problems and solutions surrounding this driver for the benefit of | 566 | of the problems and solutions surrounding this driver for the benefit of |
diff --git a/MAINTAINERS b/MAINTAINERS index b5b8baa1d70e..a1df54b0af79 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -454,9 +454,20 @@ L: linux-rdma@vger.kernel.org | |||
454 | S: Maintained | 454 | S: Maintained |
455 | F: drivers/infiniband/hw/amso1100/ | 455 | F: drivers/infiniband/hw/amso1100/ |
456 | 456 | ||
457 | ANALOG DEVICES INC ASOC DRIVERS | ||
458 | L: uclinux-dist-devel@blackfin.uclinux.org | ||
459 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | ||
460 | W: http://blackfin.uclinux.org/ | ||
461 | S: Supported | ||
462 | F: sound/soc/blackfin/* | ||
463 | F: sound/soc/codecs/ad1* | ||
464 | F: sound/soc/codecs/adau* | ||
465 | F: sound/soc/codecs/adav* | ||
466 | F: sound/soc/codecs/ssm* | ||
467 | |||
457 | AOA (Apple Onboard Audio) ALSA DRIVER | 468 | AOA (Apple Onboard Audio) ALSA DRIVER |
458 | M: Johannes Berg <johannes@sipsolutions.net> | 469 | M: Johannes Berg <johannes@sipsolutions.net> |
459 | L: linuxppc-dev@ozlabs.org | 470 | L: linuxppc-dev@lists.ozlabs.org |
460 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | 471 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) |
461 | S: Maintained | 472 | S: Maintained |
462 | F: sound/aoa/ | 473 | F: sound/aoa/ |
@@ -1472,8 +1483,8 @@ F: include/linux/can/platform/ | |||
1472 | 1483 | ||
1473 | CELL BROADBAND ENGINE ARCHITECTURE | 1484 | CELL BROADBAND ENGINE ARCHITECTURE |
1474 | M: Arnd Bergmann <arnd@arndb.de> | 1485 | M: Arnd Bergmann <arnd@arndb.de> |
1475 | L: linuxppc-dev@ozlabs.org | 1486 | L: linuxppc-dev@lists.ozlabs.org |
1476 | L: cbe-oss-dev@ozlabs.org | 1487 | L: cbe-oss-dev@lists.ozlabs.org |
1477 | W: http://www.ibm.com/developerworks/power/cell/ | 1488 | W: http://www.ibm.com/developerworks/power/cell/ |
1478 | S: Supported | 1489 | S: Supported |
1479 | F: arch/powerpc/include/asm/cell*.h | 1490 | F: arch/powerpc/include/asm/cell*.h |
@@ -2371,13 +2382,13 @@ F: include/linux/fb.h | |||
2371 | FREESCALE DMA DRIVER | 2382 | FREESCALE DMA DRIVER |
2372 | M: Li Yang <leoli@freescale.com> | 2383 | M: Li Yang <leoli@freescale.com> |
2373 | M: Zhang Wei <zw@zh-kernel.org> | 2384 | M: Zhang Wei <zw@zh-kernel.org> |
2374 | L: linuxppc-dev@ozlabs.org | 2385 | L: linuxppc-dev@lists.ozlabs.org |
2375 | S: Maintained | 2386 | S: Maintained |
2376 | F: drivers/dma/fsldma.* | 2387 | F: drivers/dma/fsldma.* |
2377 | 2388 | ||
2378 | FREESCALE I2C CPM DRIVER | 2389 | FREESCALE I2C CPM DRIVER |
2379 | M: Jochen Friedrich <jochen@scram.de> | 2390 | M: Jochen Friedrich <jochen@scram.de> |
2380 | L: linuxppc-dev@ozlabs.org | 2391 | L: linuxppc-dev@lists.ozlabs.org |
2381 | L: linux-i2c@vger.kernel.org | 2392 | L: linux-i2c@vger.kernel.org |
2382 | S: Maintained | 2393 | S: Maintained |
2383 | F: drivers/i2c/busses/i2c-cpm.c | 2394 | F: drivers/i2c/busses/i2c-cpm.c |
@@ -2393,7 +2404,7 @@ F: drivers/video/imxfb.c | |||
2393 | FREESCALE SOC FS_ENET DRIVER | 2404 | FREESCALE SOC FS_ENET DRIVER |
2394 | M: Pantelis Antoniou <pantelis.antoniou@gmail.com> | 2405 | M: Pantelis Antoniou <pantelis.antoniou@gmail.com> |
2395 | M: Vitaly Bordug <vbordug@ru.mvista.com> | 2406 | M: Vitaly Bordug <vbordug@ru.mvista.com> |
2396 | L: linuxppc-dev@ozlabs.org | 2407 | L: linuxppc-dev@lists.ozlabs.org |
2397 | L: netdev@vger.kernel.org | 2408 | L: netdev@vger.kernel.org |
2398 | S: Maintained | 2409 | S: Maintained |
2399 | F: drivers/net/fs_enet/ | 2410 | F: drivers/net/fs_enet/ |
@@ -2401,7 +2412,7 @@ F: include/linux/fs_enet_pd.h | |||
2401 | 2412 | ||
2402 | FREESCALE QUICC ENGINE LIBRARY | 2413 | FREESCALE QUICC ENGINE LIBRARY |
2403 | M: Timur Tabi <timur@freescale.com> | 2414 | M: Timur Tabi <timur@freescale.com> |
2404 | L: linuxppc-dev@ozlabs.org | 2415 | L: linuxppc-dev@lists.ozlabs.org |
2405 | S: Supported | 2416 | S: Supported |
2406 | F: arch/powerpc/sysdev/qe_lib/ | 2417 | F: arch/powerpc/sysdev/qe_lib/ |
2407 | F: arch/powerpc/include/asm/*qe.h | 2418 | F: arch/powerpc/include/asm/*qe.h |
@@ -2409,27 +2420,27 @@ F: arch/powerpc/include/asm/*qe.h | |||
2409 | FREESCALE USB PERIPHERAL DRIVERS | 2420 | FREESCALE USB PERIPHERAL DRIVERS |
2410 | M: Li Yang <leoli@freescale.com> | 2421 | M: Li Yang <leoli@freescale.com> |
2411 | L: linux-usb@vger.kernel.org | 2422 | L: linux-usb@vger.kernel.org |
2412 | L: linuxppc-dev@ozlabs.org | 2423 | L: linuxppc-dev@lists.ozlabs.org |
2413 | S: Maintained | 2424 | S: Maintained |
2414 | F: drivers/usb/gadget/fsl* | 2425 | F: drivers/usb/gadget/fsl* |
2415 | 2426 | ||
2416 | FREESCALE QUICC ENGINE UCC ETHERNET DRIVER | 2427 | FREESCALE QUICC ENGINE UCC ETHERNET DRIVER |
2417 | M: Li Yang <leoli@freescale.com> | 2428 | M: Li Yang <leoli@freescale.com> |
2418 | L: netdev@vger.kernel.org | 2429 | L: netdev@vger.kernel.org |
2419 | L: linuxppc-dev@ozlabs.org | 2430 | L: linuxppc-dev@lists.ozlabs.org |
2420 | S: Maintained | 2431 | S: Maintained |
2421 | F: drivers/net/ucc_geth* | 2432 | F: drivers/net/ucc_geth* |
2422 | 2433 | ||
2423 | FREESCALE QUICC ENGINE UCC UART DRIVER | 2434 | FREESCALE QUICC ENGINE UCC UART DRIVER |
2424 | M: Timur Tabi <timur@freescale.com> | 2435 | M: Timur Tabi <timur@freescale.com> |
2425 | L: linuxppc-dev@ozlabs.org | 2436 | L: linuxppc-dev@lists.ozlabs.org |
2426 | S: Supported | 2437 | S: Supported |
2427 | F: drivers/serial/ucc_uart.c | 2438 | F: drivers/serial/ucc_uart.c |
2428 | 2439 | ||
2429 | FREESCALE SOC SOUND DRIVERS | 2440 | FREESCALE SOC SOUND DRIVERS |
2430 | M: Timur Tabi <timur@freescale.com> | 2441 | M: Timur Tabi <timur@freescale.com> |
2431 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | 2442 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) |
2432 | L: linuxppc-dev@ozlabs.org | 2443 | L: linuxppc-dev@lists.ozlabs.org |
2433 | S: Supported | 2444 | S: Supported |
2434 | F: sound/soc/fsl/fsl* | 2445 | F: sound/soc/fsl/fsl* |
2435 | F: sound/soc/fsl/mpc8610_hpcd.c | 2446 | F: sound/soc/fsl/mpc8610_hpcd.c |
@@ -2564,7 +2575,7 @@ F: mm/memory-failure.c | |||
2564 | F: mm/hwpoison-inject.c | 2575 | F: mm/hwpoison-inject.c |
2565 | 2576 | ||
2566 | HYPERVISOR VIRTUAL CONSOLE DRIVER | 2577 | HYPERVISOR VIRTUAL CONSOLE DRIVER |
2567 | L: linuxppc-dev@ozlabs.org | 2578 | L: linuxppc-dev@lists.ozlabs.org |
2568 | S: Odd Fixes | 2579 | S: Odd Fixes |
2569 | F: drivers/char/hvc_* | 2580 | F: drivers/char/hvc_* |
2570 | 2581 | ||
@@ -3476,7 +3487,7 @@ F: drivers/usb/misc/legousbtower.c | |||
3476 | 3487 | ||
3477 | LGUEST | 3488 | LGUEST |
3478 | M: Rusty Russell <rusty@rustcorp.com.au> | 3489 | M: Rusty Russell <rusty@rustcorp.com.au> |
3479 | L: lguest@ozlabs.org | 3490 | L: lguest@lists.ozlabs.org |
3480 | W: http://lguest.ozlabs.org/ | 3491 | W: http://lguest.ozlabs.org/ |
3481 | S: Maintained | 3492 | S: Maintained |
3482 | F: Documentation/lguest/ | 3493 | F: Documentation/lguest/ |
@@ -3495,7 +3506,7 @@ LINUX FOR POWERPC (32-BIT AND 64-BIT) | |||
3495 | M: Benjamin Herrenschmidt <benh@kernel.crashing.org> | 3506 | M: Benjamin Herrenschmidt <benh@kernel.crashing.org> |
3496 | M: Paul Mackerras <paulus@samba.org> | 3507 | M: Paul Mackerras <paulus@samba.org> |
3497 | W: http://www.penguinppc.org/ | 3508 | W: http://www.penguinppc.org/ |
3498 | L: linuxppc-dev@ozlabs.org | 3509 | L: linuxppc-dev@lists.ozlabs.org |
3499 | Q: http://patchwork.ozlabs.org/project/linuxppc-dev/list/ | 3510 | Q: http://patchwork.ozlabs.org/project/linuxppc-dev/list/ |
3500 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc.git | 3511 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc.git |
3501 | S: Supported | 3512 | S: Supported |
@@ -3505,14 +3516,14 @@ F: arch/powerpc/ | |||
3505 | LINUX FOR POWER MACINTOSH | 3516 | LINUX FOR POWER MACINTOSH |
3506 | M: Benjamin Herrenschmidt <benh@kernel.crashing.org> | 3517 | M: Benjamin Herrenschmidt <benh@kernel.crashing.org> |
3507 | W: http://www.penguinppc.org/ | 3518 | W: http://www.penguinppc.org/ |
3508 | L: linuxppc-dev@ozlabs.org | 3519 | L: linuxppc-dev@lists.ozlabs.org |
3509 | S: Maintained | 3520 | S: Maintained |
3510 | F: arch/powerpc/platforms/powermac/ | 3521 | F: arch/powerpc/platforms/powermac/ |
3511 | F: drivers/macintosh/ | 3522 | F: drivers/macintosh/ |
3512 | 3523 | ||
3513 | LINUX FOR POWERPC EMBEDDED MPC5XXX | 3524 | LINUX FOR POWERPC EMBEDDED MPC5XXX |
3514 | M: Grant Likely <grant.likely@secretlab.ca> | 3525 | M: Grant Likely <grant.likely@secretlab.ca> |
3515 | L: linuxppc-dev@ozlabs.org | 3526 | L: linuxppc-dev@lists.ozlabs.org |
3516 | T: git git://git.secretlab.ca/git/linux-2.6.git | 3527 | T: git git://git.secretlab.ca/git/linux-2.6.git |
3517 | S: Maintained | 3528 | S: Maintained |
3518 | F: arch/powerpc/platforms/512x/ | 3529 | F: arch/powerpc/platforms/512x/ |
@@ -3522,7 +3533,7 @@ LINUX FOR POWERPC EMBEDDED PPC4XX | |||
3522 | M: Josh Boyer <jwboyer@linux.vnet.ibm.com> | 3533 | M: Josh Boyer <jwboyer@linux.vnet.ibm.com> |
3523 | M: Matt Porter <mporter@kernel.crashing.org> | 3534 | M: Matt Porter <mporter@kernel.crashing.org> |
3524 | W: http://www.penguinppc.org/ | 3535 | W: http://www.penguinppc.org/ |
3525 | L: linuxppc-dev@ozlabs.org | 3536 | L: linuxppc-dev@lists.ozlabs.org |
3526 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jwboyer/powerpc-4xx.git | 3537 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jwboyer/powerpc-4xx.git |
3527 | S: Maintained | 3538 | S: Maintained |
3528 | F: arch/powerpc/platforms/40x/ | 3539 | F: arch/powerpc/platforms/40x/ |
@@ -3531,7 +3542,7 @@ F: arch/powerpc/platforms/44x/ | |||
3531 | LINUX FOR POWERPC EMBEDDED XILINX VIRTEX | 3542 | LINUX FOR POWERPC EMBEDDED XILINX VIRTEX |
3532 | M: Grant Likely <grant.likely@secretlab.ca> | 3543 | M: Grant Likely <grant.likely@secretlab.ca> |
3533 | W: http://wiki.secretlab.ca/index.php/Linux_on_Xilinx_Virtex | 3544 | W: http://wiki.secretlab.ca/index.php/Linux_on_Xilinx_Virtex |
3534 | L: linuxppc-dev@ozlabs.org | 3545 | L: linuxppc-dev@lists.ozlabs.org |
3535 | T: git git://git.secretlab.ca/git/linux-2.6.git | 3546 | T: git git://git.secretlab.ca/git/linux-2.6.git |
3536 | S: Maintained | 3547 | S: Maintained |
3537 | F: arch/powerpc/*/*virtex* | 3548 | F: arch/powerpc/*/*virtex* |
@@ -3541,20 +3552,20 @@ LINUX FOR POWERPC EMBEDDED PPC8XX | |||
3541 | M: Vitaly Bordug <vitb@kernel.crashing.org> | 3552 | M: Vitaly Bordug <vitb@kernel.crashing.org> |
3542 | M: Marcelo Tosatti <marcelo@kvack.org> | 3553 | M: Marcelo Tosatti <marcelo@kvack.org> |
3543 | W: http://www.penguinppc.org/ | 3554 | W: http://www.penguinppc.org/ |
3544 | L: linuxppc-dev@ozlabs.org | 3555 | L: linuxppc-dev@lists.ozlabs.org |
3545 | S: Maintained | 3556 | S: Maintained |
3546 | F: arch/powerpc/platforms/8xx/ | 3557 | F: arch/powerpc/platforms/8xx/ |
3547 | 3558 | ||
3548 | LINUX FOR POWERPC EMBEDDED PPC83XX AND PPC85XX | 3559 | LINUX FOR POWERPC EMBEDDED PPC83XX AND PPC85XX |
3549 | M: Kumar Gala <galak@kernel.crashing.org> | 3560 | M: Kumar Gala <galak@kernel.crashing.org> |
3550 | W: http://www.penguinppc.org/ | 3561 | W: http://www.penguinppc.org/ |
3551 | L: linuxppc-dev@ozlabs.org | 3562 | L: linuxppc-dev@lists.ozlabs.org |
3552 | S: Maintained | 3563 | S: Maintained |
3553 | F: arch/powerpc/platforms/83xx/ | 3564 | F: arch/powerpc/platforms/83xx/ |
3554 | 3565 | ||
3555 | LINUX FOR POWERPC PA SEMI PWRFICIENT | 3566 | LINUX FOR POWERPC PA SEMI PWRFICIENT |
3556 | M: Olof Johansson <olof@lixom.net> | 3567 | M: Olof Johansson <olof@lixom.net> |
3557 | L: linuxppc-dev@ozlabs.org | 3568 | L: linuxppc-dev@lists.ozlabs.org |
3558 | S: Maintained | 3569 | S: Maintained |
3559 | F: arch/powerpc/platforms/pasemi/ | 3570 | F: arch/powerpc/platforms/pasemi/ |
3560 | F: drivers/*/*pasemi* | 3571 | F: drivers/*/*pasemi* |
@@ -4601,14 +4612,14 @@ F: drivers/ata/sata_promise.* | |||
4601 | PS3 NETWORK SUPPORT | 4612 | PS3 NETWORK SUPPORT |
4602 | M: Geoff Levand <geoff@infradead.org> | 4613 | M: Geoff Levand <geoff@infradead.org> |
4603 | L: netdev@vger.kernel.org | 4614 | L: netdev@vger.kernel.org |
4604 | L: cbe-oss-dev@ozlabs.org | 4615 | L: cbe-oss-dev@lists.ozlabs.org |
4605 | S: Maintained | 4616 | S: Maintained |
4606 | F: drivers/net/ps3_gelic_net.* | 4617 | F: drivers/net/ps3_gelic_net.* |
4607 | 4618 | ||
4608 | PS3 PLATFORM SUPPORT | 4619 | PS3 PLATFORM SUPPORT |
4609 | M: Geoff Levand <geoff@infradead.org> | 4620 | M: Geoff Levand <geoff@infradead.org> |
4610 | L: linuxppc-dev@ozlabs.org | 4621 | L: linuxppc-dev@lists.ozlabs.org |
4611 | L: cbe-oss-dev@ozlabs.org | 4622 | L: cbe-oss-dev@lists.ozlabs.org |
4612 | S: Maintained | 4623 | S: Maintained |
4613 | F: arch/powerpc/boot/ps3* | 4624 | F: arch/powerpc/boot/ps3* |
4614 | F: arch/powerpc/include/asm/lv1call.h | 4625 | F: arch/powerpc/include/asm/lv1call.h |
@@ -4622,7 +4633,7 @@ F: sound/ppc/snd_ps3* | |||
4622 | 4633 | ||
4623 | PS3VRAM DRIVER | 4634 | PS3VRAM DRIVER |
4624 | M: Jim Paris <jim@jtan.com> | 4635 | M: Jim Paris <jim@jtan.com> |
4625 | L: cbe-oss-dev@ozlabs.org | 4636 | L: cbe-oss-dev@lists.ozlabs.org |
4626 | S: Maintained | 4637 | S: Maintained |
4627 | F: drivers/block/ps3vram.c | 4638 | F: drivers/block/ps3vram.c |
4628 | 4639 | ||
@@ -5068,7 +5079,7 @@ F: drivers/mmc/host/sdhci.* | |||
5068 | 5079 | ||
5069 | SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF) | 5080 | SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF) |
5070 | M: Anton Vorontsov <avorontsov@ru.mvista.com> | 5081 | M: Anton Vorontsov <avorontsov@ru.mvista.com> |
5071 | L: linuxppc-dev@ozlabs.org | 5082 | L: linuxppc-dev@lists.ozlabs.org |
5072 | L: linux-mmc@vger.kernel.org | 5083 | L: linux-mmc@vger.kernel.org |
5073 | S: Maintained | 5084 | S: Maintained |
5074 | F: drivers/mmc/host/sdhci-of.* | 5085 | F: drivers/mmc/host/sdhci-of.* |
@@ -5485,8 +5496,8 @@ F: drivers/net/spider_net* | |||
5485 | 5496 | ||
5486 | SPU FILE SYSTEM | 5497 | SPU FILE SYSTEM |
5487 | M: Jeremy Kerr <jk@ozlabs.org> | 5498 | M: Jeremy Kerr <jk@ozlabs.org> |
5488 | L: linuxppc-dev@ozlabs.org | 5499 | L: linuxppc-dev@lists.ozlabs.org |
5489 | L: cbe-oss-dev@ozlabs.org | 5500 | L: cbe-oss-dev@lists.ozlabs.org |
5490 | W: http://www.ibm.com/developerworks/power/cell/ | 5501 | W: http://www.ibm.com/developerworks/power/cell/ |
5491 | S: Supported | 5502 | S: Supported |
5492 | F: Documentation/filesystems/spufs.txt | 5503 | F: Documentation/filesystems/spufs.txt |
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 2 | 1 | VERSION = 2 |
2 | PATCHLEVEL = 6 | 2 | PATCHLEVEL = 6 |
3 | SUBLEVEL = 36 | 3 | SUBLEVEL = 36 |
4 | EXTRAVERSION = -rc1 | 4 | EXTRAVERSION = -rc2 |
5 | NAME = Sheep on Meth | 5 | NAME = Sheep on Meth |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
@@ -1408,8 +1408,8 @@ checkstack: | |||
1408 | $(OBJDUMP) -d vmlinux $$(find . -name '*.ko') | \ | 1408 | $(OBJDUMP) -d vmlinux $$(find . -name '*.ko') | \ |
1409 | $(PERL) $(src)/scripts/checkstack.pl $(CHECKSTACK_ARCH) | 1409 | $(PERL) $(src)/scripts/checkstack.pl $(CHECKSTACK_ARCH) |
1410 | 1410 | ||
1411 | kernelrelease: include/config/kernel.release | 1411 | kernelrelease: |
1412 | @echo $(KERNELRELEASE) | 1412 | @echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))" |
1413 | 1413 | ||
1414 | kernelversion: | 1414 | kernelversion: |
1415 | @echo $(KERNELVERSION) | 1415 | @echo $(KERNELVERSION) |
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 88e608aebc8c..842dba308eab 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c | |||
@@ -387,8 +387,9 @@ EXPORT_SYMBOL(dump_elf_task_fp); | |||
387 | * sys_execve() executes a new program. | 387 | * sys_execve() executes a new program. |
388 | */ | 388 | */ |
389 | asmlinkage int | 389 | asmlinkage int |
390 | do_sys_execve(const char __user *ufilename, char __user * __user *argv, | 390 | do_sys_execve(const char __user *ufilename, |
391 | char __user * __user *envp, struct pt_regs *regs) | 391 | const char __user *const __user *argv, |
392 | const char __user *const __user *envp, struct pt_regs *regs) | ||
392 | { | 393 | { |
393 | int error; | 394 | int error; |
394 | char *filename; | 395 | char *filename; |
diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 99b8200138d2..59c1ce858fc8 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile | |||
@@ -21,6 +21,9 @@ GZFLAGS :=-9 | |||
21 | # Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb: | 21 | # Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb: |
22 | KBUILD_CFLAGS +=$(call cc-option,-marm,) | 22 | KBUILD_CFLAGS +=$(call cc-option,-marm,) |
23 | 23 | ||
24 | # Never generate .eh_frame | ||
25 | KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm) | ||
26 | |||
24 | # Do not use arch/arm/defconfig - it's always outdated. | 27 | # Do not use arch/arm/defconfig - it's always outdated. |
25 | # Select a platform tht is kept up-to-date | 28 | # Select a platform tht is kept up-to-date |
26 | KBUILD_DEFCONFIG := versatile_defconfig | 29 | KBUILD_DEFCONFIG := versatile_defconfig |
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index c974be8913a7..7ce15eb15f72 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h | |||
@@ -158,15 +158,24 @@ struct pt_regs { | |||
158 | */ | 158 | */ |
159 | static inline int valid_user_regs(struct pt_regs *regs) | 159 | static inline int valid_user_regs(struct pt_regs *regs) |
160 | { | 160 | { |
161 | if (user_mode(regs) && (regs->ARM_cpsr & PSR_I_BIT) == 0) { | 161 | unsigned long mode = regs->ARM_cpsr & MODE_MASK; |
162 | regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT); | 162 | |
163 | return 1; | 163 | /* |
164 | * Always clear the F (FIQ) and A (delayed abort) bits | ||
165 | */ | ||
166 | regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT); | ||
167 | |||
168 | if ((regs->ARM_cpsr & PSR_I_BIT) == 0) { | ||
169 | if (mode == USR_MODE) | ||
170 | return 1; | ||
171 | if (elf_hwcap & HWCAP_26BIT && mode == USR26_MODE) | ||
172 | return 1; | ||
164 | } | 173 | } |
165 | 174 | ||
166 | /* | 175 | /* |
167 | * Force CPSR to something logical... | 176 | * Force CPSR to something logical... |
168 | */ | 177 | */ |
169 | regs->ARM_cpsr &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | PSR_T_BIT | MODE32_BIT; | 178 | regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT; |
170 | if (!(elf_hwcap & HWCAP_26BIT)) | 179 | if (!(elf_hwcap & HWCAP_26BIT)) |
171 | regs->ARM_cpsr |= USR_MODE; | 180 | regs->ARM_cpsr |= USR_MODE; |
172 | 181 | ||
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index dd2bf53000fe..d02cfb683487 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h | |||
@@ -392,6 +392,7 @@ | |||
392 | #define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE+363) | 392 | #define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE+363) |
393 | #define __NR_perf_event_open (__NR_SYSCALL_BASE+364) | 393 | #define __NR_perf_event_open (__NR_SYSCALL_BASE+364) |
394 | #define __NR_recvmmsg (__NR_SYSCALL_BASE+365) | 394 | #define __NR_recvmmsg (__NR_SYSCALL_BASE+365) |
395 | #define __NR_accept4 (__NR_SYSCALL_BASE+366) | ||
395 | 396 | ||
396 | /* | 397 | /* |
397 | * The following SWIs are ARM private. | 398 | * The following SWIs are ARM private. |
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index 37ae301cc47c..afeb71fa72cb 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S | |||
@@ -375,6 +375,7 @@ | |||
375 | CALL(sys_rt_tgsigqueueinfo) | 375 | CALL(sys_rt_tgsigqueueinfo) |
376 | CALL(sys_perf_event_open) | 376 | CALL(sys_perf_event_open) |
377 | /* 365 */ CALL(sys_recvmmsg) | 377 | /* 365 */ CALL(sys_recvmmsg) |
378 | CALL(sys_accept4) | ||
378 | #ifndef syscalls_counted | 379 | #ifndef syscalls_counted |
379 | .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls | 380 | .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls |
380 | #define syscalls_counted | 381 | #define syscalls_counted |
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c index 778c2f7024ff..d6e8b4d2e60d 100644 --- a/arch/arm/kernel/kgdb.c +++ b/arch/arm/kernel/kgdb.c | |||
@@ -79,7 +79,7 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) | |||
79 | return; | 79 | return; |
80 | 80 | ||
81 | /* Initialize to zero */ | 81 | /* Initialize to zero */ |
82 | for (regno = 0; regno < GDB_MAX_REGS; regno++) | 82 | for (regno = 0; regno < DBG_MAX_REG_NUM; regno++) |
83 | gdb_regs[regno] = 0; | 83 | gdb_regs[regno] = 0; |
84 | 84 | ||
85 | /* Otherwise, we have only some registers from switch_to() */ | 85 | /* Otherwise, we have only some registers from switch_to() */ |
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c index 5b7c541a4c63..62e7c61d0342 100644 --- a/arch/arm/kernel/sys_arm.c +++ b/arch/arm/kernel/sys_arm.c | |||
@@ -62,8 +62,9 @@ asmlinkage int sys_vfork(struct pt_regs *regs) | |||
62 | /* sys_execve() executes a new program. | 62 | /* sys_execve() executes a new program. |
63 | * This is called indirectly via a small wrapper | 63 | * This is called indirectly via a small wrapper |
64 | */ | 64 | */ |
65 | asmlinkage int sys_execve(const char __user *filenamei, char __user * __user *argv, | 65 | asmlinkage int sys_execve(const char __user *filenamei, |
66 | char __user * __user *envp, struct pt_regs *regs) | 66 | const char __user *const __user *argv, |
67 | const char __user *const __user *envp, struct pt_regs *regs) | ||
67 | { | 68 | { |
68 | int error; | 69 | int error; |
69 | char * filename; | 70 | char * filename; |
@@ -78,14 +79,17 @@ out: | |||
78 | return error; | 79 | return error; |
79 | } | 80 | } |
80 | 81 | ||
81 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]) | 82 | int kernel_execve(const char *filename, |
83 | const char *const argv[], | ||
84 | const char *const envp[]) | ||
82 | { | 85 | { |
83 | struct pt_regs regs; | 86 | struct pt_regs regs; |
84 | int ret; | 87 | int ret; |
85 | 88 | ||
86 | memset(®s, 0, sizeof(struct pt_regs)); | 89 | memset(®s, 0, sizeof(struct pt_regs)); |
87 | ret = do_execve(filename, (char __user * __user *)argv, | 90 | ret = do_execve(filename, |
88 | (char __user * __user *)envp, ®s); | 91 | (const char __user *const __user *)argv, |
92 | (const char __user *const __user *)envp, ®s); | ||
89 | if (ret < 0) | 93 | if (ret < 0) |
90 | goto out; | 94 | goto out; |
91 | 95 | ||
diff --git a/arch/arm/plat-samsung/dev-hsmmc.c b/arch/arm/plat-samsung/dev-hsmmc.c index b0f93f11e281..9d2be0941410 100644 --- a/arch/arm/plat-samsung/dev-hsmmc.c +++ b/arch/arm/plat-samsung/dev-hsmmc.c | |||
@@ -70,4 +70,6 @@ void s3c_sdhci0_set_platdata(struct s3c_sdhci_platdata *pd) | |||
70 | set->cfg_gpio = pd->cfg_gpio; | 70 | set->cfg_gpio = pd->cfg_gpio; |
71 | if (pd->cfg_card) | 71 | if (pd->cfg_card) |
72 | set->cfg_card = pd->cfg_card; | 72 | set->cfg_card = pd->cfg_card; |
73 | if (pd->host_caps) | ||
74 | set->host_caps = pd->host_caps; | ||
73 | } | 75 | } |
diff --git a/arch/arm/plat-samsung/dev-hsmmc1.c b/arch/arm/plat-samsung/dev-hsmmc1.c index 1504fd802865..a6c8295840af 100644 --- a/arch/arm/plat-samsung/dev-hsmmc1.c +++ b/arch/arm/plat-samsung/dev-hsmmc1.c | |||
@@ -70,4 +70,6 @@ void s3c_sdhci1_set_platdata(struct s3c_sdhci_platdata *pd) | |||
70 | set->cfg_gpio = pd->cfg_gpio; | 70 | set->cfg_gpio = pd->cfg_gpio; |
71 | if (pd->cfg_card) | 71 | if (pd->cfg_card) |
72 | set->cfg_card = pd->cfg_card; | 72 | set->cfg_card = pd->cfg_card; |
73 | if (pd->host_caps) | ||
74 | set->host_caps = pd->host_caps; | ||
73 | } | 75 | } |
diff --git a/arch/arm/plat-samsung/dev-hsmmc2.c b/arch/arm/plat-samsung/dev-hsmmc2.c index b28ef173444d..cb0d7143381a 100644 --- a/arch/arm/plat-samsung/dev-hsmmc2.c +++ b/arch/arm/plat-samsung/dev-hsmmc2.c | |||
@@ -71,4 +71,6 @@ void s3c_sdhci2_set_platdata(struct s3c_sdhci_platdata *pd) | |||
71 | set->cfg_gpio = pd->cfg_gpio; | 71 | set->cfg_gpio = pd->cfg_gpio; |
72 | if (pd->cfg_card) | 72 | if (pd->cfg_card) |
73 | set->cfg_card = pd->cfg_card; | 73 | set->cfg_card = pd->cfg_card; |
74 | if (pd->host_caps) | ||
75 | set->host_caps = pd->host_caps; | ||
74 | } | 76 | } |
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c index e5daddff397d..9c46aaad11ce 100644 --- a/arch/avr32/kernel/process.c +++ b/arch/avr32/kernel/process.c | |||
@@ -384,8 +384,9 @@ asmlinkage int sys_vfork(struct pt_regs *regs) | |||
384 | } | 384 | } |
385 | 385 | ||
386 | asmlinkage int sys_execve(const char __user *ufilename, | 386 | asmlinkage int sys_execve(const char __user *ufilename, |
387 | char __user *__user *uargv, | 387 | const char __user *const __user *uargv, |
388 | char __user *__user *uenvp, struct pt_regs *regs) | 388 | const char __user *const __user *uenvp, |
389 | struct pt_regs *regs) | ||
389 | { | 390 | { |
390 | int error; | 391 | int error; |
391 | char *filename; | 392 | char *filename; |
diff --git a/arch/avr32/kernel/sys_avr32.c b/arch/avr32/kernel/sys_avr32.c index 459349b5ed5a..62635a09ae3e 100644 --- a/arch/avr32/kernel/sys_avr32.c +++ b/arch/avr32/kernel/sys_avr32.c | |||
@@ -7,7 +7,9 @@ | |||
7 | */ | 7 | */ |
8 | #include <linux/unistd.h> | 8 | #include <linux/unistd.h> |
9 | 9 | ||
10 | int kernel_execve(const char *file, char **argv, char **envp) | 10 | int kernel_execve(const char *file, |
11 | const char *const *argv, | ||
12 | const char *const *envp) | ||
11 | { | 13 | { |
12 | register long scno asm("r8") = __NR_execve; | 14 | register long scno asm("r8") = __NR_execve; |
13 | register long sc1 asm("r12") = (long)file; | 15 | register long sc1 asm("r12") = (long)file; |
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h index d5872cd967ab..3f7ef4d97791 100644 --- a/arch/blackfin/include/asm/bitops.h +++ b/arch/blackfin/include/asm/bitops.h | |||
@@ -22,7 +22,9 @@ | |||
22 | 22 | ||
23 | #include <asm-generic/bitops/sched.h> | 23 | #include <asm-generic/bitops/sched.h> |
24 | #include <asm-generic/bitops/ffs.h> | 24 | #include <asm-generic/bitops/ffs.h> |
25 | #include <asm-generic/bitops/const_hweight.h> | ||
25 | #include <asm-generic/bitops/lock.h> | 26 | #include <asm-generic/bitops/lock.h> |
27 | |||
26 | #include <asm-generic/bitops/ext2-non-atomic.h> | 28 | #include <asm-generic/bitops/ext2-non-atomic.h> |
27 | #include <asm-generic/bitops/ext2-atomic.h> | 29 | #include <asm-generic/bitops/ext2-atomic.h> |
28 | #include <asm-generic/bitops/minix.h> | 30 | #include <asm-generic/bitops/minix.h> |
@@ -115,7 +117,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) | |||
115 | * of bits set) of a N-bit word | 117 | * of bits set) of a N-bit word |
116 | */ | 118 | */ |
117 | 119 | ||
118 | static inline unsigned int hweight32(unsigned int w) | 120 | static inline unsigned int __arch_hweight32(unsigned int w) |
119 | { | 121 | { |
120 | unsigned int res; | 122 | unsigned int res; |
121 | 123 | ||
@@ -125,19 +127,20 @@ static inline unsigned int hweight32(unsigned int w) | |||
125 | return res; | 127 | return res; |
126 | } | 128 | } |
127 | 129 | ||
128 | static inline unsigned int hweight64(__u64 w) | 130 | static inline unsigned int __arch_hweight64(__u64 w) |
129 | { | 131 | { |
130 | return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w); | 132 | return __arch_hweight32((unsigned int)(w >> 32)) + |
133 | __arch_hweight32((unsigned int)w); | ||
131 | } | 134 | } |
132 | 135 | ||
133 | static inline unsigned int hweight16(unsigned int w) | 136 | static inline unsigned int __arch_hweight16(unsigned int w) |
134 | { | 137 | { |
135 | return hweight32(w & 0xffff); | 138 | return __arch_hweight32(w & 0xffff); |
136 | } | 139 | } |
137 | 140 | ||
138 | static inline unsigned int hweight8(unsigned int w) | 141 | static inline unsigned int __arch_hweight8(unsigned int w) |
139 | { | 142 | { |
140 | return hweight32(w & 0xff); | 143 | return __arch_hweight32(w & 0xff); |
141 | } | 144 | } |
142 | 145 | ||
143 | #endif /* _BLACKFIN_BITOPS_H */ | 146 | #endif /* _BLACKFIN_BITOPS_H */ |
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h index 22886cbdae7a..14fcd254b185 100644 --- a/arch/blackfin/include/asm/unistd.h +++ b/arch/blackfin/include/asm/unistd.h | |||
@@ -389,8 +389,11 @@ | |||
389 | #define __NR_rt_tgsigqueueinfo 368 | 389 | #define __NR_rt_tgsigqueueinfo 368 |
390 | #define __NR_perf_event_open 369 | 390 | #define __NR_perf_event_open 369 |
391 | #define __NR_recvmmsg 370 | 391 | #define __NR_recvmmsg 370 |
392 | #define __NR_fanotify_init 371 | ||
393 | #define __NR_fanotify_mark 372 | ||
394 | #define __NR_prlimit64 373 | ||
392 | 395 | ||
393 | #define __NR_syscall 371 | 396 | #define __NR_syscall 374 |
394 | #define NR_syscalls __NR_syscall | 397 | #define NR_syscalls __NR_syscall |
395 | 398 | ||
396 | /* Old optional stuff no one actually uses */ | 399 | /* Old optional stuff no one actually uses */ |
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c index a566f61c002a..01f98cb964d2 100644 --- a/arch/blackfin/kernel/process.c +++ b/arch/blackfin/kernel/process.c | |||
@@ -209,7 +209,9 @@ copy_thread(unsigned long clone_flags, | |||
209 | /* | 209 | /* |
210 | * sys_execve() executes a new program. | 210 | * sys_execve() executes a new program. |
211 | */ | 211 | */ |
212 | asmlinkage int sys_execve(const char __user *name, char __user * __user *argv, char __user * __user *envp) | 212 | asmlinkage int sys_execve(const char __user *name, |
213 | const char __user *const __user *argv, | ||
214 | const char __user *const __user *envp) | ||
213 | { | 215 | { |
214 | int error; | 216 | int error; |
215 | char *filename; | 217 | char *filename; |
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S index a5847f5d67c7..af1bffa21dc1 100644 --- a/arch/blackfin/mach-common/entry.S +++ b/arch/blackfin/mach-common/entry.S | |||
@@ -1628,6 +1628,9 @@ ENTRY(_sys_call_table) | |||
1628 | .long _sys_rt_tgsigqueueinfo | 1628 | .long _sys_rt_tgsigqueueinfo |
1629 | .long _sys_perf_event_open | 1629 | .long _sys_perf_event_open |
1630 | .long _sys_recvmmsg /* 370 */ | 1630 | .long _sys_recvmmsg /* 370 */ |
1631 | .long _sys_fanotify_init | ||
1632 | .long _sys_fanotify_mark | ||
1633 | .long _sys_prlimit64 | ||
1631 | 1634 | ||
1632 | .rept NR_syscalls-(.-_sys_call_table)/4 | 1635 | .rept NR_syscalls-(.-_sys_call_table)/4 |
1633 | .long _sys_ni_syscall | 1636 | .long _sys_ni_syscall |
diff --git a/arch/cris/arch-v10/kernel/process.c b/arch/cris/arch-v10/kernel/process.c index 93f0f64b1326..9a57db6907f5 100644 --- a/arch/cris/arch-v10/kernel/process.c +++ b/arch/cris/arch-v10/kernel/process.c | |||
@@ -204,7 +204,9 @@ asmlinkage int sys_vfork(long r10, long r11, long r12, long r13, long mof, long | |||
204 | /* | 204 | /* |
205 | * sys_execve() executes a new program. | 205 | * sys_execve() executes a new program. |
206 | */ | 206 | */ |
207 | asmlinkage int sys_execve(const char *fname, char **argv, char **envp, | 207 | asmlinkage int sys_execve(const char *fname, |
208 | const char *const *argv, | ||
209 | const char *const *envp, | ||
208 | long r13, long mof, long srp, | 210 | long r13, long mof, long srp, |
209 | struct pt_regs *regs) | 211 | struct pt_regs *regs) |
210 | { | 212 | { |
diff --git a/arch/cris/arch-v32/kernel/process.c b/arch/cris/arch-v32/kernel/process.c index 2661a9529d70..562f84718906 100644 --- a/arch/cris/arch-v32/kernel/process.c +++ b/arch/cris/arch-v32/kernel/process.c | |||
@@ -218,8 +218,10 @@ sys_vfork(long r10, long r11, long r12, long r13, long mof, long srp, | |||
218 | 218 | ||
219 | /* sys_execve() executes a new program. */ | 219 | /* sys_execve() executes a new program. */ |
220 | asmlinkage int | 220 | asmlinkage int |
221 | sys_execve(const char *fname, char **argv, char **envp, long r13, long mof, long srp, | 221 | sys_execve(const char *fname, |
222 | struct pt_regs *regs) | 222 | const char *const *argv, |
223 | const char *const *envp, long r13, long mof, long srp, | ||
224 | struct pt_regs *regs) | ||
223 | { | 225 | { |
224 | int error; | 226 | int error; |
225 | char *filename; | 227 | char *filename; |
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c index 428931cf2f0c..2b63b0191f52 100644 --- a/arch/frv/kernel/process.c +++ b/arch/frv/kernel/process.c | |||
@@ -250,8 +250,9 @@ int copy_thread(unsigned long clone_flags, | |||
250 | /* | 250 | /* |
251 | * sys_execve() executes a new program. | 251 | * sys_execve() executes a new program. |
252 | */ | 252 | */ |
253 | asmlinkage int sys_execve(const char __user *name, char __user * __user *argv, | 253 | asmlinkage int sys_execve(const char __user *name, |
254 | char __user * __user *envp) | 254 | const char __user *const __user *argv, |
255 | const char __user *const __user *envp) | ||
255 | { | 256 | { |
256 | int error; | 257 | int error; |
257 | char * filename; | 258 | char * filename; |
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c index 8b7b78d77d5c..97478138e361 100644 --- a/arch/h8300/kernel/process.c +++ b/arch/h8300/kernel/process.c | |||
@@ -212,7 +212,10 @@ int copy_thread(unsigned long clone_flags, | |||
212 | /* | 212 | /* |
213 | * sys_execve() executes a new program. | 213 | * sys_execve() executes a new program. |
214 | */ | 214 | */ |
215 | asmlinkage int sys_execve(const char *name, char **argv, char **envp,int dummy,...) | 215 | asmlinkage int sys_execve(const char *name, |
216 | const char *const *argv, | ||
217 | const char *const *envp, | ||
218 | int dummy, ...) | ||
216 | { | 219 | { |
217 | int error; | 220 | int error; |
218 | char * filename; | 221 | char * filename; |
diff --git a/arch/h8300/kernel/sys_h8300.c b/arch/h8300/kernel/sys_h8300.c index f9b3f44da69f..dc1ac0243b78 100644 --- a/arch/h8300/kernel/sys_h8300.c +++ b/arch/h8300/kernel/sys_h8300.c | |||
@@ -51,7 +51,9 @@ asmlinkage void syscall_print(void *dummy,...) | |||
51 | * Do a system call from kernel instead of calling sys_execve so we | 51 | * Do a system call from kernel instead of calling sys_execve so we |
52 | * end up with proper pt_regs. | 52 | * end up with proper pt_regs. |
53 | */ | 53 | */ |
54 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]) | 54 | int kernel_execve(const char *filename, |
55 | const char *const argv[], | ||
56 | const char *const envp[]) | ||
55 | { | 57 | { |
56 | register long res __asm__("er0"); | 58 | register long res __asm__("er0"); |
57 | register char *const *_c __asm__("er3") = envp; | 59 | register char *const *_c __asm__("er3") = envp; |
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index 87f1bd1efc82..954d398a54b4 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h | |||
@@ -356,8 +356,6 @@ asmlinkage unsigned long sys_mmap2( | |||
356 | int fd, long pgoff); | 356 | int fd, long pgoff); |
357 | struct pt_regs; | 357 | struct pt_regs; |
358 | struct sigaction; | 358 | struct sigaction; |
359 | long sys_execve(const char __user *filename, char __user * __user *argv, | ||
360 | char __user * __user *envp, struct pt_regs *regs); | ||
361 | asmlinkage long sys_ia64_pipe(void); | 359 | asmlinkage long sys_ia64_pipe(void); |
362 | asmlinkage long sys_rt_sigaction(int sig, | 360 | asmlinkage long sys_rt_sigaction(int sig, |
363 | const struct sigaction __user *act, | 361 | const struct sigaction __user *act, |
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index a879c03b7f1c..16f1c7b04c69 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c | |||
@@ -633,7 +633,9 @@ dump_fpu (struct pt_regs *pt, elf_fpregset_t dst) | |||
633 | } | 633 | } |
634 | 634 | ||
635 | long | 635 | long |
636 | sys_execve (const char __user *filename, char __user * __user *argv, char __user * __user *envp, | 636 | sys_execve (const char __user *filename, |
637 | const char __user *const __user *argv, | ||
638 | const char __user *const __user *envp, | ||
637 | struct pt_regs *regs) | 639 | struct pt_regs *regs) |
638 | { | 640 | { |
639 | char *fname; | 641 | char *fname; |
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c index 8665a4d868ec..422bea9f1dbc 100644 --- a/arch/m32r/kernel/process.c +++ b/arch/m32r/kernel/process.c | |||
@@ -289,8 +289,8 @@ asmlinkage int sys_vfork(unsigned long r0, unsigned long r1, unsigned long r2, | |||
289 | * sys_execve() executes a new program. | 289 | * sys_execve() executes a new program. |
290 | */ | 290 | */ |
291 | asmlinkage int sys_execve(const char __user *ufilename, | 291 | asmlinkage int sys_execve(const char __user *ufilename, |
292 | char __user * __user *uargv, | 292 | const char __user *const __user *uargv, |
293 | char __user * __user *uenvp, | 293 | const char __user *const __user *uenvp, |
294 | unsigned long r3, unsigned long r4, unsigned long r5, | 294 | unsigned long r3, unsigned long r4, unsigned long r5, |
295 | unsigned long r6, struct pt_regs regs) | 295 | unsigned long r6, struct pt_regs regs) |
296 | { | 296 | { |
diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c index 0a00f467edfa..d841fb6cc703 100644 --- a/arch/m32r/kernel/sys_m32r.c +++ b/arch/m32r/kernel/sys_m32r.c | |||
@@ -93,7 +93,9 @@ asmlinkage int sys_cachectl(char *addr, int nbytes, int op) | |||
93 | * Do a system call from kernel instead of calling sys_execve so we | 93 | * Do a system call from kernel instead of calling sys_execve so we |
94 | * end up with proper pt_regs. | 94 | * end up with proper pt_regs. |
95 | */ | 95 | */ |
96 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]) | 96 | int kernel_execve(const char *filename, |
97 | const char *const argv[], | ||
98 | const char *const envp[]) | ||
97 | { | 99 | { |
98 | register long __scno __asm__ ("r7") = __NR_execve; | 100 | register long __scno __asm__ ("r7") = __NR_execve; |
99 | register long __arg3 __asm__ ("r2") = (long)(envp); | 101 | register long __arg3 __asm__ ("r2") = (long)(envp); |
diff --git a/arch/m68k/include/asm/ide.h b/arch/m68k/include/asm/ide.h index 3958726664ba..492fee8a1ab2 100644 --- a/arch/m68k/include/asm/ide.h +++ b/arch/m68k/include/asm/ide.h | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * linux/include/asm-m68k/ide.h | ||
3 | * | ||
4 | * Copyright (C) 1994-1996 Linus Torvalds & authors | 2 | * Copyright (C) 1994-1996 Linus Torvalds & authors |
5 | */ | 3 | */ |
6 | 4 | ||
@@ -34,6 +32,8 @@ | |||
34 | #include <asm/io.h> | 32 | #include <asm/io.h> |
35 | #include <asm/irq.h> | 33 | #include <asm/irq.h> |
36 | 34 | ||
35 | #ifdef CONFIG_MMU | ||
36 | |||
37 | /* | 37 | /* |
38 | * Get rid of defs from io.h - ide has its private and conflicting versions | 38 | * Get rid of defs from io.h - ide has its private and conflicting versions |
39 | * Since so far no single m68k platform uses ISA/PCI I/O space for IDE, we | 39 | * Since so far no single m68k platform uses ISA/PCI I/O space for IDE, we |
@@ -53,5 +53,14 @@ | |||
53 | #define __ide_mm_outsw(port, addr, n) raw_outsw((u16 *)port, addr, n) | 53 | #define __ide_mm_outsw(port, addr, n) raw_outsw((u16 *)port, addr, n) |
54 | #define __ide_mm_outsl(port, addr, n) raw_outsl((u32 *)port, addr, n) | 54 | #define __ide_mm_outsl(port, addr, n) raw_outsl((u32 *)port, addr, n) |
55 | 55 | ||
56 | #else | ||
57 | |||
58 | #define __ide_mm_insw(port, addr, n) io_insw((unsigned int)port, addr, n) | ||
59 | #define __ide_mm_insl(port, addr, n) io_insl((unsigned int)port, addr, n) | ||
60 | #define __ide_mm_outsw(port, addr, n) io_outsw((unsigned int)port, addr, n) | ||
61 | #define __ide_mm_outsl(port, addr, n) io_outsl((unsigned int)port, addr, n) | ||
62 | |||
63 | #endif /* CONFIG_MMU */ | ||
64 | |||
56 | #endif /* __KERNEL__ */ | 65 | #endif /* __KERNEL__ */ |
57 | #endif /* _M68K_IDE_H */ | 66 | #endif /* _M68K_IDE_H */ |
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index 221d0b71ce39..18732ab23292 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c | |||
@@ -315,7 +315,9 @@ EXPORT_SYMBOL(dump_fpu); | |||
315 | /* | 315 | /* |
316 | * sys_execve() executes a new program. | 316 | * sys_execve() executes a new program. |
317 | */ | 317 | */ |
318 | asmlinkage int sys_execve(const char __user *name, char __user * __user *argv, char __user * __user *envp) | 318 | asmlinkage int sys_execve(const char __user *name, |
319 | const char __user *const __user *argv, | ||
320 | const char __user *const __user *envp) | ||
319 | { | 321 | { |
320 | int error; | 322 | int error; |
321 | char * filename; | 323 | char * filename; |
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c index 77896692eb0a..2f431ece7b5f 100644 --- a/arch/m68k/kernel/sys_m68k.c +++ b/arch/m68k/kernel/sys_m68k.c | |||
@@ -459,7 +459,9 @@ asmlinkage int sys_getpagesize(void) | |||
459 | * Do a system call from kernel instead of calling sys_execve so we | 459 | * Do a system call from kernel instead of calling sys_execve so we |
460 | * end up with proper pt_regs. | 460 | * end up with proper pt_regs. |
461 | */ | 461 | */ |
462 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]) | 462 | int kernel_execve(const char *filename, |
463 | const char *const argv[], | ||
464 | const char *const envp[]) | ||
463 | { | 465 | { |
464 | register long __res asm ("%d0") = __NR_execve; | 466 | register long __res asm ("%d0") = __NR_execve; |
465 | register long __a asm ("%d1") = (long)(filename); | 467 | register long __a asm ("%d1") = (long)(filename); |
diff --git a/arch/m68knommu/kernel/process.c b/arch/m68knommu/kernel/process.c index 6350f68cd026..6d3390590e5b 100644 --- a/arch/m68knommu/kernel/process.c +++ b/arch/m68knommu/kernel/process.c | |||
@@ -316,14 +316,14 @@ void dump(struct pt_regs *fp) | |||
316 | fp->d0, fp->d1, fp->d2, fp->d3); | 316 | fp->d0, fp->d1, fp->d2, fp->d3); |
317 | printk(KERN_EMERG "d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n", | 317 | printk(KERN_EMERG "d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n", |
318 | fp->d4, fp->d5, fp->a0, fp->a1); | 318 | fp->d4, fp->d5, fp->a0, fp->a1); |
319 | printk(KERN_EMERG "\nUSP: %08x TRAPFRAME: %08x\n", | 319 | printk(KERN_EMERG "\nUSP: %08x TRAPFRAME: %p\n", |
320 | (unsigned int) rdusp(), (unsigned int) fp); | 320 | (unsigned int) rdusp(), fp); |
321 | 321 | ||
322 | printk(KERN_EMERG "\nCODE:"); | 322 | printk(KERN_EMERG "\nCODE:"); |
323 | tp = ((unsigned char *) fp->pc) - 0x20; | 323 | tp = ((unsigned char *) fp->pc) - 0x20; |
324 | for (sp = (unsigned long *) tp, i = 0; (i < 0x40); i += 4) { | 324 | for (sp = (unsigned long *) tp, i = 0; (i < 0x40); i += 4) { |
325 | if ((i % 0x10) == 0) | 325 | if ((i % 0x10) == 0) |
326 | printk(KERN_EMERG "%08x: ", (int) (tp + i)); | 326 | printk(KERN_EMERG "%p: ", tp + i); |
327 | printk("%08x ", (int) *sp++); | 327 | printk("%08x ", (int) *sp++); |
328 | } | 328 | } |
329 | printk(KERN_EMERG "\n"); | 329 | printk(KERN_EMERG "\n"); |
@@ -332,7 +332,7 @@ void dump(struct pt_regs *fp) | |||
332 | tp = ((unsigned char *) fp) - 0x40; | 332 | tp = ((unsigned char *) fp) - 0x40; |
333 | for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) { | 333 | for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) { |
334 | if ((i % 0x10) == 0) | 334 | if ((i % 0x10) == 0) |
335 | printk(KERN_EMERG "%08x: ", (int) (tp + i)); | 335 | printk(KERN_EMERG "%p: ", tp + i); |
336 | printk("%08x ", (int) *sp++); | 336 | printk("%08x ", (int) *sp++); |
337 | } | 337 | } |
338 | printk(KERN_EMERG "\n"); | 338 | printk(KERN_EMERG "\n"); |
@@ -341,7 +341,7 @@ void dump(struct pt_regs *fp) | |||
341 | tp = (unsigned char *) (rdusp() - 0x10); | 341 | tp = (unsigned char *) (rdusp() - 0x10); |
342 | for (sp = (unsigned long *) tp, i = 0; (i < 0x80); i += 4) { | 342 | for (sp = (unsigned long *) tp, i = 0; (i < 0x80); i += 4) { |
343 | if ((i % 0x10) == 0) | 343 | if ((i % 0x10) == 0) |
344 | printk(KERN_EMERG "%08x: ", (int) (tp + i)); | 344 | printk(KERN_EMERG "%p: ", tp + i); |
345 | printk("%08x ", (int) *sp++); | 345 | printk("%08x ", (int) *sp++); |
346 | } | 346 | } |
347 | printk(KERN_EMERG "\n"); | 347 | printk(KERN_EMERG "\n"); |
@@ -350,7 +350,9 @@ void dump(struct pt_regs *fp) | |||
350 | /* | 350 | /* |
351 | * sys_execve() executes a new program. | 351 | * sys_execve() executes a new program. |
352 | */ | 352 | */ |
353 | asmlinkage int sys_execve(const char *name, char **argv, char **envp) | 353 | asmlinkage int sys_execve(const char *name, |
354 | const char *const *argv, | ||
355 | const char *const *envp) | ||
354 | { | 356 | { |
355 | int error; | 357 | int error; |
356 | char * filename; | 358 | char * filename; |
diff --git a/arch/m68knommu/kernel/sys_m68k.c b/arch/m68knommu/kernel/sys_m68k.c index d65e9c4c930c..68488ae47f0a 100644 --- a/arch/m68knommu/kernel/sys_m68k.c +++ b/arch/m68knommu/kernel/sys_m68k.c | |||
@@ -44,7 +44,9 @@ asmlinkage int sys_getpagesize(void) | |||
44 | * Do a system call from kernel instead of calling sys_execve so we | 44 | * Do a system call from kernel instead of calling sys_execve so we |
45 | * end up with proper pt_regs. | 45 | * end up with proper pt_regs. |
46 | */ | 46 | */ |
47 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]) | 47 | int kernel_execve(const char *filename, |
48 | const char *const argv[], | ||
49 | const char *const envp[]) | ||
48 | { | 50 | { |
49 | register long __res asm ("%d0") = __NR_execve; | 51 | register long __res asm ("%d0") = __NR_execve; |
50 | register long __a asm ("%d1") = (long)(filename); | 52 | register long __a asm ("%d1") = (long)(filename); |
diff --git a/arch/microblaze/kernel/prom_parse.c b/arch/microblaze/kernel/prom_parse.c index d33ba17601fa..99d9b61cccb5 100644 --- a/arch/microblaze/kernel/prom_parse.c +++ b/arch/microblaze/kernel/prom_parse.c | |||
@@ -73,7 +73,7 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq) | |||
73 | /* We can only get here if we hit a P2P bridge with no node, | 73 | /* We can only get here if we hit a P2P bridge with no node, |
74 | * let's do standard swizzling and try again | 74 | * let's do standard swizzling and try again |
75 | */ | 75 | */ |
76 | lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec); | 76 | lspec = pci_swizzle_interrupt_pin(pdev, lspec); |
77 | pdev = ppdev; | 77 | pdev = ppdev; |
78 | } | 78 | } |
79 | 79 | ||
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c index 6abab6ebedbe..2250fe9d269b 100644 --- a/arch/microblaze/kernel/sys_microblaze.c +++ b/arch/microblaze/kernel/sys_microblaze.c | |||
@@ -47,8 +47,10 @@ asmlinkage long microblaze_clone(int flags, unsigned long stack, struct pt_regs | |||
47 | return do_fork(flags, stack, regs, 0, NULL, NULL); | 47 | return do_fork(flags, stack, regs, 0, NULL, NULL); |
48 | } | 48 | } |
49 | 49 | ||
50 | asmlinkage long microblaze_execve(const char __user *filenamei, char __user *__user *argv, | 50 | asmlinkage long microblaze_execve(const char __user *filenamei, |
51 | char __user *__user *envp, struct pt_regs *regs) | 51 | const char __user *const __user *argv, |
52 | const char __user *const __user *envp, | ||
53 | struct pt_regs *regs) | ||
52 | { | 54 | { |
53 | int error; | 55 | int error; |
54 | char *filename; | 56 | char *filename; |
@@ -77,7 +79,9 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len, | |||
77 | * Do a system call from kernel instead of calling sys_execve so we | 79 | * Do a system call from kernel instead of calling sys_execve so we |
78 | * end up with proper pt_regs. | 80 | * end up with proper pt_regs. |
79 | */ | 81 | */ |
80 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]) | 82 | int kernel_execve(const char *filename, |
83 | const char *const argv[], | ||
84 | const char *const envp[]) | ||
81 | { | 85 | { |
82 | register const char *__a __asm__("r5") = filename; | 86 | register const char *__a __asm__("r5") = filename; |
83 | register const void *__b __asm__("r6") = argv; | 87 | register const void *__b __asm__("r6") = argv; |
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c index 23be25fec4d6..55ef532f32be 100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c | |||
@@ -27,10 +27,11 @@ | |||
27 | #include <linux/irq.h> | 27 | #include <linux/irq.h> |
28 | #include <linux/vmalloc.h> | 28 | #include <linux/vmalloc.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/of.h> | ||
31 | #include <linux/of_address.h> | ||
30 | 32 | ||
31 | #include <asm/processor.h> | 33 | #include <asm/processor.h> |
32 | #include <asm/io.h> | 34 | #include <asm/io.h> |
33 | #include <asm/prom.h> | ||
34 | #include <asm/pci-bridge.h> | 35 | #include <asm/pci-bridge.h> |
35 | #include <asm/byteorder.h> | 36 | #include <asm/byteorder.h> |
36 | 37 | ||
@@ -1077,7 +1078,7 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus) | |||
1077 | struct dev_archdata *sd = &dev->dev.archdata; | 1078 | struct dev_archdata *sd = &dev->dev.archdata; |
1078 | 1079 | ||
1079 | /* Setup OF node pointer in archdata */ | 1080 | /* Setup OF node pointer in archdata */ |
1080 | sd->of_node = pci_device_to_OF_node(dev); | 1081 | dev->dev.of_node = pci_device_to_OF_node(dev); |
1081 | 1082 | ||
1082 | /* Fixup NUMA node as it may not be setup yet by the generic | 1083 | /* Fixup NUMA node as it may not be setup yet by the generic |
1083 | * code and is needed by the DMA init | 1084 | * code and is needed by the DMA init |
diff --git a/arch/microblaze/pci/xilinx_pci.c b/arch/microblaze/pci/xilinx_pci.c index 7869a41b0f94..0687a42a5bd4 100644 --- a/arch/microblaze/pci/xilinx_pci.c +++ b/arch/microblaze/pci/xilinx_pci.c | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #include <linux/ioport.h> | 17 | #include <linux/ioport.h> |
18 | #include <linux/of.h> | 18 | #include <linux/of.h> |
19 | #include <linux/of_address.h> | ||
19 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
20 | #include <asm/io.h> | 21 | #include <asm/io.h> |
21 | 22 | ||
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index bddce0bca195..1dc6edff45e0 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c | |||
@@ -258,8 +258,10 @@ asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs) | |||
258 | error = PTR_ERR(filename); | 258 | error = PTR_ERR(filename); |
259 | if (IS_ERR(filename)) | 259 | if (IS_ERR(filename)) |
260 | goto out; | 260 | goto out; |
261 | error = do_execve(filename, (char __user *__user *) (long)regs.regs[5], | 261 | error = do_execve(filename, |
262 | (char __user *__user *) (long)regs.regs[6], ®s); | 262 | (const char __user *const __user *) (long)regs.regs[5], |
263 | (const char __user *const __user *) (long)regs.regs[6], | ||
264 | ®s); | ||
263 | putname(filename); | 265 | putname(filename); |
264 | 266 | ||
265 | out: | 267 | out: |
@@ -436,7 +438,9 @@ asmlinkage void bad_stack(void) | |||
436 | * Do a system call from kernel instead of calling sys_execve so we | 438 | * Do a system call from kernel instead of calling sys_execve so we |
437 | * end up with proper pt_regs. | 439 | * end up with proper pt_regs. |
438 | */ | 440 | */ |
439 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]) | 441 | int kernel_execve(const char *filename, |
442 | const char *const argv[], | ||
443 | const char *const envp[]) | ||
440 | { | 444 | { |
441 | register unsigned long __a0 asm("$4") = (unsigned long) filename; | 445 | register unsigned long __a0 asm("$4") = (unsigned long) filename; |
442 | register unsigned long __a1 asm("$5") = (unsigned long) argv; | 446 | register unsigned long __a1 asm("$5") = (unsigned long) argv; |
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c index 762eb325b949..f48373e2bc1c 100644 --- a/arch/mn10300/kernel/process.c +++ b/arch/mn10300/kernel/process.c | |||
@@ -269,8 +269,8 @@ asmlinkage long sys_vfork(void) | |||
269 | } | 269 | } |
270 | 270 | ||
271 | asmlinkage long sys_execve(const char __user *name, | 271 | asmlinkage long sys_execve(const char __user *name, |
272 | char __user * __user *argv, | 272 | const char __user *const __user *argv, |
273 | char __user * __user *envp) | 273 | const char __user *const __user *envp) |
274 | { | 274 | { |
275 | char *filename; | 275 | char *filename; |
276 | int error; | 276 | int error; |
diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c index 4e34880bea03..159acb02cfd4 100644 --- a/arch/mn10300/mm/dma-alloc.c +++ b/arch/mn10300/mm/dma-alloc.c | |||
@@ -25,7 +25,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size, | |||
25 | unsigned long addr; | 25 | unsigned long addr; |
26 | void *ret; | 26 | void *ret; |
27 | 27 | ||
28 | printk("dma_alloc_coherent(%s,%zu,,%x)\n", dev_name(dev), size, gfp); | 28 | pr_debug("dma_alloc_coherent(%s,%zu,%x)\n", |
29 | dev ? dev_name(dev) : "?", size, gfp); | ||
29 | 30 | ||
30 | if (0xbe000000 - pci_sram_allocated >= size) { | 31 | if (0xbe000000 - pci_sram_allocated >= size) { |
31 | size = (size + 255) & ~255; | 32 | size = (size + 255) & ~255; |
diff --git a/arch/parisc/hpux/fs.c b/arch/parisc/hpux/fs.c index 1444875a7611..0dc8543acb4f 100644 --- a/arch/parisc/hpux/fs.c +++ b/arch/parisc/hpux/fs.c | |||
@@ -41,8 +41,10 @@ int hpux_execve(struct pt_regs *regs) | |||
41 | if (IS_ERR(filename)) | 41 | if (IS_ERR(filename)) |
42 | goto out; | 42 | goto out; |
43 | 43 | ||
44 | error = do_execve(filename, (char __user * __user *) regs->gr[25], | 44 | error = do_execve(filename, |
45 | (char __user * __user *) regs->gr[24], regs); | 45 | (const char __user *const __user *) regs->gr[25], |
46 | (const char __user *const __user *) regs->gr[24], | ||
47 | regs); | ||
46 | 48 | ||
47 | putname(filename); | 49 | putname(filename); |
48 | 50 | ||
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index 76332dadc6e9..4b4b9181a1a0 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c | |||
@@ -348,17 +348,22 @@ asmlinkage int sys_execve(struct pt_regs *regs) | |||
348 | error = PTR_ERR(filename); | 348 | error = PTR_ERR(filename); |
349 | if (IS_ERR(filename)) | 349 | if (IS_ERR(filename)) |
350 | goto out; | 350 | goto out; |
351 | error = do_execve(filename, (char __user * __user *) regs->gr[25], | 351 | error = do_execve(filename, |
352 | (char __user * __user *) regs->gr[24], regs); | 352 | (const char __user *const __user *) regs->gr[25], |
353 | (const char __user *const __user *) regs->gr[24], | ||
354 | regs); | ||
353 | putname(filename); | 355 | putname(filename); |
354 | out: | 356 | out: |
355 | 357 | ||
356 | return error; | 358 | return error; |
357 | } | 359 | } |
358 | 360 | ||
359 | extern int __execve(const char *filename, char *const argv[], | 361 | extern int __execve(const char *filename, |
360 | char *const envp[], struct task_struct *task); | 362 | const char *const argv[], |
361 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]) | 363 | const char *const envp[], struct task_struct *task); |
364 | int kernel_execve(const char *filename, | ||
365 | const char *const argv[], | ||
366 | const char *const envp[]) | ||
362 | { | 367 | { |
363 | return __execve(filename, argv, envp, current); | 368 | return __execve(filename, argv, envp, current); |
364 | } | 369 | } |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index feacfb789686..91356ffda2ca 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -1034,8 +1034,9 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2, | |||
1034 | flush_fp_to_thread(current); | 1034 | flush_fp_to_thread(current); |
1035 | flush_altivec_to_thread(current); | 1035 | flush_altivec_to_thread(current); |
1036 | flush_spe_to_thread(current); | 1036 | flush_spe_to_thread(current); |
1037 | error = do_execve(filename, (char __user * __user *) a1, | 1037 | error = do_execve(filename, |
1038 | (char __user * __user *) a2, regs); | 1038 | (const char __user *const __user *) a1, |
1039 | (const char __user *const __user *) a2, regs); | ||
1039 | putname(filename); | 1040 | putname(filename); |
1040 | out: | 1041 | out: |
1041 | return error; | 1042 | return error; |
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 7eafaf2662b9..d3a2d1c6438e 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
@@ -267,8 +267,9 @@ asmlinkage void execve_tail(void) | |||
267 | /* | 267 | /* |
268 | * sys_execve() executes a new program. | 268 | * sys_execve() executes a new program. |
269 | */ | 269 | */ |
270 | SYSCALL_DEFINE3(execve, const char __user *, name, char __user * __user *, argv, | 270 | SYSCALL_DEFINE3(execve, const char __user *, name, |
271 | char __user * __user *, envp) | 271 | const char __user *const __user *, argv, |
272 | const char __user *const __user *, envp) | ||
272 | { | 273 | { |
273 | struct pt_regs *regs = task_pt_regs(current); | 274 | struct pt_regs *regs = task_pt_regs(current); |
274 | char *filename; | 275 | char *filename; |
diff --git a/arch/score/kernel/sys_score.c b/arch/score/kernel/sys_score.c index 651096ff8db4..e478bf9a7e91 100644 --- a/arch/score/kernel/sys_score.c +++ b/arch/score/kernel/sys_score.c | |||
@@ -99,8 +99,10 @@ score_execve(struct pt_regs *regs) | |||
99 | if (IS_ERR(filename)) | 99 | if (IS_ERR(filename)) |
100 | return error; | 100 | return error; |
101 | 101 | ||
102 | error = do_execve(filename, (char __user *__user*)regs->regs[5], | 102 | error = do_execve(filename, |
103 | (char __user *__user *) regs->regs[6], regs); | 103 | (const char __user *const __user *)regs->regs[5], |
104 | (const char __user *const __user *)regs->regs[6], | ||
105 | regs); | ||
104 | 106 | ||
105 | putname(filename); | 107 | putname(filename); |
106 | return error; | 108 | return error; |
@@ -110,7 +112,9 @@ score_execve(struct pt_regs *regs) | |||
110 | * Do a system call from kernel instead of calling sys_execve so we | 112 | * Do a system call from kernel instead of calling sys_execve so we |
111 | * end up with proper pt_regs. | 113 | * end up with proper pt_regs. |
112 | */ | 114 | */ |
113 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]) | 115 | int kernel_execve(const char *filename, |
116 | const char *const argv[], | ||
117 | const char *const envp[]) | ||
114 | { | 118 | { |
115 | register unsigned long __r4 asm("r4") = (unsigned long) filename; | 119 | register unsigned long __r4 asm("r4") = (unsigned long) filename; |
116 | register unsigned long __r5 asm("r5") = (unsigned long) argv; | 120 | register unsigned long __r5 asm("r5") = (unsigned long) argv; |
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index 052981972ae6..762a13984bbd 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c | |||
@@ -296,9 +296,10 @@ asmlinkage int sys_vfork(unsigned long r4, unsigned long r5, | |||
296 | /* | 296 | /* |
297 | * sys_execve() executes a new program. | 297 | * sys_execve() executes a new program. |
298 | */ | 298 | */ |
299 | asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv, | 299 | asmlinkage int sys_execve(const char __user *ufilename, |
300 | char __user * __user *uenvp, unsigned long r7, | 300 | const char __user *const __user *uargv, |
301 | struct pt_regs __regs) | 301 | const char __user *const __user *uenvp, |
302 | unsigned long r7, struct pt_regs __regs) | ||
302 | { | 303 | { |
303 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | 304 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); |
304 | int error; | 305 | int error; |
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c index 68d128d651b3..210c1cabcb7f 100644 --- a/arch/sh/kernel/process_64.c +++ b/arch/sh/kernel/process_64.c | |||
@@ -497,8 +497,8 @@ asmlinkage int sys_execve(const char *ufilename, char **uargv, | |||
497 | goto out; | 497 | goto out; |
498 | 498 | ||
499 | error = do_execve(filename, | 499 | error = do_execve(filename, |
500 | (char __user * __user *)uargv, | 500 | (const char __user *const __user *)uargv, |
501 | (char __user * __user *)uenvp, | 501 | (const char __user *const __user *)uenvp, |
502 | pregs); | 502 | pregs); |
503 | putname(filename); | 503 | putname(filename); |
504 | out: | 504 | out: |
diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c index eb68bfdd86e6..f56b6fe5c5d0 100644 --- a/arch/sh/kernel/sys_sh32.c +++ b/arch/sh/kernel/sys_sh32.c | |||
@@ -71,7 +71,9 @@ asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1, | |||
71 | * Do a system call from kernel instead of calling sys_execve so we | 71 | * Do a system call from kernel instead of calling sys_execve so we |
72 | * end up with proper pt_regs. | 72 | * end up with proper pt_regs. |
73 | */ | 73 | */ |
74 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]) | 74 | int kernel_execve(const char *filename, |
75 | const char *const argv[], | ||
76 | const char *const envp[]) | ||
75 | { | 77 | { |
76 | register long __sc0 __asm__ ("r3") = __NR_execve; | 78 | register long __sc0 __asm__ ("r3") = __NR_execve; |
77 | register long __sc4 __asm__ ("r4") = (long) filename; | 79 | register long __sc4 __asm__ ("r4") = (long) filename; |
diff --git a/arch/sh/kernel/sys_sh64.c b/arch/sh/kernel/sys_sh64.c index 287235768bc5..c5a38c4bf410 100644 --- a/arch/sh/kernel/sys_sh64.c +++ b/arch/sh/kernel/sys_sh64.c | |||
@@ -33,7 +33,9 @@ | |||
33 | * Do a system call from kernel instead of calling sys_execve so we | 33 | * Do a system call from kernel instead of calling sys_execve so we |
34 | * end up with proper pt_regs. | 34 | * end up with proper pt_regs. |
35 | */ | 35 | */ |
36 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]) | 36 | int kernel_execve(const char *filename, |
37 | const char *const argv[], | ||
38 | const char *const envp[]) | ||
37 | { | 39 | { |
38 | register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_execve); | 40 | register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_execve); |
39 | register unsigned long __sc2 __asm__ ("r2") = (unsigned long) filename; | 41 | register unsigned long __sc2 __asm__ ("r2") = (unsigned long) filename; |
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index 2050ca02c423..f0c74227c737 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h | |||
@@ -25,9 +25,9 @@ extern void atomic_sub(int, atomic_t *); | |||
25 | extern void atomic64_sub(int, atomic64_t *); | 25 | extern void atomic64_sub(int, atomic64_t *); |
26 | 26 | ||
27 | extern int atomic_add_ret(int, atomic_t *); | 27 | extern int atomic_add_ret(int, atomic_t *); |
28 | extern int atomic64_add_ret(int, atomic64_t *); | 28 | extern long atomic64_add_ret(int, atomic64_t *); |
29 | extern int atomic_sub_ret(int, atomic_t *); | 29 | extern int atomic_sub_ret(int, atomic_t *); |
30 | extern int atomic64_sub_ret(int, atomic64_t *); | 30 | extern long atomic64_sub_ret(int, atomic64_t *); |
31 | 31 | ||
32 | #define atomic_dec_return(v) atomic_sub_ret(1, v) | 32 | #define atomic_dec_return(v) atomic_sub_ret(1, v) |
33 | #define atomic64_dec_return(v) atomic64_sub_ret(1, v) | 33 | #define atomic64_dec_return(v) atomic64_sub_ret(1, v) |
@@ -91,7 +91,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) | |||
91 | ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) | 91 | ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) |
92 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) | 92 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) |
93 | 93 | ||
94 | static inline int atomic64_add_unless(atomic64_t *v, long a, long u) | 94 | static inline long atomic64_add_unless(atomic64_t *v, long a, long u) |
95 | { | 95 | { |
96 | long c, old; | 96 | long c, old; |
97 | c = atomic64_read(v); | 97 | c = atomic64_read(v); |
diff --git a/arch/sparc/include/asm/fb.h b/arch/sparc/include/asm/fb.h index e834880be204..2173432ad7f7 100644 --- a/arch/sparc/include/asm/fb.h +++ b/arch/sparc/include/asm/fb.h | |||
@@ -1,5 +1,6 @@ | |||
1 | #ifndef _SPARC_FB_H_ | 1 | #ifndef _SPARC_FB_H_ |
2 | #define _SPARC_FB_H_ | 2 | #define _SPARC_FB_H_ |
3 | #include <linux/console.h> | ||
3 | #include <linux/fb.h> | 4 | #include <linux/fb.h> |
4 | #include <linux/fs.h> | 5 | #include <linux/fs.h> |
5 | #include <asm/page.h> | 6 | #include <asm/page.h> |
@@ -18,6 +19,9 @@ static inline int fb_is_primary_device(struct fb_info *info) | |||
18 | struct device *dev = info->device; | 19 | struct device *dev = info->device; |
19 | struct device_node *node; | 20 | struct device_node *node; |
20 | 21 | ||
22 | if (console_set_on_cmdline) | ||
23 | return 0; | ||
24 | |||
21 | node = dev->of_node; | 25 | node = dev->of_node; |
22 | if (node && | 26 | if (node && |
23 | node == of_console_device) | 27 | node == of_console_device) |
diff --git a/arch/sparc/include/asm/rwsem-const.h b/arch/sparc/include/asm/rwsem-const.h index a303c9d64d84..e4c61a18bb28 100644 --- a/arch/sparc/include/asm/rwsem-const.h +++ b/arch/sparc/include/asm/rwsem-const.h | |||
@@ -5,7 +5,7 @@ | |||
5 | #define RWSEM_UNLOCKED_VALUE 0x00000000 | 5 | #define RWSEM_UNLOCKED_VALUE 0x00000000 |
6 | #define RWSEM_ACTIVE_BIAS 0x00000001 | 6 | #define RWSEM_ACTIVE_BIAS 0x00000001 |
7 | #define RWSEM_ACTIVE_MASK 0x0000ffff | 7 | #define RWSEM_ACTIVE_MASK 0x0000ffff |
8 | #define RWSEM_WAITING_BIAS 0xffff0000 | 8 | #define RWSEM_WAITING_BIAS (-0x00010000) |
9 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS | 9 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS |
10 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | 10 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
11 | 11 | ||
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h index d0b3b01ac9d4..03eb5a8f6f93 100644 --- a/arch/sparc/include/asm/unistd.h +++ b/arch/sparc/include/asm/unistd.h | |||
@@ -397,8 +397,11 @@ | |||
397 | #define __NR_rt_tgsigqueueinfo 326 | 397 | #define __NR_rt_tgsigqueueinfo 326 |
398 | #define __NR_perf_event_open 327 | 398 | #define __NR_perf_event_open 327 |
399 | #define __NR_recvmmsg 328 | 399 | #define __NR_recvmmsg 328 |
400 | #define __NR_fanotify_init 329 | ||
401 | #define __NR_fanotify_mark 330 | ||
402 | #define __NR_prlimit64 331 | ||
400 | 403 | ||
401 | #define NR_syscalls 329 | 404 | #define NR_syscalls 332 |
402 | 405 | ||
403 | #ifdef __32bit_syscall_numbers__ | 406 | #ifdef __32bit_syscall_numbers__ |
404 | /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, | 407 | /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, |
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index 40e29fc8a4d6..17529298c50a 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c | |||
@@ -633,8 +633,10 @@ asmlinkage int sparc_execve(struct pt_regs *regs) | |||
633 | if(IS_ERR(filename)) | 633 | if(IS_ERR(filename)) |
634 | goto out; | 634 | goto out; |
635 | error = do_execve(filename, | 635 | error = do_execve(filename, |
636 | (char __user * __user *)regs->u_regs[base + UREG_I1], | 636 | (const char __user *const __user *) |
637 | (char __user * __user *)regs->u_regs[base + UREG_I2], | 637 | regs->u_regs[base + UREG_I1], |
638 | (const char __user *const __user *) | ||
639 | regs->u_regs[base + UREG_I2], | ||
638 | regs); | 640 | regs); |
639 | putname(filename); | 641 | putname(filename); |
640 | out: | 642 | out: |
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index dbe81a368b45..485f54748384 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c | |||
@@ -739,9 +739,9 @@ asmlinkage int sparc_execve(struct pt_regs *regs) | |||
739 | if (IS_ERR(filename)) | 739 | if (IS_ERR(filename)) |
740 | goto out; | 740 | goto out; |
741 | error = do_execve(filename, | 741 | error = do_execve(filename, |
742 | (char __user * __user *) | 742 | (const char __user *const __user *) |
743 | regs->u_regs[base + UREG_I1], | 743 | regs->u_regs[base + UREG_I1], |
744 | (char __user * __user *) | 744 | (const char __user *const __user *) |
745 | regs->u_regs[base + UREG_I2], regs); | 745 | regs->u_regs[base + UREG_I2], regs); |
746 | putname(filename); | 746 | putname(filename); |
747 | if (!error) { | 747 | if (!error) { |
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S index 46a76ba3fb4b..44e5faf1ad5f 100644 --- a/arch/sparc/kernel/sys32.S +++ b/arch/sparc/kernel/sys32.S | |||
@@ -330,6 +330,15 @@ do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */ | |||
330 | nop | 330 | nop |
331 | nop | 331 | nop |
332 | 332 | ||
333 | .globl sys32_fanotify_mark | ||
334 | sys32_fanotify_mark: | ||
335 | sethi %hi(sys_fanotify_mark), %g1 | ||
336 | sllx %o2, 32, %o2 | ||
337 | or %o2, %o3, %o2 | ||
338 | mov %o4, %o3 | ||
339 | jmpl %g1 + %lo(sys_fanotify_mark), %g0 | ||
340 | mov %o5, %o4 | ||
341 | |||
333 | .section __ex_table,"a" | 342 | .section __ex_table,"a" |
334 | .align 4 | 343 | .align 4 |
335 | .word 1b, __retl_efault, 2b, __retl_efault | 344 | .word 1b, __retl_efault, 2b, __retl_efault |
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c index ee995b7dae7e..50794137d710 100644 --- a/arch/sparc/kernel/sys_sparc_32.c +++ b/arch/sparc/kernel/sys_sparc_32.c | |||
@@ -282,7 +282,9 @@ out: | |||
282 | * Do a system call from kernel instead of calling sys_execve so we | 282 | * Do a system call from kernel instead of calling sys_execve so we |
283 | * end up with proper pt_regs. | 283 | * end up with proper pt_regs. |
284 | */ | 284 | */ |
285 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]) | 285 | int kernel_execve(const char *filename, |
286 | const char *const argv[], | ||
287 | const char *const envp[]) | ||
286 | { | 288 | { |
287 | long __res; | 289 | long __res; |
288 | register long __g1 __asm__ ("g1") = __NR_execve; | 290 | register long __g1 __asm__ ("g1") = __NR_execve; |
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 3d435c42e6db..f836f4e93afe 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c | |||
@@ -758,7 +758,9 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act, | |||
758 | * Do a system call from kernel instead of calling sys_execve so we | 758 | * Do a system call from kernel instead of calling sys_execve so we |
759 | * end up with proper pt_regs. | 759 | * end up with proper pt_regs. |
760 | */ | 760 | */ |
761 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]) | 761 | int kernel_execve(const char *filename, |
762 | const char *const argv[], | ||
763 | const char *const envp[]) | ||
762 | { | 764 | { |
763 | long __res; | 765 | long __res; |
764 | register long __g1 __asm__ ("g1") = __NR_execve; | 766 | register long __g1 __asm__ ("g1") = __NR_execve; |
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index 801fc8e5a0e8..ec396e1916b9 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S | |||
@@ -82,5 +82,6 @@ sys_call_table: | |||
82 | /*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate | 82 | /*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate |
83 | /*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 | 83 | /*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 |
84 | /*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv | 84 | /*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv |
85 | /*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg | 85 | /*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init |
86 | /*330*/ .long sys_fanotify_mark, sys_prlimit64 | ||
86 | 87 | ||
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index 9db058dd039e..8cfcaa549580 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S | |||
@@ -83,7 +83,8 @@ sys_call_table32: | |||
83 | /*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate | 83 | /*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate |
84 | .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 | 84 | .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 |
85 | /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv | 85 | /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv |
86 | .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg | 86 | .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init |
87 | /*330*/ .word sys32_fanotify_mark, sys_prlimit64 | ||
87 | 88 | ||
88 | #endif /* CONFIG_COMPAT */ | 89 | #endif /* CONFIG_COMPAT */ |
89 | 90 | ||
@@ -158,4 +159,5 @@ sys_call_table: | |||
158 | /*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate | 159 | /*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate |
159 | .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 | 160 | .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 |
160 | /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv | 161 | /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv |
161 | .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg | 162 | .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init |
163 | /*330*/ .word sys_fanotify_mark, sys_prlimit64 | ||
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index ed590ad0acdc..985cc28c74c5 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c | |||
@@ -543,8 +543,9 @@ long _sys_vfork(struct pt_regs *regs) | |||
543 | /* | 543 | /* |
544 | * sys_execve() executes a new program. | 544 | * sys_execve() executes a new program. |
545 | */ | 545 | */ |
546 | long _sys_execve(char __user *path, char __user *__user *argv, | 546 | long _sys_execve(const char __user *path, |
547 | char __user *__user *envp, struct pt_regs *regs) | 547 | const char __user *const __user *argv, |
548 | const char __user *const __user *envp, struct pt_regs *regs) | ||
548 | { | 549 | { |
549 | long error; | 550 | long error; |
550 | char *filename; | 551 | char *filename; |
diff --git a/arch/um/include/asm/dma-mapping.h b/arch/um/include/asm/dma-mapping.h index 17a2cb5a4178..1f469e80fdd3 100644 --- a/arch/um/include/asm/dma-mapping.h +++ b/arch/um/include/asm/dma-mapping.h | |||
@@ -95,13 +95,6 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | |||
95 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | 95 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) |
96 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | 96 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) |
97 | 97 | ||
98 | static inline int | ||
99 | dma_get_cache_alignment(void) | ||
100 | { | ||
101 | BUG(); | ||
102 | return(0); | ||
103 | } | ||
104 | |||
105 | static inline void | 98 | static inline void |
106 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 99 | dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
107 | enum dma_data_direction direction) | 100 | enum dma_data_direction direction) |
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c index 59b20d93b6d4..cd145eda3579 100644 --- a/arch/um/kernel/exec.c +++ b/arch/um/kernel/exec.c | |||
@@ -44,8 +44,9 @@ void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp) | |||
44 | PT_REGS_SP(regs) = esp; | 44 | PT_REGS_SP(regs) = esp; |
45 | } | 45 | } |
46 | 46 | ||
47 | static long execve1(const char *file, char __user * __user *argv, | 47 | static long execve1(const char *file, |
48 | char __user *__user *env) | 48 | const char __user *const __user *argv, |
49 | const char __user *const __user *env) | ||
49 | { | 50 | { |
50 | long error; | 51 | long error; |
51 | 52 | ||
diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c index 7427c0b1930c..5ddb246626db 100644 --- a/arch/um/kernel/syscall.c +++ b/arch/um/kernel/syscall.c | |||
@@ -51,7 +51,9 @@ long old_mmap(unsigned long addr, unsigned long len, | |||
51 | return err; | 51 | return err; |
52 | } | 52 | } |
53 | 53 | ||
54 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]) | 54 | int kernel_execve(const char *filename, |
55 | const char *const argv[], | ||
56 | const char *const envp[]) | ||
55 | { | 57 | { |
56 | mm_segment_t fs; | 58 | mm_segment_t fs; |
57 | int ret; | 59 | int ret; |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index a84fc34c8f77..cea0cd9a316f 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -245,6 +245,11 @@ config ARCH_HWEIGHT_CFLAGS | |||
245 | 245 | ||
246 | config KTIME_SCALAR | 246 | config KTIME_SCALAR |
247 | def_bool X86_32 | 247 | def_bool X86_32 |
248 | |||
249 | config ARCH_CPU_PROBE_RELEASE | ||
250 | def_bool y | ||
251 | depends on HOTPLUG_CPU | ||
252 | |||
248 | source "init/Kconfig" | 253 | source "init/Kconfig" |
249 | source "kernel/Kconfig.freezer" | 254 | source "kernel/Kconfig.freezer" |
250 | 255 | ||
@@ -749,11 +754,11 @@ config IOMMU_API | |||
749 | def_bool (AMD_IOMMU || DMAR) | 754 | def_bool (AMD_IOMMU || DMAR) |
750 | 755 | ||
751 | config MAXSMP | 756 | config MAXSMP |
752 | bool "Configure Maximum number of SMP Processors and NUMA Nodes" | 757 | bool "Enable Maximum number of SMP Processors and NUMA Nodes" |
753 | depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL | 758 | depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL |
754 | select CPUMASK_OFFSTACK | 759 | select CPUMASK_OFFSTACK |
755 | ---help--- | 760 | ---help--- |
756 | Configure maximum number of CPUS and NUMA Nodes for this architecture. | 761 | Enable maximum number of CPUS and NUMA Nodes for this architecture. |
757 | If unsure, say N. | 762 | If unsure, say N. |
758 | 763 | ||
759 | config NR_CPUS | 764 | config NR_CPUS |
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 2984a25ff383..f686f49e8b7b 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h | |||
@@ -26,6 +26,7 @@ struct mm_struct; | |||
26 | struct vm_area_struct; | 26 | struct vm_area_struct; |
27 | 27 | ||
28 | extern pgd_t swapper_pg_dir[1024]; | 28 | extern pgd_t swapper_pg_dir[1024]; |
29 | extern pgd_t trampoline_pg_dir[1024]; | ||
29 | 30 | ||
30 | static inline void pgtable_cache_init(void) { } | 31 | static inline void pgtable_cache_init(void) { } |
31 | static inline void check_pgt_cache(void) { } | 32 | static inline void check_pgt_cache(void) { } |
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h index feb2ff9bfc2d..f1d8b441fc77 100644 --- a/arch/x86/include/asm/syscalls.h +++ b/arch/x86/include/asm/syscalls.h | |||
@@ -23,8 +23,9 @@ long sys_iopl(unsigned int, struct pt_regs *); | |||
23 | /* kernel/process.c */ | 23 | /* kernel/process.c */ |
24 | int sys_fork(struct pt_regs *); | 24 | int sys_fork(struct pt_regs *); |
25 | int sys_vfork(struct pt_regs *); | 25 | int sys_vfork(struct pt_regs *); |
26 | long sys_execve(const char __user *, char __user * __user *, | 26 | long sys_execve(const char __user *, |
27 | char __user * __user *, struct pt_regs *); | 27 | const char __user *const __user *, |
28 | const char __user *const __user *, struct pt_regs *); | ||
28 | long sys_clone(unsigned long, unsigned long, void __user *, | 29 | long sys_clone(unsigned long, unsigned long, void __user *, |
29 | void __user *, struct pt_regs *); | 30 | void __user *, struct pt_regs *); |
30 | 31 | ||
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h index cb507bb05d79..4dde797c0578 100644 --- a/arch/x86/include/asm/trampoline.h +++ b/arch/x86/include/asm/trampoline.h | |||
@@ -13,14 +13,17 @@ extern unsigned char *trampoline_base; | |||
13 | 13 | ||
14 | extern unsigned long init_rsp; | 14 | extern unsigned long init_rsp; |
15 | extern unsigned long initial_code; | 15 | extern unsigned long initial_code; |
16 | extern unsigned long initial_page_table; | ||
16 | extern unsigned long initial_gs; | 17 | extern unsigned long initial_gs; |
17 | 18 | ||
18 | #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE) | 19 | #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE) |
19 | 20 | ||
20 | extern unsigned long setup_trampoline(void); | 21 | extern unsigned long setup_trampoline(void); |
22 | extern void __init setup_trampoline_page_table(void); | ||
21 | extern void __init reserve_trampoline_memory(void); | 23 | extern void __init reserve_trampoline_memory(void); |
22 | #else | 24 | #else |
23 | static inline void reserve_trampoline_memory(void) {}; | 25 | static inline void setup_trampoline_page_table(void) {} |
26 | static inline void reserve_trampoline_memory(void) {} | ||
24 | #endif /* CONFIG_X86_TRAMPOLINE */ | 27 | #endif /* CONFIG_X86_TRAMPOLINE */ |
25 | 28 | ||
26 | #endif /* __ASSEMBLY__ */ | 29 | #endif /* __ASSEMBLY__ */ |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 4dc0084ec1b1..f1efebaf5510 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -1728,6 +1728,8 @@ __apicdebuginit(void) print_IO_APIC(void) | |||
1728 | struct irq_pin_list *entry; | 1728 | struct irq_pin_list *entry; |
1729 | 1729 | ||
1730 | cfg = desc->chip_data; | 1730 | cfg = desc->chip_data; |
1731 | if (!cfg) | ||
1732 | continue; | ||
1731 | entry = cfg->irq_2_pin; | 1733 | entry = cfg->irq_2_pin; |
1732 | if (!entry) | 1734 | if (!entry) |
1733 | continue; | 1735 | continue; |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 60a57b13082d..ba5f62f45f01 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -669,7 +669,7 @@ bool cpu_has_amd_erratum(const int *erratum) | |||
669 | } | 669 | } |
670 | 670 | ||
671 | /* OSVW unavailable or ID unknown, match family-model-stepping range */ | 671 | /* OSVW unavailable or ID unknown, match family-model-stepping range */ |
672 | ms = (cpu->x86_model << 8) | cpu->x86_mask; | 672 | ms = (cpu->x86_model << 4) | cpu->x86_mask; |
673 | while ((range = *erratum++)) | 673 | while ((range = *erratum++)) |
674 | if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && | 674 | if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && |
675 | (ms >= AMD_MODEL_RANGE_START(range)) && | 675 | (ms >= AMD_MODEL_RANGE_START(range)) && |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 214ac860ebe0..d8d86d014008 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -491,33 +491,78 @@ static void intel_pmu_enable_all(int added) | |||
491 | * Intel Errata AAP53 (model 30) | 491 | * Intel Errata AAP53 (model 30) |
492 | * Intel Errata BD53 (model 44) | 492 | * Intel Errata BD53 (model 44) |
493 | * | 493 | * |
494 | * These chips need to be 'reset' when adding counters by programming | 494 | * The official story: |
495 | * the magic three (non counting) events 0x4300D2, 0x4300B1 and 0x4300B5 | 495 | * These chips need to be 'reset' when adding counters by programming the |
496 | * either in sequence on the same PMC or on different PMCs. | 496 | * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either |
497 | * in sequence on the same PMC or on different PMCs. | ||
498 | * | ||
499 | * In practise it appears some of these events do in fact count, and | ||
500 | * we need to programm all 4 events. | ||
497 | */ | 501 | */ |
498 | static void intel_pmu_nhm_enable_all(int added) | 502 | static void intel_pmu_nhm_workaround(void) |
499 | { | 503 | { |
500 | if (added) { | 504 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
501 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 505 | static const unsigned long nhm_magic[4] = { |
502 | int i; | 506 | 0x4300B5, |
507 | 0x4300D2, | ||
508 | 0x4300B1, | ||
509 | 0x4300B1 | ||
510 | }; | ||
511 | struct perf_event *event; | ||
512 | int i; | ||
513 | |||
514 | /* | ||
515 | * The Errata requires below steps: | ||
516 | * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL; | ||
517 | * 2) Configure 4 PERFEVTSELx with the magic events and clear | ||
518 | * the corresponding PMCx; | ||
519 | * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL; | ||
520 | * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL; | ||
521 | * 5) Clear 4 pairs of ERFEVTSELx and PMCx; | ||
522 | */ | ||
523 | |||
524 | /* | ||
525 | * The real steps we choose are a little different from above. | ||
526 | * A) To reduce MSR operations, we don't run step 1) as they | ||
527 | * are already cleared before this function is called; | ||
528 | * B) Call x86_perf_event_update to save PMCx before configuring | ||
529 | * PERFEVTSELx with magic number; | ||
530 | * C) With step 5), we do clear only when the PERFEVTSELx is | ||
531 | * not used currently. | ||
532 | * D) Call x86_perf_event_set_period to restore PMCx; | ||
533 | */ | ||
503 | 534 | ||
504 | wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 0, 0x4300D2); | 535 | /* We always operate 4 pairs of PERF Counters */ |
505 | wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x4300B1); | 536 | for (i = 0; i < 4; i++) { |
506 | wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x4300B5); | 537 | event = cpuc->events[i]; |
538 | if (event) | ||
539 | x86_perf_event_update(event); | ||
540 | } | ||
507 | 541 | ||
508 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x3); | 542 | for (i = 0; i < 4; i++) { |
509 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0); | 543 | wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]); |
544 | wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0); | ||
545 | } | ||
510 | 546 | ||
511 | for (i = 0; i < 3; i++) { | 547 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf); |
512 | struct perf_event *event = cpuc->events[i]; | 548 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0); |
513 | 549 | ||
514 | if (!event) | 550 | for (i = 0; i < 4; i++) { |
515 | continue; | 551 | event = cpuc->events[i]; |
516 | 552 | ||
553 | if (event) { | ||
554 | x86_perf_event_set_period(event); | ||
517 | __x86_pmu_enable_event(&event->hw, | 555 | __x86_pmu_enable_event(&event->hw, |
518 | ARCH_PERFMON_EVENTSEL_ENABLE); | 556 | ARCH_PERFMON_EVENTSEL_ENABLE); |
519 | } | 557 | } else |
558 | wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0); | ||
520 | } | 559 | } |
560 | } | ||
561 | |||
562 | static void intel_pmu_nhm_enable_all(int added) | ||
563 | { | ||
564 | if (added) | ||
565 | intel_pmu_nhm_workaround(); | ||
521 | intel_pmu_enable_all(added); | 566 | intel_pmu_enable_all(added); |
522 | } | 567 | } |
523 | 568 | ||
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index ff4c453e13f3..fa8c1b8e09fb 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S | |||
@@ -334,7 +334,7 @@ ENTRY(startup_32_smp) | |||
334 | /* | 334 | /* |
335 | * Enable paging | 335 | * Enable paging |
336 | */ | 336 | */ |
337 | movl $pa(swapper_pg_dir),%eax | 337 | movl pa(initial_page_table), %eax |
338 | movl %eax,%cr3 /* set the page table pointer.. */ | 338 | movl %eax,%cr3 /* set the page table pointer.. */ |
339 | movl %cr0,%eax | 339 | movl %cr0,%eax |
340 | orl $X86_CR0_PG,%eax | 340 | orl $X86_CR0_PG,%eax |
@@ -614,6 +614,8 @@ ignore_int: | |||
614 | .align 4 | 614 | .align 4 |
615 | ENTRY(initial_code) | 615 | ENTRY(initial_code) |
616 | .long i386_start_kernel | 616 | .long i386_start_kernel |
617 | ENTRY(initial_page_table) | ||
618 | .long pa(swapper_pg_dir) | ||
617 | 619 | ||
618 | /* | 620 | /* |
619 | * BSS section | 621 | * BSS section |
@@ -629,6 +631,10 @@ ENTRY(swapper_pg_dir) | |||
629 | #endif | 631 | #endif |
630 | swapper_pg_fixmap: | 632 | swapper_pg_fixmap: |
631 | .fill 1024,4,0 | 633 | .fill 1024,4,0 |
634 | #ifdef CONFIG_X86_TRAMPOLINE | ||
635 | ENTRY(trampoline_pg_dir) | ||
636 | .fill 1024,4,0 | ||
637 | #endif | ||
632 | ENTRY(empty_zero_page) | 638 | ENTRY(empty_zero_page) |
633 | .fill 4096,1,0 | 639 | .fill 4096,1,0 |
634 | 640 | ||
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index 1f11f5ce668f..a46cb3522c0c 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c | |||
@@ -40,6 +40,7 @@ | |||
40 | 40 | ||
41 | static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; | 41 | static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; |
42 | unsigned int xstate_size; | 42 | unsigned int xstate_size; |
43 | EXPORT_SYMBOL_GPL(xstate_size); | ||
43 | unsigned int sig_xstate_ia32_size = sizeof(struct _fpstate_ia32); | 44 | unsigned int sig_xstate_ia32_size = sizeof(struct _fpstate_ia32); |
44 | static struct i387_fxsave_struct fx_scratch __cpuinitdata; | 45 | static struct i387_fxsave_struct fx_scratch __cpuinitdata; |
45 | 46 | ||
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index ef10940e1af0..852b81967a37 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c | |||
@@ -194,7 +194,7 @@ static struct hw_breakpoint { | |||
194 | unsigned long addr; | 194 | unsigned long addr; |
195 | int len; | 195 | int len; |
196 | int type; | 196 | int type; |
197 | struct perf_event **pev; | 197 | struct perf_event * __percpu *pev; |
198 | } breakinfo[HBP_NUM]; | 198 | } breakinfo[HBP_NUM]; |
199 | 199 | ||
200 | static unsigned long early_dr7; | 200 | static unsigned long early_dr7; |
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 1bfb6cf4dd55..770ebfb349e9 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c | |||
@@ -709,6 +709,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) | |||
709 | struct hlist_node *node, *tmp; | 709 | struct hlist_node *node, *tmp; |
710 | unsigned long flags, orig_ret_address = 0; | 710 | unsigned long flags, orig_ret_address = 0; |
711 | unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; | 711 | unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; |
712 | kprobe_opcode_t *correct_ret_addr = NULL; | ||
712 | 713 | ||
713 | INIT_HLIST_HEAD(&empty_rp); | 714 | INIT_HLIST_HEAD(&empty_rp); |
714 | kretprobe_hash_lock(current, &head, &flags); | 715 | kretprobe_hash_lock(current, &head, &flags); |
@@ -740,14 +741,34 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) | |||
740 | /* another task is sharing our hash bucket */ | 741 | /* another task is sharing our hash bucket */ |
741 | continue; | 742 | continue; |
742 | 743 | ||
744 | orig_ret_address = (unsigned long)ri->ret_addr; | ||
745 | |||
746 | if (orig_ret_address != trampoline_address) | ||
747 | /* | ||
748 | * This is the real return address. Any other | ||
749 | * instances associated with this task are for | ||
750 | * other calls deeper on the call stack | ||
751 | */ | ||
752 | break; | ||
753 | } | ||
754 | |||
755 | kretprobe_assert(ri, orig_ret_address, trampoline_address); | ||
756 | |||
757 | correct_ret_addr = ri->ret_addr; | ||
758 | hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { | ||
759 | if (ri->task != current) | ||
760 | /* another task is sharing our hash bucket */ | ||
761 | continue; | ||
762 | |||
763 | orig_ret_address = (unsigned long)ri->ret_addr; | ||
743 | if (ri->rp && ri->rp->handler) { | 764 | if (ri->rp && ri->rp->handler) { |
744 | __get_cpu_var(current_kprobe) = &ri->rp->kp; | 765 | __get_cpu_var(current_kprobe) = &ri->rp->kp; |
745 | get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; | 766 | get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; |
767 | ri->ret_addr = correct_ret_addr; | ||
746 | ri->rp->handler(ri, regs); | 768 | ri->rp->handler(ri, regs); |
747 | __get_cpu_var(current_kprobe) = NULL; | 769 | __get_cpu_var(current_kprobe) = NULL; |
748 | } | 770 | } |
749 | 771 | ||
750 | orig_ret_address = (unsigned long)ri->ret_addr; | ||
751 | recycle_rp_inst(ri, &empty_rp); | 772 | recycle_rp_inst(ri, &empty_rp); |
752 | 773 | ||
753 | if (orig_ret_address != trampoline_address) | 774 | if (orig_ret_address != trampoline_address) |
@@ -759,8 +780,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) | |||
759 | break; | 780 | break; |
760 | } | 781 | } |
761 | 782 | ||
762 | kretprobe_assert(ri, orig_ret_address, trampoline_address); | ||
763 | |||
764 | kretprobe_hash_unlock(current, &flags); | 783 | kretprobe_hash_unlock(current, &flags); |
765 | 784 | ||
766 | hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { | 785 | hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 64ecaf0af9af..57d1868a86aa 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -301,8 +301,9 @@ EXPORT_SYMBOL(kernel_thread); | |||
301 | /* | 301 | /* |
302 | * sys_execve() executes a new program. | 302 | * sys_execve() executes a new program. |
303 | */ | 303 | */ |
304 | long sys_execve(const char __user *name, char __user * __user *argv, | 304 | long sys_execve(const char __user *name, |
305 | char __user * __user *envp, struct pt_regs *regs) | 305 | const char __user *const __user *argv, |
306 | const char __user *const __user *envp, struct pt_regs *regs) | ||
306 | { | 307 | { |
307 | long error; | 308 | long error; |
308 | char *filename; | 309 | char *filename; |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index b008e7883207..c3a4fbb2b996 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -1014,6 +1014,8 @@ void __init setup_arch(char **cmdline_p) | |||
1014 | paging_init(); | 1014 | paging_init(); |
1015 | x86_init.paging.pagetable_setup_done(swapper_pg_dir); | 1015 | x86_init.paging.pagetable_setup_done(swapper_pg_dir); |
1016 | 1016 | ||
1017 | setup_trampoline_page_table(); | ||
1018 | |||
1017 | tboot_probe(); | 1019 | tboot_probe(); |
1018 | 1020 | ||
1019 | #ifdef CONFIG_X86_64 | 1021 | #ifdef CONFIG_X86_64 |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index a5e928b0cb5f..8b3bfc4dd708 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -73,7 +73,6 @@ | |||
73 | 73 | ||
74 | #ifdef CONFIG_X86_32 | 74 | #ifdef CONFIG_X86_32 |
75 | u8 apicid_2_node[MAX_APICID]; | 75 | u8 apicid_2_node[MAX_APICID]; |
76 | static int low_mappings; | ||
77 | #endif | 76 | #endif |
78 | 77 | ||
79 | /* State of each CPU */ | 78 | /* State of each CPU */ |
@@ -91,6 +90,25 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 }; | |||
91 | static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); | 90 | static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); |
92 | #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) | 91 | #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) |
93 | #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) | 92 | #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) |
93 | |||
94 | /* | ||
95 | * We need this for trampoline_base protection from concurrent accesses when | ||
96 | * off- and onlining cores wildly. | ||
97 | */ | ||
98 | static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex); | ||
99 | |||
100 | void cpu_hotplug_driver_lock() | ||
101 | { | ||
102 | mutex_lock(&x86_cpu_hotplug_driver_mutex); | ||
103 | } | ||
104 | |||
105 | void cpu_hotplug_driver_unlock() | ||
106 | { | ||
107 | mutex_unlock(&x86_cpu_hotplug_driver_mutex); | ||
108 | } | ||
109 | |||
110 | ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; } | ||
111 | ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; } | ||
94 | #else | 112 | #else |
95 | static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; | 113 | static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; |
96 | #define get_idle_for_cpu(x) (idle_thread_array[(x)]) | 114 | #define get_idle_for_cpu(x) (idle_thread_array[(x)]) |
@@ -281,6 +299,18 @@ notrace static void __cpuinit start_secondary(void *unused) | |||
281 | * fragile that we want to limit the things done here to the | 299 | * fragile that we want to limit the things done here to the |
282 | * most necessary things. | 300 | * most necessary things. |
283 | */ | 301 | */ |
302 | |||
303 | #ifdef CONFIG_X86_32 | ||
304 | /* | ||
305 | * Switch away from the trampoline page-table | ||
306 | * | ||
307 | * Do this before cpu_init() because it needs to access per-cpu | ||
308 | * data which may not be mapped in the trampoline page-table. | ||
309 | */ | ||
310 | load_cr3(swapper_pg_dir); | ||
311 | __flush_tlb_all(); | ||
312 | #endif | ||
313 | |||
284 | vmi_bringup(); | 314 | vmi_bringup(); |
285 | cpu_init(); | 315 | cpu_init(); |
286 | preempt_disable(); | 316 | preempt_disable(); |
@@ -299,12 +329,6 @@ notrace static void __cpuinit start_secondary(void *unused) | |||
299 | legacy_pic->chip->unmask(0); | 329 | legacy_pic->chip->unmask(0); |
300 | } | 330 | } |
301 | 331 | ||
302 | #ifdef CONFIG_X86_32 | ||
303 | while (low_mappings) | ||
304 | cpu_relax(); | ||
305 | __flush_tlb_all(); | ||
306 | #endif | ||
307 | |||
308 | /* This must be done before setting cpu_online_mask */ | 332 | /* This must be done before setting cpu_online_mask */ |
309 | set_cpu_sibling_map(raw_smp_processor_id()); | 333 | set_cpu_sibling_map(raw_smp_processor_id()); |
310 | wmb(); | 334 | wmb(); |
@@ -750,6 +774,7 @@ do_rest: | |||
750 | #ifdef CONFIG_X86_32 | 774 | #ifdef CONFIG_X86_32 |
751 | /* Stack for startup_32 can be just as for start_secondary onwards */ | 775 | /* Stack for startup_32 can be just as for start_secondary onwards */ |
752 | irq_ctx_init(cpu); | 776 | irq_ctx_init(cpu); |
777 | initial_page_table = __pa(&trampoline_pg_dir); | ||
753 | #else | 778 | #else |
754 | clear_tsk_thread_flag(c_idle.idle, TIF_FORK); | 779 | clear_tsk_thread_flag(c_idle.idle, TIF_FORK); |
755 | initial_gs = per_cpu_offset(cpu); | 780 | initial_gs = per_cpu_offset(cpu); |
@@ -897,20 +922,8 @@ int __cpuinit native_cpu_up(unsigned int cpu) | |||
897 | 922 | ||
898 | per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; | 923 | per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; |
899 | 924 | ||
900 | #ifdef CONFIG_X86_32 | ||
901 | /* init low mem mapping */ | ||
902 | clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY, | ||
903 | min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY)); | ||
904 | flush_tlb_all(); | ||
905 | low_mappings = 1; | ||
906 | |||
907 | err = do_boot_cpu(apicid, cpu); | 925 | err = do_boot_cpu(apicid, cpu); |
908 | 926 | ||
909 | zap_low_mappings(false); | ||
910 | low_mappings = 0; | ||
911 | #else | ||
912 | err = do_boot_cpu(apicid, cpu); | ||
913 | #endif | ||
914 | if (err) { | 927 | if (err) { |
915 | pr_debug("do_boot_cpu failed %d\n", err); | 928 | pr_debug("do_boot_cpu failed %d\n", err); |
916 | return -EIO; | 929 | return -EIO; |
diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c index 196552bb412c..d5e06624e34a 100644 --- a/arch/x86/kernel/sys_i386_32.c +++ b/arch/x86/kernel/sys_i386_32.c | |||
@@ -28,7 +28,9 @@ | |||
28 | * Do a system call from kernel instead of calling sys_execve so we | 28 | * Do a system call from kernel instead of calling sys_execve so we |
29 | * end up with proper pt_regs. | 29 | * end up with proper pt_regs. |
30 | */ | 30 | */ |
31 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]) | 31 | int kernel_execve(const char *filename, |
32 | const char *const argv[], | ||
33 | const char *const envp[]) | ||
32 | { | 34 | { |
33 | long __res; | 35 | long __res; |
34 | asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" | 36 | asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" |
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c index c652ef62742d..a874495b3673 100644 --- a/arch/x86/kernel/trampoline.c +++ b/arch/x86/kernel/trampoline.c | |||
@@ -1,6 +1,7 @@ | |||
1 | #include <linux/io.h> | 1 | #include <linux/io.h> |
2 | 2 | ||
3 | #include <asm/trampoline.h> | 3 | #include <asm/trampoline.h> |
4 | #include <asm/pgtable.h> | ||
4 | #include <asm/e820.h> | 5 | #include <asm/e820.h> |
5 | 6 | ||
6 | #if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP) | 7 | #if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP) |
@@ -37,3 +38,20 @@ unsigned long __trampinit setup_trampoline(void) | |||
37 | memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE); | 38 | memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE); |
38 | return virt_to_phys(trampoline_base); | 39 | return virt_to_phys(trampoline_base); |
39 | } | 40 | } |
41 | |||
42 | void __init setup_trampoline_page_table(void) | ||
43 | { | ||
44 | #ifdef CONFIG_X86_32 | ||
45 | /* Copy kernel address range */ | ||
46 | clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY, | ||
47 | swapper_pg_dir + KERNEL_PGD_BOUNDARY, | ||
48 | min_t(unsigned long, KERNEL_PGD_PTRS, | ||
49 | KERNEL_PGD_BOUNDARY)); | ||
50 | |||
51 | /* Initialize low mappings */ | ||
52 | clone_pgd_range(trampoline_pg_dir, | ||
53 | swapper_pg_dir + KERNEL_PGD_BOUNDARY, | ||
54 | min_t(unsigned long, KERNEL_PGD_PTRS, | ||
55 | KERNEL_PGD_BOUNDARY)); | ||
56 | #endif | ||
57 | } | ||
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 0fd6378981f4..ddeb2314b522 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c | |||
@@ -697,6 +697,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags) | |||
697 | pit->wq = create_singlethread_workqueue("kvm-pit-wq"); | 697 | pit->wq = create_singlethread_workqueue("kvm-pit-wq"); |
698 | if (!pit->wq) { | 698 | if (!pit->wq) { |
699 | mutex_unlock(&pit->pit_state.lock); | 699 | mutex_unlock(&pit->pit_state.lock); |
700 | kvm_free_irq_source_id(kvm, pit->irq_source_id); | ||
700 | kfree(pit); | 701 | kfree(pit); |
701 | return NULL; | 702 | return NULL; |
702 | } | 703 | } |
@@ -742,7 +743,7 @@ fail: | |||
742 | kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier); | 743 | kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier); |
743 | kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier); | 744 | kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier); |
744 | kvm_free_irq_source_id(kvm, pit->irq_source_id); | 745 | kvm_free_irq_source_id(kvm, pit->irq_source_id); |
745 | 746 | destroy_workqueue(pit->wq); | |
746 | kfree(pit); | 747 | kfree(pit); |
747 | return NULL; | 748 | return NULL; |
748 | } | 749 | } |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 25f19078b321..3a09c625d526 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -2387,7 +2387,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, | |||
2387 | if (cpu_has_xsave) | 2387 | if (cpu_has_xsave) |
2388 | memcpy(guest_xsave->region, | 2388 | memcpy(guest_xsave->region, |
2389 | &vcpu->arch.guest_fpu.state->xsave, | 2389 | &vcpu->arch.guest_fpu.state->xsave, |
2390 | sizeof(struct xsave_struct)); | 2390 | xstate_size); |
2391 | else { | 2391 | else { |
2392 | memcpy(guest_xsave->region, | 2392 | memcpy(guest_xsave->region, |
2393 | &vcpu->arch.guest_fpu.state->fxsave, | 2393 | &vcpu->arch.guest_fpu.state->fxsave, |
@@ -2405,7 +2405,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, | |||
2405 | 2405 | ||
2406 | if (cpu_has_xsave) | 2406 | if (cpu_has_xsave) |
2407 | memcpy(&vcpu->arch.guest_fpu.state->xsave, | 2407 | memcpy(&vcpu->arch.guest_fpu.state->xsave, |
2408 | guest_xsave->region, sizeof(struct xsave_struct)); | 2408 | guest_xsave->region, xstate_size); |
2409 | else { | 2409 | else { |
2410 | if (xstate_bv & ~XSTATE_FPSSE) | 2410 | if (xstate_bv & ~XSTATE_FPSSE) |
2411 | return -EINVAL; | 2411 | return -EINVAL; |
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 7c2f38f68ebb..e3558b9a58ba 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c | |||
@@ -318,8 +318,9 @@ long xtensa_clone(unsigned long clone_flags, unsigned long newsp, | |||
318 | */ | 318 | */ |
319 | 319 | ||
320 | asmlinkage | 320 | asmlinkage |
321 | long xtensa_execve(const char __user *name, char __user * __user *argv, | 321 | long xtensa_execve(const char __user *name, |
322 | char __user * __user *envp, | 322 | const char __user *const __user *argv, |
323 | const char __user *const __user *envp, | ||
323 | long a3, long a4, long a5, | 324 | long a3, long a4, long a5, |
324 | struct pt_regs *regs) | 325 | struct pt_regs *regs) |
325 | { | 326 | { |
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c index ea24c1e51be2..2673a3d14806 100644 --- a/drivers/ata/sata_dwc_460ex.c +++ b/drivers/ata/sata_dwc_460ex.c | |||
@@ -1588,7 +1588,7 @@ static const struct ata_port_info sata_dwc_port_info[] = { | |||
1588 | }, | 1588 | }, |
1589 | }; | 1589 | }; |
1590 | 1590 | ||
1591 | static int sata_dwc_probe(struct of_device *ofdev, | 1591 | static int sata_dwc_probe(struct platform_device *ofdev, |
1592 | const struct of_device_id *match) | 1592 | const struct of_device_id *match) |
1593 | { | 1593 | { |
1594 | struct sata_dwc_device *hsdev; | 1594 | struct sata_dwc_device *hsdev; |
@@ -1702,7 +1702,7 @@ error_out: | |||
1702 | return err; | 1702 | return err; |
1703 | } | 1703 | } |
1704 | 1704 | ||
1705 | static int sata_dwc_remove(struct of_device *ofdev) | 1705 | static int sata_dwc_remove(struct platform_device *ofdev) |
1706 | { | 1706 | { |
1707 | struct device *dev = &ofdev->dev; | 1707 | struct device *dev = &ofdev->dev; |
1708 | struct ata_host *host = dev_get_drvdata(dev); | 1708 | struct ata_host *host = dev_get_drvdata(dev); |
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index 2982b3ee9465..057413bb16e2 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c | |||
@@ -94,6 +94,7 @@ | |||
94 | #include <linux/hdreg.h> | 94 | #include <linux/hdreg.h> |
95 | #include <linux/platform_device.h> | 95 | #include <linux/platform_device.h> |
96 | #if defined(CONFIG_OF) | 96 | #if defined(CONFIG_OF) |
97 | #include <linux/of_address.h> | ||
97 | #include <linux/of_device.h> | 98 | #include <linux/of_device.h> |
98 | #include <linux/of_platform.h> | 99 | #include <linux/of_platform.h> |
99 | #endif | 100 | #endif |
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index ddf5def1b0da..710af89b176d 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c | |||
@@ -819,13 +819,16 @@ static const struct intel_driver_description { | |||
819 | "Sandybridge", NULL, &intel_gen6_driver }, | 819 | "Sandybridge", NULL, &intel_gen6_driver }, |
820 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, | 820 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, |
821 | "Sandybridge", NULL, &intel_gen6_driver }, | 821 | "Sandybridge", NULL, &intel_gen6_driver }, |
822 | { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_D0_IG, | ||
823 | "Sandybridge", NULL, &intel_gen6_driver }, | ||
822 | { 0, 0, NULL, NULL, NULL } | 824 | { 0, 0, NULL, NULL, NULL } |
823 | }; | 825 | }; |
824 | 826 | ||
825 | static int __devinit intel_gmch_probe(struct pci_dev *pdev, | 827 | static int __devinit intel_gmch_probe(struct pci_dev *pdev, |
826 | struct agp_bridge_data *bridge) | 828 | struct agp_bridge_data *bridge) |
827 | { | 829 | { |
828 | int i; | 830 | int i, mask; |
831 | |||
829 | bridge->driver = NULL; | 832 | bridge->driver = NULL; |
830 | 833 | ||
831 | for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { | 834 | for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { |
@@ -845,14 +848,19 @@ static int __devinit intel_gmch_probe(struct pci_dev *pdev, | |||
845 | 848 | ||
846 | dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); | 849 | dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); |
847 | 850 | ||
848 | if (bridge->driver->mask_memory == intel_i965_mask_memory) { | 851 | if (bridge->driver->mask_memory == intel_gen6_mask_memory) |
849 | if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36))) | 852 | mask = 40; |
850 | dev_err(&intel_private.pcidev->dev, | 853 | else if (bridge->driver->mask_memory == intel_i965_mask_memory) |
851 | "set gfx device dma mask 36bit failed!\n"); | 854 | mask = 36; |
852 | else | 855 | else |
853 | pci_set_consistent_dma_mask(intel_private.pcidev, | 856 | mask = 32; |
854 | DMA_BIT_MASK(36)); | 857 | |
855 | } | 858 | if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask))) |
859 | dev_err(&intel_private.pcidev->dev, | ||
860 | "set gfx device dma mask %d-bit failed!\n", mask); | ||
861 | else | ||
862 | pci_set_consistent_dma_mask(intel_private.pcidev, | ||
863 | DMA_BIT_MASK(mask)); | ||
856 | 864 | ||
857 | return 1; | 865 | return 1; |
858 | } | 866 | } |
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index c05e3e518268..08d47532e605 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h | |||
@@ -204,6 +204,7 @@ | |||
204 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102 | 204 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102 |
205 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 | 205 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 |
206 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106 | 206 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106 |
207 | #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_D0_IG 0x0126 | ||
207 | 208 | ||
208 | /* cover 915 and 945 variants */ | 209 | /* cover 915 and 945 variants */ |
209 | #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ | 210 | #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ |
diff --git a/drivers/char/pty.c b/drivers/char/pty.c index ad46eae1f9bb..c350d01716bd 100644 --- a/drivers/char/pty.c +++ b/drivers/char/pty.c | |||
@@ -675,8 +675,8 @@ static int ptmx_open(struct inode *inode, struct file *filp) | |||
675 | } | 675 | } |
676 | 676 | ||
677 | set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ | 677 | set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ |
678 | filp->private_data = tty; | 678 | |
679 | file_move(filp, &tty->tty_files); | 679 | tty_add_file(tty, filp); |
680 | 680 | ||
681 | retval = devpts_pty_new(inode, tty->link); | 681 | retval = devpts_pty_new(inode, tty->link); |
682 | if (retval) | 682 | if (retval) |
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 0350c42375a2..949067a0bd47 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c | |||
@@ -136,6 +136,9 @@ LIST_HEAD(tty_drivers); /* linked list of tty drivers */ | |||
136 | DEFINE_MUTEX(tty_mutex); | 136 | DEFINE_MUTEX(tty_mutex); |
137 | EXPORT_SYMBOL(tty_mutex); | 137 | EXPORT_SYMBOL(tty_mutex); |
138 | 138 | ||
139 | /* Spinlock to protect the tty->tty_files list */ | ||
140 | DEFINE_SPINLOCK(tty_files_lock); | ||
141 | |||
139 | static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *); | 142 | static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *); |
140 | static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *); | 143 | static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *); |
141 | ssize_t redirected_tty_write(struct file *, const char __user *, | 144 | ssize_t redirected_tty_write(struct file *, const char __user *, |
@@ -185,6 +188,41 @@ void free_tty_struct(struct tty_struct *tty) | |||
185 | kfree(tty); | 188 | kfree(tty); |
186 | } | 189 | } |
187 | 190 | ||
191 | static inline struct tty_struct *file_tty(struct file *file) | ||
192 | { | ||
193 | return ((struct tty_file_private *)file->private_data)->tty; | ||
194 | } | ||
195 | |||
196 | /* Associate a new file with the tty structure */ | ||
197 | void tty_add_file(struct tty_struct *tty, struct file *file) | ||
198 | { | ||
199 | struct tty_file_private *priv; | ||
200 | |||
201 | /* XXX: must implement proper error handling in callers */ | ||
202 | priv = kmalloc(sizeof(*priv), GFP_KERNEL|__GFP_NOFAIL); | ||
203 | |||
204 | priv->tty = tty; | ||
205 | priv->file = file; | ||
206 | file->private_data = priv; | ||
207 | |||
208 | spin_lock(&tty_files_lock); | ||
209 | list_add(&priv->list, &tty->tty_files); | ||
210 | spin_unlock(&tty_files_lock); | ||
211 | } | ||
212 | |||
213 | /* Delete file from its tty */ | ||
214 | void tty_del_file(struct file *file) | ||
215 | { | ||
216 | struct tty_file_private *priv = file->private_data; | ||
217 | |||
218 | spin_lock(&tty_files_lock); | ||
219 | list_del(&priv->list); | ||
220 | spin_unlock(&tty_files_lock); | ||
221 | file->private_data = NULL; | ||
222 | kfree(priv); | ||
223 | } | ||
224 | |||
225 | |||
188 | #define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base) | 226 | #define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base) |
189 | 227 | ||
190 | /** | 228 | /** |
@@ -235,11 +273,11 @@ static int check_tty_count(struct tty_struct *tty, const char *routine) | |||
235 | struct list_head *p; | 273 | struct list_head *p; |
236 | int count = 0; | 274 | int count = 0; |
237 | 275 | ||
238 | file_list_lock(); | 276 | spin_lock(&tty_files_lock); |
239 | list_for_each(p, &tty->tty_files) { | 277 | list_for_each(p, &tty->tty_files) { |
240 | count++; | 278 | count++; |
241 | } | 279 | } |
242 | file_list_unlock(); | 280 | spin_unlock(&tty_files_lock); |
243 | if (tty->driver->type == TTY_DRIVER_TYPE_PTY && | 281 | if (tty->driver->type == TTY_DRIVER_TYPE_PTY && |
244 | tty->driver->subtype == PTY_TYPE_SLAVE && | 282 | tty->driver->subtype == PTY_TYPE_SLAVE && |
245 | tty->link && tty->link->count) | 283 | tty->link && tty->link->count) |
@@ -497,6 +535,7 @@ void __tty_hangup(struct tty_struct *tty) | |||
497 | struct file *cons_filp = NULL; | 535 | struct file *cons_filp = NULL; |
498 | struct file *filp, *f = NULL; | 536 | struct file *filp, *f = NULL; |
499 | struct task_struct *p; | 537 | struct task_struct *p; |
538 | struct tty_file_private *priv; | ||
500 | int closecount = 0, n; | 539 | int closecount = 0, n; |
501 | unsigned long flags; | 540 | unsigned long flags; |
502 | int refs = 0; | 541 | int refs = 0; |
@@ -506,7 +545,7 @@ void __tty_hangup(struct tty_struct *tty) | |||
506 | 545 | ||
507 | 546 | ||
508 | spin_lock(&redirect_lock); | 547 | spin_lock(&redirect_lock); |
509 | if (redirect && redirect->private_data == tty) { | 548 | if (redirect && file_tty(redirect) == tty) { |
510 | f = redirect; | 549 | f = redirect; |
511 | redirect = NULL; | 550 | redirect = NULL; |
512 | } | 551 | } |
@@ -519,9 +558,10 @@ void __tty_hangup(struct tty_struct *tty) | |||
519 | workqueue with the lock held */ | 558 | workqueue with the lock held */ |
520 | check_tty_count(tty, "tty_hangup"); | 559 | check_tty_count(tty, "tty_hangup"); |
521 | 560 | ||
522 | file_list_lock(); | 561 | spin_lock(&tty_files_lock); |
523 | /* This breaks for file handles being sent over AF_UNIX sockets ? */ | 562 | /* This breaks for file handles being sent over AF_UNIX sockets ? */ |
524 | list_for_each_entry(filp, &tty->tty_files, f_u.fu_list) { | 563 | list_for_each_entry(priv, &tty->tty_files, list) { |
564 | filp = priv->file; | ||
525 | if (filp->f_op->write == redirected_tty_write) | 565 | if (filp->f_op->write == redirected_tty_write) |
526 | cons_filp = filp; | 566 | cons_filp = filp; |
527 | if (filp->f_op->write != tty_write) | 567 | if (filp->f_op->write != tty_write) |
@@ -530,7 +570,7 @@ void __tty_hangup(struct tty_struct *tty) | |||
530 | __tty_fasync(-1, filp, 0); /* can't block */ | 570 | __tty_fasync(-1, filp, 0); /* can't block */ |
531 | filp->f_op = &hung_up_tty_fops; | 571 | filp->f_op = &hung_up_tty_fops; |
532 | } | 572 | } |
533 | file_list_unlock(); | 573 | spin_unlock(&tty_files_lock); |
534 | 574 | ||
535 | tty_ldisc_hangup(tty); | 575 | tty_ldisc_hangup(tty); |
536 | 576 | ||
@@ -889,12 +929,10 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count, | |||
889 | loff_t *ppos) | 929 | loff_t *ppos) |
890 | { | 930 | { |
891 | int i; | 931 | int i; |
892 | struct tty_struct *tty; | 932 | struct inode *inode = file->f_path.dentry->d_inode; |
893 | struct inode *inode; | 933 | struct tty_struct *tty = file_tty(file); |
894 | struct tty_ldisc *ld; | 934 | struct tty_ldisc *ld; |
895 | 935 | ||
896 | tty = file->private_data; | ||
897 | inode = file->f_path.dentry->d_inode; | ||
898 | if (tty_paranoia_check(tty, inode, "tty_read")) | 936 | if (tty_paranoia_check(tty, inode, "tty_read")) |
899 | return -EIO; | 937 | return -EIO; |
900 | if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags))) | 938 | if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags))) |
@@ -1065,12 +1103,11 @@ void tty_write_message(struct tty_struct *tty, char *msg) | |||
1065 | static ssize_t tty_write(struct file *file, const char __user *buf, | 1103 | static ssize_t tty_write(struct file *file, const char __user *buf, |
1066 | size_t count, loff_t *ppos) | 1104 | size_t count, loff_t *ppos) |
1067 | { | 1105 | { |
1068 | struct tty_struct *tty; | ||
1069 | struct inode *inode = file->f_path.dentry->d_inode; | 1106 | struct inode *inode = file->f_path.dentry->d_inode; |
1107 | struct tty_struct *tty = file_tty(file); | ||
1108 | struct tty_ldisc *ld; | ||
1070 | ssize_t ret; | 1109 | ssize_t ret; |
1071 | struct tty_ldisc *ld; | ||
1072 | 1110 | ||
1073 | tty = file->private_data; | ||
1074 | if (tty_paranoia_check(tty, inode, "tty_write")) | 1111 | if (tty_paranoia_check(tty, inode, "tty_write")) |
1075 | return -EIO; | 1112 | return -EIO; |
1076 | if (!tty || !tty->ops->write || | 1113 | if (!tty || !tty->ops->write || |
@@ -1424,9 +1461,9 @@ static void release_one_tty(struct work_struct *work) | |||
1424 | tty_driver_kref_put(driver); | 1461 | tty_driver_kref_put(driver); |
1425 | module_put(driver->owner); | 1462 | module_put(driver->owner); |
1426 | 1463 | ||
1427 | file_list_lock(); | 1464 | spin_lock(&tty_files_lock); |
1428 | list_del_init(&tty->tty_files); | 1465 | list_del_init(&tty->tty_files); |
1429 | file_list_unlock(); | 1466 | spin_unlock(&tty_files_lock); |
1430 | 1467 | ||
1431 | put_pid(tty->pgrp); | 1468 | put_pid(tty->pgrp); |
1432 | put_pid(tty->session); | 1469 | put_pid(tty->session); |
@@ -1507,13 +1544,13 @@ static void release_tty(struct tty_struct *tty, int idx) | |||
1507 | 1544 | ||
1508 | int tty_release(struct inode *inode, struct file *filp) | 1545 | int tty_release(struct inode *inode, struct file *filp) |
1509 | { | 1546 | { |
1510 | struct tty_struct *tty, *o_tty; | 1547 | struct tty_struct *tty = file_tty(filp); |
1548 | struct tty_struct *o_tty; | ||
1511 | int pty_master, tty_closing, o_tty_closing, do_sleep; | 1549 | int pty_master, tty_closing, o_tty_closing, do_sleep; |
1512 | int devpts; | 1550 | int devpts; |
1513 | int idx; | 1551 | int idx; |
1514 | char buf[64]; | 1552 | char buf[64]; |
1515 | 1553 | ||
1516 | tty = filp->private_data; | ||
1517 | if (tty_paranoia_check(tty, inode, "tty_release_dev")) | 1554 | if (tty_paranoia_check(tty, inode, "tty_release_dev")) |
1518 | return 0; | 1555 | return 0; |
1519 | 1556 | ||
@@ -1671,8 +1708,7 @@ int tty_release(struct inode *inode, struct file *filp) | |||
1671 | * - do_tty_hangup no longer sees this file descriptor as | 1708 | * - do_tty_hangup no longer sees this file descriptor as |
1672 | * something that needs to be handled for hangups. | 1709 | * something that needs to be handled for hangups. |
1673 | */ | 1710 | */ |
1674 | file_kill(filp); | 1711 | tty_del_file(filp); |
1675 | filp->private_data = NULL; | ||
1676 | 1712 | ||
1677 | /* | 1713 | /* |
1678 | * Perform some housekeeping before deciding whether to return. | 1714 | * Perform some housekeeping before deciding whether to return. |
@@ -1839,8 +1875,8 @@ got_driver: | |||
1839 | return PTR_ERR(tty); | 1875 | return PTR_ERR(tty); |
1840 | } | 1876 | } |
1841 | 1877 | ||
1842 | filp->private_data = tty; | 1878 | tty_add_file(tty, filp); |
1843 | file_move(filp, &tty->tty_files); | 1879 | |
1844 | check_tty_count(tty, "tty_open"); | 1880 | check_tty_count(tty, "tty_open"); |
1845 | if (tty->driver->type == TTY_DRIVER_TYPE_PTY && | 1881 | if (tty->driver->type == TTY_DRIVER_TYPE_PTY && |
1846 | tty->driver->subtype == PTY_TYPE_MASTER) | 1882 | tty->driver->subtype == PTY_TYPE_MASTER) |
@@ -1916,11 +1952,10 @@ got_driver: | |||
1916 | 1952 | ||
1917 | static unsigned int tty_poll(struct file *filp, poll_table *wait) | 1953 | static unsigned int tty_poll(struct file *filp, poll_table *wait) |
1918 | { | 1954 | { |
1919 | struct tty_struct *tty; | 1955 | struct tty_struct *tty = file_tty(filp); |
1920 | struct tty_ldisc *ld; | 1956 | struct tty_ldisc *ld; |
1921 | int ret = 0; | 1957 | int ret = 0; |
1922 | 1958 | ||
1923 | tty = filp->private_data; | ||
1924 | if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_poll")) | 1959 | if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_poll")) |
1925 | return 0; | 1960 | return 0; |
1926 | 1961 | ||
@@ -1933,11 +1968,10 @@ static unsigned int tty_poll(struct file *filp, poll_table *wait) | |||
1933 | 1968 | ||
1934 | static int __tty_fasync(int fd, struct file *filp, int on) | 1969 | static int __tty_fasync(int fd, struct file *filp, int on) |
1935 | { | 1970 | { |
1936 | struct tty_struct *tty; | 1971 | struct tty_struct *tty = file_tty(filp); |
1937 | unsigned long flags; | 1972 | unsigned long flags; |
1938 | int retval = 0; | 1973 | int retval = 0; |
1939 | 1974 | ||
1940 | tty = filp->private_data; | ||
1941 | if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_fasync")) | 1975 | if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_fasync")) |
1942 | goto out; | 1976 | goto out; |
1943 | 1977 | ||
@@ -2491,13 +2525,13 @@ EXPORT_SYMBOL(tty_pair_get_pty); | |||
2491 | */ | 2525 | */ |
2492 | long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 2526 | long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
2493 | { | 2527 | { |
2494 | struct tty_struct *tty, *real_tty; | 2528 | struct tty_struct *tty = file_tty(file); |
2529 | struct tty_struct *real_tty; | ||
2495 | void __user *p = (void __user *)arg; | 2530 | void __user *p = (void __user *)arg; |
2496 | int retval; | 2531 | int retval; |
2497 | struct tty_ldisc *ld; | 2532 | struct tty_ldisc *ld; |
2498 | struct inode *inode = file->f_dentry->d_inode; | 2533 | struct inode *inode = file->f_dentry->d_inode; |
2499 | 2534 | ||
2500 | tty = file->private_data; | ||
2501 | if (tty_paranoia_check(tty, inode, "tty_ioctl")) | 2535 | if (tty_paranoia_check(tty, inode, "tty_ioctl")) |
2502 | return -EINVAL; | 2536 | return -EINVAL; |
2503 | 2537 | ||
@@ -2619,7 +2653,7 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd, | |||
2619 | unsigned long arg) | 2653 | unsigned long arg) |
2620 | { | 2654 | { |
2621 | struct inode *inode = file->f_dentry->d_inode; | 2655 | struct inode *inode = file->f_dentry->d_inode; |
2622 | struct tty_struct *tty = file->private_data; | 2656 | struct tty_struct *tty = file_tty(file); |
2623 | struct tty_ldisc *ld; | 2657 | struct tty_ldisc *ld; |
2624 | int retval = -ENOIOCTLCMD; | 2658 | int retval = -ENOIOCTLCMD; |
2625 | 2659 | ||
@@ -2711,7 +2745,7 @@ void __do_SAK(struct tty_struct *tty) | |||
2711 | if (!filp) | 2745 | if (!filp) |
2712 | continue; | 2746 | continue; |
2713 | if (filp->f_op->read == tty_read && | 2747 | if (filp->f_op->read == tty_read && |
2714 | filp->private_data == tty) { | 2748 | file_tty(filp) == tty) { |
2715 | printk(KERN_NOTICE "SAK: killed process %d" | 2749 | printk(KERN_NOTICE "SAK: killed process %d" |
2716 | " (%s): fd#%d opened to the tty\n", | 2750 | " (%s): fd#%d opened to the tty\n", |
2717 | task_pid_nr(p), p->comm, i); | 2751 | task_pid_nr(p), p->comm, i); |
diff --git a/drivers/char/vt.c b/drivers/char/vt.c index c734f9b1263a..50590c7f2c01 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c | |||
@@ -194,10 +194,11 @@ static DECLARE_WORK(console_work, console_callback); | |||
194 | int fg_console; | 194 | int fg_console; |
195 | int last_console; | 195 | int last_console; |
196 | int want_console = -1; | 196 | int want_console = -1; |
197 | int saved_fg_console; | 197 | static int saved_fg_console; |
198 | int saved_last_console; | 198 | static int saved_last_console; |
199 | int saved_want_console; | 199 | static int saved_want_console; |
200 | int saved_vc_mode; | 200 | static int saved_vc_mode; |
201 | static int saved_console_blanked; | ||
201 | 202 | ||
202 | /* | 203 | /* |
203 | * For each existing display, we have a pointer to console currently visible | 204 | * For each existing display, we have a pointer to console currently visible |
@@ -3449,6 +3450,7 @@ int con_debug_enter(struct vc_data *vc) | |||
3449 | saved_last_console = last_console; | 3450 | saved_last_console = last_console; |
3450 | saved_want_console = want_console; | 3451 | saved_want_console = want_console; |
3451 | saved_vc_mode = vc->vc_mode; | 3452 | saved_vc_mode = vc->vc_mode; |
3453 | saved_console_blanked = console_blanked; | ||
3452 | vc->vc_mode = KD_TEXT; | 3454 | vc->vc_mode = KD_TEXT; |
3453 | console_blanked = 0; | 3455 | console_blanked = 0; |
3454 | if (vc->vc_sw->con_debug_enter) | 3456 | if (vc->vc_sw->con_debug_enter) |
@@ -3492,6 +3494,7 @@ int con_debug_leave(void) | |||
3492 | fg_console = saved_fg_console; | 3494 | fg_console = saved_fg_console; |
3493 | last_console = saved_last_console; | 3495 | last_console = saved_last_console; |
3494 | want_console = saved_want_console; | 3496 | want_console = saved_want_console; |
3497 | console_blanked = saved_console_blanked; | ||
3495 | vc_cons[fg_console].d->vc_mode = saved_vc_mode; | 3498 | vc_cons[fg_console].d->vc_mode = saved_vc_mode; |
3496 | 3499 | ||
3497 | vc = vc_cons[fg_console].d; | 3500 | vc = vc_cons[fg_console].d; |
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c index 0ed763cd2e77..b663d573aad9 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c | |||
@@ -94,6 +94,7 @@ | |||
94 | 94 | ||
95 | #ifdef CONFIG_OF | 95 | #ifdef CONFIG_OF |
96 | /* For open firmware. */ | 96 | /* For open firmware. */ |
97 | #include <linux/of_address.h> | ||
97 | #include <linux/of_device.h> | 98 | #include <linux/of_device.h> |
98 | #include <linux/of_platform.h> | 99 | #include <linux/of_platform.h> |
99 | #endif | 100 | #endif |
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 90288ec7c284..84da748555bc 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
@@ -55,6 +55,9 @@ | |||
55 | static int drm_version(struct drm_device *dev, void *data, | 55 | static int drm_version(struct drm_device *dev, void *data, |
56 | struct drm_file *file_priv); | 56 | struct drm_file *file_priv); |
57 | 57 | ||
58 | #define DRM_IOCTL_DEF(ioctl, _func, _flags) \ | ||
59 | [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0} | ||
60 | |||
58 | /** Ioctl table */ | 61 | /** Ioctl table */ |
59 | static struct drm_ioctl_desc drm_ioctls[] = { | 62 | static struct drm_ioctl_desc drm_ioctls[] = { |
60 | DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0), | 63 | DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0), |
@@ -421,6 +424,7 @@ long drm_ioctl(struct file *filp, | |||
421 | int retcode = -EINVAL; | 424 | int retcode = -EINVAL; |
422 | char stack_kdata[128]; | 425 | char stack_kdata[128]; |
423 | char *kdata = NULL; | 426 | char *kdata = NULL; |
427 | unsigned int usize, asize; | ||
424 | 428 | ||
425 | dev = file_priv->minor->dev; | 429 | dev = file_priv->minor->dev; |
426 | atomic_inc(&dev->ioctl_count); | 430 | atomic_inc(&dev->ioctl_count); |
@@ -436,11 +440,18 @@ long drm_ioctl(struct file *filp, | |||
436 | ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) | 440 | ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) |
437 | goto err_i1; | 441 | goto err_i1; |
438 | if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) && | 442 | if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) && |
439 | (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) | 443 | (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { |
444 | u32 drv_size; | ||
440 | ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; | 445 | ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; |
446 | drv_size = _IOC_SIZE(ioctl->cmd_drv); | ||
447 | usize = asize = _IOC_SIZE(cmd); | ||
448 | if (drv_size > asize) | ||
449 | asize = drv_size; | ||
450 | } | ||
441 | else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) { | 451 | else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) { |
442 | ioctl = &drm_ioctls[nr]; | 452 | ioctl = &drm_ioctls[nr]; |
443 | cmd = ioctl->cmd; | 453 | cmd = ioctl->cmd; |
454 | usize = asize = _IOC_SIZE(cmd); | ||
444 | } else | 455 | } else |
445 | goto err_i1; | 456 | goto err_i1; |
446 | 457 | ||
@@ -460,10 +471,10 @@ long drm_ioctl(struct file *filp, | |||
460 | retcode = -EACCES; | 471 | retcode = -EACCES; |
461 | } else { | 472 | } else { |
462 | if (cmd & (IOC_IN | IOC_OUT)) { | 473 | if (cmd & (IOC_IN | IOC_OUT)) { |
463 | if (_IOC_SIZE(cmd) <= sizeof(stack_kdata)) { | 474 | if (asize <= sizeof(stack_kdata)) { |
464 | kdata = stack_kdata; | 475 | kdata = stack_kdata; |
465 | } else { | 476 | } else { |
466 | kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL); | 477 | kdata = kmalloc(asize, GFP_KERNEL); |
467 | if (!kdata) { | 478 | if (!kdata) { |
468 | retcode = -ENOMEM; | 479 | retcode = -ENOMEM; |
469 | goto err_i1; | 480 | goto err_i1; |
@@ -473,11 +484,13 @@ long drm_ioctl(struct file *filp, | |||
473 | 484 | ||
474 | if (cmd & IOC_IN) { | 485 | if (cmd & IOC_IN) { |
475 | if (copy_from_user(kdata, (void __user *)arg, | 486 | if (copy_from_user(kdata, (void __user *)arg, |
476 | _IOC_SIZE(cmd)) != 0) { | 487 | usize) != 0) { |
477 | retcode = -EFAULT; | 488 | retcode = -EFAULT; |
478 | goto err_i1; | 489 | goto err_i1; |
479 | } | 490 | } |
480 | } | 491 | } else |
492 | memset(kdata, 0, usize); | ||
493 | |||
481 | if (ioctl->flags & DRM_UNLOCKED) | 494 | if (ioctl->flags & DRM_UNLOCKED) |
482 | retcode = func(dev, kdata, file_priv); | 495 | retcode = func(dev, kdata, file_priv); |
483 | else { | 496 | else { |
@@ -488,7 +501,7 @@ long drm_ioctl(struct file *filp, | |||
488 | 501 | ||
489 | if (cmd & IOC_OUT) { | 502 | if (cmd & IOC_OUT) { |
490 | if (copy_to_user((void __user *)arg, kdata, | 503 | if (copy_to_user((void __user *)arg, kdata, |
491 | _IOC_SIZE(cmd)) != 0) | 504 | usize) != 0) |
492 | retcode = -EFAULT; | 505 | retcode = -EFAULT; |
493 | } | 506 | } |
494 | } | 507 | } |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index de82e201d682..8dd7e6f86bb3 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -94,10 +94,11 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_conn | |||
94 | int i; | 94 | int i; |
95 | enum drm_connector_force force = DRM_FORCE_UNSPECIFIED; | 95 | enum drm_connector_force force = DRM_FORCE_UNSPECIFIED; |
96 | struct drm_fb_helper_cmdline_mode *cmdline_mode; | 96 | struct drm_fb_helper_cmdline_mode *cmdline_mode; |
97 | struct drm_connector *connector = fb_helper_conn->connector; | 97 | struct drm_connector *connector; |
98 | 98 | ||
99 | if (!fb_helper_conn) | 99 | if (!fb_helper_conn) |
100 | return false; | 100 | return false; |
101 | connector = fb_helper_conn->connector; | ||
101 | 102 | ||
102 | cmdline_mode = &fb_helper_conn->cmdline_mode; | 103 | cmdline_mode = &fb_helper_conn->cmdline_mode; |
103 | if (!mode_option) | 104 | if (!mode_option) |
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index 3778360eceea..fda67468e603 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c | |||
@@ -138,7 +138,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
138 | break; | 138 | break; |
139 | } | 139 | } |
140 | 140 | ||
141 | if (!agpmem) | 141 | if (&agpmem->head == &dev->agp->memory) |
142 | goto vm_fault_error; | 142 | goto vm_fault_error; |
143 | 143 | ||
144 | /* | 144 | /* |
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index 0e6c131313d9..61b4caf220fa 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c | |||
@@ -1255,21 +1255,21 @@ long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
1255 | } | 1255 | } |
1256 | 1256 | ||
1257 | struct drm_ioctl_desc i810_ioctls[] = { | 1257 | struct drm_ioctl_desc i810_ioctls[] = { |
1258 | DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | 1258 | DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
1259 | DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED), | 1259 | DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED), |
1260 | DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED), | 1260 | DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED), |
1261 | DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED), | 1261 | DRM_IOCTL_DEF_DRV(I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED), |
1262 | DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED), | 1262 | DRM_IOCTL_DEF_DRV(I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED), |
1263 | DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED), | 1263 | DRM_IOCTL_DEF_DRV(I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED), |
1264 | DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED), | 1264 | DRM_IOCTL_DEF_DRV(I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED), |
1265 | DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED), | 1265 | DRM_IOCTL_DEF_DRV(I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED), |
1266 | DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED), | 1266 | DRM_IOCTL_DEF_DRV(I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED), |
1267 | DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED), | 1267 | DRM_IOCTL_DEF_DRV(I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED), |
1268 | DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED), | 1268 | DRM_IOCTL_DEF_DRV(I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED), |
1269 | DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED), | 1269 | DRM_IOCTL_DEF_DRV(I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED), |
1270 | DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | 1270 | DRM_IOCTL_DEF_DRV(I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
1271 | DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED), | 1271 | DRM_IOCTL_DEF_DRV(I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED), |
1272 | DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED), | 1272 | DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED), |
1273 | }; | 1273 | }; |
1274 | 1274 | ||
1275 | int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); | 1275 | int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); |
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c index 5168862c9227..671aa18415ac 100644 --- a/drivers/gpu/drm/i830/i830_dma.c +++ b/drivers/gpu/drm/i830/i830_dma.c | |||
@@ -1524,20 +1524,20 @@ long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
1524 | } | 1524 | } |
1525 | 1525 | ||
1526 | struct drm_ioctl_desc i830_ioctls[] = { | 1526 | struct drm_ioctl_desc i830_ioctls[] = { |
1527 | DRM_IOCTL_DEF(DRM_I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | 1527 | DRM_IOCTL_DEF_DRV(I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
1528 | DRM_IOCTL_DEF(DRM_I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED), | 1528 | DRM_IOCTL_DEF_DRV(I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED), |
1529 | DRM_IOCTL_DEF(DRM_I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED), | 1529 | DRM_IOCTL_DEF_DRV(I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED), |
1530 | DRM_IOCTL_DEF(DRM_I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED), | 1530 | DRM_IOCTL_DEF_DRV(I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED), |
1531 | DRM_IOCTL_DEF(DRM_I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED), | 1531 | DRM_IOCTL_DEF_DRV(I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED), |
1532 | DRM_IOCTL_DEF(DRM_I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED), | 1532 | DRM_IOCTL_DEF_DRV(I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED), |
1533 | DRM_IOCTL_DEF(DRM_I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED), | 1533 | DRM_IOCTL_DEF_DRV(I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED), |
1534 | DRM_IOCTL_DEF(DRM_I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED), | 1534 | DRM_IOCTL_DEF_DRV(I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED), |
1535 | DRM_IOCTL_DEF(DRM_I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED), | 1535 | DRM_IOCTL_DEF_DRV(I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED), |
1536 | DRM_IOCTL_DEF(DRM_I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED), | 1536 | DRM_IOCTL_DEF_DRV(I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED), |
1537 | DRM_IOCTL_DEF(DRM_I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED), | 1537 | DRM_IOCTL_DEF_DRV(I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED), |
1538 | DRM_IOCTL_DEF(DRM_I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED), | 1538 | DRM_IOCTL_DEF_DRV(I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED), |
1539 | DRM_IOCTL_DEF(DRM_I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED), | 1539 | DRM_IOCTL_DEF_DRV(I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED), |
1540 | DRM_IOCTL_DEF(DRM_I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED), | 1540 | DRM_IOCTL_DEF_DRV(I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED), |
1541 | }; | 1541 | }; |
1542 | 1542 | ||
1543 | int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls); | 1543 | int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls); |
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index da78f2c0d909..5c8e53458edb 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile | |||
@@ -8,6 +8,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ | |||
8 | i915_suspend.o \ | 8 | i915_suspend.o \ |
9 | i915_gem.o \ | 9 | i915_gem.o \ |
10 | i915_gem_debug.o \ | 10 | i915_gem_debug.o \ |
11 | i915_gem_evict.o \ | ||
11 | i915_gem_tiling.o \ | 12 | i915_gem_tiling.o \ |
12 | i915_trace_points.o \ | 13 | i915_trace_points.o \ |
13 | intel_display.o \ | 14 | intel_display.o \ |
@@ -18,6 +19,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ | |||
18 | intel_hdmi.o \ | 19 | intel_hdmi.o \ |
19 | intel_sdvo.o \ | 20 | intel_sdvo.o \ |
20 | intel_modes.o \ | 21 | intel_modes.o \ |
22 | intel_panel.o \ | ||
21 | intel_i2c.o \ | 23 | intel_i2c.o \ |
22 | intel_fb.o \ | 24 | intel_fb.o \ |
23 | intel_tv.o \ | 25 | intel_tv.o \ |
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h index 0d6ff640e1c6..8c2ad014c47f 100644 --- a/drivers/gpu/drm/i915/dvo.h +++ b/drivers/gpu/drm/i915/dvo.h | |||
@@ -30,20 +30,17 @@ | |||
30 | #include "intel_drv.h" | 30 | #include "intel_drv.h" |
31 | 31 | ||
32 | struct intel_dvo_device { | 32 | struct intel_dvo_device { |
33 | char *name; | 33 | const char *name; |
34 | int type; | 34 | int type; |
35 | /* DVOA/B/C output register */ | 35 | /* DVOA/B/C output register */ |
36 | u32 dvo_reg; | 36 | u32 dvo_reg; |
37 | /* GPIO register used for i2c bus to control this device */ | 37 | /* GPIO register used for i2c bus to control this device */ |
38 | u32 gpio; | 38 | u32 gpio; |
39 | int slave_addr; | 39 | int slave_addr; |
40 | struct i2c_adapter *i2c_bus; | ||
41 | 40 | ||
42 | const struct intel_dvo_dev_ops *dev_ops; | 41 | const struct intel_dvo_dev_ops *dev_ops; |
43 | void *dev_priv; | 42 | void *dev_priv; |
44 | 43 | struct i2c_adapter *i2c_bus; | |
45 | struct drm_display_mode *panel_fixed_mode; | ||
46 | bool panel_wants_dither; | ||
47 | }; | 44 | }; |
48 | 45 | ||
49 | struct intel_dvo_dev_ops { | 46 | struct intel_dvo_dev_ops { |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 9214119c0154..92d5605a34d1 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -467,6 +467,9 @@ static int i915_error_state(struct seq_file *m, void *unused) | |||
467 | } | 467 | } |
468 | } | 468 | } |
469 | 469 | ||
470 | if (error->overlay) | ||
471 | intel_overlay_print_error_state(m, error->overlay); | ||
472 | |||
470 | out: | 473 | out: |
471 | spin_unlock_irqrestore(&dev_priv->error_lock, flags); | 474 | spin_unlock_irqrestore(&dev_priv->error_lock, flags); |
472 | 475 | ||
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index f19ffe87af3c..a7ec93e62f81 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -499,6 +499,13 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, | |||
499 | } | 499 | } |
500 | } | 500 | } |
501 | 501 | ||
502 | |||
503 | if (IS_G4X(dev) || IS_IRONLAKE(dev)) { | ||
504 | BEGIN_LP_RING(2); | ||
505 | OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP); | ||
506 | OUT_RING(MI_NOOP); | ||
507 | ADVANCE_LP_RING(); | ||
508 | } | ||
502 | i915_emit_breadcrumb(dev); | 509 | i915_emit_breadcrumb(dev); |
503 | 510 | ||
504 | return 0; | 511 | return 0; |
@@ -2360,46 +2367,46 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) | |||
2360 | } | 2367 | } |
2361 | 2368 | ||
2362 | struct drm_ioctl_desc i915_ioctls[] = { | 2369 | struct drm_ioctl_desc i915_ioctls[] = { |
2363 | DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 2370 | DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
2364 | DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH), | 2371 | DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH), |
2365 | DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH), | 2372 | DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH), |
2366 | DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), | 2373 | DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), |
2367 | DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), | 2374 | DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), |
2368 | DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), | 2375 | DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), |
2369 | DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH), | 2376 | DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH), |
2370 | DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 2377 | DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
2371 | DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH), | 2378 | DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH), |
2372 | DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH), | 2379 | DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH), |
2373 | DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 2380 | DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
2374 | DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), | 2381 | DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), |
2375 | DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), | 2382 | DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
2376 | DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), | 2383 | DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
2377 | DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), | 2384 | DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), |
2378 | DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), | 2385 | DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), |
2379 | DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 2386 | DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
2380 | DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | 2387 | DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
2381 | DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), | 2388 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), |
2382 | DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED), | 2389 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED), |
2383 | DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), | 2390 | DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), |
2384 | DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), | 2391 | DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), |
2385 | DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), | 2392 | DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), |
2386 | DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), | 2393 | DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), |
2387 | DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | 2394 | DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
2388 | DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | 2395 | DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
2389 | DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), | 2396 | DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), |
2390 | DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), | 2397 | DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), |
2391 | DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), | 2398 | DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), |
2392 | DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), | 2399 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), |
2393 | DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), | 2400 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), |
2394 | DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), | 2401 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), |
2395 | DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), | 2402 | DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), |
2396 | DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), | 2403 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), |
2397 | DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), | 2404 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), |
2398 | DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), | 2405 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), |
2399 | DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), | 2406 | DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), |
2400 | DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), | 2407 | DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), |
2401 | DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 2408 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
2402 | DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 2409 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
2403 | }; | 2410 | }; |
2404 | 2411 | ||
2405 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); | 2412 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 5044f653e8ea..00befce8fbb7 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -181,6 +181,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */ | |||
181 | INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), | 181 | INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), |
182 | INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), | 182 | INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), |
183 | INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), | 183 | INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), |
184 | INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), | ||
184 | {0, 0, 0} | 185 | {0, 0, 0} |
185 | }; | 186 | }; |
186 | 187 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 906663b9929e..047cd7ce7e1b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -113,6 +113,9 @@ struct intel_opregion { | |||
113 | int enabled; | 113 | int enabled; |
114 | }; | 114 | }; |
115 | 115 | ||
116 | struct intel_overlay; | ||
117 | struct intel_overlay_error_state; | ||
118 | |||
116 | struct drm_i915_master_private { | 119 | struct drm_i915_master_private { |
117 | drm_local_map_t *sarea; | 120 | drm_local_map_t *sarea; |
118 | struct _drm_i915_sarea *sarea_priv; | 121 | struct _drm_i915_sarea *sarea_priv; |
@@ -166,6 +169,7 @@ struct drm_i915_error_state { | |||
166 | u32 purgeable:1; | 169 | u32 purgeable:1; |
167 | } *active_bo; | 170 | } *active_bo; |
168 | u32 active_bo_count; | 171 | u32 active_bo_count; |
172 | struct intel_overlay_error_state *overlay; | ||
169 | }; | 173 | }; |
170 | 174 | ||
171 | struct drm_i915_display_funcs { | 175 | struct drm_i915_display_funcs { |
@@ -186,8 +190,6 @@ struct drm_i915_display_funcs { | |||
186 | /* clock gating init */ | 190 | /* clock gating init */ |
187 | }; | 191 | }; |
188 | 192 | ||
189 | struct intel_overlay; | ||
190 | |||
191 | struct intel_device_info { | 193 | struct intel_device_info { |
192 | u8 is_mobile : 1; | 194 | u8 is_mobile : 1; |
193 | u8 is_i8xx : 1; | 195 | u8 is_i8xx : 1; |
@@ -242,6 +244,7 @@ typedef struct drm_i915_private { | |||
242 | struct pci_dev *bridge_dev; | 244 | struct pci_dev *bridge_dev; |
243 | struct intel_ring_buffer render_ring; | 245 | struct intel_ring_buffer render_ring; |
244 | struct intel_ring_buffer bsd_ring; | 246 | struct intel_ring_buffer bsd_ring; |
247 | uint32_t next_seqno; | ||
245 | 248 | ||
246 | drm_dma_handle_t *status_page_dmah; | 249 | drm_dma_handle_t *status_page_dmah; |
247 | void *seqno_page; | 250 | void *seqno_page; |
@@ -251,6 +254,7 @@ typedef struct drm_i915_private { | |||
251 | drm_local_map_t hws_map; | 254 | drm_local_map_t hws_map; |
252 | struct drm_gem_object *seqno_obj; | 255 | struct drm_gem_object *seqno_obj; |
253 | struct drm_gem_object *pwrctx; | 256 | struct drm_gem_object *pwrctx; |
257 | struct drm_gem_object *renderctx; | ||
254 | 258 | ||
255 | struct resource mch_res; | 259 | struct resource mch_res; |
256 | 260 | ||
@@ -285,6 +289,9 @@ typedef struct drm_i915_private { | |||
285 | unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; | 289 | unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; |
286 | int vblank_pipe; | 290 | int vblank_pipe; |
287 | int num_pipe; | 291 | int num_pipe; |
292 | u32 flush_rings; | ||
293 | #define FLUSH_RENDER_RING 0x1 | ||
294 | #define FLUSH_BSD_RING 0x2 | ||
288 | 295 | ||
289 | /* For hangcheck timer */ | 296 | /* For hangcheck timer */ |
290 | #define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */ | 297 | #define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */ |
@@ -568,8 +575,6 @@ typedef struct drm_i915_private { | |||
568 | */ | 575 | */ |
569 | struct delayed_work retire_work; | 576 | struct delayed_work retire_work; |
570 | 577 | ||
571 | uint32_t next_gem_seqno; | ||
572 | |||
573 | /** | 578 | /** |
574 | * Waiting sequence number, if any | 579 | * Waiting sequence number, if any |
575 | */ | 580 | */ |
@@ -610,6 +615,8 @@ typedef struct drm_i915_private { | |||
610 | struct sdvo_device_mapping sdvo_mappings[2]; | 615 | struct sdvo_device_mapping sdvo_mappings[2]; |
611 | /* indicate whether the LVDS_BORDER should be enabled or not */ | 616 | /* indicate whether the LVDS_BORDER should be enabled or not */ |
612 | unsigned int lvds_border_bits; | 617 | unsigned int lvds_border_bits; |
618 | /* Panel fitter placement and size for Ironlake+ */ | ||
619 | u32 pch_pf_pos, pch_pf_size; | ||
613 | 620 | ||
614 | struct drm_crtc *plane_to_crtc_mapping[2]; | 621 | struct drm_crtc *plane_to_crtc_mapping[2]; |
615 | struct drm_crtc *pipe_to_crtc_mapping[2]; | 622 | struct drm_crtc *pipe_to_crtc_mapping[2]; |
@@ -669,6 +676,8 @@ struct drm_i915_gem_object { | |||
669 | struct list_head list; | 676 | struct list_head list; |
670 | /** This object's place on GPU write list */ | 677 | /** This object's place on GPU write list */ |
671 | struct list_head gpu_write_list; | 678 | struct list_head gpu_write_list; |
679 | /** This object's place on eviction list */ | ||
680 | struct list_head evict_list; | ||
672 | 681 | ||
673 | /** | 682 | /** |
674 | * This is set if the object is on the active or flushing lists | 683 | * This is set if the object is on the active or flushing lists |
@@ -978,6 +987,7 @@ int i915_gem_init_ringbuffer(struct drm_device *dev); | |||
978 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); | 987 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); |
979 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, | 988 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, |
980 | unsigned long end); | 989 | unsigned long end); |
990 | int i915_gpu_idle(struct drm_device *dev); | ||
981 | int i915_gem_idle(struct drm_device *dev); | 991 | int i915_gem_idle(struct drm_device *dev); |
982 | uint32_t i915_add_request(struct drm_device *dev, | 992 | uint32_t i915_add_request(struct drm_device *dev, |
983 | struct drm_file *file_priv, | 993 | struct drm_file *file_priv, |
@@ -991,7 +1001,9 @@ int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, | |||
991 | int write); | 1001 | int write); |
992 | int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj); | 1002 | int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj); |
993 | int i915_gem_attach_phys_object(struct drm_device *dev, | 1003 | int i915_gem_attach_phys_object(struct drm_device *dev, |
994 | struct drm_gem_object *obj, int id); | 1004 | struct drm_gem_object *obj, |
1005 | int id, | ||
1006 | int align); | ||
995 | void i915_gem_detach_phys_object(struct drm_device *dev, | 1007 | void i915_gem_detach_phys_object(struct drm_device *dev, |
996 | struct drm_gem_object *obj); | 1008 | struct drm_gem_object *obj); |
997 | void i915_gem_free_all_phys_object(struct drm_device *dev); | 1009 | void i915_gem_free_all_phys_object(struct drm_device *dev); |
@@ -1003,6 +1015,11 @@ int i915_gem_object_flush_write_domain(struct drm_gem_object *obj); | |||
1003 | void i915_gem_shrinker_init(void); | 1015 | void i915_gem_shrinker_init(void); |
1004 | void i915_gem_shrinker_exit(void); | 1016 | void i915_gem_shrinker_exit(void); |
1005 | 1017 | ||
1018 | /* i915_gem_evict.c */ | ||
1019 | int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment); | ||
1020 | int i915_gem_evict_everything(struct drm_device *dev); | ||
1021 | int i915_gem_evict_inactive(struct drm_device *dev); | ||
1022 | |||
1006 | /* i915_gem_tiling.c */ | 1023 | /* i915_gem_tiling.c */ |
1007 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); | 1024 | void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); |
1008 | void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); | 1025 | void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); |
@@ -1066,6 +1083,10 @@ extern bool ironlake_set_drps(struct drm_device *dev, u8 val); | |||
1066 | extern void intel_detect_pch (struct drm_device *dev); | 1083 | extern void intel_detect_pch (struct drm_device *dev); |
1067 | extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); | 1084 | extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); |
1068 | 1085 | ||
1086 | /* overlay */ | ||
1087 | extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); | ||
1088 | extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error); | ||
1089 | |||
1069 | /** | 1090 | /** |
1070 | * Lock test for when it's just for synchronization of ring access. | 1091 | * Lock test for when it's just for synchronization of ring access. |
1071 | * | 1092 | * |
@@ -1092,26 +1113,26 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); | |||
1092 | #define I915_VERBOSE 0 | 1113 | #define I915_VERBOSE 0 |
1093 | 1114 | ||
1094 | #define BEGIN_LP_RING(n) do { \ | 1115 | #define BEGIN_LP_RING(n) do { \ |
1095 | drm_i915_private_t *dev_priv = dev->dev_private; \ | 1116 | drm_i915_private_t *dev_priv__ = dev->dev_private; \ |
1096 | if (I915_VERBOSE) \ | 1117 | if (I915_VERBOSE) \ |
1097 | DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \ | 1118 | DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \ |
1098 | intel_ring_begin(dev, &dev_priv->render_ring, (n)); \ | 1119 | intel_ring_begin(dev, &dev_priv__->render_ring, (n)); \ |
1099 | } while (0) | 1120 | } while (0) |
1100 | 1121 | ||
1101 | 1122 | ||
1102 | #define OUT_RING(x) do { \ | 1123 | #define OUT_RING(x) do { \ |
1103 | drm_i915_private_t *dev_priv = dev->dev_private; \ | 1124 | drm_i915_private_t *dev_priv__ = dev->dev_private; \ |
1104 | if (I915_VERBOSE) \ | 1125 | if (I915_VERBOSE) \ |
1105 | DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \ | 1126 | DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \ |
1106 | intel_ring_emit(dev, &dev_priv->render_ring, x); \ | 1127 | intel_ring_emit(dev, &dev_priv__->render_ring, x); \ |
1107 | } while (0) | 1128 | } while (0) |
1108 | 1129 | ||
1109 | #define ADVANCE_LP_RING() do { \ | 1130 | #define ADVANCE_LP_RING() do { \ |
1110 | drm_i915_private_t *dev_priv = dev->dev_private; \ | 1131 | drm_i915_private_t *dev_priv__ = dev->dev_private; \ |
1111 | if (I915_VERBOSE) \ | 1132 | if (I915_VERBOSE) \ |
1112 | DRM_DEBUG("ADVANCE_LP_RING %x\n", \ | 1133 | DRM_DEBUG("ADVANCE_LP_RING %x\n", \ |
1113 | dev_priv->render_ring.tail); \ | 1134 | dev_priv__->render_ring.tail); \ |
1114 | intel_ring_advance(dev, &dev_priv->render_ring); \ | 1135 | intel_ring_advance(dev, &dev_priv__->render_ring); \ |
1115 | } while(0) | 1136 | } while(0) |
1116 | 1137 | ||
1117 | /** | 1138 | /** |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 0758c7802e6b..df5a7135c261 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/swap.h> | 35 | #include <linux/swap.h> |
36 | #include <linux/pci.h> | 36 | #include <linux/pci.h> |
37 | 37 | ||
38 | static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj); | ||
38 | static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); | 39 | static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); |
39 | static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); | 40 | static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); |
40 | static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); | 41 | static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); |
@@ -48,8 +49,6 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); | |||
48 | static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | 49 | static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, |
49 | unsigned alignment); | 50 | unsigned alignment); |
50 | static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); | 51 | static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); |
51 | static int i915_gem_evict_something(struct drm_device *dev, int min_size); | ||
52 | static int i915_gem_evict_from_inactive_list(struct drm_device *dev); | ||
53 | static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | 52 | static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, |
54 | struct drm_i915_gem_pwrite *args, | 53 | struct drm_i915_gem_pwrite *args, |
55 | struct drm_file *file_priv); | 54 | struct drm_file *file_priv); |
@@ -58,6 +57,14 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj); | |||
58 | static LIST_HEAD(shrink_list); | 57 | static LIST_HEAD(shrink_list); |
59 | static DEFINE_SPINLOCK(shrink_list_lock); | 58 | static DEFINE_SPINLOCK(shrink_list_lock); |
60 | 59 | ||
60 | static inline bool | ||
61 | i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv) | ||
62 | { | ||
63 | return obj_priv->gtt_space && | ||
64 | !obj_priv->active && | ||
65 | obj_priv->pin_count == 0; | ||
66 | } | ||
67 | |||
61 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, | 68 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, |
62 | unsigned long end) | 69 | unsigned long end) |
63 | { | 70 | { |
@@ -313,7 +320,8 @@ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) | |||
313 | if (ret == -ENOMEM) { | 320 | if (ret == -ENOMEM) { |
314 | struct drm_device *dev = obj->dev; | 321 | struct drm_device *dev = obj->dev; |
315 | 322 | ||
316 | ret = i915_gem_evict_something(dev, obj->size); | 323 | ret = i915_gem_evict_something(dev, obj->size, |
324 | i915_gem_get_gtt_alignment(obj)); | ||
317 | if (ret) | 325 | if (ret) |
318 | return ret; | 326 | return ret; |
319 | 327 | ||
@@ -1036,6 +1044,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1036 | ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); | 1044 | ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); |
1037 | } | 1045 | } |
1038 | 1046 | ||
1047 | |||
1048 | /* Maintain LRU order of "inactive" objects */ | ||
1049 | if (ret == 0 && i915_gem_object_is_inactive(obj_priv)) | ||
1050 | list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | ||
1051 | |||
1039 | drm_gem_object_unreference(obj); | 1052 | drm_gem_object_unreference(obj); |
1040 | mutex_unlock(&dev->struct_mutex); | 1053 | mutex_unlock(&dev->struct_mutex); |
1041 | return ret; | 1054 | return ret; |
@@ -1137,7 +1150,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1137 | { | 1150 | { |
1138 | struct drm_gem_object *obj = vma->vm_private_data; | 1151 | struct drm_gem_object *obj = vma->vm_private_data; |
1139 | struct drm_device *dev = obj->dev; | 1152 | struct drm_device *dev = obj->dev; |
1140 | struct drm_i915_private *dev_priv = dev->dev_private; | 1153 | drm_i915_private_t *dev_priv = dev->dev_private; |
1141 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 1154 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1142 | pgoff_t page_offset; | 1155 | pgoff_t page_offset; |
1143 | unsigned long pfn; | 1156 | unsigned long pfn; |
@@ -1155,8 +1168,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1155 | if (ret) | 1168 | if (ret) |
1156 | goto unlock; | 1169 | goto unlock; |
1157 | 1170 | ||
1158 | list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | ||
1159 | |||
1160 | ret = i915_gem_object_set_to_gtt_domain(obj, write); | 1171 | ret = i915_gem_object_set_to_gtt_domain(obj, write); |
1161 | if (ret) | 1172 | if (ret) |
1162 | goto unlock; | 1173 | goto unlock; |
@@ -1169,6 +1180,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1169 | goto unlock; | 1180 | goto unlock; |
1170 | } | 1181 | } |
1171 | 1182 | ||
1183 | if (i915_gem_object_is_inactive(obj_priv)) | ||
1184 | list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | ||
1185 | |||
1172 | pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + | 1186 | pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + |
1173 | page_offset; | 1187 | page_offset; |
1174 | 1188 | ||
@@ -1363,7 +1377,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
1363 | struct drm_file *file_priv) | 1377 | struct drm_file *file_priv) |
1364 | { | 1378 | { |
1365 | struct drm_i915_gem_mmap_gtt *args = data; | 1379 | struct drm_i915_gem_mmap_gtt *args = data; |
1366 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1367 | struct drm_gem_object *obj; | 1380 | struct drm_gem_object *obj; |
1368 | struct drm_i915_gem_object *obj_priv; | 1381 | struct drm_i915_gem_object *obj_priv; |
1369 | int ret; | 1382 | int ret; |
@@ -1409,7 +1422,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
1409 | mutex_unlock(&dev->struct_mutex); | 1422 | mutex_unlock(&dev->struct_mutex); |
1410 | return ret; | 1423 | return ret; |
1411 | } | 1424 | } |
1412 | list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | ||
1413 | } | 1425 | } |
1414 | 1426 | ||
1415 | drm_gem_object_unreference(obj); | 1427 | drm_gem_object_unreference(obj); |
@@ -1493,9 +1505,16 @@ i915_gem_object_truncate(struct drm_gem_object *obj) | |||
1493 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 1505 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
1494 | struct inode *inode; | 1506 | struct inode *inode; |
1495 | 1507 | ||
1508 | /* Our goal here is to return as much of the memory as | ||
1509 | * is possible back to the system as we are called from OOM. | ||
1510 | * To do this we must instruct the shmfs to drop all of its | ||
1511 | * backing pages, *now*. Here we mirror the actions taken | ||
1512 | * when by shmem_delete_inode() to release the backing store. | ||
1513 | */ | ||
1496 | inode = obj->filp->f_path.dentry->d_inode; | 1514 | inode = obj->filp->f_path.dentry->d_inode; |
1497 | if (inode->i_op->truncate) | 1515 | truncate_inode_pages(inode->i_mapping, 0); |
1498 | inode->i_op->truncate (inode); | 1516 | if (inode->i_op->truncate_range) |
1517 | inode->i_op->truncate_range(inode, 0, (loff_t)-1); | ||
1499 | 1518 | ||
1500 | obj_priv->madv = __I915_MADV_PURGED; | 1519 | obj_priv->madv = __I915_MADV_PURGED; |
1501 | } | 1520 | } |
@@ -1887,19 +1906,6 @@ i915_gem_flush(struct drm_device *dev, | |||
1887 | flush_domains); | 1906 | flush_domains); |
1888 | } | 1907 | } |
1889 | 1908 | ||
1890 | static void | ||
1891 | i915_gem_flush_ring(struct drm_device *dev, | ||
1892 | uint32_t invalidate_domains, | ||
1893 | uint32_t flush_domains, | ||
1894 | struct intel_ring_buffer *ring) | ||
1895 | { | ||
1896 | if (flush_domains & I915_GEM_DOMAIN_CPU) | ||
1897 | drm_agp_chipset_flush(dev); | ||
1898 | ring->flush(dev, ring, | ||
1899 | invalidate_domains, | ||
1900 | flush_domains); | ||
1901 | } | ||
1902 | |||
1903 | /** | 1909 | /** |
1904 | * Ensures that all rendering to the object has completed and the object is | 1910 | * Ensures that all rendering to the object has completed and the object is |
1905 | * safe to unbind from the GTT or access from the CPU. | 1911 | * safe to unbind from the GTT or access from the CPU. |
@@ -1973,8 +1979,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
1973 | * cause memory corruption through use-after-free. | 1979 | * cause memory corruption through use-after-free. |
1974 | */ | 1980 | */ |
1975 | 1981 | ||
1976 | BUG_ON(obj_priv->active); | ||
1977 | |||
1978 | /* release the fence reg _after_ flushing */ | 1982 | /* release the fence reg _after_ flushing */ |
1979 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | 1983 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) |
1980 | i915_gem_clear_fence_reg(obj); | 1984 | i915_gem_clear_fence_reg(obj); |
@@ -2010,34 +2014,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
2010 | return ret; | 2014 | return ret; |
2011 | } | 2015 | } |
2012 | 2016 | ||
2013 | static struct drm_gem_object * | 2017 | int |
2014 | i915_gem_find_inactive_object(struct drm_device *dev, int min_size) | ||
2015 | { | ||
2016 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
2017 | struct drm_i915_gem_object *obj_priv; | ||
2018 | struct drm_gem_object *best = NULL; | ||
2019 | struct drm_gem_object *first = NULL; | ||
2020 | |||
2021 | /* Try to find the smallest clean object */ | ||
2022 | list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { | ||
2023 | struct drm_gem_object *obj = &obj_priv->base; | ||
2024 | if (obj->size >= min_size) { | ||
2025 | if ((!obj_priv->dirty || | ||
2026 | i915_gem_object_is_purgeable(obj_priv)) && | ||
2027 | (!best || obj->size < best->size)) { | ||
2028 | best = obj; | ||
2029 | if (best->size == min_size) | ||
2030 | return best; | ||
2031 | } | ||
2032 | if (!first) | ||
2033 | first = obj; | ||
2034 | } | ||
2035 | } | ||
2036 | |||
2037 | return best ? best : first; | ||
2038 | } | ||
2039 | |||
2040 | static int | ||
2041 | i915_gpu_idle(struct drm_device *dev) | 2018 | i915_gpu_idle(struct drm_device *dev) |
2042 | { | 2019 | { |
2043 | drm_i915_private_t *dev_priv = dev->dev_private; | 2020 | drm_i915_private_t *dev_priv = dev->dev_private; |
@@ -2078,155 +2055,6 @@ i915_gpu_idle(struct drm_device *dev) | |||
2078 | return ret; | 2055 | return ret; |
2079 | } | 2056 | } |
2080 | 2057 | ||
2081 | static int | ||
2082 | i915_gem_evict_everything(struct drm_device *dev) | ||
2083 | { | ||
2084 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
2085 | int ret; | ||
2086 | bool lists_empty; | ||
2087 | |||
2088 | spin_lock(&dev_priv->mm.active_list_lock); | ||
2089 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | ||
2090 | list_empty(&dev_priv->mm.flushing_list) && | ||
2091 | list_empty(&dev_priv->render_ring.active_list) && | ||
2092 | (!HAS_BSD(dev) | ||
2093 | || list_empty(&dev_priv->bsd_ring.active_list))); | ||
2094 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
2095 | |||
2096 | if (lists_empty) | ||
2097 | return -ENOSPC; | ||
2098 | |||
2099 | /* Flush everything (on to the inactive lists) and evict */ | ||
2100 | ret = i915_gpu_idle(dev); | ||
2101 | if (ret) | ||
2102 | return ret; | ||
2103 | |||
2104 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | ||
2105 | |||
2106 | ret = i915_gem_evict_from_inactive_list(dev); | ||
2107 | if (ret) | ||
2108 | return ret; | ||
2109 | |||
2110 | spin_lock(&dev_priv->mm.active_list_lock); | ||
2111 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | ||
2112 | list_empty(&dev_priv->mm.flushing_list) && | ||
2113 | list_empty(&dev_priv->render_ring.active_list) && | ||
2114 | (!HAS_BSD(dev) | ||
2115 | || list_empty(&dev_priv->bsd_ring.active_list))); | ||
2116 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
2117 | BUG_ON(!lists_empty); | ||
2118 | |||
2119 | return 0; | ||
2120 | } | ||
2121 | |||
2122 | static int | ||
2123 | i915_gem_evict_something(struct drm_device *dev, int min_size) | ||
2124 | { | ||
2125 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
2126 | struct drm_gem_object *obj; | ||
2127 | int ret; | ||
2128 | |||
2129 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; | ||
2130 | struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring; | ||
2131 | for (;;) { | ||
2132 | i915_gem_retire_requests(dev); | ||
2133 | |||
2134 | /* If there's an inactive buffer available now, grab it | ||
2135 | * and be done. | ||
2136 | */ | ||
2137 | obj = i915_gem_find_inactive_object(dev, min_size); | ||
2138 | if (obj) { | ||
2139 | struct drm_i915_gem_object *obj_priv; | ||
2140 | |||
2141 | #if WATCH_LRU | ||
2142 | DRM_INFO("%s: evicting %p\n", __func__, obj); | ||
2143 | #endif | ||
2144 | obj_priv = to_intel_bo(obj); | ||
2145 | BUG_ON(obj_priv->pin_count != 0); | ||
2146 | BUG_ON(obj_priv->active); | ||
2147 | |||
2148 | /* Wait on the rendering and unbind the buffer. */ | ||
2149 | return i915_gem_object_unbind(obj); | ||
2150 | } | ||
2151 | |||
2152 | /* If we didn't get anything, but the ring is still processing | ||
2153 | * things, wait for the next to finish and hopefully leave us | ||
2154 | * a buffer to evict. | ||
2155 | */ | ||
2156 | if (!list_empty(&render_ring->request_list)) { | ||
2157 | struct drm_i915_gem_request *request; | ||
2158 | |||
2159 | request = list_first_entry(&render_ring->request_list, | ||
2160 | struct drm_i915_gem_request, | ||
2161 | list); | ||
2162 | |||
2163 | ret = i915_wait_request(dev, | ||
2164 | request->seqno, request->ring); | ||
2165 | if (ret) | ||
2166 | return ret; | ||
2167 | |||
2168 | continue; | ||
2169 | } | ||
2170 | |||
2171 | if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) { | ||
2172 | struct drm_i915_gem_request *request; | ||
2173 | |||
2174 | request = list_first_entry(&bsd_ring->request_list, | ||
2175 | struct drm_i915_gem_request, | ||
2176 | list); | ||
2177 | |||
2178 | ret = i915_wait_request(dev, | ||
2179 | request->seqno, request->ring); | ||
2180 | if (ret) | ||
2181 | return ret; | ||
2182 | |||
2183 | continue; | ||
2184 | } | ||
2185 | |||
2186 | /* If we didn't have anything on the request list but there | ||
2187 | * are buffers awaiting a flush, emit one and try again. | ||
2188 | * When we wait on it, those buffers waiting for that flush | ||
2189 | * will get moved to inactive. | ||
2190 | */ | ||
2191 | if (!list_empty(&dev_priv->mm.flushing_list)) { | ||
2192 | struct drm_i915_gem_object *obj_priv; | ||
2193 | |||
2194 | /* Find an object that we can immediately reuse */ | ||
2195 | list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { | ||
2196 | obj = &obj_priv->base; | ||
2197 | if (obj->size >= min_size) | ||
2198 | break; | ||
2199 | |||
2200 | obj = NULL; | ||
2201 | } | ||
2202 | |||
2203 | if (obj != NULL) { | ||
2204 | uint32_t seqno; | ||
2205 | |||
2206 | i915_gem_flush_ring(dev, | ||
2207 | obj->write_domain, | ||
2208 | obj->write_domain, | ||
2209 | obj_priv->ring); | ||
2210 | seqno = i915_add_request(dev, NULL, | ||
2211 | obj->write_domain, | ||
2212 | obj_priv->ring); | ||
2213 | if (seqno == 0) | ||
2214 | return -ENOMEM; | ||
2215 | continue; | ||
2216 | } | ||
2217 | } | ||
2218 | |||
2219 | /* If we didn't do any of the above, there's no single buffer | ||
2220 | * large enough to swap out for the new one, so just evict | ||
2221 | * everything and start again. (This should be rare.) | ||
2222 | */ | ||
2223 | if (!list_empty (&dev_priv->mm.inactive_list)) | ||
2224 | return i915_gem_evict_from_inactive_list(dev); | ||
2225 | else | ||
2226 | return i915_gem_evict_everything(dev); | ||
2227 | } | ||
2228 | } | ||
2229 | |||
2230 | int | 2058 | int |
2231 | i915_gem_object_get_pages(struct drm_gem_object *obj, | 2059 | i915_gem_object_get_pages(struct drm_gem_object *obj, |
2232 | gfp_t gfpmask) | 2060 | gfp_t gfpmask) |
@@ -2666,7 +2494,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2666 | #if WATCH_LRU | 2494 | #if WATCH_LRU |
2667 | DRM_INFO("%s: GTT full, evicting something\n", __func__); | 2495 | DRM_INFO("%s: GTT full, evicting something\n", __func__); |
2668 | #endif | 2496 | #endif |
2669 | ret = i915_gem_evict_something(dev, obj->size); | 2497 | ret = i915_gem_evict_something(dev, obj->size, alignment); |
2670 | if (ret) | 2498 | if (ret) |
2671 | return ret; | 2499 | return ret; |
2672 | 2500 | ||
@@ -2684,7 +2512,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2684 | 2512 | ||
2685 | if (ret == -ENOMEM) { | 2513 | if (ret == -ENOMEM) { |
2686 | /* first try to clear up some space from the GTT */ | 2514 | /* first try to clear up some space from the GTT */ |
2687 | ret = i915_gem_evict_something(dev, obj->size); | 2515 | ret = i915_gem_evict_something(dev, obj->size, |
2516 | alignment); | ||
2688 | if (ret) { | 2517 | if (ret) { |
2689 | /* now try to shrink everyone else */ | 2518 | /* now try to shrink everyone else */ |
2690 | if (gfpmask) { | 2519 | if (gfpmask) { |
@@ -2714,7 +2543,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2714 | drm_mm_put_block(obj_priv->gtt_space); | 2543 | drm_mm_put_block(obj_priv->gtt_space); |
2715 | obj_priv->gtt_space = NULL; | 2544 | obj_priv->gtt_space = NULL; |
2716 | 2545 | ||
2717 | ret = i915_gem_evict_something(dev, obj->size); | 2546 | ret = i915_gem_evict_something(dev, obj->size, alignment); |
2718 | if (ret) | 2547 | if (ret) |
2719 | return ret; | 2548 | return ret; |
2720 | 2549 | ||
@@ -2723,6 +2552,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2723 | atomic_inc(&dev->gtt_count); | 2552 | atomic_inc(&dev->gtt_count); |
2724 | atomic_add(obj->size, &dev->gtt_memory); | 2553 | atomic_add(obj->size, &dev->gtt_memory); |
2725 | 2554 | ||
2555 | /* keep track of bounds object by adding it to the inactive list */ | ||
2556 | list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | ||
2557 | |||
2726 | /* Assert that the object is not currently in any GPU domain. As it | 2558 | /* Assert that the object is not currently in any GPU domain. As it |
2727 | * wasn't in the GTT, there shouldn't be any way it could have been in | 2559 | * wasn't in the GTT, there shouldn't be any way it could have been in |
2728 | * a GPU cache | 2560 | * a GPU cache |
@@ -3117,6 +2949,7 @@ static void | |||
3117 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | 2949 | i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) |
3118 | { | 2950 | { |
3119 | struct drm_device *dev = obj->dev; | 2951 | struct drm_device *dev = obj->dev; |
2952 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3120 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 2953 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
3121 | uint32_t invalidate_domains = 0; | 2954 | uint32_t invalidate_domains = 0; |
3122 | uint32_t flush_domains = 0; | 2955 | uint32_t flush_domains = 0; |
@@ -3179,6 +3012,13 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | |||
3179 | obj->pending_write_domain = obj->write_domain; | 3012 | obj->pending_write_domain = obj->write_domain; |
3180 | obj->read_domains = obj->pending_read_domains; | 3013 | obj->read_domains = obj->pending_read_domains; |
3181 | 3014 | ||
3015 | if (flush_domains & I915_GEM_GPU_DOMAINS) { | ||
3016 | if (obj_priv->ring == &dev_priv->render_ring) | ||
3017 | dev_priv->flush_rings |= FLUSH_RENDER_RING; | ||
3018 | else if (obj_priv->ring == &dev_priv->bsd_ring) | ||
3019 | dev_priv->flush_rings |= FLUSH_BSD_RING; | ||
3020 | } | ||
3021 | |||
3182 | dev->invalidate_domains |= invalidate_domains; | 3022 | dev->invalidate_domains |= invalidate_domains; |
3183 | dev->flush_domains |= flush_domains; | 3023 | dev->flush_domains |= flush_domains; |
3184 | #if WATCH_BUF | 3024 | #if WATCH_BUF |
@@ -3718,7 +3558,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3718 | ring = &dev_priv->render_ring; | 3558 | ring = &dev_priv->render_ring; |
3719 | } | 3559 | } |
3720 | 3560 | ||
3721 | |||
3722 | if (args->buffer_count < 1) { | 3561 | if (args->buffer_count < 1) { |
3723 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | 3562 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); |
3724 | return -EINVAL; | 3563 | return -EINVAL; |
@@ -3892,6 +3731,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3892 | */ | 3731 | */ |
3893 | dev->invalidate_domains = 0; | 3732 | dev->invalidate_domains = 0; |
3894 | dev->flush_domains = 0; | 3733 | dev->flush_domains = 0; |
3734 | dev_priv->flush_rings = 0; | ||
3895 | 3735 | ||
3896 | for (i = 0; i < args->buffer_count; i++) { | 3736 | for (i = 0; i < args->buffer_count; i++) { |
3897 | struct drm_gem_object *obj = object_list[i]; | 3737 | struct drm_gem_object *obj = object_list[i]; |
@@ -3912,16 +3752,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3912 | i915_gem_flush(dev, | 3752 | i915_gem_flush(dev, |
3913 | dev->invalidate_domains, | 3753 | dev->invalidate_domains, |
3914 | dev->flush_domains); | 3754 | dev->flush_domains); |
3915 | if (dev->flush_domains & I915_GEM_GPU_DOMAINS) { | 3755 | if (dev_priv->flush_rings & FLUSH_RENDER_RING) |
3916 | (void)i915_add_request(dev, file_priv, | 3756 | (void)i915_add_request(dev, file_priv, |
3917 | dev->flush_domains, | 3757 | dev->flush_domains, |
3918 | &dev_priv->render_ring); | 3758 | &dev_priv->render_ring); |
3919 | 3759 | if (dev_priv->flush_rings & FLUSH_BSD_RING) | |
3920 | if (HAS_BSD(dev)) | 3760 | (void)i915_add_request(dev, file_priv, |
3921 | (void)i915_add_request(dev, file_priv, | 3761 | dev->flush_domains, |
3922 | dev->flush_domains, | 3762 | &dev_priv->bsd_ring); |
3923 | &dev_priv->bsd_ring); | ||
3924 | } | ||
3925 | } | 3763 | } |
3926 | 3764 | ||
3927 | for (i = 0; i < args->buffer_count; i++) { | 3765 | for (i = 0; i < args->buffer_count; i++) { |
@@ -4192,6 +4030,10 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | |||
4192 | if (alignment == 0) | 4030 | if (alignment == 0) |
4193 | alignment = i915_gem_get_gtt_alignment(obj); | 4031 | alignment = i915_gem_get_gtt_alignment(obj); |
4194 | if (obj_priv->gtt_offset & (alignment - 1)) { | 4032 | if (obj_priv->gtt_offset & (alignment - 1)) { |
4033 | WARN(obj_priv->pin_count, | ||
4034 | "bo is already pinned with incorrect alignment:" | ||
4035 | " offset=%x, req.alignment=%x\n", | ||
4036 | obj_priv->gtt_offset, alignment); | ||
4195 | ret = i915_gem_object_unbind(obj); | 4037 | ret = i915_gem_object_unbind(obj); |
4196 | if (ret) | 4038 | if (ret) |
4197 | return ret; | 4039 | return ret; |
@@ -4213,8 +4055,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | |||
4213 | atomic_inc(&dev->pin_count); | 4055 | atomic_inc(&dev->pin_count); |
4214 | atomic_add(obj->size, &dev->pin_memory); | 4056 | atomic_add(obj->size, &dev->pin_memory); |
4215 | if (!obj_priv->active && | 4057 | if (!obj_priv->active && |
4216 | (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 && | 4058 | (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) |
4217 | !list_empty(&obj_priv->list)) | ||
4218 | list_del_init(&obj_priv->list); | 4059 | list_del_init(&obj_priv->list); |
4219 | } | 4060 | } |
4220 | i915_verify_inactive(dev, __FILE__, __LINE__); | 4061 | i915_verify_inactive(dev, __FILE__, __LINE__); |
@@ -4359,22 +4200,34 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
4359 | } | 4200 | } |
4360 | 4201 | ||
4361 | mutex_lock(&dev->struct_mutex); | 4202 | mutex_lock(&dev->struct_mutex); |
4362 | /* Update the active list for the hardware's current position. | ||
4363 | * Otherwise this only updates on a delayed timer or when irqs are | ||
4364 | * actually unmasked, and our working set ends up being larger than | ||
4365 | * required. | ||
4366 | */ | ||
4367 | i915_gem_retire_requests(dev); | ||
4368 | 4203 | ||
4369 | obj_priv = to_intel_bo(obj); | 4204 | /* Count all active objects as busy, even if they are currently not used |
4370 | /* Don't count being on the flushing list against the object being | 4205 | * by the gpu. Users of this interface expect objects to eventually |
4371 | * done. Otherwise, a buffer left on the flushing list but not getting | 4206 | * become non-busy without any further actions, therefore emit any |
4372 | * flushed (because nobody's flushing that domain) won't ever return | 4207 | * necessary flushes here. |
4373 | * unbusy and get reused by libdrm's bo cache. The other expected | ||
4374 | * consumer of this interface, OpenGL's occlusion queries, also specs | ||
4375 | * that the objects get unbusy "eventually" without any interference. | ||
4376 | */ | 4208 | */ |
4377 | args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0; | 4209 | obj_priv = to_intel_bo(obj); |
4210 | args->busy = obj_priv->active; | ||
4211 | if (args->busy) { | ||
4212 | /* Unconditionally flush objects, even when the gpu still uses this | ||
4213 | * object. Userspace calling this function indicates that it wants to | ||
4214 | * use this buffer rather sooner than later, so issuing the required | ||
4215 | * flush earlier is beneficial. | ||
4216 | */ | ||
4217 | if (obj->write_domain) { | ||
4218 | i915_gem_flush(dev, 0, obj->write_domain); | ||
4219 | (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring); | ||
4220 | } | ||
4221 | |||
4222 | /* Update the active list for the hardware's current position. | ||
4223 | * Otherwise this only updates on a delayed timer or when irqs | ||
4224 | * are actually unmasked, and our working set ends up being | ||
4225 | * larger than required. | ||
4226 | */ | ||
4227 | i915_gem_retire_requests_ring(dev, obj_priv->ring); | ||
4228 | |||
4229 | args->busy = obj_priv->active; | ||
4230 | } | ||
4378 | 4231 | ||
4379 | drm_gem_object_unreference(obj); | 4232 | drm_gem_object_unreference(obj); |
4380 | mutex_unlock(&dev->struct_mutex); | 4233 | mutex_unlock(&dev->struct_mutex); |
@@ -4514,30 +4367,6 @@ void i915_gem_free_object(struct drm_gem_object *obj) | |||
4514 | i915_gem_free_object_tail(obj); | 4367 | i915_gem_free_object_tail(obj); |
4515 | } | 4368 | } |
4516 | 4369 | ||
4517 | /** Unbinds all inactive objects. */ | ||
4518 | static int | ||
4519 | i915_gem_evict_from_inactive_list(struct drm_device *dev) | ||
4520 | { | ||
4521 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
4522 | |||
4523 | while (!list_empty(&dev_priv->mm.inactive_list)) { | ||
4524 | struct drm_gem_object *obj; | ||
4525 | int ret; | ||
4526 | |||
4527 | obj = &list_first_entry(&dev_priv->mm.inactive_list, | ||
4528 | struct drm_i915_gem_object, | ||
4529 | list)->base; | ||
4530 | |||
4531 | ret = i915_gem_object_unbind(obj); | ||
4532 | if (ret != 0) { | ||
4533 | DRM_ERROR("Error unbinding object: %d\n", ret); | ||
4534 | return ret; | ||
4535 | } | ||
4536 | } | ||
4537 | |||
4538 | return 0; | ||
4539 | } | ||
4540 | |||
4541 | int | 4370 | int |
4542 | i915_gem_idle(struct drm_device *dev) | 4371 | i915_gem_idle(struct drm_device *dev) |
4543 | { | 4372 | { |
@@ -4562,7 +4391,7 @@ i915_gem_idle(struct drm_device *dev) | |||
4562 | 4391 | ||
4563 | /* Under UMS, be paranoid and evict. */ | 4392 | /* Under UMS, be paranoid and evict. */ |
4564 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) { | 4393 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) { |
4565 | ret = i915_gem_evict_from_inactive_list(dev); | 4394 | ret = i915_gem_evict_inactive(dev); |
4566 | if (ret) { | 4395 | if (ret) { |
4567 | mutex_unlock(&dev->struct_mutex); | 4396 | mutex_unlock(&dev->struct_mutex); |
4568 | return ret; | 4397 | return ret; |
@@ -4680,6 +4509,8 @@ i915_gem_init_ringbuffer(struct drm_device *dev) | |||
4680 | goto cleanup_render_ring; | 4509 | goto cleanup_render_ring; |
4681 | } | 4510 | } |
4682 | 4511 | ||
4512 | dev_priv->next_seqno = 1; | ||
4513 | |||
4683 | return 0; | 4514 | return 0; |
4684 | 4515 | ||
4685 | cleanup_render_ring: | 4516 | cleanup_render_ring: |
@@ -4841,7 +4672,7 @@ i915_gem_load(struct drm_device *dev) | |||
4841 | * e.g. for cursor + overlay regs | 4672 | * e.g. for cursor + overlay regs |
4842 | */ | 4673 | */ |
4843 | int i915_gem_init_phys_object(struct drm_device *dev, | 4674 | int i915_gem_init_phys_object(struct drm_device *dev, |
4844 | int id, int size) | 4675 | int id, int size, int align) |
4845 | { | 4676 | { |
4846 | drm_i915_private_t *dev_priv = dev->dev_private; | 4677 | drm_i915_private_t *dev_priv = dev->dev_private; |
4847 | struct drm_i915_gem_phys_object *phys_obj; | 4678 | struct drm_i915_gem_phys_object *phys_obj; |
@@ -4856,7 +4687,7 @@ int i915_gem_init_phys_object(struct drm_device *dev, | |||
4856 | 4687 | ||
4857 | phys_obj->id = id; | 4688 | phys_obj->id = id; |
4858 | 4689 | ||
4859 | phys_obj->handle = drm_pci_alloc(dev, size, 0); | 4690 | phys_obj->handle = drm_pci_alloc(dev, size, align); |
4860 | if (!phys_obj->handle) { | 4691 | if (!phys_obj->handle) { |
4861 | ret = -ENOMEM; | 4692 | ret = -ENOMEM; |
4862 | goto kfree_obj; | 4693 | goto kfree_obj; |
@@ -4938,7 +4769,9 @@ out: | |||
4938 | 4769 | ||
4939 | int | 4770 | int |
4940 | i915_gem_attach_phys_object(struct drm_device *dev, | 4771 | i915_gem_attach_phys_object(struct drm_device *dev, |
4941 | struct drm_gem_object *obj, int id) | 4772 | struct drm_gem_object *obj, |
4773 | int id, | ||
4774 | int align) | ||
4942 | { | 4775 | { |
4943 | drm_i915_private_t *dev_priv = dev->dev_private; | 4776 | drm_i915_private_t *dev_priv = dev->dev_private; |
4944 | struct drm_i915_gem_object *obj_priv; | 4777 | struct drm_i915_gem_object *obj_priv; |
@@ -4957,11 +4790,10 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
4957 | i915_gem_detach_phys_object(dev, obj); | 4790 | i915_gem_detach_phys_object(dev, obj); |
4958 | } | 4791 | } |
4959 | 4792 | ||
4960 | |||
4961 | /* create a new object */ | 4793 | /* create a new object */ |
4962 | if (!dev_priv->mm.phys_objs[id - 1]) { | 4794 | if (!dev_priv->mm.phys_objs[id - 1]) { |
4963 | ret = i915_gem_init_phys_object(dev, id, | 4795 | ret = i915_gem_init_phys_object(dev, id, |
4964 | obj->size); | 4796 | obj->size, align); |
4965 | if (ret) { | 4797 | if (ret) { |
4966 | DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size); | 4798 | DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size); |
4967 | goto out; | 4799 | goto out; |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c new file mode 100644 index 000000000000..72cae3cccad8 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
@@ -0,0 +1,271 @@ | |||
1 | /* | ||
2 | * Copyright © 2008-2010 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Eric Anholt <eric@anholt.net> | ||
 25 | * Chris Wilson <chris@chris-wilson.co.uk> | ||
26 | * | ||
27 | */ | ||
28 | |||
29 | #include "drmP.h" | ||
30 | #include "drm.h" | ||
31 | #include "i915_drv.h" | ||
32 | #include "i915_drm.h" | ||
33 | |||
34 | static struct drm_i915_gem_object * | ||
35 | i915_gem_next_active_object(struct drm_device *dev, | ||
36 | struct list_head **render_iter, | ||
37 | struct list_head **bsd_iter) | ||
38 | { | ||
39 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
40 | struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL; | ||
41 | |||
42 | if (*render_iter != &dev_priv->render_ring.active_list) | ||
43 | render_obj = list_entry(*render_iter, | ||
44 | struct drm_i915_gem_object, | ||
45 | list); | ||
46 | |||
47 | if (HAS_BSD(dev)) { | ||
48 | if (*bsd_iter != &dev_priv->bsd_ring.active_list) | ||
49 | bsd_obj = list_entry(*bsd_iter, | ||
50 | struct drm_i915_gem_object, | ||
51 | list); | ||
52 | |||
53 | if (render_obj == NULL) { | ||
54 | *bsd_iter = (*bsd_iter)->next; | ||
55 | return bsd_obj; | ||
56 | } | ||
57 | |||
58 | if (bsd_obj == NULL) { | ||
59 | *render_iter = (*render_iter)->next; | ||
60 | return render_obj; | ||
61 | } | ||
62 | |||
63 | /* XXX can we handle seqno wrapping? */ | ||
64 | if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) { | ||
65 | *render_iter = (*render_iter)->next; | ||
66 | return render_obj; | ||
67 | } else { | ||
68 | *bsd_iter = (*bsd_iter)->next; | ||
69 | return bsd_obj; | ||
70 | } | ||
71 | } else { | ||
72 | *render_iter = (*render_iter)->next; | ||
73 | return render_obj; | ||
74 | } | ||
75 | } | ||
76 | |||
77 | static bool | ||
78 | mark_free(struct drm_i915_gem_object *obj_priv, | ||
79 | struct list_head *unwind) | ||
80 | { | ||
81 | list_add(&obj_priv->evict_list, unwind); | ||
82 | return drm_mm_scan_add_block(obj_priv->gtt_space); | ||
83 | } | ||
84 | |||
85 | #define i915_for_each_active_object(OBJ, R, B) \ | ||
86 | *(R) = dev_priv->render_ring.active_list.next; \ | ||
87 | *(B) = dev_priv->bsd_ring.active_list.next; \ | ||
88 | while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL) | ||
89 | |||
90 | int | ||
91 | i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment) | ||
92 | { | ||
93 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
94 | struct list_head eviction_list, unwind_list; | ||
95 | struct drm_i915_gem_object *obj_priv, *tmp_obj_priv; | ||
96 | struct list_head *render_iter, *bsd_iter; | ||
97 | int ret = 0; | ||
98 | |||
99 | i915_gem_retire_requests(dev); | ||
100 | |||
101 | /* Re-check for free space after retiring requests */ | ||
102 | if (drm_mm_search_free(&dev_priv->mm.gtt_space, | ||
103 | min_size, alignment, 0)) | ||
104 | return 0; | ||
105 | |||
106 | /* | ||
107 | * The goal is to evict objects and amalgamate space in LRU order. | ||
108 | * The oldest idle objects reside on the inactive list, which is in | ||
109 | * retirement order. The next objects to retire are those on the (per | ||
110 | * ring) active list that do not have an outstanding flush. Once the | ||
111 | * hardware reports completion (the seqno is updated after the | ||
112 | * batchbuffer has been finished) the clean buffer objects would | ||
113 | * be retired to the inactive list. Any dirty objects would be added | ||
114 | * to the tail of the flushing list. So after processing the clean | ||
115 | * active objects we need to emit a MI_FLUSH to retire the flushing | ||
116 | * list, hence the retirement order of the flushing list is in | ||
117 | * advance of the dirty objects on the active lists. | ||
118 | * | ||
119 | * The retirement sequence is thus: | ||
120 | * 1. Inactive objects (already retired) | ||
121 | * 2. Clean active objects | ||
122 | * 3. Flushing list | ||
123 | * 4. Dirty active objects. | ||
124 | * | ||
125 | * On each list, the oldest objects lie at the HEAD with the freshest | ||
126 | * object on the TAIL. | ||
127 | */ | ||
128 | |||
129 | INIT_LIST_HEAD(&unwind_list); | ||
130 | drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); | ||
131 | |||
132 | /* First see if there is a large enough contiguous idle region... */ | ||
133 | list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { | ||
134 | if (mark_free(obj_priv, &unwind_list)) | ||
135 | goto found; | ||
136 | } | ||
137 | |||
138 | /* Now merge in the soon-to-be-expired objects... */ | ||
139 | i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) { | ||
140 | /* Does the object require an outstanding flush? */ | ||
141 | if (obj_priv->base.write_domain || obj_priv->pin_count) | ||
142 | continue; | ||
143 | |||
144 | if (mark_free(obj_priv, &unwind_list)) | ||
145 | goto found; | ||
146 | } | ||
147 | |||
148 | /* Finally add anything with a pending flush (in order of retirement) */ | ||
149 | list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { | ||
150 | if (obj_priv->pin_count) | ||
151 | continue; | ||
152 | |||
153 | if (mark_free(obj_priv, &unwind_list)) | ||
154 | goto found; | ||
155 | } | ||
156 | i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) { | ||
157 | if (! obj_priv->base.write_domain || obj_priv->pin_count) | ||
158 | continue; | ||
159 | |||
160 | if (mark_free(obj_priv, &unwind_list)) | ||
161 | goto found; | ||
162 | } | ||
163 | |||
164 | /* Nothing found, clean up and bail out! */ | ||
165 | list_for_each_entry(obj_priv, &unwind_list, evict_list) { | ||
166 | ret = drm_mm_scan_remove_block(obj_priv->gtt_space); | ||
167 | BUG_ON(ret); | ||
168 | } | ||
169 | |||
170 | /* We expect the caller to unpin, evict all and try again, or give up. | ||
171 | * So calling i915_gem_evict_everything() is unnecessary. | ||
172 | */ | ||
173 | return -ENOSPC; | ||
174 | |||
175 | found: | ||
176 | INIT_LIST_HEAD(&eviction_list); | ||
177 | list_for_each_entry_safe(obj_priv, tmp_obj_priv, | ||
178 | &unwind_list, evict_list) { | ||
179 | if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { | ||
 180 | /* drm_mm doesn't allow any other operations while | ||
181 | * scanning, therefore store to be evicted objects on a | ||
182 | * temporary list. */ | ||
183 | list_move(&obj_priv->evict_list, &eviction_list); | ||
184 | } | ||
185 | } | ||
186 | |||
187 | /* Unbinding will emit any required flushes */ | ||
188 | list_for_each_entry_safe(obj_priv, tmp_obj_priv, | ||
189 | &eviction_list, evict_list) { | ||
190 | #if WATCH_LRU | ||
191 | DRM_INFO("%s: evicting %p\n", __func__, obj); | ||
192 | #endif | ||
193 | ret = i915_gem_object_unbind(&obj_priv->base); | ||
194 | if (ret) | ||
195 | return ret; | ||
196 | } | ||
197 | |||
198 | /* The just created free hole should be on the top of the free stack | ||
199 | * maintained by drm_mm, so this BUG_ON actually executes in O(1). | ||
200 | * Furthermore all accessed data has just recently been used, so it | ||
201 | * should be really fast, too. */ | ||
202 | BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size, | ||
203 | alignment, 0)); | ||
204 | |||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | int | ||
209 | i915_gem_evict_everything(struct drm_device *dev) | ||
210 | { | ||
211 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
212 | int ret; | ||
213 | bool lists_empty; | ||
214 | |||
215 | spin_lock(&dev_priv->mm.active_list_lock); | ||
216 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | ||
217 | list_empty(&dev_priv->mm.flushing_list) && | ||
218 | list_empty(&dev_priv->render_ring.active_list) && | ||
219 | (!HAS_BSD(dev) | ||
220 | || list_empty(&dev_priv->bsd_ring.active_list))); | ||
221 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
222 | |||
223 | if (lists_empty) | ||
224 | return -ENOSPC; | ||
225 | |||
226 | /* Flush everything (on to the inactive lists) and evict */ | ||
227 | ret = i915_gpu_idle(dev); | ||
228 | if (ret) | ||
229 | return ret; | ||
230 | |||
231 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | ||
232 | |||
233 | ret = i915_gem_evict_inactive(dev); | ||
234 | if (ret) | ||
235 | return ret; | ||
236 | |||
237 | spin_lock(&dev_priv->mm.active_list_lock); | ||
238 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | ||
239 | list_empty(&dev_priv->mm.flushing_list) && | ||
240 | list_empty(&dev_priv->render_ring.active_list) && | ||
241 | (!HAS_BSD(dev) | ||
242 | || list_empty(&dev_priv->bsd_ring.active_list))); | ||
243 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
244 | BUG_ON(!lists_empty); | ||
245 | |||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | /** Unbinds all inactive objects. */ | ||
250 | int | ||
251 | i915_gem_evict_inactive(struct drm_device *dev) | ||
252 | { | ||
253 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
254 | |||
255 | while (!list_empty(&dev_priv->mm.inactive_list)) { | ||
256 | struct drm_gem_object *obj; | ||
257 | int ret; | ||
258 | |||
259 | obj = &list_first_entry(&dev_priv->mm.inactive_list, | ||
260 | struct drm_i915_gem_object, | ||
261 | list)->base; | ||
262 | |||
263 | ret = i915_gem_object_unbind(obj); | ||
264 | if (ret != 0) { | ||
265 | DRM_ERROR("Error unbinding object: %d\n", ret); | ||
266 | return ret; | ||
267 | } | ||
268 | } | ||
269 | |||
270 | return 0; | ||
271 | } | ||
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 85785a8844ed..16861b800fee 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -425,9 +425,11 @@ static struct drm_i915_error_object * | |||
425 | i915_error_object_create(struct drm_device *dev, | 425 | i915_error_object_create(struct drm_device *dev, |
426 | struct drm_gem_object *src) | 426 | struct drm_gem_object *src) |
427 | { | 427 | { |
428 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
428 | struct drm_i915_error_object *dst; | 429 | struct drm_i915_error_object *dst; |
429 | struct drm_i915_gem_object *src_priv; | 430 | struct drm_i915_gem_object *src_priv; |
430 | int page, page_count; | 431 | int page, page_count; |
432 | u32 reloc_offset; | ||
431 | 433 | ||
432 | if (src == NULL) | 434 | if (src == NULL) |
433 | return NULL; | 435 | return NULL; |
@@ -442,18 +444,27 @@ i915_error_object_create(struct drm_device *dev, | |||
442 | if (dst == NULL) | 444 | if (dst == NULL) |
443 | return NULL; | 445 | return NULL; |
444 | 446 | ||
447 | reloc_offset = src_priv->gtt_offset; | ||
445 | for (page = 0; page < page_count; page++) { | 448 | for (page = 0; page < page_count; page++) { |
446 | void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC); | ||
447 | unsigned long flags; | 449 | unsigned long flags; |
450 | void __iomem *s; | ||
451 | void *d; | ||
448 | 452 | ||
453 | d = kmalloc(PAGE_SIZE, GFP_ATOMIC); | ||
449 | if (d == NULL) | 454 | if (d == NULL) |
450 | goto unwind; | 455 | goto unwind; |
456 | |||
451 | local_irq_save(flags); | 457 | local_irq_save(flags); |
452 | s = kmap_atomic(src_priv->pages[page], KM_IRQ0); | 458 | s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, |
453 | memcpy(d, s, PAGE_SIZE); | 459 | reloc_offset, |
454 | kunmap_atomic(s, KM_IRQ0); | 460 | KM_IRQ0); |
461 | memcpy_fromio(d, s, PAGE_SIZE); | ||
462 | io_mapping_unmap_atomic(s, KM_IRQ0); | ||
455 | local_irq_restore(flags); | 463 | local_irq_restore(flags); |
464 | |||
456 | dst->pages[page] = d; | 465 | dst->pages[page] = d; |
466 | |||
467 | reloc_offset += PAGE_SIZE; | ||
457 | } | 468 | } |
458 | dst->page_count = page_count; | 469 | dst->page_count = page_count; |
459 | dst->gtt_offset = src_priv->gtt_offset; | 470 | dst->gtt_offset = src_priv->gtt_offset; |
@@ -489,6 +500,7 @@ i915_error_state_free(struct drm_device *dev, | |||
489 | i915_error_object_free(error->batchbuffer[1]); | 500 | i915_error_object_free(error->batchbuffer[1]); |
490 | i915_error_object_free(error->ringbuffer); | 501 | i915_error_object_free(error->ringbuffer); |
491 | kfree(error->active_bo); | 502 | kfree(error->active_bo); |
503 | kfree(error->overlay); | ||
492 | kfree(error); | 504 | kfree(error); |
493 | } | 505 | } |
494 | 506 | ||
@@ -612,18 +624,57 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
612 | 624 | ||
613 | if (batchbuffer[1] == NULL && | 625 | if (batchbuffer[1] == NULL && |
614 | error->acthd >= obj_priv->gtt_offset && | 626 | error->acthd >= obj_priv->gtt_offset && |
615 | error->acthd < obj_priv->gtt_offset + obj->size && | 627 | error->acthd < obj_priv->gtt_offset + obj->size) |
616 | batchbuffer[0] != obj) | ||
617 | batchbuffer[1] = obj; | 628 | batchbuffer[1] = obj; |
618 | 629 | ||
619 | count++; | 630 | count++; |
620 | } | 631 | } |
632 | /* Scan the other lists for completeness for those bizarre errors. */ | ||
633 | if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { | ||
634 | list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { | ||
635 | struct drm_gem_object *obj = &obj_priv->base; | ||
636 | |||
637 | if (batchbuffer[0] == NULL && | ||
638 | bbaddr >= obj_priv->gtt_offset && | ||
639 | bbaddr < obj_priv->gtt_offset + obj->size) | ||
640 | batchbuffer[0] = obj; | ||
641 | |||
642 | if (batchbuffer[1] == NULL && | ||
643 | error->acthd >= obj_priv->gtt_offset && | ||
644 | error->acthd < obj_priv->gtt_offset + obj->size) | ||
645 | batchbuffer[1] = obj; | ||
646 | |||
647 | if (batchbuffer[0] && batchbuffer[1]) | ||
648 | break; | ||
649 | } | ||
650 | } | ||
651 | if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { | ||
652 | list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { | ||
653 | struct drm_gem_object *obj = &obj_priv->base; | ||
654 | |||
655 | if (batchbuffer[0] == NULL && | ||
656 | bbaddr >= obj_priv->gtt_offset && | ||
657 | bbaddr < obj_priv->gtt_offset + obj->size) | ||
658 | batchbuffer[0] = obj; | ||
659 | |||
660 | if (batchbuffer[1] == NULL && | ||
661 | error->acthd >= obj_priv->gtt_offset && | ||
662 | error->acthd < obj_priv->gtt_offset + obj->size) | ||
663 | batchbuffer[1] = obj; | ||
664 | |||
665 | if (batchbuffer[0] && batchbuffer[1]) | ||
666 | break; | ||
667 | } | ||
668 | } | ||
621 | 669 | ||
622 | /* We need to copy these to an anonymous buffer as the simplest | 670 | /* We need to copy these to an anonymous buffer as the simplest |
623 | * method to avoid being overwritten by userpace. | 671 | * method to avoid being overwritten by userpace. |
624 | */ | 672 | */ |
625 | error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]); | 673 | error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]); |
626 | error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); | 674 | if (batchbuffer[1] != batchbuffer[0]) |
675 | error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); | ||
676 | else | ||
677 | error->batchbuffer[1] = NULL; | ||
627 | 678 | ||
628 | /* Record the ringbuffer */ | 679 | /* Record the ringbuffer */ |
629 | error->ringbuffer = i915_error_object_create(dev, | 680 | error->ringbuffer = i915_error_object_create(dev, |
@@ -667,6 +718,8 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
667 | 718 | ||
668 | do_gettimeofday(&error->time); | 719 | do_gettimeofday(&error->time); |
669 | 720 | ||
721 | error->overlay = intel_overlay_capture_error_state(dev); | ||
722 | |||
670 | spin_lock_irqsave(&dev_priv->error_lock, flags); | 723 | spin_lock_irqsave(&dev_priv->error_lock, flags); |
671 | if (dev_priv->first_error == NULL) { | 724 | if (dev_priv->first_error == NULL) { |
672 | dev_priv->first_error = error; | 725 | dev_priv->first_error = error; |
@@ -1251,6 +1304,16 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
1251 | &dev_priv->render_ring), | 1304 | &dev_priv->render_ring), |
1252 | i915_get_tail_request(dev)->seqno)) { | 1305 | i915_get_tail_request(dev)->seqno)) { |
1253 | dev_priv->hangcheck_count = 0; | 1306 | dev_priv->hangcheck_count = 0; |
1307 | |||
1308 | /* Issue a wake-up to catch stuck h/w. */ | ||
1309 | if (dev_priv->render_ring.waiting_gem_seqno | | ||
1310 | dev_priv->bsd_ring.waiting_gem_seqno) { | ||
1311 | DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n"); | ||
1312 | if (dev_priv->render_ring.waiting_gem_seqno) | ||
1313 | DRM_WAKEUP(&dev_priv->render_ring.irq_queue); | ||
1314 | if (dev_priv->bsd_ring.waiting_gem_seqno) | ||
1315 | DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); | ||
1316 | } | ||
1254 | return; | 1317 | return; |
1255 | } | 1318 | } |
1256 | 1319 | ||
@@ -1318,12 +1381,17 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1318 | I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); | 1381 | I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); |
1319 | (void) I915_READ(DEIER); | 1382 | (void) I915_READ(DEIER); |
1320 | 1383 | ||
1321 | /* user interrupt should be enabled, but masked initial */ | 1384 | /* Gen6 only needs render pipe_control now */ |
1385 | if (IS_GEN6(dev)) | ||
1386 | render_mask = GT_PIPE_NOTIFY; | ||
1387 | |||
1322 | dev_priv->gt_irq_mask_reg = ~render_mask; | 1388 | dev_priv->gt_irq_mask_reg = ~render_mask; |
1323 | dev_priv->gt_irq_enable_reg = render_mask; | 1389 | dev_priv->gt_irq_enable_reg = render_mask; |
1324 | 1390 | ||
1325 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 1391 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
1326 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); | 1392 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); |
1393 | if (IS_GEN6(dev)) | ||
1394 | I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT); | ||
1327 | I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); | 1395 | I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); |
1328 | (void) I915_READ(GTIER); | 1396 | (void) I915_READ(GTIER); |
1329 | 1397 | ||
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c index d1bf92b99788..ea5d3fea4b61 100644 --- a/drivers/gpu/drm/i915/i915_opregion.c +++ b/drivers/gpu/drm/i915/i915_opregion.c | |||
@@ -114,10 +114,6 @@ struct opregion_asle { | |||
114 | #define ASLE_REQ_MSK 0xf | 114 | #define ASLE_REQ_MSK 0xf |
115 | 115 | ||
116 | /* response bits of ASLE irq request */ | 116 | /* response bits of ASLE irq request */ |
117 | #define ASLE_ALS_ILLUM_FAIL (2<<10) | ||
118 | #define ASLE_BACKLIGHT_FAIL (2<<12) | ||
119 | #define ASLE_PFIT_FAIL (2<<14) | ||
120 | #define ASLE_PWM_FREQ_FAIL (2<<16) | ||
121 | #define ASLE_ALS_ILLUM_FAILED (1<<10) | 117 | #define ASLE_ALS_ILLUM_FAILED (1<<10) |
122 | #define ASLE_BACKLIGHT_FAILED (1<<12) | 118 | #define ASLE_BACKLIGHT_FAILED (1<<12) |
123 | #define ASLE_PFIT_FAILED (1<<14) | 119 | #define ASLE_PFIT_FAILED (1<<14) |
@@ -155,11 +151,11 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) | |||
155 | u32 max_backlight, level, shift; | 151 | u32 max_backlight, level, shift; |
156 | 152 | ||
157 | if (!(bclp & ASLE_BCLP_VALID)) | 153 | if (!(bclp & ASLE_BCLP_VALID)) |
158 | return ASLE_BACKLIGHT_FAIL; | 154 | return ASLE_BACKLIGHT_FAILED; |
159 | 155 | ||
160 | bclp &= ASLE_BCLP_MSK; | 156 | bclp &= ASLE_BCLP_MSK; |
161 | if (bclp < 0 || bclp > 255) | 157 | if (bclp < 0 || bclp > 255) |
162 | return ASLE_BACKLIGHT_FAIL; | 158 | return ASLE_BACKLIGHT_FAILED; |
163 | 159 | ||
164 | blc_pwm_ctl = I915_READ(BLC_PWM_CTL); | 160 | blc_pwm_ctl = I915_READ(BLC_PWM_CTL); |
165 | blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2); | 161 | blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2); |
@@ -211,7 +207,7 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) | |||
211 | /* Panel fitting is currently controlled by the X code, so this is a | 207 | /* Panel fitting is currently controlled by the X code, so this is a |
212 | noop until modesetting support works fully */ | 208 | noop until modesetting support works fully */ |
213 | if (!(pfit & ASLE_PFIT_VALID)) | 209 | if (!(pfit & ASLE_PFIT_VALID)) |
214 | return ASLE_PFIT_FAIL; | 210 | return ASLE_PFIT_FAILED; |
215 | return 0; | 211 | return 0; |
216 | } | 212 | } |
217 | 213 | ||
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 281db6e5403a..67e3ec1a6af9 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -170,6 +170,7 @@ | |||
170 | #define MI_NO_WRITE_FLUSH (1 << 2) | 170 | #define MI_NO_WRITE_FLUSH (1 << 2) |
171 | #define MI_SCENE_COUNT (1 << 3) /* just increment scene count */ | 171 | #define MI_SCENE_COUNT (1 << 3) /* just increment scene count */ |
172 | #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ | 172 | #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ |
173 | #define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */ | ||
173 | #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) | 174 | #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) |
174 | #define MI_REPORT_HEAD MI_INSTR(0x07, 0) | 175 | #define MI_REPORT_HEAD MI_INSTR(0x07, 0) |
175 | #define MI_OVERLAY_FLIP MI_INSTR(0x11,0) | 176 | #define MI_OVERLAY_FLIP MI_INSTR(0x11,0) |
@@ -180,6 +181,12 @@ | |||
180 | #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) | 181 | #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) |
181 | #define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) | 182 | #define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) |
182 | #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) | 183 | #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) |
184 | #define MI_SET_CONTEXT MI_INSTR(0x18, 0) | ||
185 | #define MI_MM_SPACE_GTT (1<<8) | ||
186 | #define MI_MM_SPACE_PHYSICAL (0<<8) | ||
187 | #define MI_SAVE_EXT_STATE_EN (1<<3) | ||
188 | #define MI_RESTORE_EXT_STATE_EN (1<<2) | ||
189 | #define MI_RESTORE_INHIBIT (1<<0) | ||
183 | #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) | 190 | #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) |
184 | #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ | 191 | #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ |
185 | #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) | 192 | #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) |
@@ -1100,6 +1107,11 @@ | |||
1100 | #define PEG_BAND_GAP_DATA 0x14d68 | 1107 | #define PEG_BAND_GAP_DATA 0x14d68 |
1101 | 1108 | ||
1102 | /* | 1109 | /* |
1110 | * Logical Context regs | ||
1111 | */ | ||
1112 | #define CCID 0x2180 | ||
1113 | #define CCID_EN (1<<0) | ||
1114 | /* | ||
1103 | * Overlay regs | 1115 | * Overlay regs |
1104 | */ | 1116 | */ |
1105 | 1117 | ||
@@ -2069,6 +2081,7 @@ | |||
2069 | #define PIPE_DITHER_TYPE_ST01 (1 << 2) | 2081 | #define PIPE_DITHER_TYPE_ST01 (1 << 2) |
2070 | /* Pipe A */ | 2082 | /* Pipe A */ |
2071 | #define PIPEADSL 0x70000 | 2083 | #define PIPEADSL 0x70000 |
2084 | #define DSL_LINEMASK 0x00000fff | ||
2072 | #define PIPEACONF 0x70008 | 2085 | #define PIPEACONF 0x70008 |
2073 | #define PIPEACONF_ENABLE (1<<31) | 2086 | #define PIPEACONF_ENABLE (1<<31) |
2074 | #define PIPEACONF_DISABLE 0 | 2087 | #define PIPEACONF_DISABLE 0 |
@@ -2928,6 +2941,7 @@ | |||
2928 | #define TRANS_DP_VSYNC_ACTIVE_LOW 0 | 2941 | #define TRANS_DP_VSYNC_ACTIVE_LOW 0 |
2929 | #define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3) | 2942 | #define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3) |
2930 | #define TRANS_DP_HSYNC_ACTIVE_LOW 0 | 2943 | #define TRANS_DP_HSYNC_ACTIVE_LOW 0 |
2944 | #define TRANS_DP_SYNC_MASK (3<<3) | ||
2931 | 2945 | ||
2932 | /* SNB eDP training params */ | 2946 | /* SNB eDP training params */ |
2933 | /* SNB A-stepping */ | 2947 | /* SNB A-stepping */ |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 6e2025274db5..2c6b98f2440e 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -34,7 +34,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) | |||
34 | struct drm_i915_private *dev_priv = dev->dev_private; | 34 | struct drm_i915_private *dev_priv = dev->dev_private; |
35 | u32 dpll_reg; | 35 | u32 dpll_reg; |
36 | 36 | ||
37 | if (IS_IRONLAKE(dev)) { | 37 | if (HAS_PCH_SPLIT(dev)) { |
38 | dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B; | 38 | dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B; |
39 | } else { | 39 | } else { |
40 | dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B; | 40 | dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B; |
@@ -53,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe) | |||
53 | if (!i915_pipe_enabled(dev, pipe)) | 53 | if (!i915_pipe_enabled(dev, pipe)) |
54 | return; | 54 | return; |
55 | 55 | ||
56 | if (IS_IRONLAKE(dev)) | 56 | if (HAS_PCH_SPLIT(dev)) |
57 | reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; | 57 | reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; |
58 | 58 | ||
59 | if (pipe == PIPE_A) | 59 | if (pipe == PIPE_A) |
@@ -75,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) | |||
75 | if (!i915_pipe_enabled(dev, pipe)) | 75 | if (!i915_pipe_enabled(dev, pipe)) |
76 | return; | 76 | return; |
77 | 77 | ||
78 | if (IS_IRONLAKE(dev)) | 78 | if (HAS_PCH_SPLIT(dev)) |
79 | reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; | 79 | reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; |
80 | 80 | ||
81 | if (pipe == PIPE_A) | 81 | if (pipe == PIPE_A) |
@@ -239,7 +239,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
239 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 239 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
240 | return; | 240 | return; |
241 | 241 | ||
242 | if (IS_IRONLAKE(dev)) { | 242 | if (HAS_PCH_SPLIT(dev)) { |
243 | dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); | 243 | dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); |
244 | dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); | 244 | dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); |
245 | } | 245 | } |
@@ -247,7 +247,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
247 | /* Pipe & plane A info */ | 247 | /* Pipe & plane A info */ |
248 | dev_priv->savePIPEACONF = I915_READ(PIPEACONF); | 248 | dev_priv->savePIPEACONF = I915_READ(PIPEACONF); |
249 | dev_priv->savePIPEASRC = I915_READ(PIPEASRC); | 249 | dev_priv->savePIPEASRC = I915_READ(PIPEASRC); |
250 | if (IS_IRONLAKE(dev)) { | 250 | if (HAS_PCH_SPLIT(dev)) { |
251 | dev_priv->saveFPA0 = I915_READ(PCH_FPA0); | 251 | dev_priv->saveFPA0 = I915_READ(PCH_FPA0); |
252 | dev_priv->saveFPA1 = I915_READ(PCH_FPA1); | 252 | dev_priv->saveFPA1 = I915_READ(PCH_FPA1); |
253 | dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A); | 253 | dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A); |
@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
256 | dev_priv->saveFPA1 = I915_READ(FPA1); | 256 | dev_priv->saveFPA1 = I915_READ(FPA1); |
257 | dev_priv->saveDPLL_A = I915_READ(DPLL_A); | 257 | dev_priv->saveDPLL_A = I915_READ(DPLL_A); |
258 | } | 258 | } |
259 | if (IS_I965G(dev) && !IS_IRONLAKE(dev)) | 259 | if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) |
260 | dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); | 260 | dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); |
261 | dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); | 261 | dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); |
262 | dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); | 262 | dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); |
@@ -264,10 +264,10 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
264 | dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); | 264 | dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); |
265 | dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); | 265 | dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); |
266 | dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); | 266 | dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); |
267 | if (!IS_IRONLAKE(dev)) | 267 | if (!HAS_PCH_SPLIT(dev)) |
268 | dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); | 268 | dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); |
269 | 269 | ||
270 | if (IS_IRONLAKE(dev)) { | 270 | if (HAS_PCH_SPLIT(dev)) { |
271 | dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1); | 271 | dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1); |
272 | dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1); | 272 | dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1); |
273 | dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1); | 273 | dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1); |
@@ -304,7 +304,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
304 | /* Pipe & plane B info */ | 304 | /* Pipe & plane B info */ |
305 | dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); | 305 | dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); |
306 | dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); | 306 | dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); |
307 | if (IS_IRONLAKE(dev)) { | 307 | if (HAS_PCH_SPLIT(dev)) { |
308 | dev_priv->saveFPB0 = I915_READ(PCH_FPB0); | 308 | dev_priv->saveFPB0 = I915_READ(PCH_FPB0); |
309 | dev_priv->saveFPB1 = I915_READ(PCH_FPB1); | 309 | dev_priv->saveFPB1 = I915_READ(PCH_FPB1); |
310 | dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B); | 310 | dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B); |
@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
313 | dev_priv->saveFPB1 = I915_READ(FPB1); | 313 | dev_priv->saveFPB1 = I915_READ(FPB1); |
314 | dev_priv->saveDPLL_B = I915_READ(DPLL_B); | 314 | dev_priv->saveDPLL_B = I915_READ(DPLL_B); |
315 | } | 315 | } |
316 | if (IS_I965G(dev) && !IS_IRONLAKE(dev)) | 316 | if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) |
317 | dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); | 317 | dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); |
318 | dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); | 318 | dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); |
319 | dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); | 319 | dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); |
@@ -321,10 +321,10 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
321 | dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); | 321 | dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); |
322 | dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); | 322 | dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); |
323 | dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); | 323 | dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); |
324 | if (!IS_IRONLAKE(dev)) | 324 | if (!HAS_PCH_SPLIT(dev)) |
325 | dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B); | 325 | dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B); |
326 | 326 | ||
327 | if (IS_IRONLAKE(dev)) { | 327 | if (HAS_PCH_SPLIT(dev)) { |
328 | dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1); | 328 | dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1); |
329 | dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1); | 329 | dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1); |
330 | dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1); | 330 | dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1); |
@@ -369,7 +369,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
369 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 369 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
370 | return; | 370 | return; |
371 | 371 | ||
372 | if (IS_IRONLAKE(dev)) { | 372 | if (HAS_PCH_SPLIT(dev)) { |
373 | dpll_a_reg = PCH_DPLL_A; | 373 | dpll_a_reg = PCH_DPLL_A; |
374 | dpll_b_reg = PCH_DPLL_B; | 374 | dpll_b_reg = PCH_DPLL_B; |
375 | fpa0_reg = PCH_FPA0; | 375 | fpa0_reg = PCH_FPA0; |
@@ -385,7 +385,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
385 | fpb1_reg = FPB1; | 385 | fpb1_reg = FPB1; |
386 | } | 386 | } |
387 | 387 | ||
388 | if (IS_IRONLAKE(dev)) { | 388 | if (HAS_PCH_SPLIT(dev)) { |
389 | I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL); | 389 | I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL); |
390 | I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL); | 390 | I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL); |
391 | } | 391 | } |
@@ -395,16 +395,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
395 | if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { | 395 | if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { |
396 | I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A & | 396 | I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A & |
397 | ~DPLL_VCO_ENABLE); | 397 | ~DPLL_VCO_ENABLE); |
398 | DRM_UDELAY(150); | 398 | POSTING_READ(dpll_a_reg); |
399 | udelay(150); | ||
399 | } | 400 | } |
400 | I915_WRITE(fpa0_reg, dev_priv->saveFPA0); | 401 | I915_WRITE(fpa0_reg, dev_priv->saveFPA0); |
401 | I915_WRITE(fpa1_reg, dev_priv->saveFPA1); | 402 | I915_WRITE(fpa1_reg, dev_priv->saveFPA1); |
402 | /* Actually enable it */ | 403 | /* Actually enable it */ |
403 | I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); | 404 | I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); |
404 | DRM_UDELAY(150); | 405 | POSTING_READ(dpll_a_reg); |
405 | if (IS_I965G(dev) && !IS_IRONLAKE(dev)) | 406 | udelay(150); |
407 | if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { | ||
406 | I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); | 408 | I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); |
407 | DRM_UDELAY(150); | 409 | POSTING_READ(DPLL_A_MD); |
410 | } | ||
411 | udelay(150); | ||
408 | 412 | ||
409 | /* Restore mode */ | 413 | /* Restore mode */ |
410 | I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A); | 414 | I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A); |
@@ -413,10 +417,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
413 | I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); | 417 | I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); |
414 | I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); | 418 | I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); |
415 | I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); | 419 | I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); |
416 | if (!IS_IRONLAKE(dev)) | 420 | if (!HAS_PCH_SPLIT(dev)) |
417 | I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); | 421 | I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); |
418 | 422 | ||
419 | if (IS_IRONLAKE(dev)) { | 423 | if (HAS_PCH_SPLIT(dev)) { |
420 | I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); | 424 | I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); |
421 | I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); | 425 | I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); |
422 | I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); | 426 | I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); |
@@ -460,16 +464,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
460 | if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { | 464 | if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { |
461 | I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B & | 465 | I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B & |
462 | ~DPLL_VCO_ENABLE); | 466 | ~DPLL_VCO_ENABLE); |
463 | DRM_UDELAY(150); | 467 | POSTING_READ(dpll_b_reg); |
468 | udelay(150); | ||
464 | } | 469 | } |
465 | I915_WRITE(fpb0_reg, dev_priv->saveFPB0); | 470 | I915_WRITE(fpb0_reg, dev_priv->saveFPB0); |
466 | I915_WRITE(fpb1_reg, dev_priv->saveFPB1); | 471 | I915_WRITE(fpb1_reg, dev_priv->saveFPB1); |
467 | /* Actually enable it */ | 472 | /* Actually enable it */ |
468 | I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); | 473 | I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); |
469 | DRM_UDELAY(150); | 474 | POSTING_READ(dpll_b_reg); |
470 | if (IS_I965G(dev) && !IS_IRONLAKE(dev)) | 475 | udelay(150); |
476 | if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { | ||
471 | I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); | 477 | I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); |
472 | DRM_UDELAY(150); | 478 | POSTING_READ(DPLL_B_MD); |
479 | } | ||
480 | udelay(150); | ||
473 | 481 | ||
474 | /* Restore mode */ | 482 | /* Restore mode */ |
475 | I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B); | 483 | I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B); |
@@ -478,10 +486,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
478 | I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); | 486 | I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); |
479 | I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); | 487 | I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); |
480 | I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); | 488 | I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); |
481 | if (!IS_IRONLAKE(dev)) | 489 | if (!HAS_PCH_SPLIT(dev)) |
482 | I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); | 490 | I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); |
483 | 491 | ||
484 | if (IS_IRONLAKE(dev)) { | 492 | if (HAS_PCH_SPLIT(dev)) { |
485 | I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); | 493 | I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); |
486 | I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); | 494 | I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); |
487 | I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); | 495 | I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); |
@@ -546,14 +554,14 @@ void i915_save_display(struct drm_device *dev) | |||
546 | dev_priv->saveCURSIZE = I915_READ(CURSIZE); | 554 | dev_priv->saveCURSIZE = I915_READ(CURSIZE); |
547 | 555 | ||
548 | /* CRT state */ | 556 | /* CRT state */ |
549 | if (IS_IRONLAKE(dev)) { | 557 | if (HAS_PCH_SPLIT(dev)) { |
550 | dev_priv->saveADPA = I915_READ(PCH_ADPA); | 558 | dev_priv->saveADPA = I915_READ(PCH_ADPA); |
551 | } else { | 559 | } else { |
552 | dev_priv->saveADPA = I915_READ(ADPA); | 560 | dev_priv->saveADPA = I915_READ(ADPA); |
553 | } | 561 | } |
554 | 562 | ||
555 | /* LVDS state */ | 563 | /* LVDS state */ |
556 | if (IS_IRONLAKE(dev)) { | 564 | if (HAS_PCH_SPLIT(dev)) { |
557 | dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL); | 565 | dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL); |
558 | dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); | 566 | dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); |
559 | dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); | 567 | dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); |
@@ -571,10 +579,10 @@ void i915_save_display(struct drm_device *dev) | |||
571 | dev_priv->saveLVDS = I915_READ(LVDS); | 579 | dev_priv->saveLVDS = I915_READ(LVDS); |
572 | } | 580 | } |
573 | 581 | ||
574 | if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev)) | 582 | if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) |
575 | dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); | 583 | dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); |
576 | 584 | ||
577 | if (IS_IRONLAKE(dev)) { | 585 | if (HAS_PCH_SPLIT(dev)) { |
578 | dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); | 586 | dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); |
579 | dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); | 587 | dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); |
580 | dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); | 588 | dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); |
@@ -602,7 +610,7 @@ void i915_save_display(struct drm_device *dev) | |||
602 | 610 | ||
603 | /* Only save FBC state on the platform that supports FBC */ | 611 | /* Only save FBC state on the platform that supports FBC */ |
604 | if (I915_HAS_FBC(dev)) { | 612 | if (I915_HAS_FBC(dev)) { |
605 | if (IS_IRONLAKE_M(dev)) { | 613 | if (HAS_PCH_SPLIT(dev)) { |
606 | dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); | 614 | dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); |
607 | } else if (IS_GM45(dev)) { | 615 | } else if (IS_GM45(dev)) { |
608 | dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); | 616 | dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); |
@@ -618,7 +626,7 @@ void i915_save_display(struct drm_device *dev) | |||
618 | dev_priv->saveVGA0 = I915_READ(VGA0); | 626 | dev_priv->saveVGA0 = I915_READ(VGA0); |
619 | dev_priv->saveVGA1 = I915_READ(VGA1); | 627 | dev_priv->saveVGA1 = I915_READ(VGA1); |
620 | dev_priv->saveVGA_PD = I915_READ(VGA_PD); | 628 | dev_priv->saveVGA_PD = I915_READ(VGA_PD); |
621 | if (IS_IRONLAKE(dev)) | 629 | if (HAS_PCH_SPLIT(dev)) |
622 | dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL); | 630 | dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL); |
623 | else | 631 | else |
624 | dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); | 632 | dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); |
@@ -660,24 +668,24 @@ void i915_restore_display(struct drm_device *dev) | |||
660 | I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); | 668 | I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); |
661 | 669 | ||
662 | /* CRT state */ | 670 | /* CRT state */ |
663 | if (IS_IRONLAKE(dev)) | 671 | if (HAS_PCH_SPLIT(dev)) |
664 | I915_WRITE(PCH_ADPA, dev_priv->saveADPA); | 672 | I915_WRITE(PCH_ADPA, dev_priv->saveADPA); |
665 | else | 673 | else |
666 | I915_WRITE(ADPA, dev_priv->saveADPA); | 674 | I915_WRITE(ADPA, dev_priv->saveADPA); |
667 | 675 | ||
668 | /* LVDS state */ | 676 | /* LVDS state */ |
669 | if (IS_I965G(dev) && !IS_IRONLAKE(dev)) | 677 | if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) |
670 | I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); | 678 | I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); |
671 | 679 | ||
672 | if (IS_IRONLAKE(dev)) { | 680 | if (HAS_PCH_SPLIT(dev)) { |
673 | I915_WRITE(PCH_LVDS, dev_priv->saveLVDS); | 681 | I915_WRITE(PCH_LVDS, dev_priv->saveLVDS); |
674 | } else if (IS_MOBILE(dev) && !IS_I830(dev)) | 682 | } else if (IS_MOBILE(dev) && !IS_I830(dev)) |
675 | I915_WRITE(LVDS, dev_priv->saveLVDS); | 683 | I915_WRITE(LVDS, dev_priv->saveLVDS); |
676 | 684 | ||
677 | if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev)) | 685 | if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) |
678 | I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); | 686 | I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); |
679 | 687 | ||
680 | if (IS_IRONLAKE(dev)) { | 688 | if (HAS_PCH_SPLIT(dev)) { |
681 | I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL); | 689 | I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL); |
682 | I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2); | 690 | I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2); |
683 | I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL); | 691 | I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL); |
@@ -708,7 +716,7 @@ void i915_restore_display(struct drm_device *dev) | |||
708 | 716 | ||
709 | /* only restore FBC info on the platform that supports FBC*/ | 717 | /* only restore FBC info on the platform that supports FBC*/ |
710 | if (I915_HAS_FBC(dev)) { | 718 | if (I915_HAS_FBC(dev)) { |
711 | if (IS_IRONLAKE_M(dev)) { | 719 | if (HAS_PCH_SPLIT(dev)) { |
712 | ironlake_disable_fbc(dev); | 720 | ironlake_disable_fbc(dev); |
713 | I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); | 721 | I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); |
714 | } else if (IS_GM45(dev)) { | 722 | } else if (IS_GM45(dev)) { |
@@ -723,14 +731,15 @@ void i915_restore_display(struct drm_device *dev) | |||
723 | } | 731 | } |
724 | } | 732 | } |
725 | /* VGA state */ | 733 | /* VGA state */ |
726 | if (IS_IRONLAKE(dev)) | 734 | if (HAS_PCH_SPLIT(dev)) |
727 | I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); | 735 | I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); |
728 | else | 736 | else |
729 | I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); | 737 | I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); |
730 | I915_WRITE(VGA0, dev_priv->saveVGA0); | 738 | I915_WRITE(VGA0, dev_priv->saveVGA0); |
731 | I915_WRITE(VGA1, dev_priv->saveVGA1); | 739 | I915_WRITE(VGA1, dev_priv->saveVGA1); |
732 | I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); | 740 | I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); |
733 | DRM_UDELAY(150); | 741 | POSTING_READ(VGA_PD); |
742 | udelay(150); | ||
734 | 743 | ||
735 | i915_restore_vga(dev); | 744 | i915_restore_vga(dev); |
736 | } | 745 | } |
@@ -748,7 +757,7 @@ int i915_save_state(struct drm_device *dev) | |||
748 | i915_save_display(dev); | 757 | i915_save_display(dev); |
749 | 758 | ||
750 | /* Interrupt state */ | 759 | /* Interrupt state */ |
751 | if (IS_IRONLAKE(dev)) { | 760 | if (HAS_PCH_SPLIT(dev)) { |
752 | dev_priv->saveDEIER = I915_READ(DEIER); | 761 | dev_priv->saveDEIER = I915_READ(DEIER); |
753 | dev_priv->saveDEIMR = I915_READ(DEIMR); | 762 | dev_priv->saveDEIMR = I915_READ(DEIMR); |
754 | dev_priv->saveGTIER = I915_READ(GTIER); | 763 | dev_priv->saveGTIER = I915_READ(GTIER); |
@@ -762,7 +771,7 @@ int i915_save_state(struct drm_device *dev) | |||
762 | dev_priv->saveIMR = I915_READ(IMR); | 771 | dev_priv->saveIMR = I915_READ(IMR); |
763 | } | 772 | } |
764 | 773 | ||
765 | if (IS_IRONLAKE_M(dev)) | 774 | if (HAS_PCH_SPLIT(dev)) |
766 | ironlake_disable_drps(dev); | 775 | ironlake_disable_drps(dev); |
767 | 776 | ||
768 | /* Cache mode state */ | 777 | /* Cache mode state */ |
@@ -820,7 +829,7 @@ int i915_restore_state(struct drm_device *dev) | |||
820 | i915_restore_display(dev); | 829 | i915_restore_display(dev); |
821 | 830 | ||
822 | /* Interrupt state */ | 831 | /* Interrupt state */ |
823 | if (IS_IRONLAKE(dev)) { | 832 | if (HAS_PCH_SPLIT(dev)) { |
824 | I915_WRITE(DEIER, dev_priv->saveDEIER); | 833 | I915_WRITE(DEIER, dev_priv->saveDEIER); |
825 | I915_WRITE(DEIMR, dev_priv->saveDEIMR); | 834 | I915_WRITE(DEIMR, dev_priv->saveDEIMR); |
826 | I915_WRITE(GTIER, dev_priv->saveGTIER); | 835 | I915_WRITE(GTIER, dev_priv->saveGTIER); |
@@ -835,7 +844,7 @@ int i915_restore_state(struct drm_device *dev) | |||
835 | /* Clock gating state */ | 844 | /* Clock gating state */ |
836 | intel_init_clock_gating(dev); | 845 | intel_init_clock_gating(dev); |
837 | 846 | ||
838 | if (IS_IRONLAKE_M(dev)) | 847 | if (HAS_PCH_SPLIT(dev)) |
839 | ironlake_enable_drps(dev); | 848 | ironlake_enable_drps(dev); |
840 | 849 | ||
841 | /* Cache mode state */ | 850 | /* Cache mode state */ |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index ee0732b222a1..4b7735196cd5 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -160,19 +160,20 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) | |||
160 | struct drm_i915_private *dev_priv = dev->dev_private; | 160 | struct drm_i915_private *dev_priv = dev->dev_private; |
161 | u32 adpa, temp; | 161 | u32 adpa, temp; |
162 | bool ret; | 162 | bool ret; |
163 | bool turn_off_dac = false; | ||
163 | 164 | ||
164 | temp = adpa = I915_READ(PCH_ADPA); | 165 | temp = adpa = I915_READ(PCH_ADPA); |
165 | 166 | ||
166 | if (HAS_PCH_CPT(dev)) { | 167 | if (HAS_PCH_SPLIT(dev)) |
167 | /* Disable DAC before force detect */ | 168 | turn_off_dac = true; |
168 | I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE); | 169 | |
169 | (void)I915_READ(PCH_ADPA); | 170 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; |
170 | } else { | 171 | if (turn_off_dac) |
171 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; | 172 | adpa &= ~ADPA_DAC_ENABLE; |
172 | /* disable HPD first */ | 173 | |
173 | I915_WRITE(PCH_ADPA, adpa); | 174 | /* disable HPD first */ |
174 | (void)I915_READ(PCH_ADPA); | 175 | I915_WRITE(PCH_ADPA, adpa); |
175 | } | 176 | (void)I915_READ(PCH_ADPA); |
176 | 177 | ||
177 | adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | | 178 | adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | |
178 | ADPA_CRT_HOTPLUG_WARMUP_10MS | | 179 | ADPA_CRT_HOTPLUG_WARMUP_10MS | |
@@ -185,10 +186,11 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) | |||
185 | DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa); | 186 | DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa); |
186 | I915_WRITE(PCH_ADPA, adpa); | 187 | I915_WRITE(PCH_ADPA, adpa); |
187 | 188 | ||
188 | while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0) | 189 | if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, |
189 | ; | 190 | 1000, 1)) |
191 | DRM_ERROR("timed out waiting for FORCE_TRIGGER"); | ||
190 | 192 | ||
191 | if (HAS_PCH_CPT(dev)) { | 193 | if (turn_off_dac) { |
192 | I915_WRITE(PCH_ADPA, temp); | 194 | I915_WRITE(PCH_ADPA, temp); |
193 | (void)I915_READ(PCH_ADPA); | 195 | (void)I915_READ(PCH_ADPA); |
194 | } | 196 | } |
@@ -237,17 +239,13 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) | |||
237 | hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; | 239 | hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; |
238 | 240 | ||
239 | for (i = 0; i < tries ; i++) { | 241 | for (i = 0; i < tries ; i++) { |
240 | unsigned long timeout; | ||
241 | /* turn on the FORCE_DETECT */ | 242 | /* turn on the FORCE_DETECT */ |
242 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | 243 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); |
243 | timeout = jiffies + msecs_to_jiffies(1000); | ||
244 | /* wait for FORCE_DETECT to go off */ | 244 | /* wait for FORCE_DETECT to go off */ |
245 | do { | 245 | if (wait_for((I915_READ(PORT_HOTPLUG_EN) & |
246 | if (!(I915_READ(PORT_HOTPLUG_EN) & | 246 | CRT_HOTPLUG_FORCE_DETECT) == 0, |
247 | CRT_HOTPLUG_FORCE_DETECT)) | 247 | 1000, 1)) |
248 | break; | 248 | DRM_ERROR("timed out waiting for FORCE_DETECT to go off"); |
249 | msleep(1); | ||
250 | } while (time_after(timeout, jiffies)); | ||
251 | } | 249 | } |
252 | 250 | ||
253 | stat = I915_READ(PORT_HOTPLUG_STAT); | 251 | stat = I915_READ(PORT_HOTPLUG_STAT); |
@@ -331,7 +329,7 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder | |||
331 | I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER); | 329 | I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER); |
332 | /* Wait for next Vblank to substitue | 330 | /* Wait for next Vblank to substitue |
333 | * border color for Color info */ | 331 | * border color for Color info */ |
334 | intel_wait_for_vblank(dev); | 332 | intel_wait_for_vblank(dev, pipe); |
335 | st00 = I915_READ8(VGA_MSR_WRITE); | 333 | st00 = I915_READ8(VGA_MSR_WRITE); |
336 | status = ((st00 & (1 << 4)) != 0) ? | 334 | status = ((st00 & (1 << 4)) != 0) ? |
337 | connector_status_connected : | 335 | connector_status_connected : |
@@ -508,17 +506,8 @@ static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs | |||
508 | .best_encoder = intel_attached_encoder, | 506 | .best_encoder = intel_attached_encoder, |
509 | }; | 507 | }; |
510 | 508 | ||
511 | static void intel_crt_enc_destroy(struct drm_encoder *encoder) | ||
512 | { | ||
513 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
514 | |||
515 | intel_i2c_destroy(intel_encoder->ddc_bus); | ||
516 | drm_encoder_cleanup(encoder); | ||
517 | kfree(intel_encoder); | ||
518 | } | ||
519 | |||
520 | static const struct drm_encoder_funcs intel_crt_enc_funcs = { | 509 | static const struct drm_encoder_funcs intel_crt_enc_funcs = { |
521 | .destroy = intel_crt_enc_destroy, | 510 | .destroy = intel_encoder_destroy, |
522 | }; | 511 | }; |
523 | 512 | ||
524 | void intel_crt_init(struct drm_device *dev) | 513 | void intel_crt_init(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5ec10e02341b..23157e1de3be 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/i2c.h> | 29 | #include <linux/i2c.h> |
30 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
31 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
32 | #include <linux/vgaarb.h> | ||
32 | #include "drmP.h" | 33 | #include "drmP.h" |
33 | #include "intel_drv.h" | 34 | #include "intel_drv.h" |
34 | #include "i915_drm.h" | 35 | #include "i915_drm.h" |
@@ -976,14 +977,54 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
976 | return true; | 977 | return true; |
977 | } | 978 | } |
978 | 979 | ||
979 | void | 980 | /** |
980 | intel_wait_for_vblank(struct drm_device *dev) | 981 | * intel_wait_for_vblank - wait for vblank on a given pipe |
982 | * @dev: drm device | ||
983 | * @pipe: pipe to wait for | ||
984 | * | ||
985 | * Wait for vblank to occur on a given pipe. Needed for various bits of | ||
986 | * mode setting code. | ||
987 | */ | ||
988 | void intel_wait_for_vblank(struct drm_device *dev, int pipe) | ||
981 | { | 989 | { |
982 | /* Wait for 20ms, i.e. one cycle at 50hz. */ | 990 | struct drm_i915_private *dev_priv = dev->dev_private; |
983 | if (in_dbg_master()) | 991 | int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT); |
984 | mdelay(20); /* The kernel debugger cannot call msleep() */ | 992 | |
985 | else | 993 | /* Wait for vblank interrupt bit to set */ |
986 | msleep(20); | 994 | if (wait_for((I915_READ(pipestat_reg) & |
995 | PIPE_VBLANK_INTERRUPT_STATUS) == 0, | ||
996 | 50, 0)) | ||
997 | DRM_DEBUG_KMS("vblank wait timed out\n"); | ||
998 | } | ||
999 | |||
1000 | /** | ||
1001 | * intel_wait_for_vblank_off - wait for vblank after disabling a pipe | ||
1002 | * @dev: drm device | ||
1003 | * @pipe: pipe to wait for | ||
1004 | * | ||
1005 | * After disabling a pipe, we can't wait for vblank in the usual way, | ||
1006 | * spinning on the vblank interrupt status bit, since we won't actually | ||
1007 | * see an interrupt when the pipe is disabled. | ||
1008 | * | ||
1009 | * So this function waits for the display line value to settle (it | ||
1010 | * usually ends up stopping at the start of the next frame). | ||
1011 | */ | ||
1012 | void intel_wait_for_vblank_off(struct drm_device *dev, int pipe) | ||
1013 | { | ||
1014 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1015 | int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL); | ||
1016 | unsigned long timeout = jiffies + msecs_to_jiffies(100); | ||
1017 | u32 last_line; | ||
1018 | |||
1019 | /* Wait for the display line to settle */ | ||
1020 | do { | ||
1021 | last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; | ||
1022 | mdelay(5); | ||
1023 | } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && | ||
1024 | time_after(timeout, jiffies)); | ||
1025 | |||
1026 | if (time_after(jiffies, timeout)) | ||
1027 | DRM_DEBUG_KMS("vblank wait timed out\n"); | ||
987 | } | 1028 | } |
988 | 1029 | ||
989 | /* Parameters have changed, update FBC info */ | 1030 | /* Parameters have changed, update FBC info */ |
@@ -1037,7 +1078,6 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
1037 | void i8xx_disable_fbc(struct drm_device *dev) | 1078 | void i8xx_disable_fbc(struct drm_device *dev) |
1038 | { | 1079 | { |
1039 | struct drm_i915_private *dev_priv = dev->dev_private; | 1080 | struct drm_i915_private *dev_priv = dev->dev_private; |
1040 | unsigned long timeout = jiffies + msecs_to_jiffies(1); | ||
1041 | u32 fbc_ctl; | 1081 | u32 fbc_ctl; |
1042 | 1082 | ||
1043 | if (!I915_HAS_FBC(dev)) | 1083 | if (!I915_HAS_FBC(dev)) |
@@ -1052,16 +1092,11 @@ void i8xx_disable_fbc(struct drm_device *dev) | |||
1052 | I915_WRITE(FBC_CONTROL, fbc_ctl); | 1092 | I915_WRITE(FBC_CONTROL, fbc_ctl); |
1053 | 1093 | ||
1054 | /* Wait for compressing bit to clear */ | 1094 | /* Wait for compressing bit to clear */ |
1055 | while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) { | 1095 | if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 0)) { |
1056 | if (time_after(jiffies, timeout)) { | 1096 | DRM_DEBUG_KMS("FBC idle timed out\n"); |
1057 | DRM_DEBUG_DRIVER("FBC idle timed out\n"); | 1097 | return; |
1058 | break; | ||
1059 | } | ||
1060 | ; /* do nothing */ | ||
1061 | } | 1098 | } |
1062 | 1099 | ||
1063 | intel_wait_for_vblank(dev); | ||
1064 | |||
1065 | DRM_DEBUG_KMS("disabled FBC\n"); | 1100 | DRM_DEBUG_KMS("disabled FBC\n"); |
1066 | } | 1101 | } |
1067 | 1102 | ||
@@ -1118,7 +1153,6 @@ void g4x_disable_fbc(struct drm_device *dev) | |||
1118 | dpfc_ctl = I915_READ(DPFC_CONTROL); | 1153 | dpfc_ctl = I915_READ(DPFC_CONTROL); |
1119 | dpfc_ctl &= ~DPFC_CTL_EN; | 1154 | dpfc_ctl &= ~DPFC_CTL_EN; |
1120 | I915_WRITE(DPFC_CONTROL, dpfc_ctl); | 1155 | I915_WRITE(DPFC_CONTROL, dpfc_ctl); |
1121 | intel_wait_for_vblank(dev); | ||
1122 | 1156 | ||
1123 | DRM_DEBUG_KMS("disabled FBC\n"); | 1157 | DRM_DEBUG_KMS("disabled FBC\n"); |
1124 | } | 1158 | } |
@@ -1179,7 +1213,6 @@ void ironlake_disable_fbc(struct drm_device *dev) | |||
1179 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); | 1213 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); |
1180 | dpfc_ctl &= ~DPFC_CTL_EN; | 1214 | dpfc_ctl &= ~DPFC_CTL_EN; |
1181 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); | 1215 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); |
1182 | intel_wait_for_vblank(dev); | ||
1183 | 1216 | ||
1184 | DRM_DEBUG_KMS("disabled FBC\n"); | 1217 | DRM_DEBUG_KMS("disabled FBC\n"); |
1185 | } | 1218 | } |
@@ -1478,7 +1511,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
1478 | if ((IS_I965G(dev) || plane == 0)) | 1511 | if ((IS_I965G(dev) || plane == 0)) |
1479 | intel_update_fbc(crtc, &crtc->mode); | 1512 | intel_update_fbc(crtc, &crtc->mode); |
1480 | 1513 | ||
1481 | intel_wait_for_vblank(dev); | 1514 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
1482 | intel_increase_pllclock(crtc, true); | 1515 | intel_increase_pllclock(crtc, true); |
1483 | 1516 | ||
1484 | return 0; | 1517 | return 0; |
@@ -1585,20 +1618,18 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1585 | Start, Offset, x, y, crtc->fb->pitch); | 1618 | Start, Offset, x, y, crtc->fb->pitch); |
1586 | I915_WRITE(dspstride, crtc->fb->pitch); | 1619 | I915_WRITE(dspstride, crtc->fb->pitch); |
1587 | if (IS_I965G(dev)) { | 1620 | if (IS_I965G(dev)) { |
1588 | I915_WRITE(dspbase, Offset); | ||
1589 | I915_READ(dspbase); | ||
1590 | I915_WRITE(dspsurf, Start); | 1621 | I915_WRITE(dspsurf, Start); |
1591 | I915_READ(dspsurf); | ||
1592 | I915_WRITE(dsptileoff, (y << 16) | x); | 1622 | I915_WRITE(dsptileoff, (y << 16) | x); |
1623 | I915_WRITE(dspbase, Offset); | ||
1593 | } else { | 1624 | } else { |
1594 | I915_WRITE(dspbase, Start + Offset); | 1625 | I915_WRITE(dspbase, Start + Offset); |
1595 | I915_READ(dspbase); | ||
1596 | } | 1626 | } |
1627 | POSTING_READ(dspbase); | ||
1597 | 1628 | ||
1598 | if ((IS_I965G(dev) || plane == 0)) | 1629 | if ((IS_I965G(dev) || plane == 0)) |
1599 | intel_update_fbc(crtc, &crtc->mode); | 1630 | intel_update_fbc(crtc, &crtc->mode); |
1600 | 1631 | ||
1601 | intel_wait_for_vblank(dev); | 1632 | intel_wait_for_vblank(dev, pipe); |
1602 | 1633 | ||
1603 | if (old_fb) { | 1634 | if (old_fb) { |
1604 | intel_fb = to_intel_framebuffer(old_fb); | 1635 | intel_fb = to_intel_framebuffer(old_fb); |
@@ -1627,54 +1658,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1627 | return 0; | 1658 | return 0; |
1628 | } | 1659 | } |
1629 | 1660 | ||
1630 | /* Disable the VGA plane that we never use */ | ||
1631 | static void i915_disable_vga (struct drm_device *dev) | ||
1632 | { | ||
1633 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1634 | u8 sr1; | ||
1635 | u32 vga_reg; | ||
1636 | |||
1637 | if (HAS_PCH_SPLIT(dev)) | ||
1638 | vga_reg = CPU_VGACNTRL; | ||
1639 | else | ||
1640 | vga_reg = VGACNTRL; | ||
1641 | |||
1642 | if (I915_READ(vga_reg) & VGA_DISP_DISABLE) | ||
1643 | return; | ||
1644 | |||
1645 | I915_WRITE8(VGA_SR_INDEX, 1); | ||
1646 | sr1 = I915_READ8(VGA_SR_DATA); | ||
1647 | I915_WRITE8(VGA_SR_DATA, sr1 | (1 << 5)); | ||
1648 | udelay(100); | ||
1649 | |||
1650 | I915_WRITE(vga_reg, VGA_DISP_DISABLE); | ||
1651 | } | ||
1652 | |||
1653 | static void ironlake_disable_pll_edp (struct drm_crtc *crtc) | ||
1654 | { | ||
1655 | struct drm_device *dev = crtc->dev; | ||
1656 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1657 | u32 dpa_ctl; | ||
1658 | |||
1659 | DRM_DEBUG_KMS("\n"); | ||
1660 | dpa_ctl = I915_READ(DP_A); | ||
1661 | dpa_ctl &= ~DP_PLL_ENABLE; | ||
1662 | I915_WRITE(DP_A, dpa_ctl); | ||
1663 | } | ||
1664 | |||
1665 | static void ironlake_enable_pll_edp (struct drm_crtc *crtc) | ||
1666 | { | ||
1667 | struct drm_device *dev = crtc->dev; | ||
1668 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1669 | u32 dpa_ctl; | ||
1670 | |||
1671 | dpa_ctl = I915_READ(DP_A); | ||
1672 | dpa_ctl |= DP_PLL_ENABLE; | ||
1673 | I915_WRITE(DP_A, dpa_ctl); | ||
1674 | udelay(200); | ||
1675 | } | ||
1676 | |||
1677 | |||
1678 | static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock) | 1661 | static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock) |
1679 | { | 1662 | { |
1680 | struct drm_device *dev = crtc->dev; | 1663 | struct drm_device *dev = crtc->dev; |
@@ -1945,7 +1928,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1945 | int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; | 1928 | int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; |
1946 | int trans_dpll_sel = (pipe == 0) ? 0 : 1; | 1929 | int trans_dpll_sel = (pipe == 0) ? 0 : 1; |
1947 | u32 temp; | 1930 | u32 temp; |
1948 | int n; | ||
1949 | u32 pipe_bpc; | 1931 | u32 pipe_bpc; |
1950 | 1932 | ||
1951 | temp = I915_READ(pipeconf_reg); | 1933 | temp = I915_READ(pipeconf_reg); |
@@ -1958,7 +1940,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1958 | case DRM_MODE_DPMS_ON: | 1940 | case DRM_MODE_DPMS_ON: |
1959 | case DRM_MODE_DPMS_STANDBY: | 1941 | case DRM_MODE_DPMS_STANDBY: |
1960 | case DRM_MODE_DPMS_SUSPEND: | 1942 | case DRM_MODE_DPMS_SUSPEND: |
1961 | DRM_DEBUG_KMS("crtc %d dpms on\n", pipe); | 1943 | DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane); |
1962 | 1944 | ||
1963 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 1945 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
1964 | temp = I915_READ(PCH_LVDS); | 1946 | temp = I915_READ(PCH_LVDS); |
@@ -1968,10 +1950,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1968 | } | 1950 | } |
1969 | } | 1951 | } |
1970 | 1952 | ||
1971 | if (HAS_eDP) { | 1953 | if (!HAS_eDP) { |
1972 | /* enable eDP PLL */ | ||
1973 | ironlake_enable_pll_edp(crtc); | ||
1974 | } else { | ||
1975 | 1954 | ||
1976 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ | 1955 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ |
1977 | temp = I915_READ(fdi_rx_reg); | 1956 | temp = I915_READ(fdi_rx_reg); |
@@ -2005,15 +1984,13 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2005 | /* Enable panel fitting for LVDS */ | 1984 | /* Enable panel fitting for LVDS */ |
2006 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) | 1985 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) |
2007 | || HAS_eDP || intel_pch_has_edp(crtc)) { | 1986 | || HAS_eDP || intel_pch_has_edp(crtc)) { |
2008 | temp = I915_READ(pf_ctl_reg); | 1987 | if (dev_priv->pch_pf_size) { |
2009 | I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3); | 1988 | temp = I915_READ(pf_ctl_reg); |
2010 | 1989 | I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3); | |
2011 | /* currently full aspect */ | 1990 | I915_WRITE(pf_win_pos, dev_priv->pch_pf_pos); |
2012 | I915_WRITE(pf_win_pos, 0); | 1991 | I915_WRITE(pf_win_size, dev_priv->pch_pf_size); |
2013 | 1992 | } else | |
2014 | I915_WRITE(pf_win_size, | 1993 | I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); |
2015 | (dev_priv->panel_fixed_mode->hdisplay << 16) | | ||
2016 | (dev_priv->panel_fixed_mode->vdisplay)); | ||
2017 | } | 1994 | } |
2018 | 1995 | ||
2019 | /* Enable CPU pipe */ | 1996 | /* Enable CPU pipe */ |
@@ -2097,9 +2074,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2097 | int reg; | 2074 | int reg; |
2098 | 2075 | ||
2099 | reg = I915_READ(trans_dp_ctl); | 2076 | reg = I915_READ(trans_dp_ctl); |
2100 | reg &= ~TRANS_DP_PORT_SEL_MASK; | 2077 | reg &= ~(TRANS_DP_PORT_SEL_MASK | |
2101 | reg = TRANS_DP_OUTPUT_ENABLE | | 2078 | TRANS_DP_SYNC_MASK); |
2102 | TRANS_DP_ENH_FRAMING; | 2079 | reg |= (TRANS_DP_OUTPUT_ENABLE | |
2080 | TRANS_DP_ENH_FRAMING); | ||
2103 | 2081 | ||
2104 | if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) | 2082 | if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) |
2105 | reg |= TRANS_DP_HSYNC_ACTIVE_HIGH; | 2083 | reg |= TRANS_DP_HSYNC_ACTIVE_HIGH; |
@@ -2137,18 +2115,17 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2137 | I915_WRITE(transconf_reg, temp | TRANS_ENABLE); | 2115 | I915_WRITE(transconf_reg, temp | TRANS_ENABLE); |
2138 | I915_READ(transconf_reg); | 2116 | I915_READ(transconf_reg); |
2139 | 2117 | ||
2140 | while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0) | 2118 | if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 10, 0)) |
2141 | ; | 2119 | DRM_ERROR("failed to enable transcoder\n"); |
2142 | |||
2143 | } | 2120 | } |
2144 | 2121 | ||
2145 | intel_crtc_load_lut(crtc); | 2122 | intel_crtc_load_lut(crtc); |
2146 | 2123 | ||
2147 | intel_update_fbc(crtc, &crtc->mode); | 2124 | intel_update_fbc(crtc, &crtc->mode); |
2125 | break; | ||
2148 | 2126 | ||
2149 | break; | ||
2150 | case DRM_MODE_DPMS_OFF: | 2127 | case DRM_MODE_DPMS_OFF: |
2151 | DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); | 2128 | DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane); |
2152 | 2129 | ||
2153 | drm_vblank_off(dev, pipe); | 2130 | drm_vblank_off(dev, pipe); |
2154 | /* Disable display plane */ | 2131 | /* Disable display plane */ |
@@ -2164,26 +2141,14 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2164 | dev_priv->display.disable_fbc) | 2141 | dev_priv->display.disable_fbc) |
2165 | dev_priv->display.disable_fbc(dev); | 2142 | dev_priv->display.disable_fbc(dev); |
2166 | 2143 | ||
2167 | i915_disable_vga(dev); | ||
2168 | |||
2169 | /* disable cpu pipe, disable after all planes disabled */ | 2144 | /* disable cpu pipe, disable after all planes disabled */ |
2170 | temp = I915_READ(pipeconf_reg); | 2145 | temp = I915_READ(pipeconf_reg); |
2171 | if ((temp & PIPEACONF_ENABLE) != 0) { | 2146 | if ((temp & PIPEACONF_ENABLE) != 0) { |
2172 | I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); | 2147 | I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); |
2173 | I915_READ(pipeconf_reg); | 2148 | |
2174 | n = 0; | ||
2175 | /* wait for cpu pipe off, pipe state */ | 2149 | /* wait for cpu pipe off, pipe state */ |
2176 | while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) { | 2150 | if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50, 1)) |
2177 | n++; | 2151 | DRM_ERROR("failed to turn off cpu pipe\n"); |
2178 | if (n < 60) { | ||
2179 | udelay(500); | ||
2180 | continue; | ||
2181 | } else { | ||
2182 | DRM_DEBUG_KMS("pipe %d off delay\n", | ||
2183 | pipe); | ||
2184 | break; | ||
2185 | } | ||
2186 | } | ||
2187 | } else | 2152 | } else |
2188 | DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); | 2153 | DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); |
2189 | 2154 | ||
@@ -2244,20 +2209,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2244 | temp = I915_READ(transconf_reg); | 2209 | temp = I915_READ(transconf_reg); |
2245 | if ((temp & TRANS_ENABLE) != 0) { | 2210 | if ((temp & TRANS_ENABLE) != 0) { |
2246 | I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE); | 2211 | I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE); |
2247 | I915_READ(transconf_reg); | 2212 | |
2248 | n = 0; | ||
2249 | /* wait for PCH transcoder off, transcoder state */ | 2213 | /* wait for PCH transcoder off, transcoder state */ |
2250 | while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) { | 2214 | if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50, 1)) |
2251 | n++; | 2215 | DRM_ERROR("failed to disable transcoder\n"); |
2252 | if (n < 60) { | ||
2253 | udelay(500); | ||
2254 | continue; | ||
2255 | } else { | ||
2256 | DRM_DEBUG_KMS("transcoder %d off " | ||
2257 | "delay\n", pipe); | ||
2258 | break; | ||
2259 | } | ||
2260 | } | ||
2261 | } | 2216 | } |
2262 | 2217 | ||
2263 | temp = I915_READ(transconf_reg); | 2218 | temp = I915_READ(transconf_reg); |
@@ -2294,10 +2249,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2294 | I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); | 2249 | I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); |
2295 | I915_READ(pch_dpll_reg); | 2250 | I915_READ(pch_dpll_reg); |
2296 | 2251 | ||
2297 | if (HAS_eDP) { | ||
2298 | ironlake_disable_pll_edp(crtc); | ||
2299 | } | ||
2300 | |||
2301 | /* Switch from PCDclk to Rawclk */ | 2252 | /* Switch from PCDclk to Rawclk */ |
2302 | temp = I915_READ(fdi_rx_reg); | 2253 | temp = I915_READ(fdi_rx_reg); |
2303 | temp &= ~FDI_SEL_PCDCLK; | 2254 | temp &= ~FDI_SEL_PCDCLK; |
@@ -2372,8 +2323,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2372 | case DRM_MODE_DPMS_ON: | 2323 | case DRM_MODE_DPMS_ON: |
2373 | case DRM_MODE_DPMS_STANDBY: | 2324 | case DRM_MODE_DPMS_STANDBY: |
2374 | case DRM_MODE_DPMS_SUSPEND: | 2325 | case DRM_MODE_DPMS_SUSPEND: |
2375 | intel_update_watermarks(dev); | ||
2376 | |||
2377 | /* Enable the DPLL */ | 2326 | /* Enable the DPLL */ |
2378 | temp = I915_READ(dpll_reg); | 2327 | temp = I915_READ(dpll_reg); |
2379 | if ((temp & DPLL_VCO_ENABLE) == 0) { | 2328 | if ((temp & DPLL_VCO_ENABLE) == 0) { |
@@ -2413,8 +2362,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2413 | intel_crtc_dpms_overlay(intel_crtc, true); | 2362 | intel_crtc_dpms_overlay(intel_crtc, true); |
2414 | break; | 2363 | break; |
2415 | case DRM_MODE_DPMS_OFF: | 2364 | case DRM_MODE_DPMS_OFF: |
2416 | intel_update_watermarks(dev); | ||
2417 | |||
2418 | /* Give the overlay scaler a chance to disable if it's on this pipe */ | 2365 | /* Give the overlay scaler a chance to disable if it's on this pipe */ |
2419 | intel_crtc_dpms_overlay(intel_crtc, false); | 2366 | intel_crtc_dpms_overlay(intel_crtc, false); |
2420 | drm_vblank_off(dev, pipe); | 2367 | drm_vblank_off(dev, pipe); |
@@ -2423,9 +2370,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2423 | dev_priv->display.disable_fbc) | 2370 | dev_priv->display.disable_fbc) |
2424 | dev_priv->display.disable_fbc(dev); | 2371 | dev_priv->display.disable_fbc(dev); |
2425 | 2372 | ||
2426 | /* Disable the VGA plane that we never use */ | ||
2427 | i915_disable_vga(dev); | ||
2428 | |||
2429 | /* Disable display plane */ | 2373 | /* Disable display plane */ |
2430 | temp = I915_READ(dspcntr_reg); | 2374 | temp = I915_READ(dspcntr_reg); |
2431 | if ((temp & DISPLAY_PLANE_ENABLE) != 0) { | 2375 | if ((temp & DISPLAY_PLANE_ENABLE) != 0) { |
@@ -2435,10 +2379,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2435 | I915_READ(dspbase_reg); | 2379 | I915_READ(dspbase_reg); |
2436 | } | 2380 | } |
2437 | 2381 | ||
2438 | if (!IS_I9XX(dev)) { | 2382 | /* Wait for vblank for the disable to take effect */ |
2439 | /* Wait for vblank for the disable to take effect */ | 2383 | intel_wait_for_vblank_off(dev, pipe); |
2440 | intel_wait_for_vblank(dev); | ||
2441 | } | ||
2442 | 2384 | ||
2443 | /* Don't disable pipe A or pipe A PLLs if needed */ | 2385 | /* Don't disable pipe A or pipe A PLLs if needed */ |
2444 | if (pipeconf_reg == PIPEACONF && | 2386 | if (pipeconf_reg == PIPEACONF && |
@@ -2453,7 +2395,7 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2453 | } | 2395 | } |
2454 | 2396 | ||
2455 | /* Wait for vblank for the disable to take effect. */ | 2397 | /* Wait for vblank for the disable to take effect. */ |
2456 | intel_wait_for_vblank(dev); | 2398 | intel_wait_for_vblank_off(dev, pipe); |
2457 | 2399 | ||
2458 | temp = I915_READ(dpll_reg); | 2400 | temp = I915_READ(dpll_reg); |
2459 | if ((temp & DPLL_VCO_ENABLE) != 0) { | 2401 | if ((temp & DPLL_VCO_ENABLE) != 0) { |
@@ -2469,9 +2411,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2469 | 2411 | ||
2470 | /** | 2412 | /** |
2471 | * Sets the power management mode of the pipe and plane. | 2413 | * Sets the power management mode of the pipe and plane. |
2472 | * | ||
2473 | * This code should probably grow support for turning the cursor off and back | ||
2474 | * on appropriately at the same time as we're turning the pipe off/on. | ||
2475 | */ | 2414 | */ |
2476 | static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) | 2415 | static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) |
2477 | { | 2416 | { |
@@ -2482,9 +2421,26 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2482 | int pipe = intel_crtc->pipe; | 2421 | int pipe = intel_crtc->pipe; |
2483 | bool enabled; | 2422 | bool enabled; |
2484 | 2423 | ||
2424 | intel_crtc->dpms_mode = mode; | ||
2425 | intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON; | ||
2426 | |||
2427 | /* When switching on the display, ensure that SR is disabled | ||
2428 | * with multiple pipes prior to enabling to new pipe. | ||
2429 | * | ||
2430 | * When switching off the display, make sure the cursor is | ||
2431 | * properly hidden prior to disabling the pipe. | ||
2432 | */ | ||
2433 | if (mode == DRM_MODE_DPMS_ON) | ||
2434 | intel_update_watermarks(dev); | ||
2435 | else | ||
2436 | intel_crtc_update_cursor(crtc); | ||
2437 | |||
2485 | dev_priv->display.dpms(crtc, mode); | 2438 | dev_priv->display.dpms(crtc, mode); |
2486 | 2439 | ||
2487 | intel_crtc->dpms_mode = mode; | 2440 | if (mode == DRM_MODE_DPMS_ON) |
2441 | intel_crtc_update_cursor(crtc); | ||
2442 | else | ||
2443 | intel_update_watermarks(dev); | ||
2488 | 2444 | ||
2489 | if (!dev->primary->master) | 2445 | if (!dev->primary->master) |
2490 | return; | 2446 | return; |
@@ -2536,6 +2492,20 @@ void intel_encoder_commit (struct drm_encoder *encoder) | |||
2536 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); | 2492 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); |
2537 | } | 2493 | } |
2538 | 2494 | ||
2495 | void intel_encoder_destroy(struct drm_encoder *encoder) | ||
2496 | { | ||
2497 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
2498 | |||
2499 | if (intel_encoder->ddc_bus) | ||
2500 | intel_i2c_destroy(intel_encoder->ddc_bus); | ||
2501 | |||
2502 | if (intel_encoder->i2c_bus) | ||
2503 | intel_i2c_destroy(intel_encoder->i2c_bus); | ||
2504 | |||
2505 | drm_encoder_cleanup(encoder); | ||
2506 | kfree(intel_encoder); | ||
2507 | } | ||
2508 | |||
2539 | static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, | 2509 | static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, |
2540 | struct drm_display_mode *mode, | 2510 | struct drm_display_mode *mode, |
2541 | struct drm_display_mode *adjusted_mode) | 2511 | struct drm_display_mode *adjusted_mode) |
@@ -2867,7 +2837,7 @@ struct cxsr_latency { | |||
2867 | unsigned long cursor_hpll_disable; | 2837 | unsigned long cursor_hpll_disable; |
2868 | }; | 2838 | }; |
2869 | 2839 | ||
2870 | static struct cxsr_latency cxsr_latency_table[] = { | 2840 | static const struct cxsr_latency cxsr_latency_table[] = { |
2871 | {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ | 2841 | {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ |
2872 | {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ | 2842 | {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ |
2873 | {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ | 2843 | {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ |
@@ -2905,11 +2875,13 @@ static struct cxsr_latency cxsr_latency_table[] = { | |||
2905 | {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ | 2875 | {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ |
2906 | }; | 2876 | }; |
2907 | 2877 | ||
2908 | static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3, | 2878 | static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, |
2909 | int fsb, int mem) | 2879 | int is_ddr3, |
2880 | int fsb, | ||
2881 | int mem) | ||
2910 | { | 2882 | { |
2883 | const struct cxsr_latency *latency; | ||
2911 | int i; | 2884 | int i; |
2912 | struct cxsr_latency *latency; | ||
2913 | 2885 | ||
2914 | if (fsb == 0 || mem == 0) | 2886 | if (fsb == 0 || mem == 0) |
2915 | return NULL; | 2887 | return NULL; |
@@ -2930,13 +2902,9 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3, | |||
2930 | static void pineview_disable_cxsr(struct drm_device *dev) | 2902 | static void pineview_disable_cxsr(struct drm_device *dev) |
2931 | { | 2903 | { |
2932 | struct drm_i915_private *dev_priv = dev->dev_private; | 2904 | struct drm_i915_private *dev_priv = dev->dev_private; |
2933 | u32 reg; | ||
2934 | 2905 | ||
2935 | /* deactivate cxsr */ | 2906 | /* deactivate cxsr */ |
2936 | reg = I915_READ(DSPFW3); | 2907 | I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN); |
2937 | reg &= ~(PINEVIEW_SELF_REFRESH_EN); | ||
2938 | I915_WRITE(DSPFW3, reg); | ||
2939 | DRM_INFO("Big FIFO is disabled\n"); | ||
2940 | } | 2908 | } |
2941 | 2909 | ||
2942 | /* | 2910 | /* |
@@ -3024,12 +2992,12 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, | |||
3024 | int pixel_size) | 2992 | int pixel_size) |
3025 | { | 2993 | { |
3026 | struct drm_i915_private *dev_priv = dev->dev_private; | 2994 | struct drm_i915_private *dev_priv = dev->dev_private; |
2995 | const struct cxsr_latency *latency; | ||
3027 | u32 reg; | 2996 | u32 reg; |
3028 | unsigned long wm; | 2997 | unsigned long wm; |
3029 | struct cxsr_latency *latency; | ||
3030 | int sr_clock; | 2998 | int sr_clock; |
3031 | 2999 | ||
3032 | latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, | 3000 | latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, |
3033 | dev_priv->fsb_freq, dev_priv->mem_freq); | 3001 | dev_priv->fsb_freq, dev_priv->mem_freq); |
3034 | if (!latency) { | 3002 | if (!latency) { |
3035 | DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); | 3003 | DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); |
@@ -3075,9 +3043,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, | |||
3075 | DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); | 3043 | DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); |
3076 | 3044 | ||
3077 | /* activate cxsr */ | 3045 | /* activate cxsr */ |
3078 | reg = I915_READ(DSPFW3); | 3046 | I915_WRITE(DSPFW3, |
3079 | reg |= PINEVIEW_SELF_REFRESH_EN; | 3047 | I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN); |
3080 | I915_WRITE(DSPFW3, reg); | ||
3081 | DRM_DEBUG_KMS("Self-refresh is enabled\n"); | 3048 | DRM_DEBUG_KMS("Self-refresh is enabled\n"); |
3082 | } else { | 3049 | } else { |
3083 | pineview_disable_cxsr(dev); | 3050 | pineview_disable_cxsr(dev); |
@@ -3354,12 +3321,11 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, | |||
3354 | int line_count; | 3321 | int line_count; |
3355 | int planea_htotal = 0, planeb_htotal = 0; | 3322 | int planea_htotal = 0, planeb_htotal = 0; |
3356 | struct drm_crtc *crtc; | 3323 | struct drm_crtc *crtc; |
3357 | struct intel_crtc *intel_crtc; | ||
3358 | 3324 | ||
3359 | /* Need htotal for all active display plane */ | 3325 | /* Need htotal for all active display plane */ |
3360 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 3326 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
3361 | intel_crtc = to_intel_crtc(crtc); | 3327 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3362 | if (crtc->enabled) { | 3328 | if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) { |
3363 | if (intel_crtc->plane == 0) | 3329 | if (intel_crtc->plane == 0) |
3364 | planea_htotal = crtc->mode.htotal; | 3330 | planea_htotal = crtc->mode.htotal; |
3365 | else | 3331 | else |
@@ -3519,7 +3485,6 @@ static void intel_update_watermarks(struct drm_device *dev) | |||
3519 | { | 3485 | { |
3520 | struct drm_i915_private *dev_priv = dev->dev_private; | 3486 | struct drm_i915_private *dev_priv = dev->dev_private; |
3521 | struct drm_crtc *crtc; | 3487 | struct drm_crtc *crtc; |
3522 | struct intel_crtc *intel_crtc; | ||
3523 | int sr_hdisplay = 0; | 3488 | int sr_hdisplay = 0; |
3524 | unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; | 3489 | unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; |
3525 | int enabled = 0, pixel_size = 0; | 3490 | int enabled = 0, pixel_size = 0; |
@@ -3530,8 +3495,8 @@ static void intel_update_watermarks(struct drm_device *dev) | |||
3530 | 3495 | ||
3531 | /* Get the clock config from both planes */ | 3496 | /* Get the clock config from both planes */ |
3532 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 3497 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
3533 | intel_crtc = to_intel_crtc(crtc); | 3498 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3534 | if (crtc->enabled) { | 3499 | if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) { |
3535 | enabled++; | 3500 | enabled++; |
3536 | if (intel_crtc->plane == 0) { | 3501 | if (intel_crtc->plane == 0) { |
3537 | DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n", | 3502 | DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n", |
@@ -3966,9 +3931,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3966 | dpll_reg = pch_dpll_reg; | 3931 | dpll_reg = pch_dpll_reg; |
3967 | } | 3932 | } |
3968 | 3933 | ||
3969 | if (is_edp) { | 3934 | if (!is_edp) { |
3970 | ironlake_disable_pll_edp(crtc); | ||
3971 | } else if ((dpll & DPLL_VCO_ENABLE)) { | ||
3972 | I915_WRITE(fp_reg, fp); | 3935 | I915_WRITE(fp_reg, fp); |
3973 | I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); | 3936 | I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); |
3974 | I915_READ(dpll_reg); | 3937 | I915_READ(dpll_reg); |
@@ -4167,7 +4130,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4167 | I915_WRITE(pipeconf_reg, pipeconf); | 4130 | I915_WRITE(pipeconf_reg, pipeconf); |
4168 | I915_READ(pipeconf_reg); | 4131 | I915_READ(pipeconf_reg); |
4169 | 4132 | ||
4170 | intel_wait_for_vblank(dev); | 4133 | intel_wait_for_vblank(dev, pipe); |
4171 | 4134 | ||
4172 | if (IS_IRONLAKE(dev)) { | 4135 | if (IS_IRONLAKE(dev)) { |
4173 | /* enable address swizzle for tiling buffer */ | 4136 | /* enable address swizzle for tiling buffer */ |
@@ -4180,9 +4143,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4180 | /* Flush the plane changes */ | 4143 | /* Flush the plane changes */ |
4181 | ret = intel_pipe_set_base(crtc, x, y, old_fb); | 4144 | ret = intel_pipe_set_base(crtc, x, y, old_fb); |
4182 | 4145 | ||
4183 | if ((IS_I965G(dev) || plane == 0)) | ||
4184 | intel_update_fbc(crtc, &crtc->mode); | ||
4185 | |||
4186 | intel_update_watermarks(dev); | 4146 | intel_update_watermarks(dev); |
4187 | 4147 | ||
4188 | drm_vblank_post_modeset(dev, pipe); | 4148 | drm_vblank_post_modeset(dev, pipe); |
@@ -4216,6 +4176,62 @@ void intel_crtc_load_lut(struct drm_crtc *crtc) | |||
4216 | } | 4176 | } |
4217 | } | 4177 | } |
4218 | 4178 | ||
4179 | static void i845_update_cursor(struct drm_crtc *crtc, u32 base) | ||
4180 | { | ||
4181 | struct drm_device *dev = crtc->dev; | ||
4182 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4183 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
4184 | bool visible = base != 0; | ||
4185 | u32 cntl; | ||
4186 | |||
4187 | if (intel_crtc->cursor_visible == visible) | ||
4188 | return; | ||
4189 | |||
4190 | cntl = I915_READ(CURACNTR); | ||
4191 | if (visible) { | ||
4192 | /* On these chipsets we can only modify the base whilst | ||
4193 | * the cursor is disabled. | ||
4194 | */ | ||
4195 | I915_WRITE(CURABASE, base); | ||
4196 | |||
4197 | cntl &= ~(CURSOR_FORMAT_MASK); | ||
4198 | /* XXX width must be 64, stride 256 => 0x00 << 28 */ | ||
4199 | cntl |= CURSOR_ENABLE | | ||
4200 | CURSOR_GAMMA_ENABLE | | ||
4201 | CURSOR_FORMAT_ARGB; | ||
4202 | } else | ||
4203 | cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); | ||
4204 | I915_WRITE(CURACNTR, cntl); | ||
4205 | |||
4206 | intel_crtc->cursor_visible = visible; | ||
4207 | } | ||
4208 | |||
4209 | static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) | ||
4210 | { | ||
4211 | struct drm_device *dev = crtc->dev; | ||
4212 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4213 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
4214 | int pipe = intel_crtc->pipe; | ||
4215 | bool visible = base != 0; | ||
4216 | |||
4217 | if (intel_crtc->cursor_visible != visible) { | ||
4218 | uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR); | ||
4219 | if (base) { | ||
4220 | cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); | ||
4221 | cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; | ||
4222 | cntl |= pipe << 28; /* Connect to correct pipe */ | ||
4223 | } else { | ||
4224 | cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); | ||
4225 | cntl |= CURSOR_MODE_DISABLE; | ||
4226 | } | ||
4227 | I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl); | ||
4228 | |||
4229 | intel_crtc->cursor_visible = visible; | ||
4230 | } | ||
4231 | /* and commit changes on next vblank */ | ||
4232 | I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base); | ||
4233 | } | ||
4234 | |||
4219 | /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ | 4235 | /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ |
4220 | static void intel_crtc_update_cursor(struct drm_crtc *crtc) | 4236 | static void intel_crtc_update_cursor(struct drm_crtc *crtc) |
4221 | { | 4237 | { |
@@ -4225,12 +4241,12 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc) | |||
4225 | int pipe = intel_crtc->pipe; | 4241 | int pipe = intel_crtc->pipe; |
4226 | int x = intel_crtc->cursor_x; | 4242 | int x = intel_crtc->cursor_x; |
4227 | int y = intel_crtc->cursor_y; | 4243 | int y = intel_crtc->cursor_y; |
4228 | uint32_t base, pos; | 4244 | u32 base, pos; |
4229 | bool visible; | 4245 | bool visible; |
4230 | 4246 | ||
4231 | pos = 0; | 4247 | pos = 0; |
4232 | 4248 | ||
4233 | if (crtc->fb) { | 4249 | if (intel_crtc->cursor_on && crtc->fb) { |
4234 | base = intel_crtc->cursor_addr; | 4250 | base = intel_crtc->cursor_addr; |
4235 | if (x > (int) crtc->fb->width) | 4251 | if (x > (int) crtc->fb->width) |
4236 | base = 0; | 4252 | base = 0; |
@@ -4259,37 +4275,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc) | |||
4259 | pos |= y << CURSOR_Y_SHIFT; | 4275 | pos |= y << CURSOR_Y_SHIFT; |
4260 | 4276 | ||
4261 | visible = base != 0; | 4277 | visible = base != 0; |
4262 | if (!visible && !intel_crtc->cursor_visble) | 4278 | if (!visible && !intel_crtc->cursor_visible) |
4263 | return; | 4279 | return; |
4264 | 4280 | ||
4265 | I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos); | 4281 | I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos); |
4266 | if (intel_crtc->cursor_visble != visible) { | 4282 | if (IS_845G(dev) || IS_I865G(dev)) |
4267 | uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR); | 4283 | i845_update_cursor(crtc, base); |
4268 | if (base) { | 4284 | else |
4269 | /* Hooray for CUR*CNTR differences */ | 4285 | i9xx_update_cursor(crtc, base); |
4270 | if (IS_MOBILE(dev) || IS_I9XX(dev)) { | ||
4271 | cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); | ||
4272 | cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; | ||
4273 | cntl |= pipe << 28; /* Connect to correct pipe */ | ||
4274 | } else { | ||
4275 | cntl &= ~(CURSOR_FORMAT_MASK); | ||
4276 | cntl |= CURSOR_ENABLE; | ||
4277 | cntl |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE; | ||
4278 | } | ||
4279 | } else { | ||
4280 | if (IS_MOBILE(dev) || IS_I9XX(dev)) { | ||
4281 | cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); | ||
4282 | cntl |= CURSOR_MODE_DISABLE; | ||
4283 | } else { | ||
4284 | cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); | ||
4285 | } | ||
4286 | } | ||
4287 | I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl); | ||
4288 | |||
4289 | intel_crtc->cursor_visble = visible; | ||
4290 | } | ||
4291 | /* and commit changes on next vblank */ | ||
4292 | I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base); | ||
4293 | 4286 | ||
4294 | if (visible) | 4287 | if (visible) |
4295 | intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj); | 4288 | intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj); |
@@ -4354,8 +4347,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
4354 | 4347 | ||
4355 | addr = obj_priv->gtt_offset; | 4348 | addr = obj_priv->gtt_offset; |
4356 | } else { | 4349 | } else { |
4350 | int align = IS_I830(dev) ? 16 * 1024 : 256; | ||
4357 | ret = i915_gem_attach_phys_object(dev, bo, | 4351 | ret = i915_gem_attach_phys_object(dev, bo, |
4358 | (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); | 4352 | (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, |
4353 | align); | ||
4359 | if (ret) { | 4354 | if (ret) { |
4360 | DRM_ERROR("failed to attach phys object\n"); | 4355 | DRM_ERROR("failed to attach phys object\n"); |
4361 | goto fail_locked; | 4356 | goto fail_locked; |
@@ -4544,7 +4539,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | |||
4544 | encoder_funcs->commit(encoder); | 4539 | encoder_funcs->commit(encoder); |
4545 | } | 4540 | } |
4546 | /* let the connector get through one full cycle before testing */ | 4541 | /* let the connector get through one full cycle before testing */ |
4547 | intel_wait_for_vblank(dev); | 4542 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
4548 | 4543 | ||
4549 | return crtc; | 4544 | return crtc; |
4550 | } | 4545 | } |
@@ -4749,7 +4744,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) | |||
4749 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; | 4744 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; |
4750 | I915_WRITE(dpll_reg, dpll); | 4745 | I915_WRITE(dpll_reg, dpll); |
4751 | dpll = I915_READ(dpll_reg); | 4746 | dpll = I915_READ(dpll_reg); |
4752 | intel_wait_for_vblank(dev); | 4747 | intel_wait_for_vblank(dev, pipe); |
4753 | dpll = I915_READ(dpll_reg); | 4748 | dpll = I915_READ(dpll_reg); |
4754 | if (dpll & DISPLAY_RATE_SELECT_FPA1) | 4749 | if (dpll & DISPLAY_RATE_SELECT_FPA1) |
4755 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); | 4750 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); |
@@ -4793,7 +4788,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) | |||
4793 | dpll |= DISPLAY_RATE_SELECT_FPA1; | 4788 | dpll |= DISPLAY_RATE_SELECT_FPA1; |
4794 | I915_WRITE(dpll_reg, dpll); | 4789 | I915_WRITE(dpll_reg, dpll); |
4795 | dpll = I915_READ(dpll_reg); | 4790 | dpll = I915_READ(dpll_reg); |
4796 | intel_wait_for_vblank(dev); | 4791 | intel_wait_for_vblank(dev, pipe); |
4797 | dpll = I915_READ(dpll_reg); | 4792 | dpll = I915_READ(dpll_reg); |
4798 | if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) | 4793 | if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) |
4799 | DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); | 4794 | DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); |
@@ -5083,14 +5078,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5083 | work->pending_flip_obj = obj; | 5078 | work->pending_flip_obj = obj; |
5084 | 5079 | ||
5085 | if (intel_crtc->plane) | 5080 | if (intel_crtc->plane) |
5086 | flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | 5081 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; |
5087 | else | 5082 | else |
5088 | flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT; | 5083 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; |
5089 | 5084 | ||
5090 | /* Wait for any previous flip to finish */ | 5085 | if (IS_GEN3(dev) || IS_GEN2(dev)) { |
5091 | if (IS_GEN3(dev)) | 5086 | BEGIN_LP_RING(2); |
5092 | while (I915_READ(ISR) & flip_mask) | 5087 | OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); |
5093 | ; | 5088 | OUT_RING(0); |
5089 | ADVANCE_LP_RING(); | ||
5090 | } | ||
5094 | 5091 | ||
5095 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ | 5092 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ |
5096 | offset = obj_priv->gtt_offset; | 5093 | offset = obj_priv->gtt_offset; |
@@ -5104,12 +5101,18 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5104 | OUT_RING(offset | obj_priv->tiling_mode); | 5101 | OUT_RING(offset | obj_priv->tiling_mode); |
5105 | pipesrc = I915_READ(pipesrc_reg); | 5102 | pipesrc = I915_READ(pipesrc_reg); |
5106 | OUT_RING(pipesrc & 0x0fff0fff); | 5103 | OUT_RING(pipesrc & 0x0fff0fff); |
5107 | } else { | 5104 | } else if (IS_GEN3(dev)) { |
5108 | OUT_RING(MI_DISPLAY_FLIP_I915 | | 5105 | OUT_RING(MI_DISPLAY_FLIP_I915 | |
5109 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | 5106 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
5110 | OUT_RING(fb->pitch); | 5107 | OUT_RING(fb->pitch); |
5111 | OUT_RING(offset); | 5108 | OUT_RING(offset); |
5112 | OUT_RING(MI_NOOP); | 5109 | OUT_RING(MI_NOOP); |
5110 | } else { | ||
5111 | OUT_RING(MI_DISPLAY_FLIP | | ||
5112 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | ||
5113 | OUT_RING(fb->pitch); | ||
5114 | OUT_RING(offset); | ||
5115 | OUT_RING(MI_NOOP); | ||
5113 | } | 5116 | } |
5114 | ADVANCE_LP_RING(); | 5117 | ADVANCE_LP_RING(); |
5115 | 5118 | ||
@@ -5432,37 +5435,37 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { | |||
5432 | }; | 5435 | }; |
5433 | 5436 | ||
5434 | static struct drm_gem_object * | 5437 | static struct drm_gem_object * |
5435 | intel_alloc_power_context(struct drm_device *dev) | 5438 | intel_alloc_context_page(struct drm_device *dev) |
5436 | { | 5439 | { |
5437 | struct drm_gem_object *pwrctx; | 5440 | struct drm_gem_object *ctx; |
5438 | int ret; | 5441 | int ret; |
5439 | 5442 | ||
5440 | pwrctx = i915_gem_alloc_object(dev, 4096); | 5443 | ctx = i915_gem_alloc_object(dev, 4096); |
5441 | if (!pwrctx) { | 5444 | if (!ctx) { |
5442 | DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); | 5445 | DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); |
5443 | return NULL; | 5446 | return NULL; |
5444 | } | 5447 | } |
5445 | 5448 | ||
5446 | mutex_lock(&dev->struct_mutex); | 5449 | mutex_lock(&dev->struct_mutex); |
5447 | ret = i915_gem_object_pin(pwrctx, 4096); | 5450 | ret = i915_gem_object_pin(ctx, 4096); |
5448 | if (ret) { | 5451 | if (ret) { |
5449 | DRM_ERROR("failed to pin power context: %d\n", ret); | 5452 | DRM_ERROR("failed to pin power context: %d\n", ret); |
5450 | goto err_unref; | 5453 | goto err_unref; |
5451 | } | 5454 | } |
5452 | 5455 | ||
5453 | ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1); | 5456 | ret = i915_gem_object_set_to_gtt_domain(ctx, 1); |
5454 | if (ret) { | 5457 | if (ret) { |
5455 | DRM_ERROR("failed to set-domain on power context: %d\n", ret); | 5458 | DRM_ERROR("failed to set-domain on power context: %d\n", ret); |
5456 | goto err_unpin; | 5459 | goto err_unpin; |
5457 | } | 5460 | } |
5458 | mutex_unlock(&dev->struct_mutex); | 5461 | mutex_unlock(&dev->struct_mutex); |
5459 | 5462 | ||
5460 | return pwrctx; | 5463 | return ctx; |
5461 | 5464 | ||
5462 | err_unpin: | 5465 | err_unpin: |
5463 | i915_gem_object_unpin(pwrctx); | 5466 | i915_gem_object_unpin(ctx); |
5464 | err_unref: | 5467 | err_unref: |
5465 | drm_gem_object_unreference(pwrctx); | 5468 | drm_gem_object_unreference(ctx); |
5466 | mutex_unlock(&dev->struct_mutex); | 5469 | mutex_unlock(&dev->struct_mutex); |
5467 | return NULL; | 5470 | return NULL; |
5468 | } | 5471 | } |
@@ -5494,7 +5497,6 @@ void ironlake_enable_drps(struct drm_device *dev) | |||
5494 | struct drm_i915_private *dev_priv = dev->dev_private; | 5497 | struct drm_i915_private *dev_priv = dev->dev_private; |
5495 | u32 rgvmodectl = I915_READ(MEMMODECTL); | 5498 | u32 rgvmodectl = I915_READ(MEMMODECTL); |
5496 | u8 fmax, fmin, fstart, vstart; | 5499 | u8 fmax, fmin, fstart, vstart; |
5497 | int i = 0; | ||
5498 | 5500 | ||
5499 | /* 100ms RC evaluation intervals */ | 5501 | /* 100ms RC evaluation intervals */ |
5500 | I915_WRITE(RCUPEI, 100000); | 5502 | I915_WRITE(RCUPEI, 100000); |
@@ -5538,13 +5540,8 @@ void ironlake_enable_drps(struct drm_device *dev) | |||
5538 | rgvmodectl |= MEMMODE_SWMODE_EN; | 5540 | rgvmodectl |= MEMMODE_SWMODE_EN; |
5539 | I915_WRITE(MEMMODECTL, rgvmodectl); | 5541 | I915_WRITE(MEMMODECTL, rgvmodectl); |
5540 | 5542 | ||
5541 | while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) { | 5543 | if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 1, 0)) |
5542 | if (i++ > 100) { | 5544 | DRM_ERROR("stuck trying to change perf mode\n"); |
5543 | DRM_ERROR("stuck trying to change perf mode\n"); | ||
5544 | break; | ||
5545 | } | ||
5546 | msleep(1); | ||
5547 | } | ||
5548 | msleep(1); | 5545 | msleep(1); |
5549 | 5546 | ||
5550 | ironlake_set_drps(dev, fstart); | 5547 | ironlake_set_drps(dev, fstart); |
@@ -5725,7 +5722,8 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5725 | ILK_DPFC_DIS2 | | 5722 | ILK_DPFC_DIS2 | |
5726 | ILK_CLK_FBC); | 5723 | ILK_CLK_FBC); |
5727 | } | 5724 | } |
5728 | return; | 5725 | if (IS_GEN6(dev)) |
5726 | return; | ||
5729 | } else if (IS_G4X(dev)) { | 5727 | } else if (IS_G4X(dev)) { |
5730 | uint32_t dspclk_gate; | 5728 | uint32_t dspclk_gate; |
5731 | I915_WRITE(RENCLK_GATE_D1, 0); | 5729 | I915_WRITE(RENCLK_GATE_D1, 0); |
@@ -5768,6 +5766,31 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5768 | * GPU can automatically power down the render unit if given a page | 5766 | * GPU can automatically power down the render unit if given a page |
5769 | * to save state. | 5767 | * to save state. |
5770 | */ | 5768 | */ |
5769 | if (IS_IRONLAKE_M(dev)) { | ||
5770 | if (dev_priv->renderctx == NULL) | ||
5771 | dev_priv->renderctx = intel_alloc_context_page(dev); | ||
5772 | if (dev_priv->renderctx) { | ||
5773 | struct drm_i915_gem_object *obj_priv; | ||
5774 | obj_priv = to_intel_bo(dev_priv->renderctx); | ||
5775 | if (obj_priv) { | ||
5776 | BEGIN_LP_RING(4); | ||
5777 | OUT_RING(MI_SET_CONTEXT); | ||
5778 | OUT_RING(obj_priv->gtt_offset | | ||
5779 | MI_MM_SPACE_GTT | | ||
5780 | MI_SAVE_EXT_STATE_EN | | ||
5781 | MI_RESTORE_EXT_STATE_EN | | ||
5782 | MI_RESTORE_INHIBIT); | ||
5783 | OUT_RING(MI_NOOP); | ||
5784 | OUT_RING(MI_FLUSH); | ||
5785 | ADVANCE_LP_RING(); | ||
5786 | } | ||
5787 | } else { | ||
5788 | DRM_DEBUG_KMS("Failed to allocate render context." | ||
5789 | "Disable RC6\n"); | ||
5790 | return; | ||
5791 | } | ||
5792 | } | ||
5793 | |||
5771 | if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { | 5794 | if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { |
5772 | struct drm_i915_gem_object *obj_priv = NULL; | 5795 | struct drm_i915_gem_object *obj_priv = NULL; |
5773 | 5796 | ||
@@ -5776,7 +5799,7 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5776 | } else { | 5799 | } else { |
5777 | struct drm_gem_object *pwrctx; | 5800 | struct drm_gem_object *pwrctx; |
5778 | 5801 | ||
5779 | pwrctx = intel_alloc_power_context(dev); | 5802 | pwrctx = intel_alloc_context_page(dev); |
5780 | if (pwrctx) { | 5803 | if (pwrctx) { |
5781 | dev_priv->pwrctx = pwrctx; | 5804 | dev_priv->pwrctx = pwrctx; |
5782 | obj_priv = to_intel_bo(pwrctx); | 5805 | obj_priv = to_intel_bo(pwrctx); |
@@ -5948,6 +5971,29 @@ static void intel_init_quirks(struct drm_device *dev) | |||
5948 | } | 5971 | } |
5949 | } | 5972 | } |
5950 | 5973 | ||
5974 | /* Disable the VGA plane that we never use */ | ||
5975 | static void i915_disable_vga(struct drm_device *dev) | ||
5976 | { | ||
5977 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5978 | u8 sr1; | ||
5979 | u32 vga_reg; | ||
5980 | |||
5981 | if (HAS_PCH_SPLIT(dev)) | ||
5982 | vga_reg = CPU_VGACNTRL; | ||
5983 | else | ||
5984 | vga_reg = VGACNTRL; | ||
5985 | |||
5986 | vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); | ||
5987 | outb(1, VGA_SR_INDEX); | ||
5988 | sr1 = inb(VGA_SR_DATA); | ||
5989 | outb(sr1 | 1<<5, VGA_SR_DATA); | ||
5990 | vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); | ||
5991 | udelay(300); | ||
5992 | |||
5993 | I915_WRITE(vga_reg, VGA_DISP_DISABLE); | ||
5994 | POSTING_READ(vga_reg); | ||
5995 | } | ||
5996 | |||
5951 | void intel_modeset_init(struct drm_device *dev) | 5997 | void intel_modeset_init(struct drm_device *dev) |
5952 | { | 5998 | { |
5953 | struct drm_i915_private *dev_priv = dev->dev_private; | 5999 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -5996,6 +6042,9 @@ void intel_modeset_init(struct drm_device *dev) | |||
5996 | 6042 | ||
5997 | intel_init_clock_gating(dev); | 6043 | intel_init_clock_gating(dev); |
5998 | 6044 | ||
6045 | /* Just disable it once at startup */ | ||
6046 | i915_disable_vga(dev); | ||
6047 | |||
5999 | if (IS_IRONLAKE_M(dev)) { | 6048 | if (IS_IRONLAKE_M(dev)) { |
6000 | ironlake_enable_drps(dev); | 6049 | ironlake_enable_drps(dev); |
6001 | intel_init_emon(dev); | 6050 | intel_init_emon(dev); |
@@ -6034,6 +6083,16 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
6034 | if (dev_priv->display.disable_fbc) | 6083 | if (dev_priv->display.disable_fbc) |
6035 | dev_priv->display.disable_fbc(dev); | 6084 | dev_priv->display.disable_fbc(dev); |
6036 | 6085 | ||
6086 | if (dev_priv->renderctx) { | ||
6087 | struct drm_i915_gem_object *obj_priv; | ||
6088 | |||
6089 | obj_priv = to_intel_bo(dev_priv->renderctx); | ||
6090 | I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN); | ||
6091 | I915_READ(CCID); | ||
6092 | i915_gem_object_unpin(dev_priv->renderctx); | ||
6093 | drm_gem_object_unreference(dev_priv->renderctx); | ||
6094 | } | ||
6095 | |||
6037 | if (dev_priv->pwrctx) { | 6096 | if (dev_priv->pwrctx) { |
6038 | struct drm_i915_gem_object *obj_priv; | 6097 | struct drm_i915_gem_object *obj_priv; |
6039 | 6098 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 40be1fa65be1..9caccd03dccb 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -42,10 +42,11 @@ | |||
42 | 42 | ||
43 | #define DP_LINK_CONFIGURATION_SIZE 9 | 43 | #define DP_LINK_CONFIGURATION_SIZE 9 |
44 | 44 | ||
45 | #define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP) | 45 | #define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP) |
46 | #define IS_PCH_eDP(dp_priv) ((dp_priv)->is_pch_edp) | 46 | #define IS_PCH_eDP(i) ((i)->is_pch_edp) |
47 | 47 | ||
48 | struct intel_dp_priv { | 48 | struct intel_dp { |
49 | struct intel_encoder base; | ||
49 | uint32_t output_reg; | 50 | uint32_t output_reg; |
50 | uint32_t DP; | 51 | uint32_t DP; |
51 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; | 52 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; |
@@ -54,40 +55,39 @@ struct intel_dp_priv { | |||
54 | uint8_t link_bw; | 55 | uint8_t link_bw; |
55 | uint8_t lane_count; | 56 | uint8_t lane_count; |
56 | uint8_t dpcd[4]; | 57 | uint8_t dpcd[4]; |
57 | struct intel_encoder *intel_encoder; | ||
58 | struct i2c_adapter adapter; | 58 | struct i2c_adapter adapter; |
59 | struct i2c_algo_dp_aux_data algo; | 59 | struct i2c_algo_dp_aux_data algo; |
60 | bool is_pch_edp; | 60 | bool is_pch_edp; |
61 | }; | 61 | }; |
62 | 62 | ||
63 | static void | 63 | static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) |
64 | intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, | 64 | { |
65 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]); | 65 | return container_of(enc_to_intel_encoder(encoder), struct intel_dp, base); |
66 | } | ||
66 | 67 | ||
67 | static void | 68 | static void intel_dp_link_train(struct intel_dp *intel_dp); |
68 | intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP); | 69 | static void intel_dp_link_down(struct intel_dp *intel_dp); |
69 | 70 | ||
70 | void | 71 | void |
71 | intel_edp_link_config (struct intel_encoder *intel_encoder, | 72 | intel_edp_link_config (struct intel_encoder *intel_encoder, |
72 | int *lane_num, int *link_bw) | 73 | int *lane_num, int *link_bw) |
73 | { | 74 | { |
74 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | 75 | struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); |
75 | 76 | ||
76 | *lane_num = dp_priv->lane_count; | 77 | *lane_num = intel_dp->lane_count; |
77 | if (dp_priv->link_bw == DP_LINK_BW_1_62) | 78 | if (intel_dp->link_bw == DP_LINK_BW_1_62) |
78 | *link_bw = 162000; | 79 | *link_bw = 162000; |
79 | else if (dp_priv->link_bw == DP_LINK_BW_2_7) | 80 | else if (intel_dp->link_bw == DP_LINK_BW_2_7) |
80 | *link_bw = 270000; | 81 | *link_bw = 270000; |
81 | } | 82 | } |
82 | 83 | ||
83 | static int | 84 | static int |
84 | intel_dp_max_lane_count(struct intel_encoder *intel_encoder) | 85 | intel_dp_max_lane_count(struct intel_dp *intel_dp) |
85 | { | 86 | { |
86 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | ||
87 | int max_lane_count = 4; | 87 | int max_lane_count = 4; |
88 | 88 | ||
89 | if (dp_priv->dpcd[0] >= 0x11) { | 89 | if (intel_dp->dpcd[0] >= 0x11) { |
90 | max_lane_count = dp_priv->dpcd[2] & 0x1f; | 90 | max_lane_count = intel_dp->dpcd[2] & 0x1f; |
91 | switch (max_lane_count) { | 91 | switch (max_lane_count) { |
92 | case 1: case 2: case 4: | 92 | case 1: case 2: case 4: |
93 | break; | 93 | break; |
@@ -99,10 +99,9 @@ intel_dp_max_lane_count(struct intel_encoder *intel_encoder) | |||
99 | } | 99 | } |
100 | 100 | ||
101 | static int | 101 | static int |
102 | intel_dp_max_link_bw(struct intel_encoder *intel_encoder) | 102 | intel_dp_max_link_bw(struct intel_dp *intel_dp) |
103 | { | 103 | { |
104 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | 104 | int max_link_bw = intel_dp->dpcd[1]; |
105 | int max_link_bw = dp_priv->dpcd[1]; | ||
106 | 105 | ||
107 | switch (max_link_bw) { | 106 | switch (max_link_bw) { |
108 | case DP_LINK_BW_1_62: | 107 | case DP_LINK_BW_1_62: |
@@ -126,13 +125,11 @@ intel_dp_link_clock(uint8_t link_bw) | |||
126 | 125 | ||
127 | /* I think this is a fiction */ | 126 | /* I think this is a fiction */ |
128 | static int | 127 | static int |
129 | intel_dp_link_required(struct drm_device *dev, | 128 | intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock) |
130 | struct intel_encoder *intel_encoder, int pixel_clock) | ||
131 | { | 129 | { |
132 | struct drm_i915_private *dev_priv = dev->dev_private; | 130 | struct drm_i915_private *dev_priv = dev->dev_private; |
133 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | ||
134 | 131 | ||
135 | if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) | 132 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) |
136 | return (pixel_clock * dev_priv->edp_bpp) / 8; | 133 | return (pixel_clock * dev_priv->edp_bpp) / 8; |
137 | else | 134 | else |
138 | return pixel_clock * 3; | 135 | return pixel_clock * 3; |
@@ -149,14 +146,13 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
149 | struct drm_display_mode *mode) | 146 | struct drm_display_mode *mode) |
150 | { | 147 | { |
151 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 148 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
152 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 149 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
153 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | ||
154 | struct drm_device *dev = connector->dev; | 150 | struct drm_device *dev = connector->dev; |
155 | struct drm_i915_private *dev_priv = dev->dev_private; | 151 | struct drm_i915_private *dev_priv = dev->dev_private; |
156 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); | 152 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); |
157 | int max_lanes = intel_dp_max_lane_count(intel_encoder); | 153 | int max_lanes = intel_dp_max_lane_count(intel_dp); |
158 | 154 | ||
159 | if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) && | 155 | if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && |
160 | dev_priv->panel_fixed_mode) { | 156 | dev_priv->panel_fixed_mode) { |
161 | if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay) | 157 | if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay) |
162 | return MODE_PANEL; | 158 | return MODE_PANEL; |
@@ -167,8 +163,8 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
167 | 163 | ||
168 | /* only refuse the mode on non eDP since we have seen some wierd eDP panels | 164 | /* only refuse the mode on non eDP since we have seen some wierd eDP panels |
169 | which are outside spec tolerances but somehow work by magic */ | 165 | which are outside spec tolerances but somehow work by magic */ |
170 | if (!IS_eDP(intel_encoder) && | 166 | if (!IS_eDP(intel_dp) && |
171 | (intel_dp_link_required(connector->dev, intel_encoder, mode->clock) | 167 | (intel_dp_link_required(connector->dev, intel_dp, mode->clock) |
172 | > intel_dp_max_data_rate(max_link_clock, max_lanes))) | 168 | > intel_dp_max_data_rate(max_link_clock, max_lanes))) |
173 | return MODE_CLOCK_HIGH; | 169 | return MODE_CLOCK_HIGH; |
174 | 170 | ||
@@ -232,13 +228,12 @@ intel_hrawclk(struct drm_device *dev) | |||
232 | } | 228 | } |
233 | 229 | ||
234 | static int | 230 | static int |
235 | intel_dp_aux_ch(struct intel_encoder *intel_encoder, | 231 | intel_dp_aux_ch(struct intel_dp *intel_dp, |
236 | uint8_t *send, int send_bytes, | 232 | uint8_t *send, int send_bytes, |
237 | uint8_t *recv, int recv_size) | 233 | uint8_t *recv, int recv_size) |
238 | { | 234 | { |
239 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | 235 | uint32_t output_reg = intel_dp->output_reg; |
240 | uint32_t output_reg = dp_priv->output_reg; | 236 | struct drm_device *dev = intel_dp->base.enc.dev; |
241 | struct drm_device *dev = intel_encoder->enc.dev; | ||
242 | struct drm_i915_private *dev_priv = dev->dev_private; | 237 | struct drm_i915_private *dev_priv = dev->dev_private; |
243 | uint32_t ch_ctl = output_reg + 0x10; | 238 | uint32_t ch_ctl = output_reg + 0x10; |
244 | uint32_t ch_data = ch_ctl + 4; | 239 | uint32_t ch_data = ch_ctl + 4; |
@@ -253,7 +248,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder, | |||
253 | * and would like to run at 2MHz. So, take the | 248 | * and would like to run at 2MHz. So, take the |
254 | * hrawclk value and divide by 2 and use that | 249 | * hrawclk value and divide by 2 and use that |
255 | */ | 250 | */ |
256 | if (IS_eDP(intel_encoder)) { | 251 | if (IS_eDP(intel_dp)) { |
257 | if (IS_GEN6(dev)) | 252 | if (IS_GEN6(dev)) |
258 | aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ | 253 | aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ |
259 | else | 254 | else |
@@ -344,7 +339,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder, | |||
344 | 339 | ||
345 | /* Write data to the aux channel in native mode */ | 340 | /* Write data to the aux channel in native mode */ |
346 | static int | 341 | static int |
347 | intel_dp_aux_native_write(struct intel_encoder *intel_encoder, | 342 | intel_dp_aux_native_write(struct intel_dp *intel_dp, |
348 | uint16_t address, uint8_t *send, int send_bytes) | 343 | uint16_t address, uint8_t *send, int send_bytes) |
349 | { | 344 | { |
350 | int ret; | 345 | int ret; |
@@ -361,7 +356,7 @@ intel_dp_aux_native_write(struct intel_encoder *intel_encoder, | |||
361 | memcpy(&msg[4], send, send_bytes); | 356 | memcpy(&msg[4], send, send_bytes); |
362 | msg_bytes = send_bytes + 4; | 357 | msg_bytes = send_bytes + 4; |
363 | for (;;) { | 358 | for (;;) { |
364 | ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, &ack, 1); | 359 | ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); |
365 | if (ret < 0) | 360 | if (ret < 0) |
366 | return ret; | 361 | return ret; |
367 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) | 362 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) |
@@ -376,15 +371,15 @@ intel_dp_aux_native_write(struct intel_encoder *intel_encoder, | |||
376 | 371 | ||
377 | /* Write a single byte to the aux channel in native mode */ | 372 | /* Write a single byte to the aux channel in native mode */ |
378 | static int | 373 | static int |
379 | intel_dp_aux_native_write_1(struct intel_encoder *intel_encoder, | 374 | intel_dp_aux_native_write_1(struct intel_dp *intel_dp, |
380 | uint16_t address, uint8_t byte) | 375 | uint16_t address, uint8_t byte) |
381 | { | 376 | { |
382 | return intel_dp_aux_native_write(intel_encoder, address, &byte, 1); | 377 | return intel_dp_aux_native_write(intel_dp, address, &byte, 1); |
383 | } | 378 | } |
384 | 379 | ||
385 | /* read bytes from a native aux channel */ | 380 | /* read bytes from a native aux channel */ |
386 | static int | 381 | static int |
387 | intel_dp_aux_native_read(struct intel_encoder *intel_encoder, | 382 | intel_dp_aux_native_read(struct intel_dp *intel_dp, |
388 | uint16_t address, uint8_t *recv, int recv_bytes) | 383 | uint16_t address, uint8_t *recv, int recv_bytes) |
389 | { | 384 | { |
390 | uint8_t msg[4]; | 385 | uint8_t msg[4]; |
@@ -403,7 +398,7 @@ intel_dp_aux_native_read(struct intel_encoder *intel_encoder, | |||
403 | reply_bytes = recv_bytes + 1; | 398 | reply_bytes = recv_bytes + 1; |
404 | 399 | ||
405 | for (;;) { | 400 | for (;;) { |
406 | ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, | 401 | ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, |
407 | reply, reply_bytes); | 402 | reply, reply_bytes); |
408 | if (ret == 0) | 403 | if (ret == 0) |
409 | return -EPROTO; | 404 | return -EPROTO; |
@@ -426,10 +421,9 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
426 | uint8_t write_byte, uint8_t *read_byte) | 421 | uint8_t write_byte, uint8_t *read_byte) |
427 | { | 422 | { |
428 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | 423 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; |
429 | struct intel_dp_priv *dp_priv = container_of(adapter, | 424 | struct intel_dp *intel_dp = container_of(adapter, |
430 | struct intel_dp_priv, | 425 | struct intel_dp, |
431 | adapter); | 426 | adapter); |
432 | struct intel_encoder *intel_encoder = dp_priv->intel_encoder; | ||
433 | uint16_t address = algo_data->address; | 427 | uint16_t address = algo_data->address; |
434 | uint8_t msg[5]; | 428 | uint8_t msg[5]; |
435 | uint8_t reply[2]; | 429 | uint8_t reply[2]; |
@@ -468,7 +462,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
468 | } | 462 | } |
469 | 463 | ||
470 | for (;;) { | 464 | for (;;) { |
471 | ret = intel_dp_aux_ch(intel_encoder, | 465 | ret = intel_dp_aux_ch(intel_dp, |
472 | msg, msg_bytes, | 466 | msg, msg_bytes, |
473 | reply, reply_bytes); | 467 | reply, reply_bytes); |
474 | if (ret < 0) { | 468 | if (ret < 0) { |
@@ -496,57 +490,42 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
496 | } | 490 | } |
497 | 491 | ||
498 | static int | 492 | static int |
499 | intel_dp_i2c_init(struct intel_encoder *intel_encoder, | 493 | intel_dp_i2c_init(struct intel_dp *intel_dp, |
500 | struct intel_connector *intel_connector, const char *name) | 494 | struct intel_connector *intel_connector, const char *name) |
501 | { | 495 | { |
502 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | ||
503 | |||
504 | DRM_DEBUG_KMS("i2c_init %s\n", name); | 496 | DRM_DEBUG_KMS("i2c_init %s\n", name); |
505 | dp_priv->algo.running = false; | 497 | intel_dp->algo.running = false; |
506 | dp_priv->algo.address = 0; | 498 | intel_dp->algo.address = 0; |
507 | dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch; | 499 | intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch; |
508 | 500 | ||
509 | memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter)); | 501 | memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter)); |
510 | dp_priv->adapter.owner = THIS_MODULE; | 502 | intel_dp->adapter.owner = THIS_MODULE; |
511 | dp_priv->adapter.class = I2C_CLASS_DDC; | 503 | intel_dp->adapter.class = I2C_CLASS_DDC; |
512 | strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); | 504 | strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); |
513 | dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; | 505 | intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; |
514 | dp_priv->adapter.algo_data = &dp_priv->algo; | 506 | intel_dp->adapter.algo_data = &intel_dp->algo; |
515 | dp_priv->adapter.dev.parent = &intel_connector->base.kdev; | 507 | intel_dp->adapter.dev.parent = &intel_connector->base.kdev; |
516 | 508 | ||
517 | return i2c_dp_aux_add_bus(&dp_priv->adapter); | 509 | return i2c_dp_aux_add_bus(&intel_dp->adapter); |
518 | } | 510 | } |
519 | 511 | ||
520 | static bool | 512 | static bool |
521 | intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | 513 | intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, |
522 | struct drm_display_mode *adjusted_mode) | 514 | struct drm_display_mode *adjusted_mode) |
523 | { | 515 | { |
524 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
525 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | ||
526 | struct drm_device *dev = encoder->dev; | 516 | struct drm_device *dev = encoder->dev; |
527 | struct drm_i915_private *dev_priv = dev->dev_private; | 517 | struct drm_i915_private *dev_priv = dev->dev_private; |
518 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | ||
528 | int lane_count, clock; | 519 | int lane_count, clock; |
529 | int max_lane_count = intel_dp_max_lane_count(intel_encoder); | 520 | int max_lane_count = intel_dp_max_lane_count(intel_dp); |
530 | int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0; | 521 | int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; |
531 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; | 522 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; |
532 | 523 | ||
533 | if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) && | 524 | if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && |
534 | dev_priv->panel_fixed_mode) { | 525 | dev_priv->panel_fixed_mode) { |
535 | struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode; | 526 | intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); |
536 | 527 | intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, | |
537 | adjusted_mode->hdisplay = fixed_mode->hdisplay; | 528 | mode, adjusted_mode); |
538 | adjusted_mode->hsync_start = fixed_mode->hsync_start; | ||
539 | adjusted_mode->hsync_end = fixed_mode->hsync_end; | ||
540 | adjusted_mode->htotal = fixed_mode->htotal; | ||
541 | |||
542 | adjusted_mode->vdisplay = fixed_mode->vdisplay; | ||
543 | adjusted_mode->vsync_start = fixed_mode->vsync_start; | ||
544 | adjusted_mode->vsync_end = fixed_mode->vsync_end; | ||
545 | adjusted_mode->vtotal = fixed_mode->vtotal; | ||
546 | |||
547 | adjusted_mode->clock = fixed_mode->clock; | ||
548 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); | ||
549 | |||
550 | /* | 529 | /* |
551 | * the mode->clock is used to calculate the Data&Link M/N | 530 | * the mode->clock is used to calculate the Data&Link M/N |
552 | * of the pipe. For the eDP the fixed clock should be used. | 531 | * of the pipe. For the eDP the fixed clock should be used. |
@@ -558,31 +537,33 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
558 | for (clock = 0; clock <= max_clock; clock++) { | 537 | for (clock = 0; clock <= max_clock; clock++) { |
559 | int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); | 538 | int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); |
560 | 539 | ||
561 | if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock) | 540 | if (intel_dp_link_required(encoder->dev, intel_dp, mode->clock) |
562 | <= link_avail) { | 541 | <= link_avail) { |
563 | dp_priv->link_bw = bws[clock]; | 542 | intel_dp->link_bw = bws[clock]; |
564 | dp_priv->lane_count = lane_count; | 543 | intel_dp->lane_count = lane_count; |
565 | adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); | 544 | adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); |
566 | DRM_DEBUG_KMS("Display port link bw %02x lane " | 545 | DRM_DEBUG_KMS("Display port link bw %02x lane " |
567 | "count %d clock %d\n", | 546 | "count %d clock %d\n", |
568 | dp_priv->link_bw, dp_priv->lane_count, | 547 | intel_dp->link_bw, intel_dp->lane_count, |
569 | adjusted_mode->clock); | 548 | adjusted_mode->clock); |
570 | return true; | 549 | return true; |
571 | } | 550 | } |
572 | } | 551 | } |
573 | } | 552 | } |
574 | 553 | ||
575 | if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { | 554 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { |
576 | /* okay we failed just pick the highest */ | 555 | /* okay we failed just pick the highest */ |
577 | dp_priv->lane_count = max_lane_count; | 556 | intel_dp->lane_count = max_lane_count; |
578 | dp_priv->link_bw = bws[max_clock]; | 557 | intel_dp->link_bw = bws[max_clock]; |
579 | adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); | 558 | adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); |
580 | DRM_DEBUG_KMS("Force picking display port link bw %02x lane " | 559 | DRM_DEBUG_KMS("Force picking display port link bw %02x lane " |
581 | "count %d clock %d\n", | 560 | "count %d clock %d\n", |
582 | dp_priv->link_bw, dp_priv->lane_count, | 561 | intel_dp->link_bw, intel_dp->lane_count, |
583 | adjusted_mode->clock); | 562 | adjusted_mode->clock); |
563 | |||
584 | return true; | 564 | return true; |
585 | } | 565 | } |
566 | |||
586 | return false; | 567 | return false; |
587 | } | 568 | } |
588 | 569 | ||
@@ -626,17 +607,14 @@ bool intel_pch_has_edp(struct drm_crtc *crtc) | |||
626 | struct drm_encoder *encoder; | 607 | struct drm_encoder *encoder; |
627 | 608 | ||
628 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { | 609 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { |
629 | struct intel_encoder *intel_encoder; | 610 | struct intel_dp *intel_dp; |
630 | struct intel_dp_priv *dp_priv; | ||
631 | 611 | ||
632 | if (!encoder || encoder->crtc != crtc) | 612 | if (encoder->crtc != crtc) |
633 | continue; | 613 | continue; |
634 | 614 | ||
635 | intel_encoder = enc_to_intel_encoder(encoder); | 615 | intel_dp = enc_to_intel_dp(encoder); |
636 | dp_priv = intel_encoder->dev_priv; | 616 | if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) |
637 | 617 | return intel_dp->is_pch_edp; | |
638 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) | ||
639 | return dp_priv->is_pch_edp; | ||
640 | } | 618 | } |
641 | return false; | 619 | return false; |
642 | } | 620 | } |
@@ -657,18 +635,15 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
657 | * Find the lane count in the intel_encoder private | 635 | * Find the lane count in the intel_encoder private |
658 | */ | 636 | */ |
659 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { | 637 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { |
660 | struct intel_encoder *intel_encoder; | 638 | struct intel_dp *intel_dp; |
661 | struct intel_dp_priv *dp_priv; | ||
662 | 639 | ||
663 | if (encoder->crtc != crtc) | 640 | if (encoder->crtc != crtc) |
664 | continue; | 641 | continue; |
665 | 642 | ||
666 | intel_encoder = enc_to_intel_encoder(encoder); | 643 | intel_dp = enc_to_intel_dp(encoder); |
667 | dp_priv = intel_encoder->dev_priv; | 644 | if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { |
668 | 645 | lane_count = intel_dp->lane_count; | |
669 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { | 646 | if (IS_PCH_eDP(intel_dp)) |
670 | lane_count = dp_priv->lane_count; | ||
671 | if (IS_PCH_eDP(dp_priv)) | ||
672 | bpp = dev_priv->edp_bpp; | 647 | bpp = dev_priv->edp_bpp; |
673 | break; | 648 | break; |
674 | } | 649 | } |
@@ -724,107 +699,114 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
724 | struct drm_display_mode *adjusted_mode) | 699 | struct drm_display_mode *adjusted_mode) |
725 | { | 700 | { |
726 | struct drm_device *dev = encoder->dev; | 701 | struct drm_device *dev = encoder->dev; |
727 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 702 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
728 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | 703 | struct drm_crtc *crtc = intel_dp->base.enc.crtc; |
729 | struct drm_crtc *crtc = intel_encoder->enc.crtc; | ||
730 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 704 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
731 | 705 | ||
732 | dp_priv->DP = (DP_VOLTAGE_0_4 | | 706 | intel_dp->DP = (DP_VOLTAGE_0_4 | |
733 | DP_PRE_EMPHASIS_0); | 707 | DP_PRE_EMPHASIS_0); |
734 | 708 | ||
735 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 709 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
736 | dp_priv->DP |= DP_SYNC_HS_HIGH; | 710 | intel_dp->DP |= DP_SYNC_HS_HIGH; |
737 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 711 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
738 | dp_priv->DP |= DP_SYNC_VS_HIGH; | 712 | intel_dp->DP |= DP_SYNC_VS_HIGH; |
739 | 713 | ||
740 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) | 714 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) |
741 | dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT; | 715 | intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; |
742 | else | 716 | else |
743 | dp_priv->DP |= DP_LINK_TRAIN_OFF; | 717 | intel_dp->DP |= DP_LINK_TRAIN_OFF; |
744 | 718 | ||
745 | switch (dp_priv->lane_count) { | 719 | switch (intel_dp->lane_count) { |
746 | case 1: | 720 | case 1: |
747 | dp_priv->DP |= DP_PORT_WIDTH_1; | 721 | intel_dp->DP |= DP_PORT_WIDTH_1; |
748 | break; | 722 | break; |
749 | case 2: | 723 | case 2: |
750 | dp_priv->DP |= DP_PORT_WIDTH_2; | 724 | intel_dp->DP |= DP_PORT_WIDTH_2; |
751 | break; | 725 | break; |
752 | case 4: | 726 | case 4: |
753 | dp_priv->DP |= DP_PORT_WIDTH_4; | 727 | intel_dp->DP |= DP_PORT_WIDTH_4; |
754 | break; | 728 | break; |
755 | } | 729 | } |
756 | if (dp_priv->has_audio) | 730 | if (intel_dp->has_audio) |
757 | dp_priv->DP |= DP_AUDIO_OUTPUT_ENABLE; | 731 | intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; |
758 | 732 | ||
759 | memset(dp_priv->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); | 733 | memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); |
760 | dp_priv->link_configuration[0] = dp_priv->link_bw; | 734 | intel_dp->link_configuration[0] = intel_dp->link_bw; |
761 | dp_priv->link_configuration[1] = dp_priv->lane_count; | 735 | intel_dp->link_configuration[1] = intel_dp->lane_count; |
762 | 736 | ||
763 | /* | 737 | /* |
764 | * Check for DPCD version > 1.1 and enhanced framing support | 738 | * Check for DPCD version > 1.1 and enhanced framing support |
765 | */ | 739 | */ |
766 | if (dp_priv->dpcd[0] >= 0x11 && (dp_priv->dpcd[2] & DP_ENHANCED_FRAME_CAP)) { | 740 | if (intel_dp->dpcd[0] >= 0x11 && (intel_dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)) { |
767 | dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | 741 | intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; |
768 | dp_priv->DP |= DP_ENHANCED_FRAMING; | 742 | intel_dp->DP |= DP_ENHANCED_FRAMING; |
769 | } | 743 | } |
770 | 744 | ||
771 | /* CPT DP's pipe select is decided in TRANS_DP_CTL */ | 745 | /* CPT DP's pipe select is decided in TRANS_DP_CTL */ |
772 | if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) | 746 | if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) |
773 | dp_priv->DP |= DP_PIPEB_SELECT; | 747 | intel_dp->DP |= DP_PIPEB_SELECT; |
774 | 748 | ||
775 | if (IS_eDP(intel_encoder)) { | 749 | if (IS_eDP(intel_dp)) { |
776 | /* don't miss out required setting for eDP */ | 750 | /* don't miss out required setting for eDP */ |
777 | dp_priv->DP |= DP_PLL_ENABLE; | 751 | intel_dp->DP |= DP_PLL_ENABLE; |
778 | if (adjusted_mode->clock < 200000) | 752 | if (adjusted_mode->clock < 200000) |
779 | dp_priv->DP |= DP_PLL_FREQ_160MHZ; | 753 | intel_dp->DP |= DP_PLL_FREQ_160MHZ; |
780 | else | 754 | else |
781 | dp_priv->DP |= DP_PLL_FREQ_270MHZ; | 755 | intel_dp->DP |= DP_PLL_FREQ_270MHZ; |
782 | } | 756 | } |
783 | } | 757 | } |
784 | 758 | ||
785 | static void ironlake_edp_panel_on (struct drm_device *dev) | 759 | static void ironlake_edp_panel_on (struct drm_device *dev) |
786 | { | 760 | { |
787 | struct drm_i915_private *dev_priv = dev->dev_private; | 761 | struct drm_i915_private *dev_priv = dev->dev_private; |
788 | unsigned long timeout = jiffies + msecs_to_jiffies(5000); | 762 | u32 pp; |
789 | u32 pp, pp_status; | ||
790 | 763 | ||
791 | pp_status = I915_READ(PCH_PP_STATUS); | 764 | if (I915_READ(PCH_PP_STATUS) & PP_ON) |
792 | if (pp_status & PP_ON) | ||
793 | return; | 765 | return; |
794 | 766 | ||
795 | pp = I915_READ(PCH_PP_CONTROL); | 767 | pp = I915_READ(PCH_PP_CONTROL); |
768 | |||
769 | /* ILK workaround: disable reset around power sequence */ | ||
770 | pp &= ~PANEL_POWER_RESET; | ||
771 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
772 | POSTING_READ(PCH_PP_CONTROL); | ||
773 | |||
796 | pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; | 774 | pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; |
797 | I915_WRITE(PCH_PP_CONTROL, pp); | 775 | I915_WRITE(PCH_PP_CONTROL, pp); |
798 | do { | ||
799 | pp_status = I915_READ(PCH_PP_STATUS); | ||
800 | } while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout)); | ||
801 | 776 | ||
802 | if (time_after(jiffies, timeout)) | 777 | if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10)) |
803 | DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status); | 778 | DRM_ERROR("panel on wait timed out: 0x%08x\n", |
779 | I915_READ(PCH_PP_STATUS)); | ||
804 | 780 | ||
805 | pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD); | 781 | pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD); |
782 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ | ||
806 | I915_WRITE(PCH_PP_CONTROL, pp); | 783 | I915_WRITE(PCH_PP_CONTROL, pp); |
784 | POSTING_READ(PCH_PP_CONTROL); | ||
807 | } | 785 | } |
808 | 786 | ||
809 | static void ironlake_edp_panel_off (struct drm_device *dev) | 787 | static void ironlake_edp_panel_off (struct drm_device *dev) |
810 | { | 788 | { |
811 | struct drm_i915_private *dev_priv = dev->dev_private; | 789 | struct drm_i915_private *dev_priv = dev->dev_private; |
812 | unsigned long timeout = jiffies + msecs_to_jiffies(5000); | 790 | u32 pp; |
813 | u32 pp, pp_status; | ||
814 | 791 | ||
815 | pp = I915_READ(PCH_PP_CONTROL); | 792 | pp = I915_READ(PCH_PP_CONTROL); |
793 | |||
794 | /* ILK workaround: disable reset around power sequence */ | ||
795 | pp &= ~PANEL_POWER_RESET; | ||
796 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
797 | POSTING_READ(PCH_PP_CONTROL); | ||
798 | |||
816 | pp &= ~POWER_TARGET_ON; | 799 | pp &= ~POWER_TARGET_ON; |
817 | I915_WRITE(PCH_PP_CONTROL, pp); | 800 | I915_WRITE(PCH_PP_CONTROL, pp); |
818 | do { | ||
819 | pp_status = I915_READ(PCH_PP_STATUS); | ||
820 | } while ((pp_status & PP_ON) && !time_after(jiffies, timeout)); | ||
821 | 801 | ||
822 | if (time_after(jiffies, timeout)) | 802 | if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000, 10)) |
823 | DRM_DEBUG_KMS("panel off wait timed out\n"); | 803 | DRM_ERROR("panel off wait timed out: 0x%08x\n", |
804 | I915_READ(PCH_PP_STATUS)); | ||
824 | 805 | ||
825 | /* Make sure VDD is enabled so DP AUX will work */ | 806 | /* Make sure VDD is enabled so DP AUX will work */ |
826 | pp |= EDP_FORCE_VDD; | 807 | pp |= EDP_FORCE_VDD | PANEL_POWER_RESET; /* restore panel reset bit */ |
827 | I915_WRITE(PCH_PP_CONTROL, pp); | 808 | I915_WRITE(PCH_PP_CONTROL, pp); |
809 | POSTING_READ(PCH_PP_CONTROL); | ||
828 | } | 810 | } |
829 | 811 | ||
830 | static void ironlake_edp_backlight_on (struct drm_device *dev) | 812 | static void ironlake_edp_backlight_on (struct drm_device *dev) |
@@ -849,33 +831,87 @@ static void ironlake_edp_backlight_off (struct drm_device *dev) | |||
849 | I915_WRITE(PCH_PP_CONTROL, pp); | 831 | I915_WRITE(PCH_PP_CONTROL, pp); |
850 | } | 832 | } |
851 | 833 | ||
834 | static void ironlake_edp_pll_on(struct drm_encoder *encoder) | ||
835 | { | ||
836 | struct drm_device *dev = encoder->dev; | ||
837 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
838 | u32 dpa_ctl; | ||
839 | |||
840 | DRM_DEBUG_KMS("\n"); | ||
841 | dpa_ctl = I915_READ(DP_A); | ||
842 | dpa_ctl &= ~DP_PLL_ENABLE; | ||
843 | I915_WRITE(DP_A, dpa_ctl); | ||
844 | } | ||
845 | |||
846 | static void ironlake_edp_pll_off(struct drm_encoder *encoder) | ||
847 | { | ||
848 | struct drm_device *dev = encoder->dev; | ||
849 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
850 | u32 dpa_ctl; | ||
851 | |||
852 | dpa_ctl = I915_READ(DP_A); | ||
853 | dpa_ctl |= DP_PLL_ENABLE; | ||
854 | I915_WRITE(DP_A, dpa_ctl); | ||
855 | udelay(200); | ||
856 | } | ||
857 | |||
858 | static void intel_dp_prepare(struct drm_encoder *encoder) | ||
859 | { | ||
860 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | ||
861 | struct drm_device *dev = encoder->dev; | ||
862 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
863 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); | ||
864 | |||
865 | if (IS_eDP(intel_dp)) { | ||
866 | ironlake_edp_backlight_off(dev); | ||
867 | ironlake_edp_panel_on(dev); | ||
868 | ironlake_edp_pll_on(encoder); | ||
869 | } | ||
870 | if (dp_reg & DP_PORT_EN) | ||
871 | intel_dp_link_down(intel_dp); | ||
872 | } | ||
873 | |||
874 | static void intel_dp_commit(struct drm_encoder *encoder) | ||
875 | { | ||
876 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | ||
877 | struct drm_device *dev = encoder->dev; | ||
878 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
879 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); | ||
880 | |||
881 | if (!(dp_reg & DP_PORT_EN)) { | ||
882 | intel_dp_link_train(intel_dp); | ||
883 | } | ||
884 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | ||
885 | ironlake_edp_backlight_on(dev); | ||
886 | } | ||
887 | |||
852 | static void | 888 | static void |
853 | intel_dp_dpms(struct drm_encoder *encoder, int mode) | 889 | intel_dp_dpms(struct drm_encoder *encoder, int mode) |
854 | { | 890 | { |
855 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 891 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
856 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | ||
857 | struct drm_device *dev = encoder->dev; | 892 | struct drm_device *dev = encoder->dev; |
858 | struct drm_i915_private *dev_priv = dev->dev_private; | 893 | struct drm_i915_private *dev_priv = dev->dev_private; |
859 | uint32_t dp_reg = I915_READ(dp_priv->output_reg); | 894 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); |
860 | 895 | ||
861 | if (mode != DRM_MODE_DPMS_ON) { | 896 | if (mode != DRM_MODE_DPMS_ON) { |
862 | if (dp_reg & DP_PORT_EN) { | 897 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { |
863 | intel_dp_link_down(intel_encoder, dp_priv->DP); | 898 | ironlake_edp_backlight_off(dev); |
864 | if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { | 899 | ironlake_edp_panel_off(dev); |
865 | ironlake_edp_backlight_off(dev); | ||
866 | ironlake_edp_panel_off(dev); | ||
867 | } | ||
868 | } | 900 | } |
901 | if (dp_reg & DP_PORT_EN) | ||
902 | intel_dp_link_down(intel_dp); | ||
903 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | ||
904 | ironlake_edp_pll_off(encoder); | ||
869 | } else { | 905 | } else { |
870 | if (!(dp_reg & DP_PORT_EN)) { | 906 | if (!(dp_reg & DP_PORT_EN)) { |
871 | intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); | 907 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) |
872 | if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { | ||
873 | ironlake_edp_panel_on(dev); | 908 | ironlake_edp_panel_on(dev); |
909 | intel_dp_link_train(intel_dp); | ||
910 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | ||
874 | ironlake_edp_backlight_on(dev); | 911 | ironlake_edp_backlight_on(dev); |
875 | } | ||
876 | } | 912 | } |
877 | } | 913 | } |
878 | dp_priv->dpms_mode = mode; | 914 | intel_dp->dpms_mode = mode; |
879 | } | 915 | } |
880 | 916 | ||
881 | /* | 917 | /* |
@@ -883,12 +919,12 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) | |||
883 | * link status information | 919 | * link status information |
884 | */ | 920 | */ |
885 | static bool | 921 | static bool |
886 | intel_dp_get_link_status(struct intel_encoder *intel_encoder, | 922 | intel_dp_get_link_status(struct intel_dp *intel_dp, |
887 | uint8_t link_status[DP_LINK_STATUS_SIZE]) | 923 | uint8_t link_status[DP_LINK_STATUS_SIZE]) |
888 | { | 924 | { |
889 | int ret; | 925 | int ret; |
890 | 926 | ||
891 | ret = intel_dp_aux_native_read(intel_encoder, | 927 | ret = intel_dp_aux_native_read(intel_dp, |
892 | DP_LANE0_1_STATUS, | 928 | DP_LANE0_1_STATUS, |
893 | link_status, DP_LINK_STATUS_SIZE); | 929 | link_status, DP_LINK_STATUS_SIZE); |
894 | if (ret != DP_LINK_STATUS_SIZE) | 930 | if (ret != DP_LINK_STATUS_SIZE) |
@@ -965,7 +1001,7 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing) | |||
965 | } | 1001 | } |
966 | 1002 | ||
967 | static void | 1003 | static void |
968 | intel_get_adjust_train(struct intel_encoder *intel_encoder, | 1004 | intel_get_adjust_train(struct intel_dp *intel_dp, |
969 | uint8_t link_status[DP_LINK_STATUS_SIZE], | 1005 | uint8_t link_status[DP_LINK_STATUS_SIZE], |
970 | int lane_count, | 1006 | int lane_count, |
971 | uint8_t train_set[4]) | 1007 | uint8_t train_set[4]) |
@@ -1101,27 +1137,27 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) | |||
1101 | } | 1137 | } |
1102 | 1138 | ||
1103 | static bool | 1139 | static bool |
1104 | intel_dp_set_link_train(struct intel_encoder *intel_encoder, | 1140 | intel_dp_set_link_train(struct intel_dp *intel_dp, |
1105 | uint32_t dp_reg_value, | 1141 | uint32_t dp_reg_value, |
1106 | uint8_t dp_train_pat, | 1142 | uint8_t dp_train_pat, |
1107 | uint8_t train_set[4], | 1143 | uint8_t train_set[4], |
1108 | bool first) | 1144 | bool first) |
1109 | { | 1145 | { |
1110 | struct drm_device *dev = intel_encoder->enc.dev; | 1146 | struct drm_device *dev = intel_dp->base.enc.dev; |
1111 | struct drm_i915_private *dev_priv = dev->dev_private; | 1147 | struct drm_i915_private *dev_priv = dev->dev_private; |
1112 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | 1148 | struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc); |
1113 | int ret; | 1149 | int ret; |
1114 | 1150 | ||
1115 | I915_WRITE(dp_priv->output_reg, dp_reg_value); | 1151 | I915_WRITE(intel_dp->output_reg, dp_reg_value); |
1116 | POSTING_READ(dp_priv->output_reg); | 1152 | POSTING_READ(intel_dp->output_reg); |
1117 | if (first) | 1153 | if (first) |
1118 | intel_wait_for_vblank(dev); | 1154 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
1119 | 1155 | ||
1120 | intel_dp_aux_native_write_1(intel_encoder, | 1156 | intel_dp_aux_native_write_1(intel_dp, |
1121 | DP_TRAINING_PATTERN_SET, | 1157 | DP_TRAINING_PATTERN_SET, |
1122 | dp_train_pat); | 1158 | dp_train_pat); |
1123 | 1159 | ||
1124 | ret = intel_dp_aux_native_write(intel_encoder, | 1160 | ret = intel_dp_aux_native_write(intel_dp, |
1125 | DP_TRAINING_LANE0_SET, train_set, 4); | 1161 | DP_TRAINING_LANE0_SET, train_set, 4); |
1126 | if (ret != 4) | 1162 | if (ret != 4) |
1127 | return false; | 1163 | return false; |
@@ -1130,12 +1166,10 @@ intel_dp_set_link_train(struct intel_encoder *intel_encoder, | |||
1130 | } | 1166 | } |
1131 | 1167 | ||
1132 | static void | 1168 | static void |
1133 | intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, | 1169 | intel_dp_link_train(struct intel_dp *intel_dp) |
1134 | uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) | ||
1135 | { | 1170 | { |
1136 | struct drm_device *dev = intel_encoder->enc.dev; | 1171 | struct drm_device *dev = intel_dp->base.enc.dev; |
1137 | struct drm_i915_private *dev_priv = dev->dev_private; | 1172 | struct drm_i915_private *dev_priv = dev->dev_private; |
1138 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | ||
1139 | uint8_t train_set[4]; | 1173 | uint8_t train_set[4]; |
1140 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | 1174 | uint8_t link_status[DP_LINK_STATUS_SIZE]; |
1141 | int i; | 1175 | int i; |
@@ -1145,13 +1179,15 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, | |||
1145 | bool first = true; | 1179 | bool first = true; |
1146 | int tries; | 1180 | int tries; |
1147 | u32 reg; | 1181 | u32 reg; |
1182 | uint32_t DP = intel_dp->DP; | ||
1148 | 1183 | ||
1149 | /* Write the link configuration data */ | 1184 | /* Write the link configuration data */ |
1150 | intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET, | 1185 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, |
1151 | link_configuration, DP_LINK_CONFIGURATION_SIZE); | 1186 | intel_dp->link_configuration, |
1187 | DP_LINK_CONFIGURATION_SIZE); | ||
1152 | 1188 | ||
1153 | DP |= DP_PORT_EN; | 1189 | DP |= DP_PORT_EN; |
1154 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) | 1190 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) |
1155 | DP &= ~DP_LINK_TRAIN_MASK_CPT; | 1191 | DP &= ~DP_LINK_TRAIN_MASK_CPT; |
1156 | else | 1192 | else |
1157 | DP &= ~DP_LINK_TRAIN_MASK; | 1193 | DP &= ~DP_LINK_TRAIN_MASK; |
@@ -1162,39 +1198,39 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, | |||
1162 | for (;;) { | 1198 | for (;;) { |
1163 | /* Use train_set[0] to set the voltage and pre emphasis values */ | 1199 | /* Use train_set[0] to set the voltage and pre emphasis values */ |
1164 | uint32_t signal_levels; | 1200 | uint32_t signal_levels; |
1165 | if (IS_GEN6(dev) && IS_eDP(intel_encoder)) { | 1201 | if (IS_GEN6(dev) && IS_eDP(intel_dp)) { |
1166 | signal_levels = intel_gen6_edp_signal_levels(train_set[0]); | 1202 | signal_levels = intel_gen6_edp_signal_levels(train_set[0]); |
1167 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; | 1203 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; |
1168 | } else { | 1204 | } else { |
1169 | signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); | 1205 | signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count); |
1170 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1206 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1171 | } | 1207 | } |
1172 | 1208 | ||
1173 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) | 1209 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) |
1174 | reg = DP | DP_LINK_TRAIN_PAT_1_CPT; | 1210 | reg = DP | DP_LINK_TRAIN_PAT_1_CPT; |
1175 | else | 1211 | else |
1176 | reg = DP | DP_LINK_TRAIN_PAT_1; | 1212 | reg = DP | DP_LINK_TRAIN_PAT_1; |
1177 | 1213 | ||
1178 | if (!intel_dp_set_link_train(intel_encoder, reg, | 1214 | if (!intel_dp_set_link_train(intel_dp, reg, |
1179 | DP_TRAINING_PATTERN_1, train_set, first)) | 1215 | DP_TRAINING_PATTERN_1, train_set, first)) |
1180 | break; | 1216 | break; |
1181 | first = false; | 1217 | first = false; |
1182 | /* Set training pattern 1 */ | 1218 | /* Set training pattern 1 */ |
1183 | 1219 | ||
1184 | udelay(100); | 1220 | udelay(100); |
1185 | if (!intel_dp_get_link_status(intel_encoder, link_status)) | 1221 | if (!intel_dp_get_link_status(intel_dp, link_status)) |
1186 | break; | 1222 | break; |
1187 | 1223 | ||
1188 | if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) { | 1224 | if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { |
1189 | clock_recovery = true; | 1225 | clock_recovery = true; |
1190 | break; | 1226 | break; |
1191 | } | 1227 | } |
1192 | 1228 | ||
1193 | /* Check to see if we've tried the max voltage */ | 1229 | /* Check to see if we've tried the max voltage */ |
1194 | for (i = 0; i < dp_priv->lane_count; i++) | 1230 | for (i = 0; i < intel_dp->lane_count; i++) |
1195 | if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) | 1231 | if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) |
1196 | break; | 1232 | break; |
1197 | if (i == dp_priv->lane_count) | 1233 | if (i == intel_dp->lane_count) |
1198 | break; | 1234 | break; |
1199 | 1235 | ||
1200 | /* Check to see if we've tried the same voltage 5 times */ | 1236 | /* Check to see if we've tried the same voltage 5 times */ |
@@ -1207,7 +1243,7 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, | |||
1207 | voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; | 1243 | voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; |
1208 | 1244 | ||
1209 | /* Compute new train_set as requested by target */ | 1245 | /* Compute new train_set as requested by target */ |
1210 | intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set); | 1246 | intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set); |
1211 | } | 1247 | } |
1212 | 1248 | ||
1213 | /* channel equalization */ | 1249 | /* channel equalization */ |
@@ -1217,30 +1253,30 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, | |||
1217 | /* Use train_set[0] to set the voltage and pre emphasis values */ | 1253 | /* Use train_set[0] to set the voltage and pre emphasis values */ |
1218 | uint32_t signal_levels; | 1254 | uint32_t signal_levels; |
1219 | 1255 | ||
1220 | if (IS_GEN6(dev) && IS_eDP(intel_encoder)) { | 1256 | if (IS_GEN6(dev) && IS_eDP(intel_dp)) { |
1221 | signal_levels = intel_gen6_edp_signal_levels(train_set[0]); | 1257 | signal_levels = intel_gen6_edp_signal_levels(train_set[0]); |
1222 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; | 1258 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; |
1223 | } else { | 1259 | } else { |
1224 | signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); | 1260 | signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count); |
1225 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1261 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1226 | } | 1262 | } |
1227 | 1263 | ||
1228 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) | 1264 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) |
1229 | reg = DP | DP_LINK_TRAIN_PAT_2_CPT; | 1265 | reg = DP | DP_LINK_TRAIN_PAT_2_CPT; |
1230 | else | 1266 | else |
1231 | reg = DP | DP_LINK_TRAIN_PAT_2; | 1267 | reg = DP | DP_LINK_TRAIN_PAT_2; |
1232 | 1268 | ||
1233 | /* channel eq pattern */ | 1269 | /* channel eq pattern */ |
1234 | if (!intel_dp_set_link_train(intel_encoder, reg, | 1270 | if (!intel_dp_set_link_train(intel_dp, reg, |
1235 | DP_TRAINING_PATTERN_2, train_set, | 1271 | DP_TRAINING_PATTERN_2, train_set, |
1236 | false)) | 1272 | false)) |
1237 | break; | 1273 | break; |
1238 | 1274 | ||
1239 | udelay(400); | 1275 | udelay(400); |
1240 | if (!intel_dp_get_link_status(intel_encoder, link_status)) | 1276 | if (!intel_dp_get_link_status(intel_dp, link_status)) |
1241 | break; | 1277 | break; |
1242 | 1278 | ||
1243 | if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) { | 1279 | if (intel_channel_eq_ok(link_status, intel_dp->lane_count)) { |
1244 | channel_eq = true; | 1280 | channel_eq = true; |
1245 | break; | 1281 | break; |
1246 | } | 1282 | } |
@@ -1250,53 +1286,53 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, | |||
1250 | break; | 1286 | break; |
1251 | 1287 | ||
1252 | /* Compute new train_set as requested by target */ | 1288 | /* Compute new train_set as requested by target */ |
1253 | intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set); | 1289 | intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set); |
1254 | ++tries; | 1290 | ++tries; |
1255 | } | 1291 | } |
1256 | 1292 | ||
1257 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) | 1293 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) |
1258 | reg = DP | DP_LINK_TRAIN_OFF_CPT; | 1294 | reg = DP | DP_LINK_TRAIN_OFF_CPT; |
1259 | else | 1295 | else |
1260 | reg = DP | DP_LINK_TRAIN_OFF; | 1296 | reg = DP | DP_LINK_TRAIN_OFF; |
1261 | 1297 | ||
1262 | I915_WRITE(dp_priv->output_reg, reg); | 1298 | I915_WRITE(intel_dp->output_reg, reg); |
1263 | POSTING_READ(dp_priv->output_reg); | 1299 | POSTING_READ(intel_dp->output_reg); |
1264 | intel_dp_aux_native_write_1(intel_encoder, | 1300 | intel_dp_aux_native_write_1(intel_dp, |
1265 | DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); | 1301 | DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); |
1266 | } | 1302 | } |
1267 | 1303 | ||
1268 | static void | 1304 | static void |
1269 | intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP) | 1305 | intel_dp_link_down(struct intel_dp *intel_dp) |
1270 | { | 1306 | { |
1271 | struct drm_device *dev = intel_encoder->enc.dev; | 1307 | struct drm_device *dev = intel_dp->base.enc.dev; |
1272 | struct drm_i915_private *dev_priv = dev->dev_private; | 1308 | struct drm_i915_private *dev_priv = dev->dev_private; |
1273 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | 1309 | uint32_t DP = intel_dp->DP; |
1274 | 1310 | ||
1275 | DRM_DEBUG_KMS("\n"); | 1311 | DRM_DEBUG_KMS("\n"); |
1276 | 1312 | ||
1277 | if (IS_eDP(intel_encoder)) { | 1313 | if (IS_eDP(intel_dp)) { |
1278 | DP &= ~DP_PLL_ENABLE; | 1314 | DP &= ~DP_PLL_ENABLE; |
1279 | I915_WRITE(dp_priv->output_reg, DP); | 1315 | I915_WRITE(intel_dp->output_reg, DP); |
1280 | POSTING_READ(dp_priv->output_reg); | 1316 | POSTING_READ(intel_dp->output_reg); |
1281 | udelay(100); | 1317 | udelay(100); |
1282 | } | 1318 | } |
1283 | 1319 | ||
1284 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) { | 1320 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) { |
1285 | DP &= ~DP_LINK_TRAIN_MASK_CPT; | 1321 | DP &= ~DP_LINK_TRAIN_MASK_CPT; |
1286 | I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); | 1322 | I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); |
1287 | POSTING_READ(dp_priv->output_reg); | 1323 | POSTING_READ(intel_dp->output_reg); |
1288 | } else { | 1324 | } else { |
1289 | DP &= ~DP_LINK_TRAIN_MASK; | 1325 | DP &= ~DP_LINK_TRAIN_MASK; |
1290 | I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); | 1326 | I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); |
1291 | POSTING_READ(dp_priv->output_reg); | 1327 | POSTING_READ(intel_dp->output_reg); |
1292 | } | 1328 | } |
1293 | 1329 | ||
1294 | udelay(17000); | 1330 | udelay(17000); |
1295 | 1331 | ||
1296 | if (IS_eDP(intel_encoder)) | 1332 | if (IS_eDP(intel_dp)) |
1297 | DP |= DP_LINK_TRAIN_OFF; | 1333 | DP |= DP_LINK_TRAIN_OFF; |
1298 | I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN); | 1334 | I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); |
1299 | POSTING_READ(dp_priv->output_reg); | 1335 | POSTING_READ(intel_dp->output_reg); |
1300 | } | 1336 | } |
1301 | 1337 | ||
1302 | /* | 1338 | /* |
@@ -1309,41 +1345,39 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP) | |||
1309 | */ | 1345 | */ |
1310 | 1346 | ||
1311 | static void | 1347 | static void |
1312 | intel_dp_check_link_status(struct intel_encoder *intel_encoder) | 1348 | intel_dp_check_link_status(struct intel_dp *intel_dp) |
1313 | { | 1349 | { |
1314 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | ||
1315 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | 1350 | uint8_t link_status[DP_LINK_STATUS_SIZE]; |
1316 | 1351 | ||
1317 | if (!intel_encoder->enc.crtc) | 1352 | if (!intel_dp->base.enc.crtc) |
1318 | return; | 1353 | return; |
1319 | 1354 | ||
1320 | if (!intel_dp_get_link_status(intel_encoder, link_status)) { | 1355 | if (!intel_dp_get_link_status(intel_dp, link_status)) { |
1321 | intel_dp_link_down(intel_encoder, dp_priv->DP); | 1356 | intel_dp_link_down(intel_dp); |
1322 | return; | 1357 | return; |
1323 | } | 1358 | } |
1324 | 1359 | ||
1325 | if (!intel_channel_eq_ok(link_status, dp_priv->lane_count)) | 1360 | if (!intel_channel_eq_ok(link_status, intel_dp->lane_count)) |
1326 | intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); | 1361 | intel_dp_link_train(intel_dp); |
1327 | } | 1362 | } |
1328 | 1363 | ||
1329 | static enum drm_connector_status | 1364 | static enum drm_connector_status |
1330 | ironlake_dp_detect(struct drm_connector *connector) | 1365 | ironlake_dp_detect(struct drm_connector *connector) |
1331 | { | 1366 | { |
1332 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1367 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1333 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 1368 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
1334 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | ||
1335 | enum drm_connector_status status; | 1369 | enum drm_connector_status status; |
1336 | 1370 | ||
1337 | status = connector_status_disconnected; | 1371 | status = connector_status_disconnected; |
1338 | if (intel_dp_aux_native_read(intel_encoder, | 1372 | if (intel_dp_aux_native_read(intel_dp, |
1339 | 0x000, dp_priv->dpcd, | 1373 | 0x000, intel_dp->dpcd, |
1340 | sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) | 1374 | sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) |
1341 | { | 1375 | { |
1342 | if (dp_priv->dpcd[0] != 0) | 1376 | if (intel_dp->dpcd[0] != 0) |
1343 | status = connector_status_connected; | 1377 | status = connector_status_connected; |
1344 | } | 1378 | } |
1345 | DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", dp_priv->dpcd[0], | 1379 | DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0], |
1346 | dp_priv->dpcd[1], dp_priv->dpcd[2], dp_priv->dpcd[3]); | 1380 | intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]); |
1347 | return status; | 1381 | return status; |
1348 | } | 1382 | } |
1349 | 1383 | ||
@@ -1357,19 +1391,18 @@ static enum drm_connector_status | |||
1357 | intel_dp_detect(struct drm_connector *connector) | 1391 | intel_dp_detect(struct drm_connector *connector) |
1358 | { | 1392 | { |
1359 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1393 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1360 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 1394 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
1361 | struct drm_device *dev = intel_encoder->enc.dev; | 1395 | struct drm_device *dev = intel_dp->base.enc.dev; |
1362 | struct drm_i915_private *dev_priv = dev->dev_private; | 1396 | struct drm_i915_private *dev_priv = dev->dev_private; |
1363 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | ||
1364 | uint32_t temp, bit; | 1397 | uint32_t temp, bit; |
1365 | enum drm_connector_status status; | 1398 | enum drm_connector_status status; |
1366 | 1399 | ||
1367 | dp_priv->has_audio = false; | 1400 | intel_dp->has_audio = false; |
1368 | 1401 | ||
1369 | if (HAS_PCH_SPLIT(dev)) | 1402 | if (HAS_PCH_SPLIT(dev)) |
1370 | return ironlake_dp_detect(connector); | 1403 | return ironlake_dp_detect(connector); |
1371 | 1404 | ||
1372 | switch (dp_priv->output_reg) { | 1405 | switch (intel_dp->output_reg) { |
1373 | case DP_B: | 1406 | case DP_B: |
1374 | bit = DPB_HOTPLUG_INT_STATUS; | 1407 | bit = DPB_HOTPLUG_INT_STATUS; |
1375 | break; | 1408 | break; |
@@ -1389,11 +1422,11 @@ intel_dp_detect(struct drm_connector *connector) | |||
1389 | return connector_status_disconnected; | 1422 | return connector_status_disconnected; |
1390 | 1423 | ||
1391 | status = connector_status_disconnected; | 1424 | status = connector_status_disconnected; |
1392 | if (intel_dp_aux_native_read(intel_encoder, | 1425 | if (intel_dp_aux_native_read(intel_dp, |
1393 | 0x000, dp_priv->dpcd, | 1426 | 0x000, intel_dp->dpcd, |
1394 | sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) | 1427 | sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) |
1395 | { | 1428 | { |
1396 | if (dp_priv->dpcd[0] != 0) | 1429 | if (intel_dp->dpcd[0] != 0) |
1397 | status = connector_status_connected; | 1430 | status = connector_status_connected; |
1398 | } | 1431 | } |
1399 | return status; | 1432 | return status; |
@@ -1402,18 +1435,17 @@ intel_dp_detect(struct drm_connector *connector) | |||
1402 | static int intel_dp_get_modes(struct drm_connector *connector) | 1435 | static int intel_dp_get_modes(struct drm_connector *connector) |
1403 | { | 1436 | { |
1404 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1437 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1405 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 1438 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
1406 | struct drm_device *dev = intel_encoder->enc.dev; | 1439 | struct drm_device *dev = intel_dp->base.enc.dev; |
1407 | struct drm_i915_private *dev_priv = dev->dev_private; | 1440 | struct drm_i915_private *dev_priv = dev->dev_private; |
1408 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | ||
1409 | int ret; | 1441 | int ret; |
1410 | 1442 | ||
1411 | /* We should parse the EDID data and find out if it has an audio sink | 1443 | /* We should parse the EDID data and find out if it has an audio sink |
1412 | */ | 1444 | */ |
1413 | 1445 | ||
1414 | ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); | 1446 | ret = intel_ddc_get_modes(connector, intel_dp->base.ddc_bus); |
1415 | if (ret) { | 1447 | if (ret) { |
1416 | if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) && | 1448 | if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && |
1417 | !dev_priv->panel_fixed_mode) { | 1449 | !dev_priv->panel_fixed_mode) { |
1418 | struct drm_display_mode *newmode; | 1450 | struct drm_display_mode *newmode; |
1419 | list_for_each_entry(newmode, &connector->probed_modes, | 1451 | list_for_each_entry(newmode, &connector->probed_modes, |
@@ -1430,7 +1462,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) | |||
1430 | } | 1462 | } |
1431 | 1463 | ||
1432 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ | 1464 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ |
1433 | if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { | 1465 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { |
1434 | if (dev_priv->panel_fixed_mode != NULL) { | 1466 | if (dev_priv->panel_fixed_mode != NULL) { |
1435 | struct drm_display_mode *mode; | 1467 | struct drm_display_mode *mode; |
1436 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); | 1468 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); |
@@ -1452,9 +1484,9 @@ intel_dp_destroy (struct drm_connector *connector) | |||
1452 | static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { | 1484 | static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { |
1453 | .dpms = intel_dp_dpms, | 1485 | .dpms = intel_dp_dpms, |
1454 | .mode_fixup = intel_dp_mode_fixup, | 1486 | .mode_fixup = intel_dp_mode_fixup, |
1455 | .prepare = intel_encoder_prepare, | 1487 | .prepare = intel_dp_prepare, |
1456 | .mode_set = intel_dp_mode_set, | 1488 | .mode_set = intel_dp_mode_set, |
1457 | .commit = intel_encoder_commit, | 1489 | .commit = intel_dp_commit, |
1458 | }; | 1490 | }; |
1459 | 1491 | ||
1460 | static const struct drm_connector_funcs intel_dp_connector_funcs = { | 1492 | static const struct drm_connector_funcs intel_dp_connector_funcs = { |
@@ -1470,27 +1502,17 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = | |||
1470 | .best_encoder = intel_attached_encoder, | 1502 | .best_encoder = intel_attached_encoder, |
1471 | }; | 1503 | }; |
1472 | 1504 | ||
1473 | static void intel_dp_enc_destroy(struct drm_encoder *encoder) | ||
1474 | { | ||
1475 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
1476 | |||
1477 | if (intel_encoder->i2c_bus) | ||
1478 | intel_i2c_destroy(intel_encoder->i2c_bus); | ||
1479 | drm_encoder_cleanup(encoder); | ||
1480 | kfree(intel_encoder); | ||
1481 | } | ||
1482 | |||
1483 | static const struct drm_encoder_funcs intel_dp_enc_funcs = { | 1505 | static const struct drm_encoder_funcs intel_dp_enc_funcs = { |
1484 | .destroy = intel_dp_enc_destroy, | 1506 | .destroy = intel_encoder_destroy, |
1485 | }; | 1507 | }; |
1486 | 1508 | ||
1487 | void | 1509 | void |
1488 | intel_dp_hot_plug(struct intel_encoder *intel_encoder) | 1510 | intel_dp_hot_plug(struct intel_encoder *intel_encoder) |
1489 | { | 1511 | { |
1490 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | 1512 | struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); |
1491 | 1513 | ||
1492 | if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) | 1514 | if (intel_dp->dpms_mode == DRM_MODE_DPMS_ON) |
1493 | intel_dp_check_link_status(intel_encoder); | 1515 | intel_dp_check_link_status(intel_dp); |
1494 | } | 1516 | } |
1495 | 1517 | ||
1496 | /* Return which DP Port should be selected for Transcoder DP control */ | 1518 | /* Return which DP Port should be selected for Transcoder DP control */ |
@@ -1500,18 +1522,18 @@ intel_trans_dp_port_sel (struct drm_crtc *crtc) | |||
1500 | struct drm_device *dev = crtc->dev; | 1522 | struct drm_device *dev = crtc->dev; |
1501 | struct drm_mode_config *mode_config = &dev->mode_config; | 1523 | struct drm_mode_config *mode_config = &dev->mode_config; |
1502 | struct drm_encoder *encoder; | 1524 | struct drm_encoder *encoder; |
1503 | struct intel_encoder *intel_encoder = NULL; | ||
1504 | 1525 | ||
1505 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { | 1526 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { |
1527 | struct intel_dp *intel_dp; | ||
1528 | |||
1506 | if (encoder->crtc != crtc) | 1529 | if (encoder->crtc != crtc) |
1507 | continue; | 1530 | continue; |
1508 | 1531 | ||
1509 | intel_encoder = enc_to_intel_encoder(encoder); | 1532 | intel_dp = enc_to_intel_dp(encoder); |
1510 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { | 1533 | if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) |
1511 | struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; | 1534 | return intel_dp->output_reg; |
1512 | return dp_priv->output_reg; | ||
1513 | } | ||
1514 | } | 1535 | } |
1536 | |||
1515 | return -1; | 1537 | return -1; |
1516 | } | 1538 | } |
1517 | 1539 | ||
@@ -1540,30 +1562,28 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1540 | { | 1562 | { |
1541 | struct drm_i915_private *dev_priv = dev->dev_private; | 1563 | struct drm_i915_private *dev_priv = dev->dev_private; |
1542 | struct drm_connector *connector; | 1564 | struct drm_connector *connector; |
1565 | struct intel_dp *intel_dp; | ||
1543 | struct intel_encoder *intel_encoder; | 1566 | struct intel_encoder *intel_encoder; |
1544 | struct intel_connector *intel_connector; | 1567 | struct intel_connector *intel_connector; |
1545 | struct intel_dp_priv *dp_priv; | ||
1546 | const char *name = NULL; | 1568 | const char *name = NULL; |
1547 | int type; | 1569 | int type; |
1548 | 1570 | ||
1549 | intel_encoder = kcalloc(sizeof(struct intel_encoder) + | 1571 | intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL); |
1550 | sizeof(struct intel_dp_priv), 1, GFP_KERNEL); | 1572 | if (!intel_dp) |
1551 | if (!intel_encoder) | ||
1552 | return; | 1573 | return; |
1553 | 1574 | ||
1554 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | 1575 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
1555 | if (!intel_connector) { | 1576 | if (!intel_connector) { |
1556 | kfree(intel_encoder); | 1577 | kfree(intel_dp); |
1557 | return; | 1578 | return; |
1558 | } | 1579 | } |
1580 | intel_encoder = &intel_dp->base; | ||
1559 | 1581 | ||
1560 | dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); | 1582 | if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D) |
1561 | |||
1562 | if (HAS_PCH_SPLIT(dev) && (output_reg == PCH_DP_D)) | ||
1563 | if (intel_dpd_is_edp(dev)) | 1583 | if (intel_dpd_is_edp(dev)) |
1564 | dp_priv->is_pch_edp = true; | 1584 | intel_dp->is_pch_edp = true; |
1565 | 1585 | ||
1566 | if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) { | 1586 | if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) { |
1567 | type = DRM_MODE_CONNECTOR_eDP; | 1587 | type = DRM_MODE_CONNECTOR_eDP; |
1568 | intel_encoder->type = INTEL_OUTPUT_EDP; | 1588 | intel_encoder->type = INTEL_OUTPUT_EDP; |
1569 | } else { | 1589 | } else { |
@@ -1584,18 +1604,16 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1584 | else if (output_reg == DP_D || output_reg == PCH_DP_D) | 1604 | else if (output_reg == DP_D || output_reg == PCH_DP_D) |
1585 | intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); | 1605 | intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); |
1586 | 1606 | ||
1587 | if (IS_eDP(intel_encoder)) | 1607 | if (IS_eDP(intel_dp)) |
1588 | intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); | 1608 | intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); |
1589 | 1609 | ||
1590 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | 1610 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
1591 | connector->interlace_allowed = true; | 1611 | connector->interlace_allowed = true; |
1592 | connector->doublescan_allowed = 0; | 1612 | connector->doublescan_allowed = 0; |
1593 | 1613 | ||
1594 | dp_priv->intel_encoder = intel_encoder; | 1614 | intel_dp->output_reg = output_reg; |
1595 | dp_priv->output_reg = output_reg; | 1615 | intel_dp->has_audio = false; |
1596 | dp_priv->has_audio = false; | 1616 | intel_dp->dpms_mode = DRM_MODE_DPMS_ON; |
1597 | dp_priv->dpms_mode = DRM_MODE_DPMS_ON; | ||
1598 | intel_encoder->dev_priv = dp_priv; | ||
1599 | 1617 | ||
1600 | drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs, | 1618 | drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs, |
1601 | DRM_MODE_ENCODER_TMDS); | 1619 | DRM_MODE_ENCODER_TMDS); |
@@ -1630,12 +1648,12 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1630 | break; | 1648 | break; |
1631 | } | 1649 | } |
1632 | 1650 | ||
1633 | intel_dp_i2c_init(intel_encoder, intel_connector, name); | 1651 | intel_dp_i2c_init(intel_dp, intel_connector, name); |
1634 | 1652 | ||
1635 | intel_encoder->ddc_bus = &dp_priv->adapter; | 1653 | intel_encoder->ddc_bus = &intel_dp->adapter; |
1636 | intel_encoder->hot_plug = intel_dp_hot_plug; | 1654 | intel_encoder->hot_plug = intel_dp_hot_plug; |
1637 | 1655 | ||
1638 | if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) { | 1656 | if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) { |
1639 | /* initialize panel mode from VBT if available for eDP */ | 1657 | /* initialize panel mode from VBT if available for eDP */ |
1640 | if (dev_priv->lfp_lvds_vbt_mode) { | 1658 | if (dev_priv->lfp_lvds_vbt_mode) { |
1641 | dev_priv->panel_fixed_mode = | 1659 | dev_priv->panel_fixed_mode = |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index b2190148703a..0e92aa07b382 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -32,6 +32,20 @@ | |||
32 | #include "drm_crtc.h" | 32 | #include "drm_crtc.h" |
33 | 33 | ||
34 | #include "drm_crtc_helper.h" | 34 | #include "drm_crtc_helper.h" |
35 | |||
36 | #define wait_for(COND, MS, W) ({ \ | ||
37 | unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ | ||
38 | int ret__ = 0; \ | ||
39 | while (! (COND)) { \ | ||
40 | if (time_after(jiffies, timeout__)) { \ | ||
41 | ret__ = -ETIMEDOUT; \ | ||
42 | break; \ | ||
43 | } \ | ||
44 | if (W) msleep(W); \ | ||
45 | } \ | ||
46 | ret__; \ | ||
47 | }) | ||
48 | |||
35 | /* | 49 | /* |
36 | * Display related stuff | 50 | * Display related stuff |
37 | */ | 51 | */ |
@@ -102,7 +116,6 @@ struct intel_encoder { | |||
102 | struct i2c_adapter *ddc_bus; | 116 | struct i2c_adapter *ddc_bus; |
103 | bool load_detect_temp; | 117 | bool load_detect_temp; |
104 | bool needs_tv_clock; | 118 | bool needs_tv_clock; |
105 | void *dev_priv; | ||
106 | void (*hot_plug)(struct intel_encoder *); | 119 | void (*hot_plug)(struct intel_encoder *); |
107 | int crtc_mask; | 120 | int crtc_mask; |
108 | int clone_mask; | 121 | int clone_mask; |
@@ -110,7 +123,6 @@ struct intel_encoder { | |||
110 | 123 | ||
111 | struct intel_connector { | 124 | struct intel_connector { |
112 | struct drm_connector base; | 125 | struct drm_connector base; |
113 | void *dev_priv; | ||
114 | }; | 126 | }; |
115 | 127 | ||
116 | struct intel_crtc; | 128 | struct intel_crtc; |
@@ -156,7 +168,7 @@ struct intel_crtc { | |||
156 | uint32_t cursor_addr; | 168 | uint32_t cursor_addr; |
157 | int16_t cursor_x, cursor_y; | 169 | int16_t cursor_x, cursor_y; |
158 | int16_t cursor_width, cursor_height; | 170 | int16_t cursor_width, cursor_height; |
159 | bool cursor_visble; | 171 | bool cursor_visible, cursor_on; |
160 | }; | 172 | }; |
161 | 173 | ||
162 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) | 174 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) |
@@ -188,10 +200,18 @@ extern bool intel_dpd_is_edp(struct drm_device *dev); | |||
188 | extern void intel_edp_link_config (struct intel_encoder *, int *, int *); | 200 | extern void intel_edp_link_config (struct intel_encoder *, int *, int *); |
189 | 201 | ||
190 | 202 | ||
203 | extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, | ||
204 | struct drm_display_mode *adjusted_mode); | ||
205 | extern void intel_pch_panel_fitting(struct drm_device *dev, | ||
206 | int fitting_mode, | ||
207 | struct drm_display_mode *mode, | ||
208 | struct drm_display_mode *adjusted_mode); | ||
209 | |||
191 | extern int intel_panel_fitter_pipe (struct drm_device *dev); | 210 | extern int intel_panel_fitter_pipe (struct drm_device *dev); |
192 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); | 211 | extern void intel_crtc_load_lut(struct drm_crtc *crtc); |
193 | extern void intel_encoder_prepare (struct drm_encoder *encoder); | 212 | extern void intel_encoder_prepare (struct drm_encoder *encoder); |
194 | extern void intel_encoder_commit (struct drm_encoder *encoder); | 213 | extern void intel_encoder_commit (struct drm_encoder *encoder); |
214 | extern void intel_encoder_destroy(struct drm_encoder *encoder); | ||
195 | 215 | ||
196 | extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector); | 216 | extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector); |
197 | 217 | ||
@@ -199,7 +219,8 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
199 | struct drm_crtc *crtc); | 219 | struct drm_crtc *crtc); |
200 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | 220 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
201 | struct drm_file *file_priv); | 221 | struct drm_file *file_priv); |
202 | extern void intel_wait_for_vblank(struct drm_device *dev); | 222 | extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe); |
223 | extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); | ||
203 | extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); | 224 | extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); |
204 | extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, | 225 | extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, |
205 | struct drm_connector *connector, | 226 | struct drm_connector *connector, |
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 227feca7cf8d..a399f4b2c1c5 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
@@ -38,7 +38,7 @@ | |||
38 | #define CH7xxx_ADDR 0x76 | 38 | #define CH7xxx_ADDR 0x76 |
39 | #define TFP410_ADDR 0x38 | 39 | #define TFP410_ADDR 0x38 |
40 | 40 | ||
41 | static struct intel_dvo_device intel_dvo_devices[] = { | 41 | static const struct intel_dvo_device intel_dvo_devices[] = { |
42 | { | 42 | { |
43 | .type = INTEL_DVO_CHIP_TMDS, | 43 | .type = INTEL_DVO_CHIP_TMDS, |
44 | .name = "sil164", | 44 | .name = "sil164", |
@@ -77,20 +77,33 @@ static struct intel_dvo_device intel_dvo_devices[] = { | |||
77 | } | 77 | } |
78 | }; | 78 | }; |
79 | 79 | ||
80 | struct intel_dvo { | ||
81 | struct intel_encoder base; | ||
82 | |||
83 | struct intel_dvo_device dev; | ||
84 | |||
85 | struct drm_display_mode *panel_fixed_mode; | ||
86 | bool panel_wants_dither; | ||
87 | }; | ||
88 | |||
89 | static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder) | ||
90 | { | ||
91 | return container_of(enc_to_intel_encoder(encoder), struct intel_dvo, base); | ||
92 | } | ||
93 | |||
80 | static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) | 94 | static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) |
81 | { | 95 | { |
82 | struct drm_i915_private *dev_priv = encoder->dev->dev_private; | 96 | struct drm_i915_private *dev_priv = encoder->dev->dev_private; |
83 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 97 | struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); |
84 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; | 98 | u32 dvo_reg = intel_dvo->dev.dvo_reg; |
85 | u32 dvo_reg = dvo->dvo_reg; | ||
86 | u32 temp = I915_READ(dvo_reg); | 99 | u32 temp = I915_READ(dvo_reg); |
87 | 100 | ||
88 | if (mode == DRM_MODE_DPMS_ON) { | 101 | if (mode == DRM_MODE_DPMS_ON) { |
89 | I915_WRITE(dvo_reg, temp | DVO_ENABLE); | 102 | I915_WRITE(dvo_reg, temp | DVO_ENABLE); |
90 | I915_READ(dvo_reg); | 103 | I915_READ(dvo_reg); |
91 | dvo->dev_ops->dpms(dvo, mode); | 104 | intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode); |
92 | } else { | 105 | } else { |
93 | dvo->dev_ops->dpms(dvo, mode); | 106 | intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode); |
94 | I915_WRITE(dvo_reg, temp & ~DVO_ENABLE); | 107 | I915_WRITE(dvo_reg, temp & ~DVO_ENABLE); |
95 | I915_READ(dvo_reg); | 108 | I915_READ(dvo_reg); |
96 | } | 109 | } |
@@ -100,38 +113,36 @@ static int intel_dvo_mode_valid(struct drm_connector *connector, | |||
100 | struct drm_display_mode *mode) | 113 | struct drm_display_mode *mode) |
101 | { | 114 | { |
102 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 115 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
103 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 116 | struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); |
104 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; | ||
105 | 117 | ||
106 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | 118 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
107 | return MODE_NO_DBLESCAN; | 119 | return MODE_NO_DBLESCAN; |
108 | 120 | ||
109 | /* XXX: Validate clock range */ | 121 | /* XXX: Validate clock range */ |
110 | 122 | ||
111 | if (dvo->panel_fixed_mode) { | 123 | if (intel_dvo->panel_fixed_mode) { |
112 | if (mode->hdisplay > dvo->panel_fixed_mode->hdisplay) | 124 | if (mode->hdisplay > intel_dvo->panel_fixed_mode->hdisplay) |
113 | return MODE_PANEL; | 125 | return MODE_PANEL; |
114 | if (mode->vdisplay > dvo->panel_fixed_mode->vdisplay) | 126 | if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay) |
115 | return MODE_PANEL; | 127 | return MODE_PANEL; |
116 | } | 128 | } |
117 | 129 | ||
118 | return dvo->dev_ops->mode_valid(dvo, mode); | 130 | return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode); |
119 | } | 131 | } |
120 | 132 | ||
121 | static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, | 133 | static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, |
122 | struct drm_display_mode *mode, | 134 | struct drm_display_mode *mode, |
123 | struct drm_display_mode *adjusted_mode) | 135 | struct drm_display_mode *adjusted_mode) |
124 | { | 136 | { |
125 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 137 | struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); |
126 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; | ||
127 | 138 | ||
128 | /* If we have timings from the BIOS for the panel, put them in | 139 | /* If we have timings from the BIOS for the panel, put them in |
129 | * to the adjusted mode. The CRTC will be set up for this mode, | 140 | * to the adjusted mode. The CRTC will be set up for this mode, |
130 | * with the panel scaling set up to source from the H/VDisplay | 141 | * with the panel scaling set up to source from the H/VDisplay |
131 | * of the original mode. | 142 | * of the original mode. |
132 | */ | 143 | */ |
133 | if (dvo->panel_fixed_mode != NULL) { | 144 | if (intel_dvo->panel_fixed_mode != NULL) { |
134 | #define C(x) adjusted_mode->x = dvo->panel_fixed_mode->x | 145 | #define C(x) adjusted_mode->x = intel_dvo->panel_fixed_mode->x |
135 | C(hdisplay); | 146 | C(hdisplay); |
136 | C(hsync_start); | 147 | C(hsync_start); |
137 | C(hsync_end); | 148 | C(hsync_end); |
@@ -145,8 +156,8 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, | |||
145 | #undef C | 156 | #undef C |
146 | } | 157 | } |
147 | 158 | ||
148 | if (dvo->dev_ops->mode_fixup) | 159 | if (intel_dvo->dev.dev_ops->mode_fixup) |
149 | return dvo->dev_ops->mode_fixup(dvo, mode, adjusted_mode); | 160 | return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode); |
150 | 161 | ||
151 | return true; | 162 | return true; |
152 | } | 163 | } |
@@ -158,11 +169,10 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, | |||
158 | struct drm_device *dev = encoder->dev; | 169 | struct drm_device *dev = encoder->dev; |
159 | struct drm_i915_private *dev_priv = dev->dev_private; | 170 | struct drm_i915_private *dev_priv = dev->dev_private; |
160 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 171 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
161 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 172 | struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); |
162 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; | ||
163 | int pipe = intel_crtc->pipe; | 173 | int pipe = intel_crtc->pipe; |
164 | u32 dvo_val; | 174 | u32 dvo_val; |
165 | u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg; | 175 | u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg; |
166 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; | 176 | int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; |
167 | 177 | ||
168 | switch (dvo_reg) { | 178 | switch (dvo_reg) { |
@@ -178,7 +188,7 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, | |||
178 | break; | 188 | break; |
179 | } | 189 | } |
180 | 190 | ||
181 | dvo->dev_ops->mode_set(dvo, mode, adjusted_mode); | 191 | intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode); |
182 | 192 | ||
183 | /* Save the data order, since I don't know what it should be set to. */ | 193 | /* Save the data order, since I don't know what it should be set to. */ |
184 | dvo_val = I915_READ(dvo_reg) & | 194 | dvo_val = I915_READ(dvo_reg) & |
@@ -214,40 +224,38 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, | |||
214 | static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) | 224 | static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) |
215 | { | 225 | { |
216 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 226 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
217 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 227 | struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); |
218 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; | ||
219 | 228 | ||
220 | return dvo->dev_ops->detect(dvo); | 229 | return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev); |
221 | } | 230 | } |
222 | 231 | ||
223 | static int intel_dvo_get_modes(struct drm_connector *connector) | 232 | static int intel_dvo_get_modes(struct drm_connector *connector) |
224 | { | 233 | { |
225 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 234 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
226 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 235 | struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); |
227 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; | ||
228 | 236 | ||
229 | /* We should probably have an i2c driver get_modes function for those | 237 | /* We should probably have an i2c driver get_modes function for those |
230 | * devices which will have a fixed set of modes determined by the chip | 238 | * devices which will have a fixed set of modes determined by the chip |
231 | * (TV-out, for example), but for now with just TMDS and LVDS, | 239 | * (TV-out, for example), but for now with just TMDS and LVDS, |
232 | * that's not the case. | 240 | * that's not the case. |
233 | */ | 241 | */ |
234 | intel_ddc_get_modes(connector, intel_encoder->ddc_bus); | 242 | intel_ddc_get_modes(connector, intel_dvo->base.ddc_bus); |
235 | if (!list_empty(&connector->probed_modes)) | 243 | if (!list_empty(&connector->probed_modes)) |
236 | return 1; | 244 | return 1; |
237 | 245 | ||
238 | 246 | if (intel_dvo->panel_fixed_mode != NULL) { | |
239 | if (dvo->panel_fixed_mode != NULL) { | ||
240 | struct drm_display_mode *mode; | 247 | struct drm_display_mode *mode; |
241 | mode = drm_mode_duplicate(connector->dev, dvo->panel_fixed_mode); | 248 | mode = drm_mode_duplicate(connector->dev, intel_dvo->panel_fixed_mode); |
242 | if (mode) { | 249 | if (mode) { |
243 | drm_mode_probed_add(connector, mode); | 250 | drm_mode_probed_add(connector, mode); |
244 | return 1; | 251 | return 1; |
245 | } | 252 | } |
246 | } | 253 | } |
254 | |||
247 | return 0; | 255 | return 0; |
248 | } | 256 | } |
249 | 257 | ||
250 | static void intel_dvo_destroy (struct drm_connector *connector) | 258 | static void intel_dvo_destroy(struct drm_connector *connector) |
251 | { | 259 | { |
252 | drm_sysfs_connector_remove(connector); | 260 | drm_sysfs_connector_remove(connector); |
253 | drm_connector_cleanup(connector); | 261 | drm_connector_cleanup(connector); |
@@ -277,28 +285,20 @@ static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs | |||
277 | 285 | ||
278 | static void intel_dvo_enc_destroy(struct drm_encoder *encoder) | 286 | static void intel_dvo_enc_destroy(struct drm_encoder *encoder) |
279 | { | 287 | { |
280 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 288 | struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); |
281 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; | 289 | |
282 | 290 | if (intel_dvo->dev.dev_ops->destroy) | |
283 | if (dvo) { | 291 | intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev); |
284 | if (dvo->dev_ops->destroy) | 292 | |
285 | dvo->dev_ops->destroy(dvo); | 293 | kfree(intel_dvo->panel_fixed_mode); |
286 | if (dvo->panel_fixed_mode) | 294 | |
287 | kfree(dvo->panel_fixed_mode); | 295 | intel_encoder_destroy(encoder); |
288 | } | ||
289 | if (intel_encoder->i2c_bus) | ||
290 | intel_i2c_destroy(intel_encoder->i2c_bus); | ||
291 | if (intel_encoder->ddc_bus) | ||
292 | intel_i2c_destroy(intel_encoder->ddc_bus); | ||
293 | drm_encoder_cleanup(encoder); | ||
294 | kfree(intel_encoder); | ||
295 | } | 296 | } |
296 | 297 | ||
297 | static const struct drm_encoder_funcs intel_dvo_enc_funcs = { | 298 | static const struct drm_encoder_funcs intel_dvo_enc_funcs = { |
298 | .destroy = intel_dvo_enc_destroy, | 299 | .destroy = intel_dvo_enc_destroy, |
299 | }; | 300 | }; |
300 | 301 | ||
301 | |||
302 | /** | 302 | /** |
303 | * Attempts to get a fixed panel timing for LVDS (currently only the i830). | 303 | * Attempts to get a fixed panel timing for LVDS (currently only the i830). |
304 | * | 304 | * |
@@ -306,15 +306,13 @@ static const struct drm_encoder_funcs intel_dvo_enc_funcs = { | |||
306 | * chip being on DVOB/C and having multiple pipes. | 306 | * chip being on DVOB/C and having multiple pipes. |
307 | */ | 307 | */ |
308 | static struct drm_display_mode * | 308 | static struct drm_display_mode * |
309 | intel_dvo_get_current_mode (struct drm_connector *connector) | 309 | intel_dvo_get_current_mode(struct drm_connector *connector) |
310 | { | 310 | { |
311 | struct drm_device *dev = connector->dev; | 311 | struct drm_device *dev = connector->dev; |
312 | struct drm_i915_private *dev_priv = dev->dev_private; | 312 | struct drm_i915_private *dev_priv = dev->dev_private; |
313 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 313 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
314 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 314 | struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); |
315 | struct intel_dvo_device *dvo = intel_encoder->dev_priv; | 315 | uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg); |
316 | uint32_t dvo_reg = dvo->dvo_reg; | ||
317 | uint32_t dvo_val = I915_READ(dvo_reg); | ||
318 | struct drm_display_mode *mode = NULL; | 316 | struct drm_display_mode *mode = NULL; |
319 | 317 | ||
320 | /* If the DVO port is active, that'll be the LVDS, so we can pull out | 318 | /* If the DVO port is active, that'll be the LVDS, so we can pull out |
@@ -327,7 +325,6 @@ intel_dvo_get_current_mode (struct drm_connector *connector) | |||
327 | crtc = intel_get_crtc_from_pipe(dev, pipe); | 325 | crtc = intel_get_crtc_from_pipe(dev, pipe); |
328 | if (crtc) { | 326 | if (crtc) { |
329 | mode = intel_crtc_mode_get(dev, crtc); | 327 | mode = intel_crtc_mode_get(dev, crtc); |
330 | |||
331 | if (mode) { | 328 | if (mode) { |
332 | mode->type |= DRM_MODE_TYPE_PREFERRED; | 329 | mode->type |= DRM_MODE_TYPE_PREFERRED; |
333 | if (dvo_val & DVO_HSYNC_ACTIVE_HIGH) | 330 | if (dvo_val & DVO_HSYNC_ACTIVE_HIGH) |
@@ -337,28 +334,32 @@ intel_dvo_get_current_mode (struct drm_connector *connector) | |||
337 | } | 334 | } |
338 | } | 335 | } |
339 | } | 336 | } |
337 | |||
340 | return mode; | 338 | return mode; |
341 | } | 339 | } |
342 | 340 | ||
343 | void intel_dvo_init(struct drm_device *dev) | 341 | void intel_dvo_init(struct drm_device *dev) |
344 | { | 342 | { |
345 | struct intel_encoder *intel_encoder; | 343 | struct intel_encoder *intel_encoder; |
344 | struct intel_dvo *intel_dvo; | ||
346 | struct intel_connector *intel_connector; | 345 | struct intel_connector *intel_connector; |
347 | struct intel_dvo_device *dvo; | ||
348 | struct i2c_adapter *i2cbus = NULL; | 346 | struct i2c_adapter *i2cbus = NULL; |
349 | int ret = 0; | 347 | int ret = 0; |
350 | int i; | 348 | int i; |
351 | int encoder_type = DRM_MODE_ENCODER_NONE; | 349 | int encoder_type = DRM_MODE_ENCODER_NONE; |
352 | intel_encoder = kzalloc (sizeof(struct intel_encoder), GFP_KERNEL); | 350 | |
353 | if (!intel_encoder) | 351 | intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL); |
352 | if (!intel_dvo) | ||
354 | return; | 353 | return; |
355 | 354 | ||
356 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | 355 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
357 | if (!intel_connector) { | 356 | if (!intel_connector) { |
358 | kfree(intel_encoder); | 357 | kfree(intel_dvo); |
359 | return; | 358 | return; |
360 | } | 359 | } |
361 | 360 | ||
361 | intel_encoder = &intel_dvo->base; | ||
362 | |||
362 | /* Set up the DDC bus */ | 363 | /* Set up the DDC bus */ |
363 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); | 364 | intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); |
364 | if (!intel_encoder->ddc_bus) | 365 | if (!intel_encoder->ddc_bus) |
@@ -367,10 +368,9 @@ void intel_dvo_init(struct drm_device *dev) | |||
367 | /* Now, try to find a controller */ | 368 | /* Now, try to find a controller */ |
368 | for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { | 369 | for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { |
369 | struct drm_connector *connector = &intel_connector->base; | 370 | struct drm_connector *connector = &intel_connector->base; |
371 | const struct intel_dvo_device *dvo = &intel_dvo_devices[i]; | ||
370 | int gpio; | 372 | int gpio; |
371 | 373 | ||
372 | dvo = &intel_dvo_devices[i]; | ||
373 | |||
374 | /* Allow the I2C driver info to specify the GPIO to be used in | 374 | /* Allow the I2C driver info to specify the GPIO to be used in |
375 | * special cases, but otherwise default to what's defined | 375 | * special cases, but otherwise default to what's defined |
376 | * in the spec. | 376 | * in the spec. |
@@ -393,11 +393,8 @@ void intel_dvo_init(struct drm_device *dev) | |||
393 | continue; | 393 | continue; |
394 | } | 394 | } |
395 | 395 | ||
396 | if (dvo->dev_ops!= NULL) | 396 | intel_dvo->dev = *dvo; |
397 | ret = dvo->dev_ops->init(dvo, i2cbus); | 397 | ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus); |
398 | else | ||
399 | ret = false; | ||
400 | |||
401 | if (!ret) | 398 | if (!ret) |
402 | continue; | 399 | continue; |
403 | 400 | ||
@@ -429,9 +426,6 @@ void intel_dvo_init(struct drm_device *dev) | |||
429 | connector->interlace_allowed = false; | 426 | connector->interlace_allowed = false; |
430 | connector->doublescan_allowed = false; | 427 | connector->doublescan_allowed = false; |
431 | 428 | ||
432 | intel_encoder->dev_priv = dvo; | ||
433 | intel_encoder->i2c_bus = i2cbus; | ||
434 | |||
435 | drm_encoder_init(dev, &intel_encoder->enc, | 429 | drm_encoder_init(dev, &intel_encoder->enc, |
436 | &intel_dvo_enc_funcs, encoder_type); | 430 | &intel_dvo_enc_funcs, encoder_type); |
437 | drm_encoder_helper_add(&intel_encoder->enc, | 431 | drm_encoder_helper_add(&intel_encoder->enc, |
@@ -447,9 +441,9 @@ void intel_dvo_init(struct drm_device *dev) | |||
447 | * headers, likely), so for now, just get the current | 441 | * headers, likely), so for now, just get the current |
448 | * mode being output through DVO. | 442 | * mode being output through DVO. |
449 | */ | 443 | */ |
450 | dvo->panel_fixed_mode = | 444 | intel_dvo->panel_fixed_mode = |
451 | intel_dvo_get_current_mode(connector); | 445 | intel_dvo_get_current_mode(connector); |
452 | dvo->panel_wants_dither = true; | 446 | intel_dvo->panel_wants_dither = true; |
453 | } | 447 | } |
454 | 448 | ||
455 | drm_sysfs_connector_add(connector); | 449 | drm_sysfs_connector_add(connector); |
@@ -461,6 +455,6 @@ void intel_dvo_init(struct drm_device *dev) | |||
461 | if (i2cbus != NULL) | 455 | if (i2cbus != NULL) |
462 | intel_i2c_destroy(i2cbus); | 456 | intel_i2c_destroy(i2cbus); |
463 | free_intel: | 457 | free_intel: |
464 | kfree(intel_encoder); | 458 | kfree(intel_dvo); |
465 | kfree(intel_connector); | 459 | kfree(intel_connector); |
466 | } | 460 | } |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 197887ed1823..ccd4c97e6524 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -37,11 +37,17 @@ | |||
37 | #include "i915_drm.h" | 37 | #include "i915_drm.h" |
38 | #include "i915_drv.h" | 38 | #include "i915_drv.h" |
39 | 39 | ||
40 | struct intel_hdmi_priv { | 40 | struct intel_hdmi { |
41 | struct intel_encoder base; | ||
41 | u32 sdvox_reg; | 42 | u32 sdvox_reg; |
42 | bool has_hdmi_sink; | 43 | bool has_hdmi_sink; |
43 | }; | 44 | }; |
44 | 45 | ||
46 | static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) | ||
47 | { | ||
48 | return container_of(enc_to_intel_encoder(encoder), struct intel_hdmi, base); | ||
49 | } | ||
50 | |||
45 | static void intel_hdmi_mode_set(struct drm_encoder *encoder, | 51 | static void intel_hdmi_mode_set(struct drm_encoder *encoder, |
46 | struct drm_display_mode *mode, | 52 | struct drm_display_mode *mode, |
47 | struct drm_display_mode *adjusted_mode) | 53 | struct drm_display_mode *adjusted_mode) |
@@ -50,8 +56,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | |||
50 | struct drm_i915_private *dev_priv = dev->dev_private; | 56 | struct drm_i915_private *dev_priv = dev->dev_private; |
51 | struct drm_crtc *crtc = encoder->crtc; | 57 | struct drm_crtc *crtc = encoder->crtc; |
52 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 58 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
53 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 59 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
54 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; | ||
55 | u32 sdvox; | 60 | u32 sdvox; |
56 | 61 | ||
57 | sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE; | 62 | sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE; |
@@ -60,7 +65,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | |||
60 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 65 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
61 | sdvox |= SDVO_HSYNC_ACTIVE_HIGH; | 66 | sdvox |= SDVO_HSYNC_ACTIVE_HIGH; |
62 | 67 | ||
63 | if (hdmi_priv->has_hdmi_sink) { | 68 | if (intel_hdmi->has_hdmi_sink) { |
64 | sdvox |= SDVO_AUDIO_ENABLE; | 69 | sdvox |= SDVO_AUDIO_ENABLE; |
65 | if (HAS_PCH_CPT(dev)) | 70 | if (HAS_PCH_CPT(dev)) |
66 | sdvox |= HDMI_MODE_SELECT; | 71 | sdvox |= HDMI_MODE_SELECT; |
@@ -73,26 +78,25 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | |||
73 | sdvox |= SDVO_PIPE_B_SELECT; | 78 | sdvox |= SDVO_PIPE_B_SELECT; |
74 | } | 79 | } |
75 | 80 | ||
76 | I915_WRITE(hdmi_priv->sdvox_reg, sdvox); | 81 | I915_WRITE(intel_hdmi->sdvox_reg, sdvox); |
77 | POSTING_READ(hdmi_priv->sdvox_reg); | 82 | POSTING_READ(intel_hdmi->sdvox_reg); |
78 | } | 83 | } |
79 | 84 | ||
80 | static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) | 85 | static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) |
81 | { | 86 | { |
82 | struct drm_device *dev = encoder->dev; | 87 | struct drm_device *dev = encoder->dev; |
83 | struct drm_i915_private *dev_priv = dev->dev_private; | 88 | struct drm_i915_private *dev_priv = dev->dev_private; |
84 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 89 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
85 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; | ||
86 | u32 temp; | 90 | u32 temp; |
87 | 91 | ||
88 | temp = I915_READ(hdmi_priv->sdvox_reg); | 92 | temp = I915_READ(intel_hdmi->sdvox_reg); |
89 | 93 | ||
90 | /* HW workaround, need to toggle enable bit off and on for 12bpc, but | 94 | /* HW workaround, need to toggle enable bit off and on for 12bpc, but |
91 | * we do this anyway which shows more stable in testing. | 95 | * we do this anyway which shows more stable in testing. |
92 | */ | 96 | */ |
93 | if (HAS_PCH_SPLIT(dev)) { | 97 | if (HAS_PCH_SPLIT(dev)) { |
94 | I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE); | 98 | I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE); |
95 | POSTING_READ(hdmi_priv->sdvox_reg); | 99 | POSTING_READ(intel_hdmi->sdvox_reg); |
96 | } | 100 | } |
97 | 101 | ||
98 | if (mode != DRM_MODE_DPMS_ON) { | 102 | if (mode != DRM_MODE_DPMS_ON) { |
@@ -101,15 +105,15 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) | |||
101 | temp |= SDVO_ENABLE; | 105 | temp |= SDVO_ENABLE; |
102 | } | 106 | } |
103 | 107 | ||
104 | I915_WRITE(hdmi_priv->sdvox_reg, temp); | 108 | I915_WRITE(intel_hdmi->sdvox_reg, temp); |
105 | POSTING_READ(hdmi_priv->sdvox_reg); | 109 | POSTING_READ(intel_hdmi->sdvox_reg); |
106 | 110 | ||
107 | /* HW workaround, need to write this twice for issue that may result | 111 | /* HW workaround, need to write this twice for issue that may result |
108 | * in first write getting masked. | 112 | * in first write getting masked. |
109 | */ | 113 | */ |
110 | if (HAS_PCH_SPLIT(dev)) { | 114 | if (HAS_PCH_SPLIT(dev)) { |
111 | I915_WRITE(hdmi_priv->sdvox_reg, temp); | 115 | I915_WRITE(intel_hdmi->sdvox_reg, temp); |
112 | POSTING_READ(hdmi_priv->sdvox_reg); | 116 | POSTING_READ(intel_hdmi->sdvox_reg); |
113 | } | 117 | } |
114 | } | 118 | } |
115 | 119 | ||
@@ -138,19 +142,17 @@ static enum drm_connector_status | |||
138 | intel_hdmi_detect(struct drm_connector *connector) | 142 | intel_hdmi_detect(struct drm_connector *connector) |
139 | { | 143 | { |
140 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 144 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
141 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 145 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
142 | struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; | ||
143 | struct edid *edid = NULL; | 146 | struct edid *edid = NULL; |
144 | enum drm_connector_status status = connector_status_disconnected; | 147 | enum drm_connector_status status = connector_status_disconnected; |
145 | 148 | ||
146 | hdmi_priv->has_hdmi_sink = false; | 149 | intel_hdmi->has_hdmi_sink = false; |
147 | edid = drm_get_edid(connector, | 150 | edid = drm_get_edid(connector, intel_hdmi->base.ddc_bus); |
148 | intel_encoder->ddc_bus); | ||
149 | 151 | ||
150 | if (edid) { | 152 | if (edid) { |
151 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { | 153 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { |
152 | status = connector_status_connected; | 154 | status = connector_status_connected; |
153 | hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); | 155 | intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid); |
154 | } | 156 | } |
155 | connector->display_info.raw_edid = NULL; | 157 | connector->display_info.raw_edid = NULL; |
156 | kfree(edid); | 158 | kfree(edid); |
@@ -162,13 +164,13 @@ intel_hdmi_detect(struct drm_connector *connector) | |||
162 | static int intel_hdmi_get_modes(struct drm_connector *connector) | 164 | static int intel_hdmi_get_modes(struct drm_connector *connector) |
163 | { | 165 | { |
164 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 166 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
165 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 167 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
166 | 168 | ||
167 | /* We should parse the EDID data and find out if it's an HDMI sink so | 169 | /* We should parse the EDID data and find out if it's an HDMI sink so |
168 | * we can send audio to it. | 170 | * we can send audio to it. |
169 | */ | 171 | */ |
170 | 172 | ||
171 | return intel_ddc_get_modes(connector, intel_encoder->ddc_bus); | 173 | return intel_ddc_get_modes(connector, intel_hdmi->base.ddc_bus); |
172 | } | 174 | } |
173 | 175 | ||
174 | static void intel_hdmi_destroy(struct drm_connector *connector) | 176 | static void intel_hdmi_destroy(struct drm_connector *connector) |
@@ -199,18 +201,8 @@ static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs | |||
199 | .best_encoder = intel_attached_encoder, | 201 | .best_encoder = intel_attached_encoder, |
200 | }; | 202 | }; |
201 | 203 | ||
202 | static void intel_hdmi_enc_destroy(struct drm_encoder *encoder) | ||
203 | { | ||
204 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
205 | |||
206 | if (intel_encoder->i2c_bus) | ||
207 | intel_i2c_destroy(intel_encoder->i2c_bus); | ||
208 | drm_encoder_cleanup(encoder); | ||
209 | kfree(intel_encoder); | ||
210 | } | ||
211 | |||
212 | static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { | 204 | static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { |
213 | .destroy = intel_hdmi_enc_destroy, | 205 | .destroy = intel_encoder_destroy, |
214 | }; | 206 | }; |
215 | 207 | ||
216 | void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | 208 | void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) |
@@ -219,21 +211,19 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
219 | struct drm_connector *connector; | 211 | struct drm_connector *connector; |
220 | struct intel_encoder *intel_encoder; | 212 | struct intel_encoder *intel_encoder; |
221 | struct intel_connector *intel_connector; | 213 | struct intel_connector *intel_connector; |
222 | struct intel_hdmi_priv *hdmi_priv; | 214 | struct intel_hdmi *intel_hdmi; |
223 | 215 | ||
224 | intel_encoder = kcalloc(sizeof(struct intel_encoder) + | 216 | intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL); |
225 | sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); | 217 | if (!intel_hdmi) |
226 | if (!intel_encoder) | ||
227 | return; | 218 | return; |
228 | 219 | ||
229 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | 220 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
230 | if (!intel_connector) { | 221 | if (!intel_connector) { |
231 | kfree(intel_encoder); | 222 | kfree(intel_hdmi); |
232 | return; | 223 | return; |
233 | } | 224 | } |
234 | 225 | ||
235 | hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1); | 226 | intel_encoder = &intel_hdmi->base; |
236 | |||
237 | connector = &intel_connector->base; | 227 | connector = &intel_connector->base; |
238 | drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, | 228 | drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, |
239 | DRM_MODE_CONNECTOR_HDMIA); | 229 | DRM_MODE_CONNECTOR_HDMIA); |
@@ -274,8 +264,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
274 | if (!intel_encoder->ddc_bus) | 264 | if (!intel_encoder->ddc_bus) |
275 | goto err_connector; | 265 | goto err_connector; |
276 | 266 | ||
277 | hdmi_priv->sdvox_reg = sdvox_reg; | 267 | intel_hdmi->sdvox_reg = sdvox_reg; |
278 | intel_encoder->dev_priv = hdmi_priv; | ||
279 | 268 | ||
280 | drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs, | 269 | drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs, |
281 | DRM_MODE_ENCODER_TMDS); | 270 | DRM_MODE_ENCODER_TMDS); |
@@ -298,7 +287,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
298 | 287 | ||
299 | err_connector: | 288 | err_connector: |
300 | drm_connector_cleanup(connector); | 289 | drm_connector_cleanup(connector); |
301 | kfree(intel_encoder); | 290 | kfree(intel_hdmi); |
302 | kfree(intel_connector); | 291 | kfree(intel_connector); |
303 | 292 | ||
304 | return; | 293 | return; |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 0a2e60059fb3..b819c1081147 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -41,12 +41,18 @@ | |||
41 | #include <linux/acpi.h> | 41 | #include <linux/acpi.h> |
42 | 42 | ||
43 | /* Private structure for the integrated LVDS support */ | 43 | /* Private structure for the integrated LVDS support */ |
44 | struct intel_lvds_priv { | 44 | struct intel_lvds { |
45 | struct intel_encoder base; | ||
45 | int fitting_mode; | 46 | int fitting_mode; |
46 | u32 pfit_control; | 47 | u32 pfit_control; |
47 | u32 pfit_pgm_ratios; | 48 | u32 pfit_pgm_ratios; |
48 | }; | 49 | }; |
49 | 50 | ||
51 | static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder) | ||
52 | { | ||
53 | return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base); | ||
54 | } | ||
55 | |||
50 | /** | 56 | /** |
51 | * Sets the backlight level. | 57 | * Sets the backlight level. |
52 | * | 58 | * |
@@ -90,7 +96,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev) | |||
90 | static void intel_lvds_set_power(struct drm_device *dev, bool on) | 96 | static void intel_lvds_set_power(struct drm_device *dev, bool on) |
91 | { | 97 | { |
92 | struct drm_i915_private *dev_priv = dev->dev_private; | 98 | struct drm_i915_private *dev_priv = dev->dev_private; |
93 | u32 pp_status, ctl_reg, status_reg, lvds_reg; | 99 | u32 ctl_reg, status_reg, lvds_reg; |
94 | 100 | ||
95 | if (HAS_PCH_SPLIT(dev)) { | 101 | if (HAS_PCH_SPLIT(dev)) { |
96 | ctl_reg = PCH_PP_CONTROL; | 102 | ctl_reg = PCH_PP_CONTROL; |
@@ -108,9 +114,8 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on) | |||
108 | 114 | ||
109 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) | | 115 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) | |
110 | POWER_TARGET_ON); | 116 | POWER_TARGET_ON); |
111 | do { | 117 | if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 0)) |
112 | pp_status = I915_READ(status_reg); | 118 | DRM_ERROR("timed out waiting to enable LVDS pipe"); |
113 | } while ((pp_status & PP_ON) == 0); | ||
114 | 119 | ||
115 | intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle); | 120 | intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle); |
116 | } else { | 121 | } else { |
@@ -118,9 +123,8 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on) | |||
118 | 123 | ||
119 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) & | 124 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) & |
120 | ~POWER_TARGET_ON); | 125 | ~POWER_TARGET_ON); |
121 | do { | 126 | if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000, 0)) |
122 | pp_status = I915_READ(status_reg); | 127 | DRM_ERROR("timed out waiting for LVDS pipe to turn off"); |
123 | } while (pp_status & PP_ON); | ||
124 | 128 | ||
125 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); | 129 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); |
126 | POSTING_READ(lvds_reg); | 130 | POSTING_READ(lvds_reg); |
@@ -219,9 +223,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
219 | struct drm_device *dev = encoder->dev; | 223 | struct drm_device *dev = encoder->dev; |
220 | struct drm_i915_private *dev_priv = dev->dev_private; | 224 | struct drm_i915_private *dev_priv = dev->dev_private; |
221 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 225 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
226 | struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); | ||
222 | struct drm_encoder *tmp_encoder; | 227 | struct drm_encoder *tmp_encoder; |
223 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
224 | struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; | ||
225 | u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; | 228 | u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; |
226 | 229 | ||
227 | /* Should never happen!! */ | 230 | /* Should never happen!! */ |
@@ -241,26 +244,20 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
241 | /* If we don't have a panel mode, there is nothing we can do */ | 244 | /* If we don't have a panel mode, there is nothing we can do */ |
242 | if (dev_priv->panel_fixed_mode == NULL) | 245 | if (dev_priv->panel_fixed_mode == NULL) |
243 | return true; | 246 | return true; |
247 | |||
244 | /* | 248 | /* |
245 | * We have timings from the BIOS for the panel, put them in | 249 | * We have timings from the BIOS for the panel, put them in |
246 | * to the adjusted mode. The CRTC will be set up for this mode, | 250 | * to the adjusted mode. The CRTC will be set up for this mode, |
247 | * with the panel scaling set up to source from the H/VDisplay | 251 | * with the panel scaling set up to source from the H/VDisplay |
248 | * of the original mode. | 252 | * of the original mode. |
249 | */ | 253 | */ |
250 | adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay; | 254 | intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); |
251 | adjusted_mode->hsync_start = | 255 | |
252 | dev_priv->panel_fixed_mode->hsync_start; | 256 | if (HAS_PCH_SPLIT(dev)) { |
253 | adjusted_mode->hsync_end = | 257 | intel_pch_panel_fitting(dev, intel_lvds->fitting_mode, |
254 | dev_priv->panel_fixed_mode->hsync_end; | 258 | mode, adjusted_mode); |
255 | adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal; | 259 | return true; |
256 | adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay; | 260 | } |
257 | adjusted_mode->vsync_start = | ||
258 | dev_priv->panel_fixed_mode->vsync_start; | ||
259 | adjusted_mode->vsync_end = | ||
260 | dev_priv->panel_fixed_mode->vsync_end; | ||
261 | adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal; | ||
262 | adjusted_mode->clock = dev_priv->panel_fixed_mode->clock; | ||
263 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); | ||
264 | 261 | ||
265 | /* Make sure pre-965s set dither correctly */ | 262 | /* Make sure pre-965s set dither correctly */ |
266 | if (!IS_I965G(dev)) { | 263 | if (!IS_I965G(dev)) { |
@@ -273,10 +270,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
273 | adjusted_mode->vdisplay == mode->vdisplay) | 270 | adjusted_mode->vdisplay == mode->vdisplay) |
274 | goto out; | 271 | goto out; |
275 | 272 | ||
276 | /* full screen scale for now */ | ||
277 | if (HAS_PCH_SPLIT(dev)) | ||
278 | goto out; | ||
279 | |||
280 | /* 965+ wants fuzzy fitting */ | 273 | /* 965+ wants fuzzy fitting */ |
281 | if (IS_I965G(dev)) | 274 | if (IS_I965G(dev)) |
282 | pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | | 275 | pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | |
@@ -288,12 +281,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
288 | * to register description and PRM. | 281 | * to register description and PRM. |
289 | * Change the value here to see the borders for debugging | 282 | * Change the value here to see the borders for debugging |
290 | */ | 283 | */ |
291 | if (!HAS_PCH_SPLIT(dev)) { | 284 | I915_WRITE(BCLRPAT_A, 0); |
292 | I915_WRITE(BCLRPAT_A, 0); | 285 | I915_WRITE(BCLRPAT_B, 0); |
293 | I915_WRITE(BCLRPAT_B, 0); | ||
294 | } | ||
295 | 286 | ||
296 | switch (lvds_priv->fitting_mode) { | 287 | switch (intel_lvds->fitting_mode) { |
297 | case DRM_MODE_SCALE_CENTER: | 288 | case DRM_MODE_SCALE_CENTER: |
298 | /* | 289 | /* |
299 | * For centered modes, we have to calculate border widths & | 290 | * For centered modes, we have to calculate border widths & |
@@ -378,8 +369,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, | |||
378 | } | 369 | } |
379 | 370 | ||
380 | out: | 371 | out: |
381 | lvds_priv->pfit_control = pfit_control; | 372 | intel_lvds->pfit_control = pfit_control; |
382 | lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios; | 373 | intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios; |
383 | dev_priv->lvds_border_bits = border; | 374 | dev_priv->lvds_border_bits = border; |
384 | 375 | ||
385 | /* | 376 | /* |
@@ -427,8 +418,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, | |||
427 | { | 418 | { |
428 | struct drm_device *dev = encoder->dev; | 419 | struct drm_device *dev = encoder->dev; |
429 | struct drm_i915_private *dev_priv = dev->dev_private; | 420 | struct drm_i915_private *dev_priv = dev->dev_private; |
430 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 421 | struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); |
431 | struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; | ||
432 | 422 | ||
433 | /* | 423 | /* |
434 | * The LVDS pin pair will already have been turned on in the | 424 | * The LVDS pin pair will already have been turned on in the |
@@ -444,8 +434,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, | |||
444 | * screen. Should be enabled before the pipe is enabled, according to | 434 | * screen. Should be enabled before the pipe is enabled, according to |
445 | * register description and PRM. | 435 | * register description and PRM. |
446 | */ | 436 | */ |
447 | I915_WRITE(PFIT_PGM_RATIOS, lvds_priv->pfit_pgm_ratios); | 437 | I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); |
448 | I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control); | 438 | I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); |
449 | } | 439 | } |
450 | 440 | ||
451 | /** | 441 | /** |
@@ -600,18 +590,17 @@ static int intel_lvds_set_property(struct drm_connector *connector, | |||
600 | connector->encoder) { | 590 | connector->encoder) { |
601 | struct drm_crtc *crtc = connector->encoder->crtc; | 591 | struct drm_crtc *crtc = connector->encoder->crtc; |
602 | struct drm_encoder *encoder = connector->encoder; | 592 | struct drm_encoder *encoder = connector->encoder; |
603 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 593 | struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); |
604 | struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; | ||
605 | 594 | ||
606 | if (value == DRM_MODE_SCALE_NONE) { | 595 | if (value == DRM_MODE_SCALE_NONE) { |
607 | DRM_DEBUG_KMS("no scaling not supported\n"); | 596 | DRM_DEBUG_KMS("no scaling not supported\n"); |
608 | return 0; | 597 | return 0; |
609 | } | 598 | } |
610 | if (lvds_priv->fitting_mode == value) { | 599 | if (intel_lvds->fitting_mode == value) { |
611 | /* the LVDS scaling property is not changed */ | 600 | /* the LVDS scaling property is not changed */ |
612 | return 0; | 601 | return 0; |
613 | } | 602 | } |
614 | lvds_priv->fitting_mode = value; | 603 | intel_lvds->fitting_mode = value; |
615 | if (crtc && crtc->enabled) { | 604 | if (crtc && crtc->enabled) { |
616 | /* | 605 | /* |
617 | * If the CRTC is enabled, the display will be changed | 606 | * If the CRTC is enabled, the display will be changed |
@@ -647,19 +636,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = { | |||
647 | .destroy = intel_lvds_destroy, | 636 | .destroy = intel_lvds_destroy, |
648 | }; | 637 | }; |
649 | 638 | ||
650 | |||
651 | static void intel_lvds_enc_destroy(struct drm_encoder *encoder) | ||
652 | { | ||
653 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
654 | |||
655 | if (intel_encoder->ddc_bus) | ||
656 | intel_i2c_destroy(intel_encoder->ddc_bus); | ||
657 | drm_encoder_cleanup(encoder); | ||
658 | kfree(intel_encoder); | ||
659 | } | ||
660 | |||
661 | static const struct drm_encoder_funcs intel_lvds_enc_funcs = { | 639 | static const struct drm_encoder_funcs intel_lvds_enc_funcs = { |
662 | .destroy = intel_lvds_enc_destroy, | 640 | .destroy = intel_encoder_destroy, |
663 | }; | 641 | }; |
664 | 642 | ||
665 | static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) | 643 | static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) |
@@ -843,13 +821,13 @@ static int lvds_is_present_in_vbt(struct drm_device *dev) | |||
843 | void intel_lvds_init(struct drm_device *dev) | 821 | void intel_lvds_init(struct drm_device *dev) |
844 | { | 822 | { |
845 | struct drm_i915_private *dev_priv = dev->dev_private; | 823 | struct drm_i915_private *dev_priv = dev->dev_private; |
824 | struct intel_lvds *intel_lvds; | ||
846 | struct intel_encoder *intel_encoder; | 825 | struct intel_encoder *intel_encoder; |
847 | struct intel_connector *intel_connector; | 826 | struct intel_connector *intel_connector; |
848 | struct drm_connector *connector; | 827 | struct drm_connector *connector; |
849 | struct drm_encoder *encoder; | 828 | struct drm_encoder *encoder; |
850 | struct drm_display_mode *scan; /* *modes, *bios_mode; */ | 829 | struct drm_display_mode *scan; /* *modes, *bios_mode; */ |
851 | struct drm_crtc *crtc; | 830 | struct drm_crtc *crtc; |
852 | struct intel_lvds_priv *lvds_priv; | ||
853 | u32 lvds; | 831 | u32 lvds; |
854 | int pipe, gpio = GPIOC; | 832 | int pipe, gpio = GPIOC; |
855 | 833 | ||
@@ -872,20 +850,20 @@ void intel_lvds_init(struct drm_device *dev) | |||
872 | gpio = PCH_GPIOC; | 850 | gpio = PCH_GPIOC; |
873 | } | 851 | } |
874 | 852 | ||
875 | intel_encoder = kzalloc(sizeof(struct intel_encoder) + | 853 | intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL); |
876 | sizeof(struct intel_lvds_priv), GFP_KERNEL); | 854 | if (!intel_lvds) { |
877 | if (!intel_encoder) { | ||
878 | return; | 855 | return; |
879 | } | 856 | } |
880 | 857 | ||
881 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | 858 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
882 | if (!intel_connector) { | 859 | if (!intel_connector) { |
883 | kfree(intel_encoder); | 860 | kfree(intel_lvds); |
884 | return; | 861 | return; |
885 | } | 862 | } |
886 | 863 | ||
887 | connector = &intel_connector->base; | 864 | intel_encoder = &intel_lvds->base; |
888 | encoder = &intel_encoder->enc; | 865 | encoder = &intel_encoder->enc; |
866 | connector = &intel_connector->base; | ||
889 | drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, | 867 | drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, |
890 | DRM_MODE_CONNECTOR_LVDS); | 868 | DRM_MODE_CONNECTOR_LVDS); |
891 | 869 | ||
@@ -905,8 +883,6 @@ void intel_lvds_init(struct drm_device *dev) | |||
905 | connector->interlace_allowed = false; | 883 | connector->interlace_allowed = false; |
906 | connector->doublescan_allowed = false; | 884 | connector->doublescan_allowed = false; |
907 | 885 | ||
908 | lvds_priv = (struct intel_lvds_priv *)(intel_encoder + 1); | ||
909 | intel_encoder->dev_priv = lvds_priv; | ||
910 | /* create the scaling mode property */ | 886 | /* create the scaling mode property */ |
911 | drm_mode_create_scaling_mode_property(dev); | 887 | drm_mode_create_scaling_mode_property(dev); |
912 | /* | 888 | /* |
@@ -916,7 +892,7 @@ void intel_lvds_init(struct drm_device *dev) | |||
916 | drm_connector_attach_property(&intel_connector->base, | 892 | drm_connector_attach_property(&intel_connector->base, |
917 | dev->mode_config.scaling_mode_property, | 893 | dev->mode_config.scaling_mode_property, |
918 | DRM_MODE_SCALE_ASPECT); | 894 | DRM_MODE_SCALE_ASPECT); |
919 | lvds_priv->fitting_mode = DRM_MODE_SCALE_ASPECT; | 895 | intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT; |
920 | /* | 896 | /* |
921 | * LVDS discovery: | 897 | * LVDS discovery: |
922 | * 1) check for EDID on DDC | 898 | * 1) check for EDID on DDC |
@@ -1024,6 +1000,6 @@ failed: | |||
1024 | intel_i2c_destroy(intel_encoder->ddc_bus); | 1000 | intel_i2c_destroy(intel_encoder->ddc_bus); |
1025 | drm_connector_cleanup(connector); | 1001 | drm_connector_cleanup(connector); |
1026 | drm_encoder_cleanup(encoder); | 1002 | drm_encoder_cleanup(encoder); |
1027 | kfree(intel_encoder); | 1003 | kfree(intel_lvds); |
1028 | kfree(intel_connector); | 1004 | kfree(intel_connector); |
1029 | } | 1005 | } |
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index d39aea24eabe..4f00390d7c61 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -1367,7 +1367,8 @@ void intel_setup_overlay(struct drm_device *dev) | |||
1367 | overlay->flip_addr = overlay->reg_bo->gtt_offset; | 1367 | overlay->flip_addr = overlay->reg_bo->gtt_offset; |
1368 | } else { | 1368 | } else { |
1369 | ret = i915_gem_attach_phys_object(dev, reg_bo, | 1369 | ret = i915_gem_attach_phys_object(dev, reg_bo, |
1370 | I915_GEM_PHYS_OVERLAY_REGS); | 1370 | I915_GEM_PHYS_OVERLAY_REGS, |
1371 | 0); | ||
1371 | if (ret) { | 1372 | if (ret) { |
1372 | DRM_ERROR("failed to attach phys overlay regs\n"); | 1373 | DRM_ERROR("failed to attach phys overlay regs\n"); |
1373 | goto out_free_bo; | 1374 | goto out_free_bo; |
@@ -1416,3 +1417,99 @@ void intel_cleanup_overlay(struct drm_device *dev) | |||
1416 | kfree(dev_priv->overlay); | 1417 | kfree(dev_priv->overlay); |
1417 | } | 1418 | } |
1418 | } | 1419 | } |
1420 | |||
1421 | struct intel_overlay_error_state { | ||
1422 | struct overlay_registers regs; | ||
1423 | unsigned long base; | ||
1424 | u32 dovsta; | ||
1425 | u32 isr; | ||
1426 | }; | ||
1427 | |||
1428 | struct intel_overlay_error_state * | ||
1429 | intel_overlay_capture_error_state(struct drm_device *dev) | ||
1430 | { | ||
1431 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1432 | struct intel_overlay *overlay = dev_priv->overlay; | ||
1433 | struct intel_overlay_error_state *error; | ||
1434 | struct overlay_registers __iomem *regs; | ||
1435 | |||
1436 | if (!overlay || !overlay->active) | ||
1437 | return NULL; | ||
1438 | |||
1439 | error = kmalloc(sizeof(*error), GFP_ATOMIC); | ||
1440 | if (error == NULL) | ||
1441 | return NULL; | ||
1442 | |||
1443 | error->dovsta = I915_READ(DOVSTA); | ||
1444 | error->isr = I915_READ(ISR); | ||
1445 | if (OVERLAY_NONPHYSICAL(overlay->dev)) | ||
1446 | error->base = (long) overlay->reg_bo->gtt_offset; | ||
1447 | else | ||
1448 | error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr; | ||
1449 | |||
1450 | regs = intel_overlay_map_regs_atomic(overlay); | ||
1451 | if (!regs) | ||
1452 | goto err; | ||
1453 | |||
1454 | memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers)); | ||
1455 | intel_overlay_unmap_regs_atomic(overlay); | ||
1456 | |||
1457 | return error; | ||
1458 | |||
1459 | err: | ||
1460 | kfree(error); | ||
1461 | return NULL; | ||
1462 | } | ||
1463 | |||
1464 | void | ||
1465 | intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error) | ||
1466 | { | ||
1467 | seq_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n", | ||
1468 | error->dovsta, error->isr); | ||
1469 | seq_printf(m, " Register file at 0x%08lx:\n", | ||
1470 | error->base); | ||
1471 | |||
1472 | #define P(x) seq_printf(m, " " #x ": 0x%08x\n", error->regs.x) | ||
1473 | P(OBUF_0Y); | ||
1474 | P(OBUF_1Y); | ||
1475 | P(OBUF_0U); | ||
1476 | P(OBUF_0V); | ||
1477 | P(OBUF_1U); | ||
1478 | P(OBUF_1V); | ||
1479 | P(OSTRIDE); | ||
1480 | P(YRGB_VPH); | ||
1481 | P(UV_VPH); | ||
1482 | P(HORZ_PH); | ||
1483 | P(INIT_PHS); | ||
1484 | P(DWINPOS); | ||
1485 | P(DWINSZ); | ||
1486 | P(SWIDTH); | ||
1487 | P(SWIDTHSW); | ||
1488 | P(SHEIGHT); | ||
1489 | P(YRGBSCALE); | ||
1490 | P(UVSCALE); | ||
1491 | P(OCLRC0); | ||
1492 | P(OCLRC1); | ||
1493 | P(DCLRKV); | ||
1494 | P(DCLRKM); | ||
1495 | P(SCLRKVH); | ||
1496 | P(SCLRKVL); | ||
1497 | P(SCLRKEN); | ||
1498 | P(OCONFIG); | ||
1499 | P(OCMD); | ||
1500 | P(OSTART_0Y); | ||
1501 | P(OSTART_1Y); | ||
1502 | P(OSTART_0U); | ||
1503 | P(OSTART_0V); | ||
1504 | P(OSTART_1U); | ||
1505 | P(OSTART_1V); | ||
1506 | P(OTILEOFF_0Y); | ||
1507 | P(OTILEOFF_1Y); | ||
1508 | P(OTILEOFF_0U); | ||
1509 | P(OTILEOFF_0V); | ||
1510 | P(OTILEOFF_1U); | ||
1511 | P(OTILEOFF_1V); | ||
1512 | P(FASTHSCALE); | ||
1513 | P(UVSCALEV); | ||
1514 | #undef P | ||
1515 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c new file mode 100644 index 000000000000..e7f5299d9d57 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -0,0 +1,111 @@ | |||
1 | /* | ||
2 | * Copyright © 2006-2010 Intel Corporation | ||
3 | * Copyright (c) 2006 Dave Airlie <airlied@linux.ie> | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice (including the next | ||
13 | * paragraph) shall be included in all copies or substantial portions of the | ||
14 | * Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
22 | * DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | * Authors: | ||
25 | * Eric Anholt <eric@anholt.net> | ||
26 | * Dave Airlie <airlied@linux.ie> | ||
27 | * Jesse Barnes <jesse.barnes@intel.com> | ||
28 | * Chris Wilson <chris@chris-wilson.co.uk> | ||
29 | */ | ||
30 | |||
31 | #include "intel_drv.h" | ||
32 | |||
33 | void | ||
34 | intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, | ||
35 | struct drm_display_mode *adjusted_mode) | ||
36 | { | ||
37 | adjusted_mode->hdisplay = fixed_mode->hdisplay; | ||
38 | adjusted_mode->hsync_start = fixed_mode->hsync_start; | ||
39 | adjusted_mode->hsync_end = fixed_mode->hsync_end; | ||
40 | adjusted_mode->htotal = fixed_mode->htotal; | ||
41 | |||
42 | adjusted_mode->vdisplay = fixed_mode->vdisplay; | ||
43 | adjusted_mode->vsync_start = fixed_mode->vsync_start; | ||
44 | adjusted_mode->vsync_end = fixed_mode->vsync_end; | ||
45 | adjusted_mode->vtotal = fixed_mode->vtotal; | ||
46 | |||
47 | adjusted_mode->clock = fixed_mode->clock; | ||
48 | |||
49 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); | ||
50 | } | ||
51 | |||
52 | /* adjusted_mode has been preset to be the panel's fixed mode */ | ||
53 | void | ||
54 | intel_pch_panel_fitting(struct drm_device *dev, | ||
55 | int fitting_mode, | ||
56 | struct drm_display_mode *mode, | ||
57 | struct drm_display_mode *adjusted_mode) | ||
58 | { | ||
59 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
60 | int x, y, width, height; | ||
61 | |||
62 | x = y = width = height = 0; | ||
63 | |||
64 | /* Native modes don't need fitting */ | ||
65 | if (adjusted_mode->hdisplay == mode->hdisplay && | ||
66 | adjusted_mode->vdisplay == mode->vdisplay) | ||
67 | goto done; | ||
68 | |||
69 | switch (fitting_mode) { | ||
70 | case DRM_MODE_SCALE_CENTER: | ||
71 | width = mode->hdisplay; | ||
72 | height = mode->vdisplay; | ||
73 | x = (adjusted_mode->hdisplay - width + 1)/2; | ||
74 | y = (adjusted_mode->vdisplay - height + 1)/2; | ||
75 | break; | ||
76 | |||
77 | case DRM_MODE_SCALE_ASPECT: | ||
78 | /* Scale but preserve the aspect ratio */ | ||
79 | { | ||
80 | u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; | ||
81 | u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; | ||
82 | if (scaled_width > scaled_height) { /* pillar */ | ||
83 | width = scaled_height / mode->vdisplay; | ||
84 | x = (adjusted_mode->hdisplay - width + 1) / 2; | ||
85 | y = 0; | ||
86 | height = adjusted_mode->vdisplay; | ||
87 | } else if (scaled_width < scaled_height) { /* letter */ | ||
88 | height = scaled_width / mode->hdisplay; | ||
89 | y = (adjusted_mode->vdisplay - height + 1) / 2; | ||
90 | x = 0; | ||
91 | width = adjusted_mode->hdisplay; | ||
92 | } else { | ||
93 | x = y = 0; | ||
94 | width = adjusted_mode->hdisplay; | ||
95 | height = adjusted_mode->vdisplay; | ||
96 | } | ||
97 | } | ||
98 | break; | ||
99 | |||
100 | default: | ||
101 | case DRM_MODE_SCALE_FULLSCREEN: | ||
102 | x = y = 0; | ||
103 | width = adjusted_mode->hdisplay; | ||
104 | height = adjusted_mode->vdisplay; | ||
105 | break; | ||
106 | } | ||
107 | |||
108 | done: | ||
109 | dev_priv->pch_pf_pos = (x << 16) | y; | ||
110 | dev_priv->pch_pf_size = (width << 16) | height; | ||
111 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 26362f8495a8..51e9c9e718c4 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -33,18 +33,35 @@ | |||
33 | #include "i915_drm.h" | 33 | #include "i915_drm.h" |
34 | #include "i915_trace.h" | 34 | #include "i915_trace.h" |
35 | 35 | ||
36 | static u32 i915_gem_get_seqno(struct drm_device *dev) | ||
37 | { | ||
38 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
39 | u32 seqno; | ||
40 | |||
41 | seqno = dev_priv->next_seqno; | ||
42 | |||
43 | /* reserve 0 for non-seqno */ | ||
44 | if (++dev_priv->next_seqno == 0) | ||
45 | dev_priv->next_seqno = 1; | ||
46 | |||
47 | return seqno; | ||
48 | } | ||
49 | |||
36 | static void | 50 | static void |
37 | render_ring_flush(struct drm_device *dev, | 51 | render_ring_flush(struct drm_device *dev, |
38 | struct intel_ring_buffer *ring, | 52 | struct intel_ring_buffer *ring, |
39 | u32 invalidate_domains, | 53 | u32 invalidate_domains, |
40 | u32 flush_domains) | 54 | u32 flush_domains) |
41 | { | 55 | { |
56 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
57 | u32 cmd; | ||
58 | |||
42 | #if WATCH_EXEC | 59 | #if WATCH_EXEC |
43 | DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, | 60 | DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, |
44 | invalidate_domains, flush_domains); | 61 | invalidate_domains, flush_domains); |
45 | #endif | 62 | #endif |
46 | u32 cmd; | 63 | |
47 | trace_i915_gem_request_flush(dev, ring->next_seqno, | 64 | trace_i915_gem_request_flush(dev, dev_priv->next_seqno, |
48 | invalidate_domains, flush_domains); | 65 | invalidate_domains, flush_domains); |
49 | 66 | ||
50 | if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { | 67 | if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { |
@@ -233,9 +250,10 @@ render_ring_add_request(struct drm_device *dev, | |||
233 | struct drm_file *file_priv, | 250 | struct drm_file *file_priv, |
234 | u32 flush_domains) | 251 | u32 flush_domains) |
235 | { | 252 | { |
236 | u32 seqno; | ||
237 | drm_i915_private_t *dev_priv = dev->dev_private; | 253 | drm_i915_private_t *dev_priv = dev->dev_private; |
238 | seqno = intel_ring_get_seqno(dev, ring); | 254 | u32 seqno; |
255 | |||
256 | seqno = i915_gem_get_seqno(dev); | ||
239 | 257 | ||
240 | if (IS_GEN6(dev)) { | 258 | if (IS_GEN6(dev)) { |
241 | BEGIN_LP_RING(6); | 259 | BEGIN_LP_RING(6); |
@@ -405,7 +423,9 @@ bsd_ring_add_request(struct drm_device *dev, | |||
405 | u32 flush_domains) | 423 | u32 flush_domains) |
406 | { | 424 | { |
407 | u32 seqno; | 425 | u32 seqno; |
408 | seqno = intel_ring_get_seqno(dev, ring); | 426 | |
427 | seqno = i915_gem_get_seqno(dev); | ||
428 | |||
409 | intel_ring_begin(dev, ring, 4); | 429 | intel_ring_begin(dev, ring, 4); |
410 | intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX); | 430 | intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX); |
411 | intel_ring_emit(dev, ring, | 431 | intel_ring_emit(dev, ring, |
@@ -479,7 +499,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev, | |||
479 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | 499 | exec_start = (uint32_t) exec_offset + exec->batch_start_offset; |
480 | exec_len = (uint32_t) exec->batch_len; | 500 | exec_len = (uint32_t) exec->batch_len; |
481 | 501 | ||
482 | trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1); | 502 | trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1); |
483 | 503 | ||
484 | count = nbox ? nbox : 1; | 504 | count = nbox ? nbox : 1; |
485 | 505 | ||
@@ -515,7 +535,16 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev, | |||
515 | intel_ring_advance(dev, ring); | 535 | intel_ring_advance(dev, ring); |
516 | } | 536 | } |
517 | 537 | ||
538 | if (IS_G4X(dev) || IS_IRONLAKE(dev)) { | ||
539 | intel_ring_begin(dev, ring, 2); | ||
540 | intel_ring_emit(dev, ring, MI_FLUSH | | ||
541 | MI_NO_WRITE_FLUSH | | ||
542 | MI_INVALIDATE_ISP ); | ||
543 | intel_ring_emit(dev, ring, MI_NOOP); | ||
544 | intel_ring_advance(dev, ring); | ||
545 | } | ||
518 | /* XXX breadcrumb */ | 546 | /* XXX breadcrumb */ |
547 | |||
519 | return 0; | 548 | return 0; |
520 | } | 549 | } |
521 | 550 | ||
@@ -588,9 +617,10 @@ err: | |||
588 | int intel_init_ring_buffer(struct drm_device *dev, | 617 | int intel_init_ring_buffer(struct drm_device *dev, |
589 | struct intel_ring_buffer *ring) | 618 | struct intel_ring_buffer *ring) |
590 | { | 619 | { |
591 | int ret; | ||
592 | struct drm_i915_gem_object *obj_priv; | 620 | struct drm_i915_gem_object *obj_priv; |
593 | struct drm_gem_object *obj; | 621 | struct drm_gem_object *obj; |
622 | int ret; | ||
623 | |||
594 | ring->dev = dev; | 624 | ring->dev = dev; |
595 | 625 | ||
596 | if (I915_NEED_GFX_HWS(dev)) { | 626 | if (I915_NEED_GFX_HWS(dev)) { |
@@ -603,16 +633,14 @@ int intel_init_ring_buffer(struct drm_device *dev, | |||
603 | if (obj == NULL) { | 633 | if (obj == NULL) { |
604 | DRM_ERROR("Failed to allocate ringbuffer\n"); | 634 | DRM_ERROR("Failed to allocate ringbuffer\n"); |
605 | ret = -ENOMEM; | 635 | ret = -ENOMEM; |
606 | goto cleanup; | 636 | goto err_hws; |
607 | } | 637 | } |
608 | 638 | ||
609 | ring->gem_object = obj; | 639 | ring->gem_object = obj; |
610 | 640 | ||
611 | ret = i915_gem_object_pin(obj, ring->alignment); | 641 | ret = i915_gem_object_pin(obj, ring->alignment); |
612 | if (ret != 0) { | 642 | if (ret) |
613 | drm_gem_object_unreference(obj); | 643 | goto err_unref; |
614 | goto cleanup; | ||
615 | } | ||
616 | 644 | ||
617 | obj_priv = to_intel_bo(obj); | 645 | obj_priv = to_intel_bo(obj); |
618 | ring->map.size = ring->size; | 646 | ring->map.size = ring->size; |
@@ -624,18 +652,14 @@ int intel_init_ring_buffer(struct drm_device *dev, | |||
624 | drm_core_ioremap_wc(&ring->map, dev); | 652 | drm_core_ioremap_wc(&ring->map, dev); |
625 | if (ring->map.handle == NULL) { | 653 | if (ring->map.handle == NULL) { |
626 | DRM_ERROR("Failed to map ringbuffer.\n"); | 654 | DRM_ERROR("Failed to map ringbuffer.\n"); |
627 | i915_gem_object_unpin(obj); | ||
628 | drm_gem_object_unreference(obj); | ||
629 | ret = -EINVAL; | 655 | ret = -EINVAL; |
630 | goto cleanup; | 656 | goto err_unpin; |
631 | } | 657 | } |
632 | 658 | ||
633 | ring->virtual_start = ring->map.handle; | 659 | ring->virtual_start = ring->map.handle; |
634 | ret = ring->init(dev, ring); | 660 | ret = ring->init(dev, ring); |
635 | if (ret != 0) { | 661 | if (ret) |
636 | intel_cleanup_ring_buffer(dev, ring); | 662 | goto err_unmap; |
637 | return ret; | ||
638 | } | ||
639 | 663 | ||
640 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 664 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
641 | i915_kernel_lost_context(dev); | 665 | i915_kernel_lost_context(dev); |
@@ -649,7 +673,15 @@ int intel_init_ring_buffer(struct drm_device *dev, | |||
649 | INIT_LIST_HEAD(&ring->active_list); | 673 | INIT_LIST_HEAD(&ring->active_list); |
650 | INIT_LIST_HEAD(&ring->request_list); | 674 | INIT_LIST_HEAD(&ring->request_list); |
651 | return ret; | 675 | return ret; |
652 | cleanup: | 676 | |
677 | err_unmap: | ||
678 | drm_core_ioremapfree(&ring->map, dev); | ||
679 | err_unpin: | ||
680 | i915_gem_object_unpin(obj); | ||
681 | err_unref: | ||
682 | drm_gem_object_unreference(obj); | ||
683 | ring->gem_object = NULL; | ||
684 | err_hws: | ||
653 | cleanup_status_page(dev, ring); | 685 | cleanup_status_page(dev, ring); |
654 | return ret; | 686 | return ret; |
655 | } | 687 | } |
@@ -682,9 +714,11 @@ int intel_wrap_ring_buffer(struct drm_device *dev, | |||
682 | } | 714 | } |
683 | 715 | ||
684 | virt = (unsigned int *)(ring->virtual_start + ring->tail); | 716 | virt = (unsigned int *)(ring->virtual_start + ring->tail); |
685 | rem /= 4; | 717 | rem /= 8; |
686 | while (rem--) | 718 | while (rem--) { |
719 | *virt++ = MI_NOOP; | ||
687 | *virt++ = MI_NOOP; | 720 | *virt++ = MI_NOOP; |
721 | } | ||
688 | 722 | ||
689 | ring->tail = 0; | 723 | ring->tail = 0; |
690 | ring->space = ring->head - 8; | 724 | ring->space = ring->head - 8; |
@@ -729,21 +763,14 @@ void intel_ring_begin(struct drm_device *dev, | |||
729 | intel_wrap_ring_buffer(dev, ring); | 763 | intel_wrap_ring_buffer(dev, ring); |
730 | if (unlikely(ring->space < n)) | 764 | if (unlikely(ring->space < n)) |
731 | intel_wait_ring_buffer(dev, ring, n); | 765 | intel_wait_ring_buffer(dev, ring, n); |
732 | } | ||
733 | 766 | ||
734 | void intel_ring_emit(struct drm_device *dev, | 767 | ring->space -= n; |
735 | struct intel_ring_buffer *ring, unsigned int data) | ||
736 | { | ||
737 | unsigned int *virt = ring->virtual_start + ring->tail; | ||
738 | *virt = data; | ||
739 | ring->tail += 4; | ||
740 | ring->tail &= ring->size - 1; | ||
741 | ring->space -= 4; | ||
742 | } | 768 | } |
743 | 769 | ||
744 | void intel_ring_advance(struct drm_device *dev, | 770 | void intel_ring_advance(struct drm_device *dev, |
745 | struct intel_ring_buffer *ring) | 771 | struct intel_ring_buffer *ring) |
746 | { | 772 | { |
773 | ring->tail &= ring->size - 1; | ||
747 | ring->advance_ring(dev, ring); | 774 | ring->advance_ring(dev, ring); |
748 | } | 775 | } |
749 | 776 | ||
@@ -762,18 +789,6 @@ void intel_fill_struct(struct drm_device *dev, | |||
762 | intel_ring_advance(dev, ring); | 789 | intel_ring_advance(dev, ring); |
763 | } | 790 | } |
764 | 791 | ||
765 | u32 intel_ring_get_seqno(struct drm_device *dev, | ||
766 | struct intel_ring_buffer *ring) | ||
767 | { | ||
768 | u32 seqno; | ||
769 | seqno = ring->next_seqno; | ||
770 | |||
771 | /* reserve 0 for non-seqno */ | ||
772 | if (++ring->next_seqno == 0) | ||
773 | ring->next_seqno = 1; | ||
774 | return seqno; | ||
775 | } | ||
776 | |||
777 | struct intel_ring_buffer render_ring = { | 792 | struct intel_ring_buffer render_ring = { |
778 | .name = "render ring", | 793 | .name = "render ring", |
779 | .regs = { | 794 | .regs = { |
@@ -791,7 +806,6 @@ struct intel_ring_buffer render_ring = { | |||
791 | .head = 0, | 806 | .head = 0, |
792 | .tail = 0, | 807 | .tail = 0, |
793 | .space = 0, | 808 | .space = 0, |
794 | .next_seqno = 1, | ||
795 | .user_irq_refcount = 0, | 809 | .user_irq_refcount = 0, |
796 | .irq_gem_seqno = 0, | 810 | .irq_gem_seqno = 0, |
797 | .waiting_gem_seqno = 0, | 811 | .waiting_gem_seqno = 0, |
@@ -830,7 +844,6 @@ struct intel_ring_buffer bsd_ring = { | |||
830 | .head = 0, | 844 | .head = 0, |
831 | .tail = 0, | 845 | .tail = 0, |
832 | .space = 0, | 846 | .space = 0, |
833 | .next_seqno = 1, | ||
834 | .user_irq_refcount = 0, | 847 | .user_irq_refcount = 0, |
835 | .irq_gem_seqno = 0, | 848 | .irq_gem_seqno = 0, |
836 | .waiting_gem_seqno = 0, | 849 | .waiting_gem_seqno = 0, |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index d5568d3766de..525e7d3edda8 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -26,7 +26,6 @@ struct intel_ring_buffer { | |||
26 | unsigned int head; | 26 | unsigned int head; |
27 | unsigned int tail; | 27 | unsigned int tail; |
28 | unsigned int space; | 28 | unsigned int space; |
29 | u32 next_seqno; | ||
30 | struct intel_hw_status_page status_page; | 29 | struct intel_hw_status_page status_page; |
31 | 30 | ||
32 | u32 irq_gem_seqno; /* last seq seem at irq time */ | 31 | u32 irq_gem_seqno; /* last seq seem at irq time */ |
@@ -106,8 +105,16 @@ int intel_wrap_ring_buffer(struct drm_device *dev, | |||
106 | struct intel_ring_buffer *ring); | 105 | struct intel_ring_buffer *ring); |
107 | void intel_ring_begin(struct drm_device *dev, | 106 | void intel_ring_begin(struct drm_device *dev, |
108 | struct intel_ring_buffer *ring, int n); | 107 | struct intel_ring_buffer *ring, int n); |
109 | void intel_ring_emit(struct drm_device *dev, | 108 | |
110 | struct intel_ring_buffer *ring, u32 data); | 109 | static inline void intel_ring_emit(struct drm_device *dev, |
110 | struct intel_ring_buffer *ring, | ||
111 | unsigned int data) | ||
112 | { | ||
113 | unsigned int *virt = ring->virtual_start + ring->tail; | ||
114 | *virt = data; | ||
115 | ring->tail += 4; | ||
116 | } | ||
117 | |||
111 | void intel_fill_struct(struct drm_device *dev, | 118 | void intel_fill_struct(struct drm_device *dev, |
112 | struct intel_ring_buffer *ring, | 119 | struct intel_ring_buffer *ring, |
113 | void *data, | 120 | void *data, |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index d9d4d51aa89e..093e914e8a41 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -31,8 +31,8 @@ | |||
31 | #include "drmP.h" | 31 | #include "drmP.h" |
32 | #include "drm.h" | 32 | #include "drm.h" |
33 | #include "drm_crtc.h" | 33 | #include "drm_crtc.h" |
34 | #include "intel_drv.h" | ||
35 | #include "drm_edid.h" | 34 | #include "drm_edid.h" |
35 | #include "intel_drv.h" | ||
36 | #include "i915_drm.h" | 36 | #include "i915_drm.h" |
37 | #include "i915_drv.h" | 37 | #include "i915_drv.h" |
38 | #include "intel_sdvo_regs.h" | 38 | #include "intel_sdvo_regs.h" |
@@ -47,9 +47,10 @@ | |||
47 | 47 | ||
48 | #define IS_TV(c) (c->output_flag & SDVO_TV_MASK) | 48 | #define IS_TV(c) (c->output_flag & SDVO_TV_MASK) |
49 | #define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) | 49 | #define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) |
50 | #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) | ||
50 | 51 | ||
51 | 52 | ||
52 | static char *tv_format_names[] = { | 53 | static const char *tv_format_names[] = { |
53 | "NTSC_M" , "NTSC_J" , "NTSC_443", | 54 | "NTSC_M" , "NTSC_J" , "NTSC_443", |
54 | "PAL_B" , "PAL_D" , "PAL_G" , | 55 | "PAL_B" , "PAL_D" , "PAL_G" , |
55 | "PAL_H" , "PAL_I" , "PAL_M" , | 56 | "PAL_H" , "PAL_I" , "PAL_M" , |
@@ -61,7 +62,9 @@ static char *tv_format_names[] = { | |||
61 | 62 | ||
62 | #define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names)) | 63 | #define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names)) |
63 | 64 | ||
64 | struct intel_sdvo_priv { | 65 | struct intel_sdvo { |
66 | struct intel_encoder base; | ||
67 | |||
65 | u8 slave_addr; | 68 | u8 slave_addr; |
66 | 69 | ||
67 | /* Register for the SDVO device: SDVOB or SDVOC */ | 70 | /* Register for the SDVO device: SDVOB or SDVOC */ |
@@ -95,7 +98,7 @@ struct intel_sdvo_priv { | |||
95 | bool is_tv; | 98 | bool is_tv; |
96 | 99 | ||
97 | /* This is for current tv format name */ | 100 | /* This is for current tv format name */ |
98 | char *tv_format_name; | 101 | int tv_format_index; |
99 | 102 | ||
100 | /** | 103 | /** |
101 | * This is set if we treat the device as HDMI, instead of DVI. | 104 | * This is set if we treat the device as HDMI, instead of DVI. |
@@ -132,37 +135,40 @@ struct intel_sdvo_priv { | |||
132 | }; | 135 | }; |
133 | 136 | ||
134 | struct intel_sdvo_connector { | 137 | struct intel_sdvo_connector { |
138 | struct intel_connector base; | ||
139 | |||
135 | /* Mark the type of connector */ | 140 | /* Mark the type of connector */ |
136 | uint16_t output_flag; | 141 | uint16_t output_flag; |
137 | 142 | ||
138 | /* This contains all current supported TV format */ | 143 | /* This contains all current supported TV format */ |
139 | char *tv_format_supported[TV_FORMAT_NUM]; | 144 | u8 tv_format_supported[TV_FORMAT_NUM]; |
140 | int format_supported_num; | 145 | int format_supported_num; |
141 | struct drm_property *tv_format_property; | 146 | struct drm_property *tv_format; |
142 | struct drm_property *tv_format_name_property[TV_FORMAT_NUM]; | ||
143 | |||
144 | /** | ||
145 | * Returned SDTV resolutions allowed for the current format, if the | ||
146 | * device reported it. | ||
147 | */ | ||
148 | struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions; | ||
149 | 147 | ||
150 | /* add the property for the SDVO-TV */ | 148 | /* add the property for the SDVO-TV */ |
151 | struct drm_property *left_property; | 149 | struct drm_property *left; |
152 | struct drm_property *right_property; | 150 | struct drm_property *right; |
153 | struct drm_property *top_property; | 151 | struct drm_property *top; |
154 | struct drm_property *bottom_property; | 152 | struct drm_property *bottom; |
155 | struct drm_property *hpos_property; | 153 | struct drm_property *hpos; |
156 | struct drm_property *vpos_property; | 154 | struct drm_property *vpos; |
155 | struct drm_property *contrast; | ||
156 | struct drm_property *saturation; | ||
157 | struct drm_property *hue; | ||
158 | struct drm_property *sharpness; | ||
159 | struct drm_property *flicker_filter; | ||
160 | struct drm_property *flicker_filter_adaptive; | ||
161 | struct drm_property *flicker_filter_2d; | ||
162 | struct drm_property *tv_chroma_filter; | ||
163 | struct drm_property *tv_luma_filter; | ||
164 | struct drm_property *dot_crawl; | ||
157 | 165 | ||
158 | /* add the property for the SDVO-TV/LVDS */ | 166 | /* add the property for the SDVO-TV/LVDS */ |
159 | struct drm_property *brightness_property; | 167 | struct drm_property *brightness; |
160 | struct drm_property *contrast_property; | ||
161 | struct drm_property *saturation_property; | ||
162 | struct drm_property *hue_property; | ||
163 | 168 | ||
164 | /* Add variable to record current setting for the above property */ | 169 | /* Add variable to record current setting for the above property */ |
165 | u32 left_margin, right_margin, top_margin, bottom_margin; | 170 | u32 left_margin, right_margin, top_margin, bottom_margin; |
171 | |||
166 | /* this is to get the range of margin.*/ | 172 | /* this is to get the range of margin.*/ |
167 | u32 max_hscan, max_vscan; | 173 | u32 max_hscan, max_vscan; |
168 | u32 max_hpos, cur_hpos; | 174 | u32 max_hpos, cur_hpos; |
@@ -171,36 +177,54 @@ struct intel_sdvo_connector { | |||
171 | u32 cur_contrast, max_contrast; | 177 | u32 cur_contrast, max_contrast; |
172 | u32 cur_saturation, max_saturation; | 178 | u32 cur_saturation, max_saturation; |
173 | u32 cur_hue, max_hue; | 179 | u32 cur_hue, max_hue; |
180 | u32 cur_sharpness, max_sharpness; | ||
181 | u32 cur_flicker_filter, max_flicker_filter; | ||
182 | u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive; | ||
183 | u32 cur_flicker_filter_2d, max_flicker_filter_2d; | ||
184 | u32 cur_tv_chroma_filter, max_tv_chroma_filter; | ||
185 | u32 cur_tv_luma_filter, max_tv_luma_filter; | ||
186 | u32 cur_dot_crawl, max_dot_crawl; | ||
174 | }; | 187 | }; |
175 | 188 | ||
189 | static struct intel_sdvo *enc_to_intel_sdvo(struct drm_encoder *encoder) | ||
190 | { | ||
191 | return container_of(enc_to_intel_encoder(encoder), struct intel_sdvo, base); | ||
192 | } | ||
193 | |||
194 | static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector) | ||
195 | { | ||
196 | return container_of(to_intel_connector(connector), struct intel_sdvo_connector, base); | ||
197 | } | ||
198 | |||
176 | static bool | 199 | static bool |
177 | intel_sdvo_output_setup(struct intel_encoder *intel_encoder, | 200 | intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags); |
178 | uint16_t flags); | 201 | static bool |
179 | static void | 202 | intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, |
180 | intel_sdvo_tv_create_property(struct drm_connector *connector, int type); | 203 | struct intel_sdvo_connector *intel_sdvo_connector, |
181 | static void | 204 | int type); |
182 | intel_sdvo_create_enhance_property(struct drm_connector *connector); | 205 | static bool |
206 | intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, | ||
207 | struct intel_sdvo_connector *intel_sdvo_connector); | ||
183 | 208 | ||
184 | /** | 209 | /** |
185 | * Writes the SDVOB or SDVOC with the given value, but always writes both | 210 | * Writes the SDVOB or SDVOC with the given value, but always writes both |
186 | * SDVOB and SDVOC to work around apparent hardware issues (according to | 211 | * SDVOB and SDVOC to work around apparent hardware issues (according to |
187 | * comments in the BIOS). | 212 | * comments in the BIOS). |
188 | */ | 213 | */ |
189 | static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val) | 214 | static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) |
190 | { | 215 | { |
191 | struct drm_device *dev = intel_encoder->enc.dev; | 216 | struct drm_device *dev = intel_sdvo->base.enc.dev; |
192 | struct drm_i915_private *dev_priv = dev->dev_private; | 217 | struct drm_i915_private *dev_priv = dev->dev_private; |
193 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
194 | u32 bval = val, cval = val; | 218 | u32 bval = val, cval = val; |
195 | int i; | 219 | int i; |
196 | 220 | ||
197 | if (sdvo_priv->sdvo_reg == PCH_SDVOB) { | 221 | if (intel_sdvo->sdvo_reg == PCH_SDVOB) { |
198 | I915_WRITE(sdvo_priv->sdvo_reg, val); | 222 | I915_WRITE(intel_sdvo->sdvo_reg, val); |
199 | I915_READ(sdvo_priv->sdvo_reg); | 223 | I915_READ(intel_sdvo->sdvo_reg); |
200 | return; | 224 | return; |
201 | } | 225 | } |
202 | 226 | ||
203 | if (sdvo_priv->sdvo_reg == SDVOB) { | 227 | if (intel_sdvo->sdvo_reg == SDVOB) { |
204 | cval = I915_READ(SDVOC); | 228 | cval = I915_READ(SDVOC); |
205 | } else { | 229 | } else { |
206 | bval = I915_READ(SDVOB); | 230 | bval = I915_READ(SDVOB); |
@@ -219,33 +243,27 @@ static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val) | |||
219 | } | 243 | } |
220 | } | 244 | } |
221 | 245 | ||
222 | static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr, | 246 | static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch) |
223 | u8 *ch) | ||
224 | { | 247 | { |
225 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | 248 | u8 out_buf[2] = { addr, 0 }; |
226 | u8 out_buf[2]; | ||
227 | u8 buf[2]; | 249 | u8 buf[2]; |
228 | int ret; | ||
229 | |||
230 | struct i2c_msg msgs[] = { | 250 | struct i2c_msg msgs[] = { |
231 | { | 251 | { |
232 | .addr = sdvo_priv->slave_addr >> 1, | 252 | .addr = intel_sdvo->slave_addr >> 1, |
233 | .flags = 0, | 253 | .flags = 0, |
234 | .len = 1, | 254 | .len = 1, |
235 | .buf = out_buf, | 255 | .buf = out_buf, |
236 | }, | 256 | }, |
237 | { | 257 | { |
238 | .addr = sdvo_priv->slave_addr >> 1, | 258 | .addr = intel_sdvo->slave_addr >> 1, |
239 | .flags = I2C_M_RD, | 259 | .flags = I2C_M_RD, |
240 | .len = 1, | 260 | .len = 1, |
241 | .buf = buf, | 261 | .buf = buf, |
242 | } | 262 | } |
243 | }; | 263 | }; |
264 | int ret; | ||
244 | 265 | ||
245 | out_buf[0] = addr; | 266 | if ((ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 2)) == 2) |
246 | out_buf[1] = 0; | ||
247 | |||
248 | if ((ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 2)) == 2) | ||
249 | { | 267 | { |
250 | *ch = buf[0]; | 268 | *ch = buf[0]; |
251 | return true; | 269 | return true; |
@@ -255,35 +273,26 @@ static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr, | |||
255 | return false; | 273 | return false; |
256 | } | 274 | } |
257 | 275 | ||
258 | static bool intel_sdvo_write_byte(struct intel_encoder *intel_encoder, int addr, | 276 | static bool intel_sdvo_write_byte(struct intel_sdvo *intel_sdvo, int addr, u8 ch) |
259 | u8 ch) | ||
260 | { | 277 | { |
261 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | 278 | u8 out_buf[2] = { addr, ch }; |
262 | u8 out_buf[2]; | ||
263 | struct i2c_msg msgs[] = { | 279 | struct i2c_msg msgs[] = { |
264 | { | 280 | { |
265 | .addr = sdvo_priv->slave_addr >> 1, | 281 | .addr = intel_sdvo->slave_addr >> 1, |
266 | .flags = 0, | 282 | .flags = 0, |
267 | .len = 2, | 283 | .len = 2, |
268 | .buf = out_buf, | 284 | .buf = out_buf, |
269 | } | 285 | } |
270 | }; | 286 | }; |
271 | 287 | ||
272 | out_buf[0] = addr; | 288 | return i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 1) == 1; |
273 | out_buf[1] = ch; | ||
274 | |||
275 | if (i2c_transfer(intel_encoder->i2c_bus, msgs, 1) == 1) | ||
276 | { | ||
277 | return true; | ||
278 | } | ||
279 | return false; | ||
280 | } | 289 | } |
281 | 290 | ||
282 | #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} | 291 | #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} |
283 | /** Mapping of command numbers to names, for debug output */ | 292 | /** Mapping of command numbers to names, for debug output */ |
284 | static const struct _sdvo_cmd_name { | 293 | static const struct _sdvo_cmd_name { |
285 | u8 cmd; | 294 | u8 cmd; |
286 | char *name; | 295 | const char *name; |
287 | } sdvo_cmd_names[] = { | 296 | } sdvo_cmd_names[] = { |
288 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), | 297 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), |
289 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), | 298 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), |
@@ -328,13 +337,14 @@ static const struct _sdvo_cmd_name { | |||
328 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), | 337 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), |
329 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), | 338 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), |
330 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), | 339 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), |
340 | |||
331 | /* Add the op code for SDVO enhancements */ | 341 | /* Add the op code for SDVO enhancements */ |
332 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_H), | 342 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS), |
333 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_H), | 343 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS), |
334 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_H), | 344 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS), |
335 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_V), | 345 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS), |
336 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_V), | 346 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS), |
337 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_V), | 347 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS), |
338 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION), | 348 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION), |
339 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION), | 349 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION), |
340 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION), | 350 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION), |
@@ -353,6 +363,27 @@ static const struct _sdvo_cmd_name { | |||
353 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V), | 363 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V), |
354 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V), | 364 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V), |
355 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V), | 365 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V), |
366 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER), | ||
367 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER), | ||
368 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER), | ||
369 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE), | ||
370 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE), | ||
371 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE), | ||
372 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D), | ||
373 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D), | ||
374 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D), | ||
375 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS), | ||
376 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS), | ||
377 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS), | ||
378 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL), | ||
379 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL), | ||
380 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER), | ||
381 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER), | ||
382 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER), | ||
383 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER), | ||
384 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER), | ||
385 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER), | ||
386 | |||
356 | /* HDMI op code */ | 387 | /* HDMI op code */ |
357 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), | 388 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), |
358 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), | 389 | SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), |
@@ -377,17 +408,15 @@ static const struct _sdvo_cmd_name { | |||
377 | }; | 408 | }; |
378 | 409 | ||
379 | #define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB) | 410 | #define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB) |
380 | #define SDVO_NAME(dev_priv) (IS_SDVOB((dev_priv)->sdvo_reg) ? "SDVOB" : "SDVOC") | 411 | #define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC") |
381 | #define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv) | ||
382 | 412 | ||
383 | static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd, | 413 | static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, |
384 | void *args, int args_len) | 414 | const void *args, int args_len) |
385 | { | 415 | { |
386 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
387 | int i; | 416 | int i; |
388 | 417 | ||
389 | DRM_DEBUG_KMS("%s: W: %02X ", | 418 | DRM_DEBUG_KMS("%s: W: %02X ", |
390 | SDVO_NAME(sdvo_priv), cmd); | 419 | SDVO_NAME(intel_sdvo), cmd); |
391 | for (i = 0; i < args_len; i++) | 420 | for (i = 0; i < args_len; i++) |
392 | DRM_LOG_KMS("%02X ", ((u8 *)args)[i]); | 421 | DRM_LOG_KMS("%02X ", ((u8 *)args)[i]); |
393 | for (; i < 8; i++) | 422 | for (; i < 8; i++) |
@@ -403,19 +432,20 @@ static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd, | |||
403 | DRM_LOG_KMS("\n"); | 432 | DRM_LOG_KMS("\n"); |
404 | } | 433 | } |
405 | 434 | ||
406 | static void intel_sdvo_write_cmd(struct intel_encoder *intel_encoder, u8 cmd, | 435 | static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, |
407 | void *args, int args_len) | 436 | const void *args, int args_len) |
408 | { | 437 | { |
409 | int i; | 438 | int i; |
410 | 439 | ||
411 | intel_sdvo_debug_write(intel_encoder, cmd, args, args_len); | 440 | intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); |
412 | 441 | ||
413 | for (i = 0; i < args_len; i++) { | 442 | for (i = 0; i < args_len; i++) { |
414 | intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0 - i, | 443 | if (!intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0 - i, |
415 | ((u8*)args)[i]); | 444 | ((u8*)args)[i])) |
445 | return false; | ||
416 | } | 446 | } |
417 | 447 | ||
418 | intel_sdvo_write_byte(intel_encoder, SDVO_I2C_OPCODE, cmd); | 448 | return intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_OPCODE, cmd); |
419 | } | 449 | } |
420 | 450 | ||
421 | static const char *cmd_status_names[] = { | 451 | static const char *cmd_status_names[] = { |
@@ -428,14 +458,13 @@ static const char *cmd_status_names[] = { | |||
428 | "Scaling not supported" | 458 | "Scaling not supported" |
429 | }; | 459 | }; |
430 | 460 | ||
431 | static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder, | 461 | static void intel_sdvo_debug_response(struct intel_sdvo *intel_sdvo, |
432 | void *response, int response_len, | 462 | void *response, int response_len, |
433 | u8 status) | 463 | u8 status) |
434 | { | 464 | { |
435 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
436 | int i; | 465 | int i; |
437 | 466 | ||
438 | DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv)); | 467 | DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo)); |
439 | for (i = 0; i < response_len; i++) | 468 | for (i = 0; i < response_len; i++) |
440 | DRM_LOG_KMS("%02X ", ((u8 *)response)[i]); | 469 | DRM_LOG_KMS("%02X ", ((u8 *)response)[i]); |
441 | for (; i < 8; i++) | 470 | for (; i < 8; i++) |
@@ -447,8 +476,8 @@ static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder, | |||
447 | DRM_LOG_KMS("\n"); | 476 | DRM_LOG_KMS("\n"); |
448 | } | 477 | } |
449 | 478 | ||
450 | static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder, | 479 | static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, |
451 | void *response, int response_len) | 480 | void *response, int response_len) |
452 | { | 481 | { |
453 | int i; | 482 | int i; |
454 | u8 status; | 483 | u8 status; |
@@ -457,24 +486,26 @@ static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder, | |||
457 | while (retry--) { | 486 | while (retry--) { |
458 | /* Read the command response */ | 487 | /* Read the command response */ |
459 | for (i = 0; i < response_len; i++) { | 488 | for (i = 0; i < response_len; i++) { |
460 | intel_sdvo_read_byte(intel_encoder, | 489 | if (!intel_sdvo_read_byte(intel_sdvo, |
461 | SDVO_I2C_RETURN_0 + i, | 490 | SDVO_I2C_RETURN_0 + i, |
462 | &((u8 *)response)[i]); | 491 | &((u8 *)response)[i])) |
492 | return false; | ||
463 | } | 493 | } |
464 | 494 | ||
465 | /* read the return status */ | 495 | /* read the return status */ |
466 | intel_sdvo_read_byte(intel_encoder, SDVO_I2C_CMD_STATUS, | 496 | if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS, |
467 | &status); | 497 | &status)) |
498 | return false; | ||
468 | 499 | ||
469 | intel_sdvo_debug_response(intel_encoder, response, response_len, | 500 | intel_sdvo_debug_response(intel_sdvo, response, response_len, |
470 | status); | 501 | status); |
471 | if (status != SDVO_CMD_STATUS_PENDING) | 502 | if (status != SDVO_CMD_STATUS_PENDING) |
472 | return status; | 503 | break; |
473 | 504 | ||
474 | mdelay(50); | 505 | mdelay(50); |
475 | } | 506 | } |
476 | 507 | ||
477 | return status; | 508 | return status == SDVO_CMD_STATUS_SUCCESS; |
478 | } | 509 | } |
479 | 510 | ||
480 | static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) | 511 | static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) |
@@ -494,37 +525,36 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) | |||
494 | * another I2C transaction after issuing the DDC bus switch, it will be | 525 | * another I2C transaction after issuing the DDC bus switch, it will be |
495 | * switched to the internal SDVO register. | 526 | * switched to the internal SDVO register. |
496 | */ | 527 | */ |
497 | static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encoder, | 528 | static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, |
498 | u8 target) | 529 | u8 target) |
499 | { | 530 | { |
500 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
501 | u8 out_buf[2], cmd_buf[2], ret_value[2], ret; | 531 | u8 out_buf[2], cmd_buf[2], ret_value[2], ret; |
502 | struct i2c_msg msgs[] = { | 532 | struct i2c_msg msgs[] = { |
503 | { | 533 | { |
504 | .addr = sdvo_priv->slave_addr >> 1, | 534 | .addr = intel_sdvo->slave_addr >> 1, |
505 | .flags = 0, | 535 | .flags = 0, |
506 | .len = 2, | 536 | .len = 2, |
507 | .buf = out_buf, | 537 | .buf = out_buf, |
508 | }, | 538 | }, |
509 | /* the following two are to read the response */ | 539 | /* the following two are to read the response */ |
510 | { | 540 | { |
511 | .addr = sdvo_priv->slave_addr >> 1, | 541 | .addr = intel_sdvo->slave_addr >> 1, |
512 | .flags = 0, | 542 | .flags = 0, |
513 | .len = 1, | 543 | .len = 1, |
514 | .buf = cmd_buf, | 544 | .buf = cmd_buf, |
515 | }, | 545 | }, |
516 | { | 546 | { |
517 | .addr = sdvo_priv->slave_addr >> 1, | 547 | .addr = intel_sdvo->slave_addr >> 1, |
518 | .flags = I2C_M_RD, | 548 | .flags = I2C_M_RD, |
519 | .len = 1, | 549 | .len = 1, |
520 | .buf = ret_value, | 550 | .buf = ret_value, |
521 | }, | 551 | }, |
522 | }; | 552 | }; |
523 | 553 | ||
524 | intel_sdvo_debug_write(intel_encoder, SDVO_CMD_SET_CONTROL_BUS_SWITCH, | 554 | intel_sdvo_debug_write(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH, |
525 | &target, 1); | 555 | &target, 1); |
526 | /* write the DDC switch command argument */ | 556 | /* write the DDC switch command argument */ |
527 | intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0, target); | 557 | intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0, target); |
528 | 558 | ||
529 | out_buf[0] = SDVO_I2C_OPCODE; | 559 | out_buf[0] = SDVO_I2C_OPCODE; |
530 | out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; | 560 | out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; |
@@ -533,7 +563,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encode | |||
533 | ret_value[0] = 0; | 563 | ret_value[0] = 0; |
534 | ret_value[1] = 0; | 564 | ret_value[1] = 0; |
535 | 565 | ||
536 | ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 3); | 566 | ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 3); |
537 | if (ret != 3) { | 567 | if (ret != 3) { |
538 | /* failure in I2C transfer */ | 568 | /* failure in I2C transfer */ |
539 | DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); | 569 | DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); |
@@ -547,23 +577,29 @@ static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encode | |||
547 | return; | 577 | return; |
548 | } | 578 | } |
549 | 579 | ||
550 | static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, bool target_0, bool target_1) | 580 | static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len) |
551 | { | 581 | { |
552 | struct intel_sdvo_set_target_input_args targets = {0}; | 582 | if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len)) |
553 | u8 status; | 583 | return false; |
554 | |||
555 | if (target_0 && target_1) | ||
556 | return SDVO_CMD_STATUS_NOTSUPP; | ||
557 | 584 | ||
558 | if (target_1) | 585 | return intel_sdvo_read_response(intel_sdvo, NULL, 0); |
559 | targets.target_1 = 1; | 586 | } |
560 | 587 | ||
561 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_INPUT, &targets, | 588 | static bool |
562 | sizeof(targets)); | 589 | intel_sdvo_get_value(struct intel_sdvo *intel_sdvo, u8 cmd, void *value, int len) |
590 | { | ||
591 | if (!intel_sdvo_write_cmd(intel_sdvo, cmd, NULL, 0)) | ||
592 | return false; | ||
563 | 593 | ||
564 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); | 594 | return intel_sdvo_read_response(intel_sdvo, value, len); |
595 | } | ||
565 | 596 | ||
566 | return (status == SDVO_CMD_STATUS_SUCCESS); | 597 | static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo) |
598 | { | ||
599 | struct intel_sdvo_set_target_input_args targets = {0}; | ||
600 | return intel_sdvo_set_value(intel_sdvo, | ||
601 | SDVO_CMD_SET_TARGET_INPUT, | ||
602 | &targets, sizeof(targets)); | ||
567 | } | 603 | } |
568 | 604 | ||
569 | /** | 605 | /** |
@@ -572,14 +608,12 @@ static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, boo | |||
572 | * This function is making an assumption about the layout of the response, | 608 | * This function is making an assumption about the layout of the response, |
573 | * which should be checked against the docs. | 609 | * which should be checked against the docs. |
574 | */ | 610 | */ |
575 | static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, bool *input_1, bool *input_2) | 611 | static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *input_1, bool *input_2) |
576 | { | 612 | { |
577 | struct intel_sdvo_get_trained_inputs_response response; | 613 | struct intel_sdvo_get_trained_inputs_response response; |
578 | u8 status; | ||
579 | 614 | ||
580 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); | 615 | if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS, |
581 | status = intel_sdvo_read_response(intel_encoder, &response, sizeof(response)); | 616 | &response, sizeof(response))) |
582 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
583 | return false; | 617 | return false; |
584 | 618 | ||
585 | *input_1 = response.input0_trained; | 619 | *input_1 = response.input0_trained; |
@@ -587,21 +621,18 @@ static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, b | |||
587 | return true; | 621 | return true; |
588 | } | 622 | } |
589 | 623 | ||
590 | static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder, | 624 | static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo, |
591 | u16 outputs) | 625 | u16 outputs) |
592 | { | 626 | { |
593 | u8 status; | 627 | return intel_sdvo_set_value(intel_sdvo, |
594 | 628 | SDVO_CMD_SET_ACTIVE_OUTPUTS, | |
595 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, | 629 | &outputs, sizeof(outputs)); |
596 | sizeof(outputs)); | ||
597 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); | ||
598 | return (status == SDVO_CMD_STATUS_SUCCESS); | ||
599 | } | 630 | } |
600 | 631 | ||
601 | static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encoder, | 632 | static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo, |
602 | int mode) | 633 | int mode) |
603 | { | 634 | { |
604 | u8 status, state = SDVO_ENCODER_STATE_ON; | 635 | u8 state = SDVO_ENCODER_STATE_ON; |
605 | 636 | ||
606 | switch (mode) { | 637 | switch (mode) { |
607 | case DRM_MODE_DPMS_ON: | 638 | case DRM_MODE_DPMS_ON: |
@@ -618,88 +649,63 @@ static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encod | |||
618 | break; | 649 | break; |
619 | } | 650 | } |
620 | 651 | ||
621 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, | 652 | return intel_sdvo_set_value(intel_sdvo, |
622 | sizeof(state)); | 653 | SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state)); |
623 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); | ||
624 | |||
625 | return (status == SDVO_CMD_STATUS_SUCCESS); | ||
626 | } | 654 | } |
627 | 655 | ||
628 | static bool intel_sdvo_get_input_pixel_clock_range(struct intel_encoder *intel_encoder, | 656 | static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo, |
629 | int *clock_min, | 657 | int *clock_min, |
630 | int *clock_max) | 658 | int *clock_max) |
631 | { | 659 | { |
632 | struct intel_sdvo_pixel_clock_range clocks; | 660 | struct intel_sdvo_pixel_clock_range clocks; |
633 | u8 status; | ||
634 | |||
635 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, | ||
636 | NULL, 0); | ||
637 | |||
638 | status = intel_sdvo_read_response(intel_encoder, &clocks, sizeof(clocks)); | ||
639 | 661 | ||
640 | if (status != SDVO_CMD_STATUS_SUCCESS) | 662 | if (!intel_sdvo_get_value(intel_sdvo, |
663 | SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, | ||
664 | &clocks, sizeof(clocks))) | ||
641 | return false; | 665 | return false; |
642 | 666 | ||
643 | /* Convert the values from units of 10 kHz to kHz. */ | 667 | /* Convert the values from units of 10 kHz to kHz. */ |
644 | *clock_min = clocks.min * 10; | 668 | *clock_min = clocks.min * 10; |
645 | *clock_max = clocks.max * 10; | 669 | *clock_max = clocks.max * 10; |
646 | |||
647 | return true; | 670 | return true; |
648 | } | 671 | } |
649 | 672 | ||
650 | static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder, | 673 | static bool intel_sdvo_set_target_output(struct intel_sdvo *intel_sdvo, |
651 | u16 outputs) | 674 | u16 outputs) |
652 | { | 675 | { |
653 | u8 status; | 676 | return intel_sdvo_set_value(intel_sdvo, |
654 | 677 | SDVO_CMD_SET_TARGET_OUTPUT, | |
655 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, | 678 | &outputs, sizeof(outputs)); |
656 | sizeof(outputs)); | ||
657 | |||
658 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); | ||
659 | return (status == SDVO_CMD_STATUS_SUCCESS); | ||
660 | } | 679 | } |
661 | 680 | ||
662 | static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd, | 681 | static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd, |
663 | struct intel_sdvo_dtd *dtd) | 682 | struct intel_sdvo_dtd *dtd) |
664 | { | 683 | { |
665 | u8 status; | 684 | return intel_sdvo_set_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) && |
666 | 685 | intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2)); | |
667 | intel_sdvo_write_cmd(intel_encoder, cmd, &dtd->part1, sizeof(dtd->part1)); | ||
668 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); | ||
669 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
670 | return false; | ||
671 | |||
672 | intel_sdvo_write_cmd(intel_encoder, cmd + 1, &dtd->part2, sizeof(dtd->part2)); | ||
673 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); | ||
674 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
675 | return false; | ||
676 | |||
677 | return true; | ||
678 | } | 686 | } |
679 | 687 | ||
680 | static bool intel_sdvo_set_input_timing(struct intel_encoder *intel_encoder, | 688 | static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo, |
681 | struct intel_sdvo_dtd *dtd) | 689 | struct intel_sdvo_dtd *dtd) |
682 | { | 690 | { |
683 | return intel_sdvo_set_timing(intel_encoder, | 691 | return intel_sdvo_set_timing(intel_sdvo, |
684 | SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); | 692 | SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); |
685 | } | 693 | } |
686 | 694 | ||
687 | static bool intel_sdvo_set_output_timing(struct intel_encoder *intel_encoder, | 695 | static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo, |
688 | struct intel_sdvo_dtd *dtd) | 696 | struct intel_sdvo_dtd *dtd) |
689 | { | 697 | { |
690 | return intel_sdvo_set_timing(intel_encoder, | 698 | return intel_sdvo_set_timing(intel_sdvo, |
691 | SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); | 699 | SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); |
692 | } | 700 | } |
693 | 701 | ||
694 | static bool | 702 | static bool |
695 | intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder, | 703 | intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo, |
696 | uint16_t clock, | 704 | uint16_t clock, |
697 | uint16_t width, | 705 | uint16_t width, |
698 | uint16_t height) | 706 | uint16_t height) |
699 | { | 707 | { |
700 | struct intel_sdvo_preferred_input_timing_args args; | 708 | struct intel_sdvo_preferred_input_timing_args args; |
701 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
702 | uint8_t status; | ||
703 | 709 | ||
704 | memset(&args, 0, sizeof(args)); | 710 | memset(&args, 0, sizeof(args)); |
705 | args.clock = clock; | 711 | args.clock = clock; |
@@ -707,59 +713,32 @@ intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder, | |||
707 | args.height = height; | 713 | args.height = height; |
708 | args.interlace = 0; | 714 | args.interlace = 0; |
709 | 715 | ||
710 | if (sdvo_priv->is_lvds && | 716 | if (intel_sdvo->is_lvds && |
711 | (sdvo_priv->sdvo_lvds_fixed_mode->hdisplay != width || | 717 | (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width || |
712 | sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height)) | 718 | intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height)) |
713 | args.scaled = 1; | 719 | args.scaled = 1; |
714 | 720 | ||
715 | intel_sdvo_write_cmd(intel_encoder, | 721 | return intel_sdvo_set_value(intel_sdvo, |
716 | SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, | 722 | SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, |
717 | &args, sizeof(args)); | 723 | &args, sizeof(args)); |
718 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); | ||
719 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
720 | return false; | ||
721 | |||
722 | return true; | ||
723 | } | 724 | } |
724 | 725 | ||
725 | static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_encoder, | 726 | static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo, |
726 | struct intel_sdvo_dtd *dtd) | 727 | struct intel_sdvo_dtd *dtd) |
727 | { | 728 | { |
728 | bool status; | 729 | return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, |
729 | 730 | &dtd->part1, sizeof(dtd->part1)) && | |
730 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, | 731 | intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, |
731 | NULL, 0); | 732 | &dtd->part2, sizeof(dtd->part2)); |
732 | |||
733 | status = intel_sdvo_read_response(intel_encoder, &dtd->part1, | ||
734 | sizeof(dtd->part1)); | ||
735 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
736 | return false; | ||
737 | |||
738 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, | ||
739 | NULL, 0); | ||
740 | |||
741 | status = intel_sdvo_read_response(intel_encoder, &dtd->part2, | ||
742 | sizeof(dtd->part2)); | ||
743 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
744 | return false; | ||
745 | |||
746 | return false; | ||
747 | } | 733 | } |
748 | 734 | ||
749 | static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val) | 735 | static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val) |
750 | { | 736 | { |
751 | u8 status; | 737 | return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); |
752 | |||
753 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); | ||
754 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); | ||
755 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
756 | return false; | ||
757 | |||
758 | return true; | ||
759 | } | 738 | } |
760 | 739 | ||
761 | static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, | 740 | static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, |
762 | struct drm_display_mode *mode) | 741 | const struct drm_display_mode *mode) |
763 | { | 742 | { |
764 | uint16_t width, height; | 743 | uint16_t width, height; |
765 | uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; | 744 | uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; |
@@ -808,7 +787,7 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, | |||
808 | } | 787 | } |
809 | 788 | ||
810 | static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, | 789 | static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, |
811 | struct intel_sdvo_dtd *dtd) | 790 | const struct intel_sdvo_dtd *dtd) |
812 | { | 791 | { |
813 | mode->hdisplay = dtd->part1.h_active; | 792 | mode->hdisplay = dtd->part1.h_active; |
814 | mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8; | 793 | mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8; |
@@ -840,45 +819,33 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, | |||
840 | mode->flags |= DRM_MODE_FLAG_PVSYNC; | 819 | mode->flags |= DRM_MODE_FLAG_PVSYNC; |
841 | } | 820 | } |
842 | 821 | ||
843 | static bool intel_sdvo_get_supp_encode(struct intel_encoder *intel_encoder, | 822 | static bool intel_sdvo_get_supp_encode(struct intel_sdvo *intel_sdvo, |
844 | struct intel_sdvo_encode *encode) | 823 | struct intel_sdvo_encode *encode) |
845 | { | 824 | { |
846 | uint8_t status; | 825 | if (intel_sdvo_get_value(intel_sdvo, |
847 | 826 | SDVO_CMD_GET_SUPP_ENCODE, | |
848 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0); | 827 | encode, sizeof(*encode))) |
849 | status = intel_sdvo_read_response(intel_encoder, encode, sizeof(*encode)); | 828 | return true; |
850 | if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */ | ||
851 | memset(encode, 0, sizeof(*encode)); | ||
852 | return false; | ||
853 | } | ||
854 | 829 | ||
855 | return true; | 830 | /* non-support means DVI */ |
831 | memset(encode, 0, sizeof(*encode)); | ||
832 | return false; | ||
856 | } | 833 | } |
857 | 834 | ||
858 | static bool intel_sdvo_set_encode(struct intel_encoder *intel_encoder, | 835 | static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo, |
859 | uint8_t mode) | 836 | uint8_t mode) |
860 | { | 837 | { |
861 | uint8_t status; | 838 | return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1); |
862 | |||
863 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODE, &mode, 1); | ||
864 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); | ||
865 | |||
866 | return (status == SDVO_CMD_STATUS_SUCCESS); | ||
867 | } | 839 | } |
868 | 840 | ||
869 | static bool intel_sdvo_set_colorimetry(struct intel_encoder *intel_encoder, | 841 | static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo, |
870 | uint8_t mode) | 842 | uint8_t mode) |
871 | { | 843 | { |
872 | uint8_t status; | 844 | return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1); |
873 | |||
874 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_COLORIMETRY, &mode, 1); | ||
875 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); | ||
876 | |||
877 | return (status == SDVO_CMD_STATUS_SUCCESS); | ||
878 | } | 845 | } |
879 | 846 | ||
880 | #if 0 | 847 | #if 0 |
881 | static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder) | 848 | static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) |
882 | { | 849 | { |
883 | int i, j; | 850 | int i, j; |
884 | uint8_t set_buf_index[2]; | 851 | uint8_t set_buf_index[2]; |
@@ -887,8 +854,7 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder) | |||
887 | uint8_t buf[48]; | 854 | uint8_t buf[48]; |
888 | uint8_t *pos; | 855 | uint8_t *pos; |
889 | 856 | ||
890 | intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0); | 857 | intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1); |
891 | intel_sdvo_read_response(encoder, &av_split, 1); | ||
892 | 858 | ||
893 | for (i = 0; i <= av_split; i++) { | 859 | for (i = 0; i <= av_split; i++) { |
894 | set_buf_index[0] = i; set_buf_index[1] = 0; | 860 | set_buf_index[0] = i; set_buf_index[1] = 0; |
@@ -908,7 +874,7 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder) | |||
908 | } | 874 | } |
909 | #endif | 875 | #endif |
910 | 876 | ||
911 | static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder, | 877 | static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo, |
912 | int index, | 878 | int index, |
913 | uint8_t *data, int8_t size, uint8_t tx_rate) | 879 | uint8_t *data, int8_t size, uint8_t tx_rate) |
914 | { | 880 | { |
@@ -917,15 +883,18 @@ static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder, | |||
917 | set_buf_index[0] = index; | 883 | set_buf_index[0] = index; |
918 | set_buf_index[1] = 0; | 884 | set_buf_index[1] = 0; |
919 | 885 | ||
920 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX, | 886 | if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX, |
921 | set_buf_index, 2); | 887 | set_buf_index, 2)) |
888 | return false; | ||
922 | 889 | ||
923 | for (; size > 0; size -= 8) { | 890 | for (; size > 0; size -= 8) { |
924 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_DATA, data, 8); | 891 | if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8)) |
892 | return false; | ||
893 | |||
925 | data += 8; | 894 | data += 8; |
926 | } | 895 | } |
927 | 896 | ||
928 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); | 897 | return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); |
929 | } | 898 | } |
930 | 899 | ||
931 | static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size) | 900 | static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size) |
@@ -1000,7 +969,7 @@ struct dip_infoframe { | |||
1000 | } __attribute__ ((packed)) u; | 969 | } __attribute__ ((packed)) u; |
1001 | } __attribute__((packed)); | 970 | } __attribute__((packed)); |
1002 | 971 | ||
1003 | static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder, | 972 | static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, |
1004 | struct drm_display_mode * mode) | 973 | struct drm_display_mode * mode) |
1005 | { | 974 | { |
1006 | struct dip_infoframe avi_if = { | 975 | struct dip_infoframe avi_if = { |
@@ -1011,133 +980,105 @@ static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder, | |||
1011 | 980 | ||
1012 | avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if, | 981 | avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if, |
1013 | 4 + avi_if.len); | 982 | 4 + avi_if.len); |
1014 | intel_sdvo_set_hdmi_buf(intel_encoder, 1, (uint8_t *)&avi_if, | 983 | return intel_sdvo_set_hdmi_buf(intel_sdvo, 1, (uint8_t *)&avi_if, |
1015 | 4 + avi_if.len, | 984 | 4 + avi_if.len, |
1016 | SDVO_HBUF_TX_VSYNC); | 985 | SDVO_HBUF_TX_VSYNC); |
1017 | } | 986 | } |
1018 | 987 | ||
1019 | static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder) | 988 | static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo) |
1020 | { | 989 | { |
1021 | |||
1022 | struct intel_sdvo_tv_format format; | 990 | struct intel_sdvo_tv_format format; |
1023 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | 991 | uint32_t format_map; |
1024 | uint32_t format_map, i; | ||
1025 | uint8_t status; | ||
1026 | 992 | ||
1027 | for (i = 0; i < TV_FORMAT_NUM; i++) | 993 | format_map = 1 << intel_sdvo->tv_format_index; |
1028 | if (tv_format_names[i] == sdvo_priv->tv_format_name) | ||
1029 | break; | ||
1030 | |||
1031 | format_map = 1 << i; | ||
1032 | memset(&format, 0, sizeof(format)); | 994 | memset(&format, 0, sizeof(format)); |
1033 | memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ? | 995 | memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map))); |
1034 | sizeof(format) : sizeof(format_map)); | ||
1035 | |||
1036 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format, | ||
1037 | sizeof(format)); | ||
1038 | 996 | ||
1039 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); | 997 | BUILD_BUG_ON(sizeof(format) != 6); |
1040 | if (status != SDVO_CMD_STATUS_SUCCESS) | 998 | return intel_sdvo_set_value(intel_sdvo, |
1041 | DRM_DEBUG_KMS("%s: Failed to set TV format\n", | 999 | SDVO_CMD_SET_TV_FORMAT, |
1042 | SDVO_NAME(sdvo_priv)); | 1000 | &format, sizeof(format)); |
1043 | } | 1001 | } |
1044 | 1002 | ||
1045 | static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, | 1003 | static bool |
1046 | struct drm_display_mode *mode, | 1004 | intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo, |
1047 | struct drm_display_mode *adjusted_mode) | 1005 | struct drm_display_mode *mode) |
1048 | { | 1006 | { |
1049 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 1007 | struct intel_sdvo_dtd output_dtd; |
1050 | struct intel_sdvo_priv *dev_priv = intel_encoder->dev_priv; | ||
1051 | 1008 | ||
1052 | if (dev_priv->is_tv) { | 1009 | if (!intel_sdvo_set_target_output(intel_sdvo, |
1053 | struct intel_sdvo_dtd output_dtd; | 1010 | intel_sdvo->attached_output)) |
1054 | bool success; | 1011 | return false; |
1055 | 1012 | ||
1056 | /* We need to construct preferred input timings based on our | 1013 | intel_sdvo_get_dtd_from_mode(&output_dtd, mode); |
1057 | * output timings. To do that, we have to set the output | 1014 | if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd)) |
1058 | * timings, even though this isn't really the right place in | 1015 | return false; |
1059 | * the sequence to do it. Oh well. | ||
1060 | */ | ||
1061 | 1016 | ||
1017 | return true; | ||
1018 | } | ||
1062 | 1019 | ||
1063 | /* Set output timings */ | 1020 | static bool |
1064 | intel_sdvo_get_dtd_from_mode(&output_dtd, mode); | 1021 | intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo, |
1065 | intel_sdvo_set_target_output(intel_encoder, | 1022 | struct drm_display_mode *mode, |
1066 | dev_priv->attached_output); | 1023 | struct drm_display_mode *adjusted_mode) |
1067 | intel_sdvo_set_output_timing(intel_encoder, &output_dtd); | 1024 | { |
1025 | struct intel_sdvo_dtd input_dtd; | ||
1068 | 1026 | ||
1069 | /* Set the input timing to the screen. Assume always input 0. */ | 1027 | /* Reset the input timing to the screen. Assume always input 0. */ |
1070 | intel_sdvo_set_target_input(intel_encoder, true, false); | 1028 | if (!intel_sdvo_set_target_input(intel_sdvo)) |
1029 | return false; | ||
1071 | 1030 | ||
1031 | if (!intel_sdvo_create_preferred_input_timing(intel_sdvo, | ||
1032 | mode->clock / 10, | ||
1033 | mode->hdisplay, | ||
1034 | mode->vdisplay)) | ||
1035 | return false; | ||
1072 | 1036 | ||
1073 | success = intel_sdvo_create_preferred_input_timing(intel_encoder, | 1037 | if (!intel_sdvo_get_preferred_input_timing(intel_sdvo, |
1074 | mode->clock / 10, | 1038 | &input_dtd)) |
1075 | mode->hdisplay, | 1039 | return false; |
1076 | mode->vdisplay); | ||
1077 | if (success) { | ||
1078 | struct intel_sdvo_dtd input_dtd; | ||
1079 | 1040 | ||
1080 | intel_sdvo_get_preferred_input_timing(intel_encoder, | 1041 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); |
1081 | &input_dtd); | 1042 | intel_sdvo->sdvo_flags = input_dtd.part2.sdvo_flags; |
1082 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); | ||
1083 | dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; | ||
1084 | 1043 | ||
1085 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 1044 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
1045 | mode->clock = adjusted_mode->clock; | ||
1046 | return true; | ||
1047 | } | ||
1086 | 1048 | ||
1087 | mode->clock = adjusted_mode->clock; | 1049 | static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, |
1050 | struct drm_display_mode *mode, | ||
1051 | struct drm_display_mode *adjusted_mode) | ||
1052 | { | ||
1053 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); | ||
1088 | 1054 | ||
1089 | adjusted_mode->clock *= | 1055 | /* We need to construct preferred input timings based on our |
1090 | intel_sdvo_get_pixel_multiplier(mode); | 1056 | * output timings. To do that, we have to set the output |
1091 | } else { | 1057 | * timings, even though this isn't really the right place in |
1058 | * the sequence to do it. Oh well. | ||
1059 | */ | ||
1060 | if (intel_sdvo->is_tv) { | ||
1061 | if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode)) | ||
1092 | return false; | 1062 | return false; |
1093 | } | ||
1094 | } else if (dev_priv->is_lvds) { | ||
1095 | struct intel_sdvo_dtd output_dtd; | ||
1096 | bool success; | ||
1097 | |||
1098 | drm_mode_set_crtcinfo(dev_priv->sdvo_lvds_fixed_mode, 0); | ||
1099 | /* Set output timings */ | ||
1100 | intel_sdvo_get_dtd_from_mode(&output_dtd, | ||
1101 | dev_priv->sdvo_lvds_fixed_mode); | ||
1102 | |||
1103 | intel_sdvo_set_target_output(intel_encoder, | ||
1104 | dev_priv->attached_output); | ||
1105 | intel_sdvo_set_output_timing(intel_encoder, &output_dtd); | ||
1106 | |||
1107 | /* Set the input timing to the screen. Assume always input 0. */ | ||
1108 | intel_sdvo_set_target_input(intel_encoder, true, false); | ||
1109 | |||
1110 | |||
1111 | success = intel_sdvo_create_preferred_input_timing( | ||
1112 | intel_encoder, | ||
1113 | mode->clock / 10, | ||
1114 | mode->hdisplay, | ||
1115 | mode->vdisplay); | ||
1116 | 1063 | ||
1117 | if (success) { | 1064 | if (!intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode)) |
1118 | struct intel_sdvo_dtd input_dtd; | 1065 | return false; |
1119 | 1066 | } else if (intel_sdvo->is_lvds) { | |
1120 | intel_sdvo_get_preferred_input_timing(intel_encoder, | 1067 | drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0); |
1121 | &input_dtd); | ||
1122 | intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); | ||
1123 | dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; | ||
1124 | |||
1125 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
1126 | |||
1127 | mode->clock = adjusted_mode->clock; | ||
1128 | 1068 | ||
1129 | adjusted_mode->clock *= | 1069 | if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, |
1130 | intel_sdvo_get_pixel_multiplier(mode); | 1070 | intel_sdvo->sdvo_lvds_fixed_mode)) |
1131 | } else { | ||
1132 | return false; | 1071 | return false; |
1133 | } | ||
1134 | 1072 | ||
1135 | } else { | 1073 | if (!intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode)) |
1136 | /* Make the CRTC code factor in the SDVO pixel multiplier. The | 1074 | return false; |
1137 | * SDVO device will be told of the multiplier during mode_set. | ||
1138 | */ | ||
1139 | adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode); | ||
1140 | } | 1075 | } |
1076 | |||
1077 | /* Make the CRTC code factor in the SDVO pixel multiplier. The | ||
1078 | * SDVO device will be told of the multiplier during mode_set. | ||
1079 | */ | ||
1080 | adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode); | ||
1081 | |||
1141 | return true; | 1082 | return true; |
1142 | } | 1083 | } |
1143 | 1084 | ||
@@ -1149,13 +1090,11 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1149 | struct drm_i915_private *dev_priv = dev->dev_private; | 1090 | struct drm_i915_private *dev_priv = dev->dev_private; |
1150 | struct drm_crtc *crtc = encoder->crtc; | 1091 | struct drm_crtc *crtc = encoder->crtc; |
1151 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1092 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1152 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 1093 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); |
1153 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
1154 | u32 sdvox = 0; | 1094 | u32 sdvox = 0; |
1155 | int sdvo_pixel_multiply; | 1095 | int sdvo_pixel_multiply, rate; |
1156 | struct intel_sdvo_in_out_map in_out; | 1096 | struct intel_sdvo_in_out_map in_out; |
1157 | struct intel_sdvo_dtd input_dtd; | 1097 | struct intel_sdvo_dtd input_dtd; |
1158 | u8 status; | ||
1159 | 1098 | ||
1160 | if (!mode) | 1099 | if (!mode) |
1161 | return; | 1100 | return; |
@@ -1166,41 +1105,50 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1166 | * channel on the motherboard. In a two-input device, the first input | 1105 | * channel on the motherboard. In a two-input device, the first input |
1167 | * will be SDVOB and the second SDVOC. | 1106 | * will be SDVOB and the second SDVOC. |
1168 | */ | 1107 | */ |
1169 | in_out.in0 = sdvo_priv->attached_output; | 1108 | in_out.in0 = intel_sdvo->attached_output; |
1170 | in_out.in1 = 0; | 1109 | in_out.in1 = 0; |
1171 | 1110 | ||
1172 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP, | 1111 | if (!intel_sdvo_set_value(intel_sdvo, |
1173 | &in_out, sizeof(in_out)); | 1112 | SDVO_CMD_SET_IN_OUT_MAP, |
1174 | status = intel_sdvo_read_response(intel_encoder, NULL, 0); | 1113 | &in_out, sizeof(in_out))) |
1114 | return; | ||
1115 | |||
1116 | if (intel_sdvo->is_hdmi) { | ||
1117 | if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode)) | ||
1118 | return; | ||
1175 | 1119 | ||
1176 | if (sdvo_priv->is_hdmi) { | ||
1177 | intel_sdvo_set_avi_infoframe(intel_encoder, mode); | ||
1178 | sdvox |= SDVO_AUDIO_ENABLE; | 1120 | sdvox |= SDVO_AUDIO_ENABLE; |
1179 | } | 1121 | } |
1180 | 1122 | ||
1181 | /* We have tried to get input timing in mode_fixup, and filled into | 1123 | /* We have tried to get input timing in mode_fixup, and filled into |
1182 | adjusted_mode */ | 1124 | adjusted_mode */ |
1183 | if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { | 1125 | if (intel_sdvo->is_tv || intel_sdvo->is_lvds) { |
1184 | intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); | 1126 | intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); |
1185 | input_dtd.part2.sdvo_flags = sdvo_priv->sdvo_flags; | 1127 | input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags; |
1186 | } else | 1128 | } else |
1187 | intel_sdvo_get_dtd_from_mode(&input_dtd, mode); | 1129 | intel_sdvo_get_dtd_from_mode(&input_dtd, mode); |
1188 | 1130 | ||
1189 | /* If it's a TV, we already set the output timing in mode_fixup. | 1131 | /* If it's a TV, we already set the output timing in mode_fixup. |
1190 | * Otherwise, the output timing is equal to the input timing. | 1132 | * Otherwise, the output timing is equal to the input timing. |
1191 | */ | 1133 | */ |
1192 | if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { | 1134 | if (!intel_sdvo->is_tv && !intel_sdvo->is_lvds) { |
1193 | /* Set the output timing to the screen */ | 1135 | /* Set the output timing to the screen */ |
1194 | intel_sdvo_set_target_output(intel_encoder, | 1136 | if (!intel_sdvo_set_target_output(intel_sdvo, |
1195 | sdvo_priv->attached_output); | 1137 | intel_sdvo->attached_output)) |
1196 | intel_sdvo_set_output_timing(intel_encoder, &input_dtd); | 1138 | return; |
1139 | |||
1140 | if (!intel_sdvo_set_output_timing(intel_sdvo, &input_dtd)) | ||
1141 | return; | ||
1197 | } | 1142 | } |
1198 | 1143 | ||
1199 | /* Set the input timing to the screen. Assume always input 0. */ | 1144 | /* Set the input timing to the screen. Assume always input 0. */ |
1200 | intel_sdvo_set_target_input(intel_encoder, true, false); | 1145 | if (!intel_sdvo_set_target_input(intel_sdvo)) |
1146 | return; | ||
1201 | 1147 | ||
1202 | if (sdvo_priv->is_tv) | 1148 | if (intel_sdvo->is_tv) { |
1203 | intel_sdvo_set_tv_format(intel_encoder); | 1149 | if (!intel_sdvo_set_tv_format(intel_sdvo)) |
1150 | return; | ||
1151 | } | ||
1204 | 1152 | ||
1205 | /* We would like to use intel_sdvo_create_preferred_input_timing() to | 1153 | /* We would like to use intel_sdvo_create_preferred_input_timing() to |
1206 | * provide the device with a timing it can support, if it supports that | 1154 | * provide the device with a timing it can support, if it supports that |
@@ -1217,23 +1165,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1217 | intel_sdvo_set_input_timing(encoder, &input_dtd); | 1165 | intel_sdvo_set_input_timing(encoder, &input_dtd); |
1218 | } | 1166 | } |
1219 | #else | 1167 | #else |
1220 | intel_sdvo_set_input_timing(intel_encoder, &input_dtd); | 1168 | if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd)) |
1169 | return; | ||
1221 | #endif | 1170 | #endif |
1222 | 1171 | ||
1223 | switch (intel_sdvo_get_pixel_multiplier(mode)) { | 1172 | sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode); |
1224 | case 1: | 1173 | switch (sdvo_pixel_multiply) { |
1225 | intel_sdvo_set_clock_rate_mult(intel_encoder, | 1174 | case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; |
1226 | SDVO_CLOCK_RATE_MULT_1X); | 1175 | case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; |
1227 | break; | 1176 | case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; |
1228 | case 2: | ||
1229 | intel_sdvo_set_clock_rate_mult(intel_encoder, | ||
1230 | SDVO_CLOCK_RATE_MULT_2X); | ||
1231 | break; | ||
1232 | case 4: | ||
1233 | intel_sdvo_set_clock_rate_mult(intel_encoder, | ||
1234 | SDVO_CLOCK_RATE_MULT_4X); | ||
1235 | break; | ||
1236 | } | 1177 | } |
1178 | if (!intel_sdvo_set_clock_rate_mult(intel_sdvo, rate)) | ||
1179 | return; | ||
1237 | 1180 | ||
1238 | /* Set the SDVO control regs. */ | 1181 | /* Set the SDVO control regs. */ |
1239 | if (IS_I965G(dev)) { | 1182 | if (IS_I965G(dev)) { |
@@ -1243,8 +1186,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1243 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) | 1186 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
1244 | sdvox |= SDVO_HSYNC_ACTIVE_HIGH; | 1187 | sdvox |= SDVO_HSYNC_ACTIVE_HIGH; |
1245 | } else { | 1188 | } else { |
1246 | sdvox |= I915_READ(sdvo_priv->sdvo_reg); | 1189 | sdvox |= I915_READ(intel_sdvo->sdvo_reg); |
1247 | switch (sdvo_priv->sdvo_reg) { | 1190 | switch (intel_sdvo->sdvo_reg) { |
1248 | case SDVOB: | 1191 | case SDVOB: |
1249 | sdvox &= SDVOB_PRESERVE_MASK; | 1192 | sdvox &= SDVOB_PRESERVE_MASK; |
1250 | break; | 1193 | break; |
@@ -1257,7 +1200,6 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1257 | if (intel_crtc->pipe == 1) | 1200 | if (intel_crtc->pipe == 1) |
1258 | sdvox |= SDVO_PIPE_B_SELECT; | 1201 | sdvox |= SDVO_PIPE_B_SELECT; |
1259 | 1202 | ||
1260 | sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode); | ||
1261 | if (IS_I965G(dev)) { | 1203 | if (IS_I965G(dev)) { |
1262 | /* done in crtc_mode_set as the dpll_md reg must be written early */ | 1204 | /* done in crtc_mode_set as the dpll_md reg must be written early */ |
1263 | } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { | 1205 | } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { |
@@ -1266,28 +1208,28 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1266 | sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT; | 1208 | sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT; |
1267 | } | 1209 | } |
1268 | 1210 | ||
1269 | if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL) | 1211 | if (intel_sdvo->sdvo_flags & SDVO_NEED_TO_STALL) |
1270 | sdvox |= SDVO_STALL_SELECT; | 1212 | sdvox |= SDVO_STALL_SELECT; |
1271 | intel_sdvo_write_sdvox(intel_encoder, sdvox); | 1213 | intel_sdvo_write_sdvox(intel_sdvo, sdvox); |
1272 | } | 1214 | } |
1273 | 1215 | ||
1274 | static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) | 1216 | static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) |
1275 | { | 1217 | { |
1276 | struct drm_device *dev = encoder->dev; | 1218 | struct drm_device *dev = encoder->dev; |
1277 | struct drm_i915_private *dev_priv = dev->dev_private; | 1219 | struct drm_i915_private *dev_priv = dev->dev_private; |
1278 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 1220 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); |
1279 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | 1221 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
1280 | u32 temp; | 1222 | u32 temp; |
1281 | 1223 | ||
1282 | if (mode != DRM_MODE_DPMS_ON) { | 1224 | if (mode != DRM_MODE_DPMS_ON) { |
1283 | intel_sdvo_set_active_outputs(intel_encoder, 0); | 1225 | intel_sdvo_set_active_outputs(intel_sdvo, 0); |
1284 | if (0) | 1226 | if (0) |
1285 | intel_sdvo_set_encoder_power_state(intel_encoder, mode); | 1227 | intel_sdvo_set_encoder_power_state(intel_sdvo, mode); |
1286 | 1228 | ||
1287 | if (mode == DRM_MODE_DPMS_OFF) { | 1229 | if (mode == DRM_MODE_DPMS_OFF) { |
1288 | temp = I915_READ(sdvo_priv->sdvo_reg); | 1230 | temp = I915_READ(intel_sdvo->sdvo_reg); |
1289 | if ((temp & SDVO_ENABLE) != 0) { | 1231 | if ((temp & SDVO_ENABLE) != 0) { |
1290 | intel_sdvo_write_sdvox(intel_encoder, temp & ~SDVO_ENABLE); | 1232 | intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE); |
1291 | } | 1233 | } |
1292 | } | 1234 | } |
1293 | } else { | 1235 | } else { |
@@ -1295,28 +1237,25 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) | |||
1295 | int i; | 1237 | int i; |
1296 | u8 status; | 1238 | u8 status; |
1297 | 1239 | ||
1298 | temp = I915_READ(sdvo_priv->sdvo_reg); | 1240 | temp = I915_READ(intel_sdvo->sdvo_reg); |
1299 | if ((temp & SDVO_ENABLE) == 0) | 1241 | if ((temp & SDVO_ENABLE) == 0) |
1300 | intel_sdvo_write_sdvox(intel_encoder, temp | SDVO_ENABLE); | 1242 | intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE); |
1301 | for (i = 0; i < 2; i++) | 1243 | for (i = 0; i < 2; i++) |
1302 | intel_wait_for_vblank(dev); | 1244 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
1303 | |||
1304 | status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, | ||
1305 | &input2); | ||
1306 | |||
1307 | 1245 | ||
1246 | status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); | ||
1308 | /* Warn if the device reported failure to sync. | 1247 | /* Warn if the device reported failure to sync. |
1309 | * A lot of SDVO devices fail to notify of sync, but it's | 1248 | * A lot of SDVO devices fail to notify of sync, but it's |
1310 | * a given it the status is a success, we succeeded. | 1249 | * a given it the status is a success, we succeeded. |
1311 | */ | 1250 | */ |
1312 | if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { | 1251 | if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { |
1313 | DRM_DEBUG_KMS("First %s output reported failure to " | 1252 | DRM_DEBUG_KMS("First %s output reported failure to " |
1314 | "sync\n", SDVO_NAME(sdvo_priv)); | 1253 | "sync\n", SDVO_NAME(intel_sdvo)); |
1315 | } | 1254 | } |
1316 | 1255 | ||
1317 | if (0) | 1256 | if (0) |
1318 | intel_sdvo_set_encoder_power_state(intel_encoder, mode); | 1257 | intel_sdvo_set_encoder_power_state(intel_sdvo, mode); |
1319 | intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->attached_output); | 1258 | intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output); |
1320 | } | 1259 | } |
1321 | return; | 1260 | return; |
1322 | } | 1261 | } |
@@ -1325,42 +1264,31 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector, | |||
1325 | struct drm_display_mode *mode) | 1264 | struct drm_display_mode *mode) |
1326 | { | 1265 | { |
1327 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1266 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1328 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 1267 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); |
1329 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
1330 | 1268 | ||
1331 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | 1269 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
1332 | return MODE_NO_DBLESCAN; | 1270 | return MODE_NO_DBLESCAN; |
1333 | 1271 | ||
1334 | if (sdvo_priv->pixel_clock_min > mode->clock) | 1272 | if (intel_sdvo->pixel_clock_min > mode->clock) |
1335 | return MODE_CLOCK_LOW; | 1273 | return MODE_CLOCK_LOW; |
1336 | 1274 | ||
1337 | if (sdvo_priv->pixel_clock_max < mode->clock) | 1275 | if (intel_sdvo->pixel_clock_max < mode->clock) |
1338 | return MODE_CLOCK_HIGH; | 1276 | return MODE_CLOCK_HIGH; |
1339 | 1277 | ||
1340 | if (sdvo_priv->is_lvds == true) { | 1278 | if (intel_sdvo->is_lvds) { |
1341 | if (sdvo_priv->sdvo_lvds_fixed_mode == NULL) | 1279 | if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay) |
1342 | return MODE_PANEL; | 1280 | return MODE_PANEL; |
1343 | 1281 | ||
1344 | if (mode->hdisplay > sdvo_priv->sdvo_lvds_fixed_mode->hdisplay) | 1282 | if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay) |
1345 | return MODE_PANEL; | ||
1346 | |||
1347 | if (mode->vdisplay > sdvo_priv->sdvo_lvds_fixed_mode->vdisplay) | ||
1348 | return MODE_PANEL; | 1283 | return MODE_PANEL; |
1349 | } | 1284 | } |
1350 | 1285 | ||
1351 | return MODE_OK; | 1286 | return MODE_OK; |
1352 | } | 1287 | } |
1353 | 1288 | ||
1354 | static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, struct intel_sdvo_caps *caps) | 1289 | static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps) |
1355 | { | 1290 | { |
1356 | u8 status; | 1291 | return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps)); |
1357 | |||
1358 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0); | ||
1359 | status = intel_sdvo_read_response(intel_encoder, caps, sizeof(*caps)); | ||
1360 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
1361 | return false; | ||
1362 | |||
1363 | return true; | ||
1364 | } | 1292 | } |
1365 | 1293 | ||
1366 | /* No use! */ | 1294 | /* No use! */ |
@@ -1368,12 +1296,12 @@ static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, str | |||
1368 | struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) | 1296 | struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) |
1369 | { | 1297 | { |
1370 | struct drm_connector *connector = NULL; | 1298 | struct drm_connector *connector = NULL; |
1371 | struct intel_encoder *iout = NULL; | 1299 | struct intel_sdvo *iout = NULL; |
1372 | struct intel_sdvo_priv *sdvo; | 1300 | struct intel_sdvo *sdvo; |
1373 | 1301 | ||
1374 | /* find the sdvo connector */ | 1302 | /* find the sdvo connector */ |
1375 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 1303 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
1376 | iout = to_intel_encoder(connector); | 1304 | iout = to_intel_sdvo(connector); |
1377 | 1305 | ||
1378 | if (iout->type != INTEL_OUTPUT_SDVO) | 1306 | if (iout->type != INTEL_OUTPUT_SDVO) |
1379 | continue; | 1307 | continue; |
@@ -1395,75 +1323,69 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector) | |||
1395 | { | 1323 | { |
1396 | u8 response[2]; | 1324 | u8 response[2]; |
1397 | u8 status; | 1325 | u8 status; |
1398 | struct intel_encoder *intel_encoder; | 1326 | struct intel_sdvo *intel_sdvo; |
1399 | DRM_DEBUG_KMS("\n"); | 1327 | DRM_DEBUG_KMS("\n"); |
1400 | 1328 | ||
1401 | if (!connector) | 1329 | if (!connector) |
1402 | return 0; | 1330 | return 0; |
1403 | 1331 | ||
1404 | intel_encoder = to_intel_encoder(connector); | 1332 | intel_sdvo = to_intel_sdvo(connector); |
1405 | |||
1406 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); | ||
1407 | status = intel_sdvo_read_response(intel_encoder, &response, 2); | ||
1408 | |||
1409 | if (response[0] !=0) | ||
1410 | return 1; | ||
1411 | 1333 | ||
1412 | return 0; | 1334 | return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, |
1335 | &response, 2) && response[0]; | ||
1413 | } | 1336 | } |
1414 | 1337 | ||
1415 | void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) | 1338 | void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) |
1416 | { | 1339 | { |
1417 | u8 response[2]; | 1340 | u8 response[2]; |
1418 | u8 status; | 1341 | u8 status; |
1419 | struct intel_encoder *intel_encoder = to_intel_encoder(connector); | 1342 | struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector); |
1420 | 1343 | ||
1421 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); | 1344 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); |
1422 | intel_sdvo_read_response(intel_encoder, &response, 2); | 1345 | intel_sdvo_read_response(intel_sdvo, &response, 2); |
1423 | 1346 | ||
1424 | if (on) { | 1347 | if (on) { |
1425 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); | 1348 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); |
1426 | status = intel_sdvo_read_response(intel_encoder, &response, 2); | 1349 | status = intel_sdvo_read_response(intel_sdvo, &response, 2); |
1427 | 1350 | ||
1428 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); | 1351 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); |
1429 | } else { | 1352 | } else { |
1430 | response[0] = 0; | 1353 | response[0] = 0; |
1431 | response[1] = 0; | 1354 | response[1] = 0; |
1432 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); | 1355 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); |
1433 | } | 1356 | } |
1434 | 1357 | ||
1435 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); | 1358 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); |
1436 | intel_sdvo_read_response(intel_encoder, &response, 2); | 1359 | intel_sdvo_read_response(intel_sdvo, &response, 2); |
1437 | } | 1360 | } |
1438 | #endif | 1361 | #endif |
1439 | 1362 | ||
1440 | static bool | 1363 | static bool |
1441 | intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder) | 1364 | intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) |
1442 | { | 1365 | { |
1443 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
1444 | int caps = 0; | 1366 | int caps = 0; |
1445 | 1367 | ||
1446 | if (sdvo_priv->caps.output_flags & | 1368 | if (intel_sdvo->caps.output_flags & |
1447 | (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) | 1369 | (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) |
1448 | caps++; | 1370 | caps++; |
1449 | if (sdvo_priv->caps.output_flags & | 1371 | if (intel_sdvo->caps.output_flags & |
1450 | (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)) | 1372 | (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)) |
1451 | caps++; | 1373 | caps++; |
1452 | if (sdvo_priv->caps.output_flags & | 1374 | if (intel_sdvo->caps.output_flags & |
1453 | (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1)) | 1375 | (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1)) |
1454 | caps++; | 1376 | caps++; |
1455 | if (sdvo_priv->caps.output_flags & | 1377 | if (intel_sdvo->caps.output_flags & |
1456 | (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1)) | 1378 | (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1)) |
1457 | caps++; | 1379 | caps++; |
1458 | if (sdvo_priv->caps.output_flags & | 1380 | if (intel_sdvo->caps.output_flags & |
1459 | (SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1)) | 1381 | (SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1)) |
1460 | caps++; | 1382 | caps++; |
1461 | 1383 | ||
1462 | if (sdvo_priv->caps.output_flags & | 1384 | if (intel_sdvo->caps.output_flags & |
1463 | (SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1)) | 1385 | (SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1)) |
1464 | caps++; | 1386 | caps++; |
1465 | 1387 | ||
1466 | if (sdvo_priv->caps.output_flags & | 1388 | if (intel_sdvo->caps.output_flags & |
1467 | (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)) | 1389 | (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)) |
1468 | caps++; | 1390 | caps++; |
1469 | 1391 | ||
@@ -1475,11 +1397,11 @@ intel_find_analog_connector(struct drm_device *dev) | |||
1475 | { | 1397 | { |
1476 | struct drm_connector *connector; | 1398 | struct drm_connector *connector; |
1477 | struct drm_encoder *encoder; | 1399 | struct drm_encoder *encoder; |
1478 | struct intel_encoder *intel_encoder; | 1400 | struct intel_sdvo *intel_sdvo; |
1479 | 1401 | ||
1480 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 1402 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
1481 | intel_encoder = enc_to_intel_encoder(encoder); | 1403 | intel_sdvo = enc_to_intel_sdvo(encoder); |
1482 | if (intel_encoder->type == INTEL_OUTPUT_ANALOG) { | 1404 | if (intel_sdvo->base.type == INTEL_OUTPUT_ANALOG) { |
1483 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 1405 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
1484 | if (encoder == intel_attached_encoder(connector)) | 1406 | if (encoder == intel_attached_encoder(connector)) |
1485 | return connector; | 1407 | return connector; |
@@ -1493,8 +1415,8 @@ static int | |||
1493 | intel_analog_is_connected(struct drm_device *dev) | 1415 | intel_analog_is_connected(struct drm_device *dev) |
1494 | { | 1416 | { |
1495 | struct drm_connector *analog_connector; | 1417 | struct drm_connector *analog_connector; |
1496 | analog_connector = intel_find_analog_connector(dev); | ||
1497 | 1418 | ||
1419 | analog_connector = intel_find_analog_connector(dev); | ||
1498 | if (!analog_connector) | 1420 | if (!analog_connector) |
1499 | return false; | 1421 | return false; |
1500 | 1422 | ||
@@ -1509,54 +1431,52 @@ enum drm_connector_status | |||
1509 | intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) | 1431 | intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) |
1510 | { | 1432 | { |
1511 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1433 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1512 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 1434 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); |
1513 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | 1435 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); |
1514 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
1515 | struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; | ||
1516 | enum drm_connector_status status = connector_status_connected; | 1436 | enum drm_connector_status status = connector_status_connected; |
1517 | struct edid *edid = NULL; | 1437 | struct edid *edid = NULL; |
1518 | 1438 | ||
1519 | edid = drm_get_edid(connector, intel_encoder->ddc_bus); | 1439 | edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus); |
1520 | 1440 | ||
1521 | /* This is only applied to SDVO cards with multiple outputs */ | 1441 | /* This is only applied to SDVO cards with multiple outputs */ |
1522 | if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) { | 1442 | if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) { |
1523 | uint8_t saved_ddc, temp_ddc; | 1443 | uint8_t saved_ddc, temp_ddc; |
1524 | saved_ddc = sdvo_priv->ddc_bus; | 1444 | saved_ddc = intel_sdvo->ddc_bus; |
1525 | temp_ddc = sdvo_priv->ddc_bus >> 1; | 1445 | temp_ddc = intel_sdvo->ddc_bus >> 1; |
1526 | /* | 1446 | /* |
1527 | * Don't use the 1 as the argument of DDC bus switch to get | 1447 | * Don't use the 1 as the argument of DDC bus switch to get |
1528 | * the EDID. It is used for SDVO SPD ROM. | 1448 | * the EDID. It is used for SDVO SPD ROM. |
1529 | */ | 1449 | */ |
1530 | while(temp_ddc > 1) { | 1450 | while(temp_ddc > 1) { |
1531 | sdvo_priv->ddc_bus = temp_ddc; | 1451 | intel_sdvo->ddc_bus = temp_ddc; |
1532 | edid = drm_get_edid(connector, intel_encoder->ddc_bus); | 1452 | edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus); |
1533 | if (edid) { | 1453 | if (edid) { |
1534 | /* | 1454 | /* |
1535 | * When we can get the EDID, maybe it is the | 1455 | * When we can get the EDID, maybe it is the |
1536 | * correct DDC bus. Update it. | 1456 | * correct DDC bus. Update it. |
1537 | */ | 1457 | */ |
1538 | sdvo_priv->ddc_bus = temp_ddc; | 1458 | intel_sdvo->ddc_bus = temp_ddc; |
1539 | break; | 1459 | break; |
1540 | } | 1460 | } |
1541 | temp_ddc >>= 1; | 1461 | temp_ddc >>= 1; |
1542 | } | 1462 | } |
1543 | if (edid == NULL) | 1463 | if (edid == NULL) |
1544 | sdvo_priv->ddc_bus = saved_ddc; | 1464 | intel_sdvo->ddc_bus = saved_ddc; |
1545 | } | 1465 | } |
1546 | /* when there is no edid and no monitor is connected with VGA | 1466 | /* when there is no edid and no monitor is connected with VGA |
1547 | * port, try to use the CRT ddc to read the EDID for DVI-connector | 1467 | * port, try to use the CRT ddc to read the EDID for DVI-connector |
1548 | */ | 1468 | */ |
1549 | if (edid == NULL && sdvo_priv->analog_ddc_bus && | 1469 | if (edid == NULL && intel_sdvo->analog_ddc_bus && |
1550 | !intel_analog_is_connected(connector->dev)) | 1470 | !intel_analog_is_connected(connector->dev)) |
1551 | edid = drm_get_edid(connector, sdvo_priv->analog_ddc_bus); | 1471 | edid = drm_get_edid(connector, intel_sdvo->analog_ddc_bus); |
1552 | 1472 | ||
1553 | if (edid != NULL) { | 1473 | if (edid != NULL) { |
1554 | bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); | 1474 | bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); |
1555 | bool need_digital = !!(sdvo_connector->output_flag & SDVO_TMDS_MASK); | 1475 | bool need_digital = !!(intel_sdvo_connector->output_flag & SDVO_TMDS_MASK); |
1556 | 1476 | ||
1557 | /* DDC bus is shared, match EDID to connector type */ | 1477 | /* DDC bus is shared, match EDID to connector type */ |
1558 | if (is_digital && need_digital) | 1478 | if (is_digital && need_digital) |
1559 | sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid); | 1479 | intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid); |
1560 | else if (is_digital != need_digital) | 1480 | else if (is_digital != need_digital) |
1561 | status = connector_status_disconnected; | 1481 | status = connector_status_disconnected; |
1562 | 1482 | ||
@@ -1572,33 +1492,29 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) | |||
1572 | static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) | 1492 | static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) |
1573 | { | 1493 | { |
1574 | uint16_t response; | 1494 | uint16_t response; |
1575 | u8 status; | ||
1576 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1495 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1577 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 1496 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); |
1578 | struct intel_connector *intel_connector = to_intel_connector(connector); | 1497 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); |
1579 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
1580 | struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; | ||
1581 | enum drm_connector_status ret; | 1498 | enum drm_connector_status ret; |
1582 | 1499 | ||
1583 | intel_sdvo_write_cmd(intel_encoder, | 1500 | if (!intel_sdvo_write_cmd(intel_sdvo, |
1584 | SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); | 1501 | SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) |
1585 | if (sdvo_priv->is_tv) { | 1502 | return connector_status_unknown; |
1503 | if (intel_sdvo->is_tv) { | ||
1586 | /* add 30ms delay when the output type is SDVO-TV */ | 1504 | /* add 30ms delay when the output type is SDVO-TV */ |
1587 | mdelay(30); | 1505 | mdelay(30); |
1588 | } | 1506 | } |
1589 | status = intel_sdvo_read_response(intel_encoder, &response, 2); | 1507 | if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) |
1508 | return connector_status_unknown; | ||
1590 | 1509 | ||
1591 | DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); | 1510 | DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); |
1592 | 1511 | ||
1593 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
1594 | return connector_status_unknown; | ||
1595 | |||
1596 | if (response == 0) | 1512 | if (response == 0) |
1597 | return connector_status_disconnected; | 1513 | return connector_status_disconnected; |
1598 | 1514 | ||
1599 | sdvo_priv->attached_output = response; | 1515 | intel_sdvo->attached_output = response; |
1600 | 1516 | ||
1601 | if ((sdvo_connector->output_flag & response) == 0) | 1517 | if ((intel_sdvo_connector->output_flag & response) == 0) |
1602 | ret = connector_status_disconnected; | 1518 | ret = connector_status_disconnected; |
1603 | else if (response & SDVO_TMDS_MASK) | 1519 | else if (response & SDVO_TMDS_MASK) |
1604 | ret = intel_sdvo_hdmi_sink_detect(connector); | 1520 | ret = intel_sdvo_hdmi_sink_detect(connector); |
@@ -1607,16 +1523,16 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect | |||
1607 | 1523 | ||
1608 | /* May update encoder flag for like clock for SDVO TV, etc.*/ | 1524 | /* May update encoder flag for like clock for SDVO TV, etc.*/ |
1609 | if (ret == connector_status_connected) { | 1525 | if (ret == connector_status_connected) { |
1610 | sdvo_priv->is_tv = false; | 1526 | intel_sdvo->is_tv = false; |
1611 | sdvo_priv->is_lvds = false; | 1527 | intel_sdvo->is_lvds = false; |
1612 | intel_encoder->needs_tv_clock = false; | 1528 | intel_sdvo->base.needs_tv_clock = false; |
1613 | 1529 | ||
1614 | if (response & SDVO_TV_MASK) { | 1530 | if (response & SDVO_TV_MASK) { |
1615 | sdvo_priv->is_tv = true; | 1531 | intel_sdvo->is_tv = true; |
1616 | intel_encoder->needs_tv_clock = true; | 1532 | intel_sdvo->base.needs_tv_clock = true; |
1617 | } | 1533 | } |
1618 | if (response & SDVO_LVDS_MASK) | 1534 | if (response & SDVO_LVDS_MASK) |
1619 | sdvo_priv->is_lvds = true; | 1535 | intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL; |
1620 | } | 1536 | } |
1621 | 1537 | ||
1622 | return ret; | 1538 | return ret; |
@@ -1625,12 +1541,11 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect | |||
1625 | static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) | 1541 | static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) |
1626 | { | 1542 | { |
1627 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1543 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1628 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 1544 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); |
1629 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
1630 | int num_modes; | 1545 | int num_modes; |
1631 | 1546 | ||
1632 | /* set the bus switch and get the modes */ | 1547 | /* set the bus switch and get the modes */ |
1633 | num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); | 1548 | num_modes = intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus); |
1634 | 1549 | ||
1635 | /* | 1550 | /* |
1636 | * Mac mini hack. On this device, the DVI-I connector shares one DDC | 1551 | * Mac mini hack. On this device, the DVI-I connector shares one DDC |
@@ -1639,11 +1554,11 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) | |||
1639 | * which case we'll look there for the digital DDC data. | 1554 | * which case we'll look there for the digital DDC data. |
1640 | */ | 1555 | */ |
1641 | if (num_modes == 0 && | 1556 | if (num_modes == 0 && |
1642 | sdvo_priv->analog_ddc_bus && | 1557 | intel_sdvo->analog_ddc_bus && |
1643 | !intel_analog_is_connected(connector->dev)) { | 1558 | !intel_analog_is_connected(connector->dev)) { |
1644 | /* Switch to the analog ddc bus and try that | 1559 | /* Switch to the analog ddc bus and try that |
1645 | */ | 1560 | */ |
1646 | (void) intel_ddc_get_modes(connector, sdvo_priv->analog_ddc_bus); | 1561 | (void) intel_ddc_get_modes(connector, intel_sdvo->analog_ddc_bus); |
1647 | } | 1562 | } |
1648 | } | 1563 | } |
1649 | 1564 | ||
@@ -1715,52 +1630,43 @@ struct drm_display_mode sdvo_tv_modes[] = { | |||
1715 | static void intel_sdvo_get_tv_modes(struct drm_connector *connector) | 1630 | static void intel_sdvo_get_tv_modes(struct drm_connector *connector) |
1716 | { | 1631 | { |
1717 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1632 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1718 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 1633 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); |
1719 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
1720 | struct intel_sdvo_sdtv_resolution_request tv_res; | 1634 | struct intel_sdvo_sdtv_resolution_request tv_res; |
1721 | uint32_t reply = 0, format_map = 0; | 1635 | uint32_t reply = 0, format_map = 0; |
1722 | int i; | 1636 | int i; |
1723 | uint8_t status; | ||
1724 | |||
1725 | 1637 | ||
1726 | /* Read the list of supported input resolutions for the selected TV | 1638 | /* Read the list of supported input resolutions for the selected TV |
1727 | * format. | 1639 | * format. |
1728 | */ | 1640 | */ |
1729 | for (i = 0; i < TV_FORMAT_NUM; i++) | 1641 | format_map = 1 << intel_sdvo->tv_format_index; |
1730 | if (tv_format_names[i] == sdvo_priv->tv_format_name) | ||
1731 | break; | ||
1732 | |||
1733 | format_map = (1 << i); | ||
1734 | memcpy(&tv_res, &format_map, | 1642 | memcpy(&tv_res, &format_map, |
1735 | sizeof(struct intel_sdvo_sdtv_resolution_request) > | 1643 | min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request))); |
1736 | sizeof(format_map) ? sizeof(format_map) : | ||
1737 | sizeof(struct intel_sdvo_sdtv_resolution_request)); | ||
1738 | 1644 | ||
1739 | intel_sdvo_set_target_output(intel_encoder, sdvo_priv->attached_output); | 1645 | if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output)) |
1646 | return; | ||
1740 | 1647 | ||
1741 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, | 1648 | BUILD_BUG_ON(sizeof(tv_res) != 3); |
1742 | &tv_res, sizeof(tv_res)); | 1649 | if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, |
1743 | status = intel_sdvo_read_response(intel_encoder, &reply, 3); | 1650 | &tv_res, sizeof(tv_res))) |
1744 | if (status != SDVO_CMD_STATUS_SUCCESS) | 1651 | return; |
1652 | if (!intel_sdvo_read_response(intel_sdvo, &reply, 3)) | ||
1745 | return; | 1653 | return; |
1746 | 1654 | ||
1747 | for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++) | 1655 | for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++) |
1748 | if (reply & (1 << i)) { | 1656 | if (reply & (1 << i)) { |
1749 | struct drm_display_mode *nmode; | 1657 | struct drm_display_mode *nmode; |
1750 | nmode = drm_mode_duplicate(connector->dev, | 1658 | nmode = drm_mode_duplicate(connector->dev, |
1751 | &sdvo_tv_modes[i]); | 1659 | &sdvo_tv_modes[i]); |
1752 | if (nmode) | 1660 | if (nmode) |
1753 | drm_mode_probed_add(connector, nmode); | 1661 | drm_mode_probed_add(connector, nmode); |
1754 | } | 1662 | } |
1755 | |||
1756 | } | 1663 | } |
1757 | 1664 | ||
1758 | static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) | 1665 | static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) |
1759 | { | 1666 | { |
1760 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1667 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1761 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 1668 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); |
1762 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 1669 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
1763 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
1764 | struct drm_display_mode *newmode; | 1670 | struct drm_display_mode *newmode; |
1765 | 1671 | ||
1766 | /* | 1672 | /* |
@@ -1768,7 +1674,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) | |||
1768 | * Assume that the preferred modes are | 1674 | * Assume that the preferred modes are |
1769 | * arranged in priority order. | 1675 | * arranged in priority order. |
1770 | */ | 1676 | */ |
1771 | intel_ddc_get_modes(connector, intel_encoder->ddc_bus); | 1677 | intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus); |
1772 | if (list_empty(&connector->probed_modes) == false) | 1678 | if (list_empty(&connector->probed_modes) == false) |
1773 | goto end; | 1679 | goto end; |
1774 | 1680 | ||
@@ -1787,8 +1693,9 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) | |||
1787 | end: | 1693 | end: |
1788 | list_for_each_entry(newmode, &connector->probed_modes, head) { | 1694 | list_for_each_entry(newmode, &connector->probed_modes, head) { |
1789 | if (newmode->type & DRM_MODE_TYPE_PREFERRED) { | 1695 | if (newmode->type & DRM_MODE_TYPE_PREFERRED) { |
1790 | sdvo_priv->sdvo_lvds_fixed_mode = | 1696 | intel_sdvo->sdvo_lvds_fixed_mode = |
1791 | drm_mode_duplicate(connector->dev, newmode); | 1697 | drm_mode_duplicate(connector->dev, newmode); |
1698 | intel_sdvo->is_lvds = true; | ||
1792 | break; | 1699 | break; |
1793 | } | 1700 | } |
1794 | } | 1701 | } |
@@ -1797,66 +1704,67 @@ end: | |||
1797 | 1704 | ||
1798 | static int intel_sdvo_get_modes(struct drm_connector *connector) | 1705 | static int intel_sdvo_get_modes(struct drm_connector *connector) |
1799 | { | 1706 | { |
1800 | struct intel_connector *intel_connector = to_intel_connector(connector); | 1707 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); |
1801 | struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; | ||
1802 | 1708 | ||
1803 | if (IS_TV(sdvo_connector)) | 1709 | if (IS_TV(intel_sdvo_connector)) |
1804 | intel_sdvo_get_tv_modes(connector); | 1710 | intel_sdvo_get_tv_modes(connector); |
1805 | else if (IS_LVDS(sdvo_connector)) | 1711 | else if (IS_LVDS(intel_sdvo_connector)) |
1806 | intel_sdvo_get_lvds_modes(connector); | 1712 | intel_sdvo_get_lvds_modes(connector); |
1807 | else | 1713 | else |
1808 | intel_sdvo_get_ddc_modes(connector); | 1714 | intel_sdvo_get_ddc_modes(connector); |
1809 | 1715 | ||
1810 | if (list_empty(&connector->probed_modes)) | 1716 | return !list_empty(&connector->probed_modes); |
1811 | return 0; | ||
1812 | return 1; | ||
1813 | } | 1717 | } |
1814 | 1718 | ||
1815 | static | 1719 | static void |
1816 | void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) | 1720 | intel_sdvo_destroy_enhance_property(struct drm_connector *connector) |
1817 | { | 1721 | { |
1818 | struct intel_connector *intel_connector = to_intel_connector(connector); | 1722 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); |
1819 | struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv; | ||
1820 | struct drm_device *dev = connector->dev; | 1723 | struct drm_device *dev = connector->dev; |
1821 | 1724 | ||
1822 | if (IS_TV(sdvo_priv)) { | 1725 | if (intel_sdvo_connector->left) |
1823 | if (sdvo_priv->left_property) | 1726 | drm_property_destroy(dev, intel_sdvo_connector->left); |
1824 | drm_property_destroy(dev, sdvo_priv->left_property); | 1727 | if (intel_sdvo_connector->right) |
1825 | if (sdvo_priv->right_property) | 1728 | drm_property_destroy(dev, intel_sdvo_connector->right); |
1826 | drm_property_destroy(dev, sdvo_priv->right_property); | 1729 | if (intel_sdvo_connector->top) |
1827 | if (sdvo_priv->top_property) | 1730 | drm_property_destroy(dev, intel_sdvo_connector->top); |
1828 | drm_property_destroy(dev, sdvo_priv->top_property); | 1731 | if (intel_sdvo_connector->bottom) |
1829 | if (sdvo_priv->bottom_property) | 1732 | drm_property_destroy(dev, intel_sdvo_connector->bottom); |
1830 | drm_property_destroy(dev, sdvo_priv->bottom_property); | 1733 | if (intel_sdvo_connector->hpos) |
1831 | if (sdvo_priv->hpos_property) | 1734 | drm_property_destroy(dev, intel_sdvo_connector->hpos); |
1832 | drm_property_destroy(dev, sdvo_priv->hpos_property); | 1735 | if (intel_sdvo_connector->vpos) |
1833 | if (sdvo_priv->vpos_property) | 1736 | drm_property_destroy(dev, intel_sdvo_connector->vpos); |
1834 | drm_property_destroy(dev, sdvo_priv->vpos_property); | 1737 | if (intel_sdvo_connector->saturation) |
1835 | if (sdvo_priv->saturation_property) | 1738 | drm_property_destroy(dev, intel_sdvo_connector->saturation); |
1836 | drm_property_destroy(dev, | 1739 | if (intel_sdvo_connector->contrast) |
1837 | sdvo_priv->saturation_property); | 1740 | drm_property_destroy(dev, intel_sdvo_connector->contrast); |
1838 | if (sdvo_priv->contrast_property) | 1741 | if (intel_sdvo_connector->hue) |
1839 | drm_property_destroy(dev, | 1742 | drm_property_destroy(dev, intel_sdvo_connector->hue); |
1840 | sdvo_priv->contrast_property); | 1743 | if (intel_sdvo_connector->sharpness) |
1841 | if (sdvo_priv->hue_property) | 1744 | drm_property_destroy(dev, intel_sdvo_connector->sharpness); |
1842 | drm_property_destroy(dev, sdvo_priv->hue_property); | 1745 | if (intel_sdvo_connector->flicker_filter) |
1843 | } | 1746 | drm_property_destroy(dev, intel_sdvo_connector->flicker_filter); |
1844 | if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) { | 1747 | if (intel_sdvo_connector->flicker_filter_2d) |
1845 | if (sdvo_priv->brightness_property) | 1748 | drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d); |
1846 | drm_property_destroy(dev, | 1749 | if (intel_sdvo_connector->flicker_filter_adaptive) |
1847 | sdvo_priv->brightness_property); | 1750 | drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive); |
1848 | } | 1751 | if (intel_sdvo_connector->tv_luma_filter) |
1849 | return; | 1752 | drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter); |
1753 | if (intel_sdvo_connector->tv_chroma_filter) | ||
1754 | drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter); | ||
1755 | if (intel_sdvo_connector->dot_crawl) | ||
1756 | drm_property_destroy(dev, intel_sdvo_connector->dot_crawl); | ||
1757 | if (intel_sdvo_connector->brightness) | ||
1758 | drm_property_destroy(dev, intel_sdvo_connector->brightness); | ||
1850 | } | 1759 | } |
1851 | 1760 | ||
1852 | static void intel_sdvo_destroy(struct drm_connector *connector) | 1761 | static void intel_sdvo_destroy(struct drm_connector *connector) |
1853 | { | 1762 | { |
1854 | struct intel_connector *intel_connector = to_intel_connector(connector); | 1763 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); |
1855 | struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; | ||
1856 | 1764 | ||
1857 | if (sdvo_connector->tv_format_property) | 1765 | if (intel_sdvo_connector->tv_format) |
1858 | drm_property_destroy(connector->dev, | 1766 | drm_property_destroy(connector->dev, |
1859 | sdvo_connector->tv_format_property); | 1767 | intel_sdvo_connector->tv_format); |
1860 | 1768 | ||
1861 | intel_sdvo_destroy_enhance_property(connector); | 1769 | intel_sdvo_destroy_enhance_property(connector); |
1862 | drm_sysfs_connector_remove(connector); | 1770 | drm_sysfs_connector_remove(connector); |
@@ -1870,132 +1778,118 @@ intel_sdvo_set_property(struct drm_connector *connector, | |||
1870 | uint64_t val) | 1778 | uint64_t val) |
1871 | { | 1779 | { |
1872 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1780 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1873 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 1781 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); |
1874 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | 1782 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); |
1875 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
1876 | struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; | ||
1877 | struct drm_crtc *crtc = encoder->crtc; | ||
1878 | int ret = 0; | ||
1879 | bool changed = false; | ||
1880 | uint8_t cmd, status; | ||
1881 | uint16_t temp_value; | 1783 | uint16_t temp_value; |
1784 | uint8_t cmd; | ||
1785 | int ret; | ||
1882 | 1786 | ||
1883 | ret = drm_connector_property_set_value(connector, property, val); | 1787 | ret = drm_connector_property_set_value(connector, property, val); |
1884 | if (ret < 0) | 1788 | if (ret) |
1885 | goto out; | 1789 | return ret; |
1790 | |||
1791 | #define CHECK_PROPERTY(name, NAME) \ | ||
1792 | if (intel_sdvo_connector->name == property) { \ | ||
1793 | if (intel_sdvo_connector->cur_##name == temp_value) return 0; \ | ||
1794 | if (intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \ | ||
1795 | cmd = SDVO_CMD_SET_##NAME; \ | ||
1796 | intel_sdvo_connector->cur_##name = temp_value; \ | ||
1797 | goto set_value; \ | ||
1798 | } | ||
1886 | 1799 | ||
1887 | if (property == sdvo_connector->tv_format_property) { | 1800 | if (property == intel_sdvo_connector->tv_format) { |
1888 | if (val >= TV_FORMAT_NUM) { | 1801 | if (val >= TV_FORMAT_NUM) |
1889 | ret = -EINVAL; | 1802 | return -EINVAL; |
1890 | goto out; | ||
1891 | } | ||
1892 | if (sdvo_priv->tv_format_name == | ||
1893 | sdvo_connector->tv_format_supported[val]) | ||
1894 | goto out; | ||
1895 | 1803 | ||
1896 | sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[val]; | 1804 | if (intel_sdvo->tv_format_index == |
1897 | changed = true; | 1805 | intel_sdvo_connector->tv_format_supported[val]) |
1898 | } | 1806 | return 0; |
1899 | 1807 | ||
1900 | if (IS_TV(sdvo_connector) || IS_LVDS(sdvo_connector)) { | 1808 | intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[val]; |
1901 | cmd = 0; | 1809 | goto done; |
1810 | } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) { | ||
1902 | temp_value = val; | 1811 | temp_value = val; |
1903 | if (sdvo_connector->left_property == property) { | 1812 | if (intel_sdvo_connector->left == property) { |
1904 | drm_connector_property_set_value(connector, | 1813 | drm_connector_property_set_value(connector, |
1905 | sdvo_connector->right_property, val); | 1814 | intel_sdvo_connector->right, val); |
1906 | if (sdvo_connector->left_margin == temp_value) | 1815 | if (intel_sdvo_connector->left_margin == temp_value) |
1907 | goto out; | 1816 | return 0; |
1908 | 1817 | ||
1909 | sdvo_connector->left_margin = temp_value; | 1818 | intel_sdvo_connector->left_margin = temp_value; |
1910 | sdvo_connector->right_margin = temp_value; | 1819 | intel_sdvo_connector->right_margin = temp_value; |
1911 | temp_value = sdvo_connector->max_hscan - | 1820 | temp_value = intel_sdvo_connector->max_hscan - |
1912 | sdvo_connector->left_margin; | 1821 | intel_sdvo_connector->left_margin; |
1913 | cmd = SDVO_CMD_SET_OVERSCAN_H; | 1822 | cmd = SDVO_CMD_SET_OVERSCAN_H; |
1914 | } else if (sdvo_connector->right_property == property) { | 1823 | goto set_value; |
1824 | } else if (intel_sdvo_connector->right == property) { | ||
1915 | drm_connector_property_set_value(connector, | 1825 | drm_connector_property_set_value(connector, |
1916 | sdvo_connector->left_property, val); | 1826 | intel_sdvo_connector->left, val); |
1917 | if (sdvo_connector->right_margin == temp_value) | 1827 | if (intel_sdvo_connector->right_margin == temp_value) |
1918 | goto out; | 1828 | return 0; |
1919 | 1829 | ||
1920 | sdvo_connector->left_margin = temp_value; | 1830 | intel_sdvo_connector->left_margin = temp_value; |
1921 | sdvo_connector->right_margin = temp_value; | 1831 | intel_sdvo_connector->right_margin = temp_value; |
1922 | temp_value = sdvo_connector->max_hscan - | 1832 | temp_value = intel_sdvo_connector->max_hscan - |
1923 | sdvo_connector->left_margin; | 1833 | intel_sdvo_connector->left_margin; |
1924 | cmd = SDVO_CMD_SET_OVERSCAN_H; | 1834 | cmd = SDVO_CMD_SET_OVERSCAN_H; |
1925 | } else if (sdvo_connector->top_property == property) { | 1835 | goto set_value; |
1836 | } else if (intel_sdvo_connector->top == property) { | ||
1926 | drm_connector_property_set_value(connector, | 1837 | drm_connector_property_set_value(connector, |
1927 | sdvo_connector->bottom_property, val); | 1838 | intel_sdvo_connector->bottom, val); |
1928 | if (sdvo_connector->top_margin == temp_value) | 1839 | if (intel_sdvo_connector->top_margin == temp_value) |
1929 | goto out; | 1840 | return 0; |
1930 | 1841 | ||
1931 | sdvo_connector->top_margin = temp_value; | 1842 | intel_sdvo_connector->top_margin = temp_value; |
1932 | sdvo_connector->bottom_margin = temp_value; | 1843 | intel_sdvo_connector->bottom_margin = temp_value; |
1933 | temp_value = sdvo_connector->max_vscan - | 1844 | temp_value = intel_sdvo_connector->max_vscan - |
1934 | sdvo_connector->top_margin; | 1845 | intel_sdvo_connector->top_margin; |
1935 | cmd = SDVO_CMD_SET_OVERSCAN_V; | 1846 | cmd = SDVO_CMD_SET_OVERSCAN_V; |
1936 | } else if (sdvo_connector->bottom_property == property) { | 1847 | goto set_value; |
1848 | } else if (intel_sdvo_connector->bottom == property) { | ||
1937 | drm_connector_property_set_value(connector, | 1849 | drm_connector_property_set_value(connector, |
1938 | sdvo_connector->top_property, val); | 1850 | intel_sdvo_connector->top, val); |
1939 | if (sdvo_connector->bottom_margin == temp_value) | 1851 | if (intel_sdvo_connector->bottom_margin == temp_value) |
1940 | goto out; | 1852 | return 0; |
1941 | sdvo_connector->top_margin = temp_value; | 1853 | |
1942 | sdvo_connector->bottom_margin = temp_value; | 1854 | intel_sdvo_connector->top_margin = temp_value; |
1943 | temp_value = sdvo_connector->max_vscan - | 1855 | intel_sdvo_connector->bottom_margin = temp_value; |
1944 | sdvo_connector->top_margin; | 1856 | temp_value = intel_sdvo_connector->max_vscan - |
1857 | intel_sdvo_connector->top_margin; | ||
1945 | cmd = SDVO_CMD_SET_OVERSCAN_V; | 1858 | cmd = SDVO_CMD_SET_OVERSCAN_V; |
1946 | } else if (sdvo_connector->hpos_property == property) { | 1859 | goto set_value; |
1947 | if (sdvo_connector->cur_hpos == temp_value) | ||
1948 | goto out; | ||
1949 | |||
1950 | cmd = SDVO_CMD_SET_POSITION_H; | ||
1951 | sdvo_connector->cur_hpos = temp_value; | ||
1952 | } else if (sdvo_connector->vpos_property == property) { | ||
1953 | if (sdvo_connector->cur_vpos == temp_value) | ||
1954 | goto out; | ||
1955 | |||
1956 | cmd = SDVO_CMD_SET_POSITION_V; | ||
1957 | sdvo_connector->cur_vpos = temp_value; | ||
1958 | } else if (sdvo_connector->saturation_property == property) { | ||
1959 | if (sdvo_connector->cur_saturation == temp_value) | ||
1960 | goto out; | ||
1961 | |||
1962 | cmd = SDVO_CMD_SET_SATURATION; | ||
1963 | sdvo_connector->cur_saturation = temp_value; | ||
1964 | } else if (sdvo_connector->contrast_property == property) { | ||
1965 | if (sdvo_connector->cur_contrast == temp_value) | ||
1966 | goto out; | ||
1967 | |||
1968 | cmd = SDVO_CMD_SET_CONTRAST; | ||
1969 | sdvo_connector->cur_contrast = temp_value; | ||
1970 | } else if (sdvo_connector->hue_property == property) { | ||
1971 | if (sdvo_connector->cur_hue == temp_value) | ||
1972 | goto out; | ||
1973 | |||
1974 | cmd = SDVO_CMD_SET_HUE; | ||
1975 | sdvo_connector->cur_hue = temp_value; | ||
1976 | } else if (sdvo_connector->brightness_property == property) { | ||
1977 | if (sdvo_connector->cur_brightness == temp_value) | ||
1978 | goto out; | ||
1979 | |||
1980 | cmd = SDVO_CMD_SET_BRIGHTNESS; | ||
1981 | sdvo_connector->cur_brightness = temp_value; | ||
1982 | } | ||
1983 | if (cmd) { | ||
1984 | intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2); | ||
1985 | status = intel_sdvo_read_response(intel_encoder, | ||
1986 | NULL, 0); | ||
1987 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
1988 | DRM_DEBUG_KMS("Incorrect SDVO command \n"); | ||
1989 | return -EINVAL; | ||
1990 | } | ||
1991 | changed = true; | ||
1992 | } | 1860 | } |
1861 | CHECK_PROPERTY(hpos, HPOS) | ||
1862 | CHECK_PROPERTY(vpos, VPOS) | ||
1863 | CHECK_PROPERTY(saturation, SATURATION) | ||
1864 | CHECK_PROPERTY(contrast, CONTRAST) | ||
1865 | CHECK_PROPERTY(hue, HUE) | ||
1866 | CHECK_PROPERTY(brightness, BRIGHTNESS) | ||
1867 | CHECK_PROPERTY(sharpness, SHARPNESS) | ||
1868 | CHECK_PROPERTY(flicker_filter, FLICKER_FILTER) | ||
1869 | CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D) | ||
1870 | CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE) | ||
1871 | CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER) | ||
1872 | CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER) | ||
1873 | CHECK_PROPERTY(dot_crawl, DOT_CRAWL) | ||
1993 | } | 1874 | } |
1994 | if (changed && crtc) | 1875 | |
1876 | return -EINVAL; /* unknown property */ | ||
1877 | |||
1878 | set_value: | ||
1879 | if (!intel_sdvo_set_value(intel_sdvo, cmd, &temp_value, 2)) | ||
1880 | return -EIO; | ||
1881 | |||
1882 | |||
1883 | done: | ||
1884 | if (encoder->crtc) { | ||
1885 | struct drm_crtc *crtc = encoder->crtc; | ||
1886 | |||
1995 | drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, | 1887 | drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, |
1996 | crtc->y, crtc->fb); | 1888 | crtc->y, crtc->fb); |
1997 | out: | 1889 | } |
1998 | return ret; | 1890 | |
1891 | return 0; | ||
1892 | #undef CHECK_PROPERTY | ||
1999 | } | 1893 | } |
2000 | 1894 | ||
2001 | static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = { | 1895 | static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = { |
@@ -2022,22 +1916,16 @@ static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs | |||
2022 | 1916 | ||
2023 | static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) | 1917 | static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) |
2024 | { | 1918 | { |
2025 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 1919 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); |
2026 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
2027 | 1920 | ||
2028 | if (intel_encoder->i2c_bus) | 1921 | if (intel_sdvo->analog_ddc_bus) |
2029 | intel_i2c_destroy(intel_encoder->i2c_bus); | 1922 | intel_i2c_destroy(intel_sdvo->analog_ddc_bus); |
2030 | if (intel_encoder->ddc_bus) | ||
2031 | intel_i2c_destroy(intel_encoder->ddc_bus); | ||
2032 | if (sdvo_priv->analog_ddc_bus) | ||
2033 | intel_i2c_destroy(sdvo_priv->analog_ddc_bus); | ||
2034 | 1923 | ||
2035 | if (sdvo_priv->sdvo_lvds_fixed_mode != NULL) | 1924 | if (intel_sdvo->sdvo_lvds_fixed_mode != NULL) |
2036 | drm_mode_destroy(encoder->dev, | 1925 | drm_mode_destroy(encoder->dev, |
2037 | sdvo_priv->sdvo_lvds_fixed_mode); | 1926 | intel_sdvo->sdvo_lvds_fixed_mode); |
2038 | 1927 | ||
2039 | drm_encoder_cleanup(encoder); | 1928 | intel_encoder_destroy(encoder); |
2040 | kfree(intel_encoder); | ||
2041 | } | 1929 | } |
2042 | 1930 | ||
2043 | static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { | 1931 | static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { |
@@ -2054,7 +1942,7 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { | |||
2054 | */ | 1942 | */ |
2055 | static void | 1943 | static void |
2056 | intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, | 1944 | intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, |
2057 | struct intel_sdvo_priv *sdvo, u32 reg) | 1945 | struct intel_sdvo *sdvo, u32 reg) |
2058 | { | 1946 | { |
2059 | struct sdvo_device_mapping *mapping; | 1947 | struct sdvo_device_mapping *mapping; |
2060 | 1948 | ||
@@ -2067,57 +1955,46 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, | |||
2067 | } | 1955 | } |
2068 | 1956 | ||
2069 | static bool | 1957 | static bool |
2070 | intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output, int device) | 1958 | intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device) |
2071 | { | 1959 | { |
2072 | struct intel_sdvo_priv *sdvo_priv = output->dev_priv; | 1960 | return intel_sdvo_set_target_output(intel_sdvo, |
2073 | uint8_t status; | 1961 | device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) && |
2074 | 1962 | intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, | |
2075 | if (device == 0) | 1963 | &intel_sdvo->is_hdmi, 1); |
2076 | intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS0); | ||
2077 | else | ||
2078 | intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS1); | ||
2079 | |||
2080 | intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0); | ||
2081 | status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1); | ||
2082 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
2083 | return false; | ||
2084 | return true; | ||
2085 | } | 1964 | } |
2086 | 1965 | ||
2087 | static struct intel_encoder * | 1966 | static struct intel_sdvo * |
2088 | intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan) | 1967 | intel_sdvo_chan_to_intel_sdvo(struct intel_i2c_chan *chan) |
2089 | { | 1968 | { |
2090 | struct drm_device *dev = chan->drm_dev; | 1969 | struct drm_device *dev = chan->drm_dev; |
2091 | struct drm_encoder *encoder; | 1970 | struct drm_encoder *encoder; |
2092 | struct intel_encoder *intel_encoder = NULL; | ||
2093 | 1971 | ||
2094 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 1972 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
2095 | intel_encoder = enc_to_intel_encoder(encoder); | 1973 | struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); |
2096 | if (intel_encoder->ddc_bus == &chan->adapter) | 1974 | if (intel_sdvo->base.ddc_bus == &chan->adapter) |
2097 | break; | 1975 | return intel_sdvo; |
2098 | } | 1976 | } |
2099 | return intel_encoder; | 1977 | |
1978 | return NULL; | ||
2100 | } | 1979 | } |
2101 | 1980 | ||
2102 | static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, | 1981 | static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, |
2103 | struct i2c_msg msgs[], int num) | 1982 | struct i2c_msg msgs[], int num) |
2104 | { | 1983 | { |
2105 | struct intel_encoder *intel_encoder; | 1984 | struct intel_sdvo *intel_sdvo; |
2106 | struct intel_sdvo_priv *sdvo_priv; | ||
2107 | struct i2c_algo_bit_data *algo_data; | 1985 | struct i2c_algo_bit_data *algo_data; |
2108 | const struct i2c_algorithm *algo; | 1986 | const struct i2c_algorithm *algo; |
2109 | 1987 | ||
2110 | algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; | 1988 | algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; |
2111 | intel_encoder = | 1989 | intel_sdvo = |
2112 | intel_sdvo_chan_to_intel_encoder( | 1990 | intel_sdvo_chan_to_intel_sdvo((struct intel_i2c_chan *) |
2113 | (struct intel_i2c_chan *)(algo_data->data)); | 1991 | (algo_data->data)); |
2114 | if (intel_encoder == NULL) | 1992 | if (intel_sdvo == NULL) |
2115 | return -EINVAL; | 1993 | return -EINVAL; |
2116 | 1994 | ||
2117 | sdvo_priv = intel_encoder->dev_priv; | 1995 | algo = intel_sdvo->base.i2c_bus->algo; |
2118 | algo = intel_encoder->i2c_bus->algo; | ||
2119 | 1996 | ||
2120 | intel_sdvo_set_control_bus_switch(intel_encoder, sdvo_priv->ddc_bus); | 1997 | intel_sdvo_set_control_bus_switch(intel_sdvo, intel_sdvo->ddc_bus); |
2121 | return algo->master_xfer(i2c_adap, msgs, num); | 1998 | return algo->master_xfer(i2c_adap, msgs, num); |
2122 | } | 1999 | } |
2123 | 2000 | ||
@@ -2162,27 +2039,9 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) | |||
2162 | return 0x72; | 2039 | return 0x72; |
2163 | } | 2040 | } |
2164 | 2041 | ||
2165 | static bool | ||
2166 | intel_sdvo_connector_alloc (struct intel_connector **ret) | ||
2167 | { | ||
2168 | struct intel_connector *intel_connector; | ||
2169 | struct intel_sdvo_connector *sdvo_connector; | ||
2170 | |||
2171 | *ret = kzalloc(sizeof(*intel_connector) + | ||
2172 | sizeof(*sdvo_connector), GFP_KERNEL); | ||
2173 | if (!*ret) | ||
2174 | return false; | ||
2175 | |||
2176 | intel_connector = *ret; | ||
2177 | sdvo_connector = (struct intel_sdvo_connector *)(intel_connector + 1); | ||
2178 | intel_connector->dev_priv = sdvo_connector; | ||
2179 | |||
2180 | return true; | ||
2181 | } | ||
2182 | |||
2183 | static void | 2042 | static void |
2184 | intel_sdvo_connector_create (struct drm_encoder *encoder, | 2043 | intel_sdvo_connector_init(struct drm_encoder *encoder, |
2185 | struct drm_connector *connector) | 2044 | struct drm_connector *connector) |
2186 | { | 2045 | { |
2187 | drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs, | 2046 | drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs, |
2188 | connector->connector_type); | 2047 | connector->connector_type); |
@@ -2198,582 +2057,470 @@ intel_sdvo_connector_create (struct drm_encoder *encoder, | |||
2198 | } | 2057 | } |
2199 | 2058 | ||
2200 | static bool | 2059 | static bool |
2201 | intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device) | 2060 | intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) |
2202 | { | 2061 | { |
2203 | struct drm_encoder *encoder = &intel_encoder->enc; | 2062 | struct drm_encoder *encoder = &intel_sdvo->base.enc; |
2204 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
2205 | struct drm_connector *connector; | 2063 | struct drm_connector *connector; |
2206 | struct intel_connector *intel_connector; | 2064 | struct intel_connector *intel_connector; |
2207 | struct intel_sdvo_connector *sdvo_connector; | 2065 | struct intel_sdvo_connector *intel_sdvo_connector; |
2208 | 2066 | ||
2209 | if (!intel_sdvo_connector_alloc(&intel_connector)) | 2067 | intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); |
2068 | if (!intel_sdvo_connector) | ||
2210 | return false; | 2069 | return false; |
2211 | 2070 | ||
2212 | sdvo_connector = intel_connector->dev_priv; | ||
2213 | |||
2214 | if (device == 0) { | 2071 | if (device == 0) { |
2215 | sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS0; | 2072 | intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0; |
2216 | sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0; | 2073 | intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0; |
2217 | } else if (device == 1) { | 2074 | } else if (device == 1) { |
2218 | sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS1; | 2075 | intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1; |
2219 | sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1; | 2076 | intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1; |
2220 | } | 2077 | } |
2221 | 2078 | ||
2079 | intel_connector = &intel_sdvo_connector->base; | ||
2222 | connector = &intel_connector->base; | 2080 | connector = &intel_connector->base; |
2223 | connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; | 2081 | connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; |
2224 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; | 2082 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; |
2225 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; | 2083 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; |
2226 | 2084 | ||
2227 | if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode) | 2085 | if (intel_sdvo_get_supp_encode(intel_sdvo, &intel_sdvo->encode) |
2228 | && intel_sdvo_get_digital_encoding_mode(intel_encoder, device) | 2086 | && intel_sdvo_get_digital_encoding_mode(intel_sdvo, device) |
2229 | && sdvo_priv->is_hdmi) { | 2087 | && intel_sdvo->is_hdmi) { |
2230 | /* enable hdmi encoding mode if supported */ | 2088 | /* enable hdmi encoding mode if supported */ |
2231 | intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI); | 2089 | intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); |
2232 | intel_sdvo_set_colorimetry(intel_encoder, | 2090 | intel_sdvo_set_colorimetry(intel_sdvo, |
2233 | SDVO_COLORIMETRY_RGB256); | 2091 | SDVO_COLORIMETRY_RGB256); |
2234 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; | 2092 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; |
2235 | } | 2093 | } |
2236 | intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 2094 | intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | |
2237 | (1 << INTEL_ANALOG_CLONE_BIT); | 2095 | (1 << INTEL_ANALOG_CLONE_BIT)); |
2238 | 2096 | ||
2239 | intel_sdvo_connector_create(encoder, connector); | 2097 | intel_sdvo_connector_init(encoder, connector); |
2240 | 2098 | ||
2241 | return true; | 2099 | return true; |
2242 | } | 2100 | } |
2243 | 2101 | ||
2244 | static bool | 2102 | static bool |
2245 | intel_sdvo_tv_init(struct intel_encoder *intel_encoder, int type) | 2103 | intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) |
2246 | { | 2104 | { |
2247 | struct drm_encoder *encoder = &intel_encoder->enc; | 2105 | struct drm_encoder *encoder = &intel_sdvo->base.enc; |
2248 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
2249 | struct drm_connector *connector; | 2106 | struct drm_connector *connector; |
2250 | struct intel_connector *intel_connector; | 2107 | struct intel_connector *intel_connector; |
2251 | struct intel_sdvo_connector *sdvo_connector; | 2108 | struct intel_sdvo_connector *intel_sdvo_connector; |
2252 | 2109 | ||
2253 | if (!intel_sdvo_connector_alloc(&intel_connector)) | 2110 | intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); |
2254 | return false; | 2111 | if (!intel_sdvo_connector) |
2112 | return false; | ||
2255 | 2113 | ||
2114 | intel_connector = &intel_sdvo_connector->base; | ||
2256 | connector = &intel_connector->base; | 2115 | connector = &intel_connector->base; |
2257 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; | 2116 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; |
2258 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; | 2117 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; |
2259 | sdvo_connector = intel_connector->dev_priv; | ||
2260 | 2118 | ||
2261 | sdvo_priv->controlled_output |= type; | 2119 | intel_sdvo->controlled_output |= type; |
2262 | sdvo_connector->output_flag = type; | 2120 | intel_sdvo_connector->output_flag = type; |
2263 | 2121 | ||
2264 | sdvo_priv->is_tv = true; | 2122 | intel_sdvo->is_tv = true; |
2265 | intel_encoder->needs_tv_clock = true; | 2123 | intel_sdvo->base.needs_tv_clock = true; |
2266 | intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; | 2124 | intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; |
2267 | 2125 | ||
2268 | intel_sdvo_connector_create(encoder, connector); | 2126 | intel_sdvo_connector_init(encoder, connector); |
2269 | 2127 | ||
2270 | intel_sdvo_tv_create_property(connector, type); | 2128 | if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type)) |
2129 | goto err; | ||
2271 | 2130 | ||
2272 | intel_sdvo_create_enhance_property(connector); | 2131 | if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) |
2132 | goto err; | ||
2273 | 2133 | ||
2274 | return true; | 2134 | return true; |
2135 | |||
2136 | err: | ||
2137 | intel_sdvo_destroy_enhance_property(connector); | ||
2138 | kfree(intel_sdvo_connector); | ||
2139 | return false; | ||
2275 | } | 2140 | } |
2276 | 2141 | ||
2277 | static bool | 2142 | static bool |
2278 | intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device) | 2143 | intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) |
2279 | { | 2144 | { |
2280 | struct drm_encoder *encoder = &intel_encoder->enc; | 2145 | struct drm_encoder *encoder = &intel_sdvo->base.enc; |
2281 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
2282 | struct drm_connector *connector; | 2146 | struct drm_connector *connector; |
2283 | struct intel_connector *intel_connector; | 2147 | struct intel_connector *intel_connector; |
2284 | struct intel_sdvo_connector *sdvo_connector; | 2148 | struct intel_sdvo_connector *intel_sdvo_connector; |
2285 | 2149 | ||
2286 | if (!intel_sdvo_connector_alloc(&intel_connector)) | 2150 | intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); |
2287 | return false; | 2151 | if (!intel_sdvo_connector) |
2152 | return false; | ||
2288 | 2153 | ||
2154 | intel_connector = &intel_sdvo_connector->base; | ||
2289 | connector = &intel_connector->base; | 2155 | connector = &intel_connector->base; |
2290 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | 2156 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; |
2291 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; | 2157 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; |
2292 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | 2158 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; |
2293 | sdvo_connector = intel_connector->dev_priv; | ||
2294 | 2159 | ||
2295 | if (device == 0) { | 2160 | if (device == 0) { |
2296 | sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB0; | 2161 | intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0; |
2297 | sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; | 2162 | intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; |
2298 | } else if (device == 1) { | 2163 | } else if (device == 1) { |
2299 | sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB1; | 2164 | intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1; |
2300 | sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; | 2165 | intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; |
2301 | } | 2166 | } |
2302 | 2167 | ||
2303 | intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 2168 | intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | |
2304 | (1 << INTEL_ANALOG_CLONE_BIT); | 2169 | (1 << INTEL_ANALOG_CLONE_BIT)); |
2305 | 2170 | ||
2306 | intel_sdvo_connector_create(encoder, connector); | 2171 | intel_sdvo_connector_init(encoder, connector); |
2307 | return true; | 2172 | return true; |
2308 | } | 2173 | } |
2309 | 2174 | ||
2310 | static bool | 2175 | static bool |
2311 | intel_sdvo_lvds_init(struct intel_encoder *intel_encoder, int device) | 2176 | intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) |
2312 | { | 2177 | { |
2313 | struct drm_encoder *encoder = &intel_encoder->enc; | 2178 | struct drm_encoder *encoder = &intel_sdvo->base.enc; |
2314 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
2315 | struct drm_connector *connector; | 2179 | struct drm_connector *connector; |
2316 | struct intel_connector *intel_connector; | 2180 | struct intel_connector *intel_connector; |
2317 | struct intel_sdvo_connector *sdvo_connector; | 2181 | struct intel_sdvo_connector *intel_sdvo_connector; |
2318 | 2182 | ||
2319 | if (!intel_sdvo_connector_alloc(&intel_connector)) | 2183 | intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); |
2320 | return false; | 2184 | if (!intel_sdvo_connector) |
2185 | return false; | ||
2321 | 2186 | ||
2322 | connector = &intel_connector->base; | 2187 | intel_connector = &intel_sdvo_connector->base; |
2188 | connector = &intel_connector->base; | ||
2323 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; | 2189 | encoder->encoder_type = DRM_MODE_ENCODER_LVDS; |
2324 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; | 2190 | connector->connector_type = DRM_MODE_CONNECTOR_LVDS; |
2325 | sdvo_connector = intel_connector->dev_priv; | ||
2326 | |||
2327 | sdvo_priv->is_lvds = true; | ||
2328 | 2191 | ||
2329 | if (device == 0) { | 2192 | if (device == 0) { |
2330 | sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS0; | 2193 | intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0; |
2331 | sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; | 2194 | intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; |
2332 | } else if (device == 1) { | 2195 | } else if (device == 1) { |
2333 | sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS1; | 2196 | intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1; |
2334 | sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; | 2197 | intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; |
2335 | } | 2198 | } |
2336 | 2199 | ||
2337 | intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | | 2200 | intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) | |
2338 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); | 2201 | (1 << INTEL_SDVO_LVDS_CLONE_BIT)); |
2339 | 2202 | ||
2340 | intel_sdvo_connector_create(encoder, connector); | 2203 | intel_sdvo_connector_init(encoder, connector); |
2341 | intel_sdvo_create_enhance_property(connector); | 2204 | if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) |
2342 | return true; | 2205 | goto err; |
2206 | |||
2207 | return true; | ||
2208 | |||
2209 | err: | ||
2210 | intel_sdvo_destroy_enhance_property(connector); | ||
2211 | kfree(intel_sdvo_connector); | ||
2212 | return false; | ||
2343 | } | 2213 | } |
2344 | 2214 | ||
2345 | static bool | 2215 | static bool |
2346 | intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags) | 2216 | intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags) |
2347 | { | 2217 | { |
2348 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | 2218 | intel_sdvo->is_tv = false; |
2349 | 2219 | intel_sdvo->base.needs_tv_clock = false; | |
2350 | sdvo_priv->is_tv = false; | 2220 | intel_sdvo->is_lvds = false; |
2351 | intel_encoder->needs_tv_clock = false; | ||
2352 | sdvo_priv->is_lvds = false; | ||
2353 | 2221 | ||
2354 | /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ | 2222 | /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ |
2355 | 2223 | ||
2356 | if (flags & SDVO_OUTPUT_TMDS0) | 2224 | if (flags & SDVO_OUTPUT_TMDS0) |
2357 | if (!intel_sdvo_dvi_init(intel_encoder, 0)) | 2225 | if (!intel_sdvo_dvi_init(intel_sdvo, 0)) |
2358 | return false; | 2226 | return false; |
2359 | 2227 | ||
2360 | if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK) | 2228 | if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK) |
2361 | if (!intel_sdvo_dvi_init(intel_encoder, 1)) | 2229 | if (!intel_sdvo_dvi_init(intel_sdvo, 1)) |
2362 | return false; | 2230 | return false; |
2363 | 2231 | ||
2364 | /* TV has no XXX1 function block */ | 2232 | /* TV has no XXX1 function block */ |
2365 | if (flags & SDVO_OUTPUT_SVID0) | 2233 | if (flags & SDVO_OUTPUT_SVID0) |
2366 | if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_SVID0)) | 2234 | if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_SVID0)) |
2367 | return false; | 2235 | return false; |
2368 | 2236 | ||
2369 | if (flags & SDVO_OUTPUT_CVBS0) | 2237 | if (flags & SDVO_OUTPUT_CVBS0) |
2370 | if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_CVBS0)) | 2238 | if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0)) |
2371 | return false; | 2239 | return false; |
2372 | 2240 | ||
2373 | if (flags & SDVO_OUTPUT_RGB0) | 2241 | if (flags & SDVO_OUTPUT_RGB0) |
2374 | if (!intel_sdvo_analog_init(intel_encoder, 0)) | 2242 | if (!intel_sdvo_analog_init(intel_sdvo, 0)) |
2375 | return false; | 2243 | return false; |
2376 | 2244 | ||
2377 | if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK) | 2245 | if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK) |
2378 | if (!intel_sdvo_analog_init(intel_encoder, 1)) | 2246 | if (!intel_sdvo_analog_init(intel_sdvo, 1)) |
2379 | return false; | 2247 | return false; |
2380 | 2248 | ||
2381 | if (flags & SDVO_OUTPUT_LVDS0) | 2249 | if (flags & SDVO_OUTPUT_LVDS0) |
2382 | if (!intel_sdvo_lvds_init(intel_encoder, 0)) | 2250 | if (!intel_sdvo_lvds_init(intel_sdvo, 0)) |
2383 | return false; | 2251 | return false; |
2384 | 2252 | ||
2385 | if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK) | 2253 | if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK) |
2386 | if (!intel_sdvo_lvds_init(intel_encoder, 1)) | 2254 | if (!intel_sdvo_lvds_init(intel_sdvo, 1)) |
2387 | return false; | 2255 | return false; |
2388 | 2256 | ||
2389 | if ((flags & SDVO_OUTPUT_MASK) == 0) { | 2257 | if ((flags & SDVO_OUTPUT_MASK) == 0) { |
2390 | unsigned char bytes[2]; | 2258 | unsigned char bytes[2]; |
2391 | 2259 | ||
2392 | sdvo_priv->controlled_output = 0; | 2260 | intel_sdvo->controlled_output = 0; |
2393 | memcpy(bytes, &sdvo_priv->caps.output_flags, 2); | 2261 | memcpy(bytes, &intel_sdvo->caps.output_flags, 2); |
2394 | DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n", | 2262 | DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n", |
2395 | SDVO_NAME(sdvo_priv), | 2263 | SDVO_NAME(intel_sdvo), |
2396 | bytes[0], bytes[1]); | 2264 | bytes[0], bytes[1]); |
2397 | return false; | 2265 | return false; |
2398 | } | 2266 | } |
2399 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | 2267 | intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1); |
2400 | 2268 | ||
2401 | return true; | 2269 | return true; |
2402 | } | 2270 | } |
2403 | 2271 | ||
2404 | static void intel_sdvo_tv_create_property(struct drm_connector *connector, int type) | 2272 | static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, |
2273 | struct intel_sdvo_connector *intel_sdvo_connector, | ||
2274 | int type) | ||
2405 | { | 2275 | { |
2406 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 2276 | struct drm_device *dev = intel_sdvo->base.enc.dev; |
2407 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
2408 | struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; | ||
2409 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
2410 | struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv; | ||
2411 | struct intel_sdvo_tv_format format; | 2277 | struct intel_sdvo_tv_format format; |
2412 | uint32_t format_map, i; | 2278 | uint32_t format_map, i; |
2413 | uint8_t status; | ||
2414 | 2279 | ||
2415 | intel_sdvo_set_target_output(intel_encoder, type); | 2280 | if (!intel_sdvo_set_target_output(intel_sdvo, type)) |
2281 | return false; | ||
2416 | 2282 | ||
2417 | intel_sdvo_write_cmd(intel_encoder, | 2283 | if (!intel_sdvo_get_value(intel_sdvo, |
2418 | SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0); | 2284 | SDVO_CMD_GET_SUPPORTED_TV_FORMATS, |
2419 | status = intel_sdvo_read_response(intel_encoder, | 2285 | &format, sizeof(format))) |
2420 | &format, sizeof(format)); | 2286 | return false; |
2421 | if (status != SDVO_CMD_STATUS_SUCCESS) | ||
2422 | return; | ||
2423 | 2287 | ||
2424 | memcpy(&format_map, &format, sizeof(format) > sizeof(format_map) ? | 2288 | memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format))); |
2425 | sizeof(format_map) : sizeof(format)); | ||
2426 | 2289 | ||
2427 | if (format_map == 0) | 2290 | if (format_map == 0) |
2428 | return; | 2291 | return false; |
2429 | 2292 | ||
2430 | sdvo_connector->format_supported_num = 0; | 2293 | intel_sdvo_connector->format_supported_num = 0; |
2431 | for (i = 0 ; i < TV_FORMAT_NUM; i++) | 2294 | for (i = 0 ; i < TV_FORMAT_NUM; i++) |
2432 | if (format_map & (1 << i)) { | 2295 | if (format_map & (1 << i)) |
2433 | sdvo_connector->tv_format_supported | 2296 | intel_sdvo_connector->tv_format_supported[intel_sdvo_connector->format_supported_num++] = i; |
2434 | [sdvo_connector->format_supported_num++] = | ||
2435 | tv_format_names[i]; | ||
2436 | } | ||
2437 | 2297 | ||
2438 | 2298 | ||
2439 | sdvo_connector->tv_format_property = | 2299 | intel_sdvo_connector->tv_format = |
2440 | drm_property_create( | 2300 | drm_property_create(dev, DRM_MODE_PROP_ENUM, |
2441 | connector->dev, DRM_MODE_PROP_ENUM, | 2301 | "mode", intel_sdvo_connector->format_supported_num); |
2442 | "mode", sdvo_connector->format_supported_num); | 2302 | if (!intel_sdvo_connector->tv_format) |
2303 | return false; | ||
2443 | 2304 | ||
2444 | for (i = 0; i < sdvo_connector->format_supported_num; i++) | 2305 | for (i = 0; i < intel_sdvo_connector->format_supported_num; i++) |
2445 | drm_property_add_enum( | 2306 | drm_property_add_enum( |
2446 | sdvo_connector->tv_format_property, i, | 2307 | intel_sdvo_connector->tv_format, i, |
2447 | i, sdvo_connector->tv_format_supported[i]); | 2308 | i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]); |
2448 | 2309 | ||
2449 | sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[0]; | 2310 | intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0]; |
2450 | drm_connector_attach_property( | 2311 | drm_connector_attach_property(&intel_sdvo_connector->base.base, |
2451 | connector, sdvo_connector->tv_format_property, 0); | 2312 | intel_sdvo_connector->tv_format, 0); |
2313 | return true; | ||
2452 | 2314 | ||
2453 | } | 2315 | } |
2454 | 2316 | ||
2455 | static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | 2317 | #define ENHANCEMENT(name, NAME) do { \ |
2318 | if (enhancements.name) { \ | ||
2319 | if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \ | ||
2320 | !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \ | ||
2321 | return false; \ | ||
2322 | intel_sdvo_connector->max_##name = data_value[0]; \ | ||
2323 | intel_sdvo_connector->cur_##name = response; \ | ||
2324 | intel_sdvo_connector->name = \ | ||
2325 | drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \ | ||
2326 | if (!intel_sdvo_connector->name) return false; \ | ||
2327 | intel_sdvo_connector->name->values[0] = 0; \ | ||
2328 | intel_sdvo_connector->name->values[1] = data_value[0]; \ | ||
2329 | drm_connector_attach_property(connector, \ | ||
2330 | intel_sdvo_connector->name, \ | ||
2331 | intel_sdvo_connector->cur_##name); \ | ||
2332 | DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \ | ||
2333 | data_value[0], data_value[1], response); \ | ||
2334 | } \ | ||
2335 | } while(0) | ||
2336 | |||
2337 | static bool | ||
2338 | intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, | ||
2339 | struct intel_sdvo_connector *intel_sdvo_connector, | ||
2340 | struct intel_sdvo_enhancements_reply enhancements) | ||
2456 | { | 2341 | { |
2457 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 2342 | struct drm_device *dev = intel_sdvo->base.enc.dev; |
2458 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 2343 | struct drm_connector *connector = &intel_sdvo_connector->base.base; |
2459 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
2460 | struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv; | ||
2461 | struct intel_sdvo_enhancements_reply sdvo_data; | ||
2462 | struct drm_device *dev = connector->dev; | ||
2463 | uint8_t status; | ||
2464 | uint16_t response, data_value[2]; | 2344 | uint16_t response, data_value[2]; |
2465 | 2345 | ||
2466 | intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, | 2346 | /* when horizontal overscan is supported, Add the left/right property */ |
2467 | NULL, 0); | 2347 | if (enhancements.overscan_h) { |
2468 | status = intel_sdvo_read_response(intel_encoder, &sdvo_data, | 2348 | if (!intel_sdvo_get_value(intel_sdvo, |
2469 | sizeof(sdvo_data)); | 2349 | SDVO_CMD_GET_MAX_OVERSCAN_H, |
2470 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2350 | &data_value, 4)) |
2471 | DRM_DEBUG_KMS(" incorrect response is returned\n"); | 2351 | return false; |
2472 | return; | 2352 | |
2353 | if (!intel_sdvo_get_value(intel_sdvo, | ||
2354 | SDVO_CMD_GET_OVERSCAN_H, | ||
2355 | &response, 2)) | ||
2356 | return false; | ||
2357 | |||
2358 | intel_sdvo_connector->max_hscan = data_value[0]; | ||
2359 | intel_sdvo_connector->left_margin = data_value[0] - response; | ||
2360 | intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin; | ||
2361 | intel_sdvo_connector->left = | ||
2362 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2363 | "left_margin", 2); | ||
2364 | if (!intel_sdvo_connector->left) | ||
2365 | return false; | ||
2366 | |||
2367 | intel_sdvo_connector->left->values[0] = 0; | ||
2368 | intel_sdvo_connector->left->values[1] = data_value[0]; | ||
2369 | drm_connector_attach_property(connector, | ||
2370 | intel_sdvo_connector->left, | ||
2371 | intel_sdvo_connector->left_margin); | ||
2372 | |||
2373 | intel_sdvo_connector->right = | ||
2374 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2375 | "right_margin", 2); | ||
2376 | if (!intel_sdvo_connector->right) | ||
2377 | return false; | ||
2378 | |||
2379 | intel_sdvo_connector->right->values[0] = 0; | ||
2380 | intel_sdvo_connector->right->values[1] = data_value[0]; | ||
2381 | drm_connector_attach_property(connector, | ||
2382 | intel_sdvo_connector->right, | ||
2383 | intel_sdvo_connector->right_margin); | ||
2384 | DRM_DEBUG_KMS("h_overscan: max %d, " | ||
2385 | "default %d, current %d\n", | ||
2386 | data_value[0], data_value[1], response); | ||
2473 | } | 2387 | } |
2474 | response = *((uint16_t *)&sdvo_data); | 2388 | |
2475 | if (!response) { | 2389 | if (enhancements.overscan_v) { |
2476 | DRM_DEBUG_KMS("No enhancement is supported\n"); | 2390 | if (!intel_sdvo_get_value(intel_sdvo, |
2477 | return; | 2391 | SDVO_CMD_GET_MAX_OVERSCAN_V, |
2392 | &data_value, 4)) | ||
2393 | return false; | ||
2394 | |||
2395 | if (!intel_sdvo_get_value(intel_sdvo, | ||
2396 | SDVO_CMD_GET_OVERSCAN_V, | ||
2397 | &response, 2)) | ||
2398 | return false; | ||
2399 | |||
2400 | intel_sdvo_connector->max_vscan = data_value[0]; | ||
2401 | intel_sdvo_connector->top_margin = data_value[0] - response; | ||
2402 | intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin; | ||
2403 | intel_sdvo_connector->top = | ||
2404 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2405 | "top_margin", 2); | ||
2406 | if (!intel_sdvo_connector->top) | ||
2407 | return false; | ||
2408 | |||
2409 | intel_sdvo_connector->top->values[0] = 0; | ||
2410 | intel_sdvo_connector->top->values[1] = data_value[0]; | ||
2411 | drm_connector_attach_property(connector, | ||
2412 | intel_sdvo_connector->top, | ||
2413 | intel_sdvo_connector->top_margin); | ||
2414 | |||
2415 | intel_sdvo_connector->bottom = | ||
2416 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2417 | "bottom_margin", 2); | ||
2418 | if (!intel_sdvo_connector->bottom) | ||
2419 | return false; | ||
2420 | |||
2421 | intel_sdvo_connector->bottom->values[0] = 0; | ||
2422 | intel_sdvo_connector->bottom->values[1] = data_value[0]; | ||
2423 | drm_connector_attach_property(connector, | ||
2424 | intel_sdvo_connector->bottom, | ||
2425 | intel_sdvo_connector->bottom_margin); | ||
2426 | DRM_DEBUG_KMS("v_overscan: max %d, " | ||
2427 | "default %d, current %d\n", | ||
2428 | data_value[0], data_value[1], response); | ||
2478 | } | 2429 | } |
2479 | if (IS_TV(sdvo_priv)) { | 2430 | |
2480 | /* when horizontal overscan is supported, Add the left/right | 2431 | ENHANCEMENT(hpos, HPOS); |
2481 | * property | 2432 | ENHANCEMENT(vpos, VPOS); |
2482 | */ | 2433 | ENHANCEMENT(saturation, SATURATION); |
2483 | if (sdvo_data.overscan_h) { | 2434 | ENHANCEMENT(contrast, CONTRAST); |
2484 | intel_sdvo_write_cmd(intel_encoder, | 2435 | ENHANCEMENT(hue, HUE); |
2485 | SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0); | 2436 | ENHANCEMENT(sharpness, SHARPNESS); |
2486 | status = intel_sdvo_read_response(intel_encoder, | 2437 | ENHANCEMENT(brightness, BRIGHTNESS); |
2487 | &data_value, 4); | 2438 | ENHANCEMENT(flicker_filter, FLICKER_FILTER); |
2488 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2439 | ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE); |
2489 | DRM_DEBUG_KMS("Incorrect SDVO max " | 2440 | ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D); |
2490 | "h_overscan\n"); | 2441 | ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER); |
2491 | return; | 2442 | ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER); |
2492 | } | 2443 | |
2493 | intel_sdvo_write_cmd(intel_encoder, | 2444 | if (enhancements.dot_crawl) { |
2494 | SDVO_CMD_GET_OVERSCAN_H, NULL, 0); | 2445 | if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2)) |
2495 | status = intel_sdvo_read_response(intel_encoder, | 2446 | return false; |
2496 | &response, 2); | 2447 | |
2497 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2448 | intel_sdvo_connector->max_dot_crawl = 1; |
2498 | DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n"); | 2449 | intel_sdvo_connector->cur_dot_crawl = response & 0x1; |
2499 | return; | 2450 | intel_sdvo_connector->dot_crawl = |
2500 | } | 2451 | drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2); |
2501 | sdvo_priv->max_hscan = data_value[0]; | 2452 | if (!intel_sdvo_connector->dot_crawl) |
2502 | sdvo_priv->left_margin = data_value[0] - response; | 2453 | return false; |
2503 | sdvo_priv->right_margin = sdvo_priv->left_margin; | 2454 | |
2504 | sdvo_priv->left_property = | 2455 | intel_sdvo_connector->dot_crawl->values[0] = 0; |
2505 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | 2456 | intel_sdvo_connector->dot_crawl->values[1] = 1; |
2506 | "left_margin", 2); | 2457 | drm_connector_attach_property(connector, |
2507 | sdvo_priv->left_property->values[0] = 0; | 2458 | intel_sdvo_connector->dot_crawl, |
2508 | sdvo_priv->left_property->values[1] = data_value[0]; | 2459 | intel_sdvo_connector->cur_dot_crawl); |
2509 | drm_connector_attach_property(connector, | 2460 | DRM_DEBUG_KMS("dot crawl: current %d\n", response); |
2510 | sdvo_priv->left_property, | ||
2511 | sdvo_priv->left_margin); | ||
2512 | sdvo_priv->right_property = | ||
2513 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2514 | "right_margin", 2); | ||
2515 | sdvo_priv->right_property->values[0] = 0; | ||
2516 | sdvo_priv->right_property->values[1] = data_value[0]; | ||
2517 | drm_connector_attach_property(connector, | ||
2518 | sdvo_priv->right_property, | ||
2519 | sdvo_priv->right_margin); | ||
2520 | DRM_DEBUG_KMS("h_overscan: max %d, " | ||
2521 | "default %d, current %d\n", | ||
2522 | data_value[0], data_value[1], response); | ||
2523 | } | ||
2524 | if (sdvo_data.overscan_v) { | ||
2525 | intel_sdvo_write_cmd(intel_encoder, | ||
2526 | SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0); | ||
2527 | status = intel_sdvo_read_response(intel_encoder, | ||
2528 | &data_value, 4); | ||
2529 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2530 | DRM_DEBUG_KMS("Incorrect SDVO max " | ||
2531 | "v_overscan\n"); | ||
2532 | return; | ||
2533 | } | ||
2534 | intel_sdvo_write_cmd(intel_encoder, | ||
2535 | SDVO_CMD_GET_OVERSCAN_V, NULL, 0); | ||
2536 | status = intel_sdvo_read_response(intel_encoder, | ||
2537 | &response, 2); | ||
2538 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2539 | DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n"); | ||
2540 | return; | ||
2541 | } | ||
2542 | sdvo_priv->max_vscan = data_value[0]; | ||
2543 | sdvo_priv->top_margin = data_value[0] - response; | ||
2544 | sdvo_priv->bottom_margin = sdvo_priv->top_margin; | ||
2545 | sdvo_priv->top_property = | ||
2546 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2547 | "top_margin", 2); | ||
2548 | sdvo_priv->top_property->values[0] = 0; | ||
2549 | sdvo_priv->top_property->values[1] = data_value[0]; | ||
2550 | drm_connector_attach_property(connector, | ||
2551 | sdvo_priv->top_property, | ||
2552 | sdvo_priv->top_margin); | ||
2553 | sdvo_priv->bottom_property = | ||
2554 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2555 | "bottom_margin", 2); | ||
2556 | sdvo_priv->bottom_property->values[0] = 0; | ||
2557 | sdvo_priv->bottom_property->values[1] = data_value[0]; | ||
2558 | drm_connector_attach_property(connector, | ||
2559 | sdvo_priv->bottom_property, | ||
2560 | sdvo_priv->bottom_margin); | ||
2561 | DRM_DEBUG_KMS("v_overscan: max %d, " | ||
2562 | "default %d, current %d\n", | ||
2563 | data_value[0], data_value[1], response); | ||
2564 | } | ||
2565 | if (sdvo_data.position_h) { | ||
2566 | intel_sdvo_write_cmd(intel_encoder, | ||
2567 | SDVO_CMD_GET_MAX_POSITION_H, NULL, 0); | ||
2568 | status = intel_sdvo_read_response(intel_encoder, | ||
2569 | &data_value, 4); | ||
2570 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2571 | DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n"); | ||
2572 | return; | ||
2573 | } | ||
2574 | intel_sdvo_write_cmd(intel_encoder, | ||
2575 | SDVO_CMD_GET_POSITION_H, NULL, 0); | ||
2576 | status = intel_sdvo_read_response(intel_encoder, | ||
2577 | &response, 2); | ||
2578 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2579 | DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n"); | ||
2580 | return; | ||
2581 | } | ||
2582 | sdvo_priv->max_hpos = data_value[0]; | ||
2583 | sdvo_priv->cur_hpos = response; | ||
2584 | sdvo_priv->hpos_property = | ||
2585 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2586 | "hpos", 2); | ||
2587 | sdvo_priv->hpos_property->values[0] = 0; | ||
2588 | sdvo_priv->hpos_property->values[1] = data_value[0]; | ||
2589 | drm_connector_attach_property(connector, | ||
2590 | sdvo_priv->hpos_property, | ||
2591 | sdvo_priv->cur_hpos); | ||
2592 | DRM_DEBUG_KMS("h_position: max %d, " | ||
2593 | "default %d, current %d\n", | ||
2594 | data_value[0], data_value[1], response); | ||
2595 | } | ||
2596 | if (sdvo_data.position_v) { | ||
2597 | intel_sdvo_write_cmd(intel_encoder, | ||
2598 | SDVO_CMD_GET_MAX_POSITION_V, NULL, 0); | ||
2599 | status = intel_sdvo_read_response(intel_encoder, | ||
2600 | &data_value, 4); | ||
2601 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2602 | DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n"); | ||
2603 | return; | ||
2604 | } | ||
2605 | intel_sdvo_write_cmd(intel_encoder, | ||
2606 | SDVO_CMD_GET_POSITION_V, NULL, 0); | ||
2607 | status = intel_sdvo_read_response(intel_encoder, | ||
2608 | &response, 2); | ||
2609 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2610 | DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n"); | ||
2611 | return; | ||
2612 | } | ||
2613 | sdvo_priv->max_vpos = data_value[0]; | ||
2614 | sdvo_priv->cur_vpos = response; | ||
2615 | sdvo_priv->vpos_property = | ||
2616 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2617 | "vpos", 2); | ||
2618 | sdvo_priv->vpos_property->values[0] = 0; | ||
2619 | sdvo_priv->vpos_property->values[1] = data_value[0]; | ||
2620 | drm_connector_attach_property(connector, | ||
2621 | sdvo_priv->vpos_property, | ||
2622 | sdvo_priv->cur_vpos); | ||
2623 | DRM_DEBUG_KMS("v_position: max %d, " | ||
2624 | "default %d, current %d\n", | ||
2625 | data_value[0], data_value[1], response); | ||
2626 | } | ||
2627 | if (sdvo_data.saturation) { | ||
2628 | intel_sdvo_write_cmd(intel_encoder, | ||
2629 | SDVO_CMD_GET_MAX_SATURATION, NULL, 0); | ||
2630 | status = intel_sdvo_read_response(intel_encoder, | ||
2631 | &data_value, 4); | ||
2632 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2633 | DRM_DEBUG_KMS("Incorrect SDVO Max sat\n"); | ||
2634 | return; | ||
2635 | } | ||
2636 | intel_sdvo_write_cmd(intel_encoder, | ||
2637 | SDVO_CMD_GET_SATURATION, NULL, 0); | ||
2638 | status = intel_sdvo_read_response(intel_encoder, | ||
2639 | &response, 2); | ||
2640 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2641 | DRM_DEBUG_KMS("Incorrect SDVO get sat\n"); | ||
2642 | return; | ||
2643 | } | ||
2644 | sdvo_priv->max_saturation = data_value[0]; | ||
2645 | sdvo_priv->cur_saturation = response; | ||
2646 | sdvo_priv->saturation_property = | ||
2647 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2648 | "saturation", 2); | ||
2649 | sdvo_priv->saturation_property->values[0] = 0; | ||
2650 | sdvo_priv->saturation_property->values[1] = | ||
2651 | data_value[0]; | ||
2652 | drm_connector_attach_property(connector, | ||
2653 | sdvo_priv->saturation_property, | ||
2654 | sdvo_priv->cur_saturation); | ||
2655 | DRM_DEBUG_KMS("saturation: max %d, " | ||
2656 | "default %d, current %d\n", | ||
2657 | data_value[0], data_value[1], response); | ||
2658 | } | ||
2659 | if (sdvo_data.contrast) { | ||
2660 | intel_sdvo_write_cmd(intel_encoder, | ||
2661 | SDVO_CMD_GET_MAX_CONTRAST, NULL, 0); | ||
2662 | status = intel_sdvo_read_response(intel_encoder, | ||
2663 | &data_value, 4); | ||
2664 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2665 | DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n"); | ||
2666 | return; | ||
2667 | } | ||
2668 | intel_sdvo_write_cmd(intel_encoder, | ||
2669 | SDVO_CMD_GET_CONTRAST, NULL, 0); | ||
2670 | status = intel_sdvo_read_response(intel_encoder, | ||
2671 | &response, 2); | ||
2672 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2673 | DRM_DEBUG_KMS("Incorrect SDVO get contrast\n"); | ||
2674 | return; | ||
2675 | } | ||
2676 | sdvo_priv->max_contrast = data_value[0]; | ||
2677 | sdvo_priv->cur_contrast = response; | ||
2678 | sdvo_priv->contrast_property = | ||
2679 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2680 | "contrast", 2); | ||
2681 | sdvo_priv->contrast_property->values[0] = 0; | ||
2682 | sdvo_priv->contrast_property->values[1] = data_value[0]; | ||
2683 | drm_connector_attach_property(connector, | ||
2684 | sdvo_priv->contrast_property, | ||
2685 | sdvo_priv->cur_contrast); | ||
2686 | DRM_DEBUG_KMS("contrast: max %d, " | ||
2687 | "default %d, current %d\n", | ||
2688 | data_value[0], data_value[1], response); | ||
2689 | } | ||
2690 | if (sdvo_data.hue) { | ||
2691 | intel_sdvo_write_cmd(intel_encoder, | ||
2692 | SDVO_CMD_GET_MAX_HUE, NULL, 0); | ||
2693 | status = intel_sdvo_read_response(intel_encoder, | ||
2694 | &data_value, 4); | ||
2695 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2696 | DRM_DEBUG_KMS("Incorrect SDVO Max hue\n"); | ||
2697 | return; | ||
2698 | } | ||
2699 | intel_sdvo_write_cmd(intel_encoder, | ||
2700 | SDVO_CMD_GET_HUE, NULL, 0); | ||
2701 | status = intel_sdvo_read_response(intel_encoder, | ||
2702 | &response, 2); | ||
2703 | if (status != SDVO_CMD_STATUS_SUCCESS) { | ||
2704 | DRM_DEBUG_KMS("Incorrect SDVO get hue\n"); | ||
2705 | return; | ||
2706 | } | ||
2707 | sdvo_priv->max_hue = data_value[0]; | ||
2708 | sdvo_priv->cur_hue = response; | ||
2709 | sdvo_priv->hue_property = | ||
2710 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | ||
2711 | "hue", 2); | ||
2712 | sdvo_priv->hue_property->values[0] = 0; | ||
2713 | sdvo_priv->hue_property->values[1] = | ||
2714 | data_value[0]; | ||
2715 | drm_connector_attach_property(connector, | ||
2716 | sdvo_priv->hue_property, | ||
2717 | sdvo_priv->cur_hue); | ||
2718 | DRM_DEBUG_KMS("hue: max %d, default %d, current %d\n", | ||
2719 | data_value[0], data_value[1], response); | ||
2720 | } | ||
2721 | } | 2461 | } |
2722 | if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) { | 2462 | |
2723 | if (sdvo_data.brightness) { | 2463 | return true; |
2724 | intel_sdvo_write_cmd(intel_encoder, | 2464 | } |
2725 | SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); | 2465 | |
2726 | status = intel_sdvo_read_response(intel_encoder, | 2466 | static bool |
2727 | &data_value, 4); | 2467 | intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo, |
2728 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2468 | struct intel_sdvo_connector *intel_sdvo_connector, |
2729 | DRM_DEBUG_KMS("Incorrect SDVO Max bright\n"); | 2469 | struct intel_sdvo_enhancements_reply enhancements) |
2730 | return; | 2470 | { |
2731 | } | 2471 | struct drm_device *dev = intel_sdvo->base.enc.dev; |
2732 | intel_sdvo_write_cmd(intel_encoder, | 2472 | struct drm_connector *connector = &intel_sdvo_connector->base.base; |
2733 | SDVO_CMD_GET_BRIGHTNESS, NULL, 0); | 2473 | uint16_t response, data_value[2]; |
2734 | status = intel_sdvo_read_response(intel_encoder, | 2474 | |
2735 | &response, 2); | 2475 | ENHANCEMENT(brightness, BRIGHTNESS); |
2736 | if (status != SDVO_CMD_STATUS_SUCCESS) { | 2476 | |
2737 | DRM_DEBUG_KMS("Incorrect SDVO get brigh\n"); | 2477 | return true; |
2738 | return; | 2478 | } |
2739 | } | 2479 | #undef ENHANCEMENT |
2740 | sdvo_priv->max_brightness = data_value[0]; | 2480 | |
2741 | sdvo_priv->cur_brightness = response; | 2481 | static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, |
2742 | sdvo_priv->brightness_property = | 2482 | struct intel_sdvo_connector *intel_sdvo_connector) |
2743 | drm_property_create(dev, DRM_MODE_PROP_RANGE, | 2483 | { |
2744 | "brightness", 2); | 2484 | union { |
2745 | sdvo_priv->brightness_property->values[0] = 0; | 2485 | struct intel_sdvo_enhancements_reply reply; |
2746 | sdvo_priv->brightness_property->values[1] = | 2486 | uint16_t response; |
2747 | data_value[0]; | 2487 | } enhancements; |
2748 | drm_connector_attach_property(connector, | 2488 | |
2749 | sdvo_priv->brightness_property, | 2489 | if (!intel_sdvo_get_value(intel_sdvo, |
2750 | sdvo_priv->cur_brightness); | 2490 | SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, |
2751 | DRM_DEBUG_KMS("brightness: max %d, " | 2491 | &enhancements, sizeof(enhancements))) |
2752 | "default %d, current %d\n", | 2492 | return false; |
2753 | data_value[0], data_value[1], response); | 2493 | |
2754 | } | 2494 | if (enhancements.response == 0) { |
2495 | DRM_DEBUG_KMS("No enhancement is supported\n"); | ||
2496 | return true; | ||
2755 | } | 2497 | } |
2756 | return; | 2498 | |
2499 | if (IS_TV(intel_sdvo_connector)) | ||
2500 | return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply); | ||
2501 | else if(IS_LVDS(intel_sdvo_connector)) | ||
2502 | return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply); | ||
2503 | else | ||
2504 | return true; | ||
2505 | |||
2757 | } | 2506 | } |
2758 | 2507 | ||
2759 | bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) | 2508 | bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) |
2760 | { | 2509 | { |
2761 | struct drm_i915_private *dev_priv = dev->dev_private; | 2510 | struct drm_i915_private *dev_priv = dev->dev_private; |
2762 | struct intel_encoder *intel_encoder; | 2511 | struct intel_encoder *intel_encoder; |
2763 | struct intel_sdvo_priv *sdvo_priv; | 2512 | struct intel_sdvo *intel_sdvo; |
2764 | u8 ch[0x40]; | 2513 | u8 ch[0x40]; |
2765 | int i; | 2514 | int i; |
2766 | u32 i2c_reg, ddc_reg, analog_ddc_reg; | 2515 | u32 i2c_reg, ddc_reg, analog_ddc_reg; |
2767 | 2516 | ||
2768 | intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); | 2517 | intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); |
2769 | if (!intel_encoder) { | 2518 | if (!intel_sdvo) |
2770 | return false; | 2519 | return false; |
2771 | } | ||
2772 | 2520 | ||
2773 | sdvo_priv = (struct intel_sdvo_priv *)(intel_encoder + 1); | 2521 | intel_sdvo->sdvo_reg = sdvo_reg; |
2774 | sdvo_priv->sdvo_reg = sdvo_reg; | ||
2775 | 2522 | ||
2776 | intel_encoder->dev_priv = sdvo_priv; | 2523 | intel_encoder = &intel_sdvo->base; |
2777 | intel_encoder->type = INTEL_OUTPUT_SDVO; | 2524 | intel_encoder->type = INTEL_OUTPUT_SDVO; |
2778 | 2525 | ||
2779 | if (HAS_PCH_SPLIT(dev)) { | 2526 | if (HAS_PCH_SPLIT(dev)) { |
@@ -2795,14 +2542,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) | |||
2795 | if (!intel_encoder->i2c_bus) | 2542 | if (!intel_encoder->i2c_bus) |
2796 | goto err_inteloutput; | 2543 | goto err_inteloutput; |
2797 | 2544 | ||
2798 | sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg); | 2545 | intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg); |
2799 | 2546 | ||
2800 | /* Save the bit-banging i2c functionality for use by the DDC wrapper */ | 2547 | /* Save the bit-banging i2c functionality for use by the DDC wrapper */ |
2801 | intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality; | 2548 | intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality; |
2802 | 2549 | ||
2803 | /* Read the regs to test if we can talk to the device */ | 2550 | /* Read the regs to test if we can talk to the device */ |
2804 | for (i = 0; i < 0x40; i++) { | 2551 | for (i = 0; i < 0x40; i++) { |
2805 | if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) { | 2552 | if (!intel_sdvo_read_byte(intel_sdvo, i, &ch[i])) { |
2806 | DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", | 2553 | DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", |
2807 | IS_SDVOB(sdvo_reg) ? 'B' : 'C'); | 2554 | IS_SDVOB(sdvo_reg) ? 'B' : 'C'); |
2808 | goto err_i2c; | 2555 | goto err_i2c; |
@@ -2812,17 +2559,16 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) | |||
2812 | /* setup the DDC bus. */ | 2559 | /* setup the DDC bus. */ |
2813 | if (IS_SDVOB(sdvo_reg)) { | 2560 | if (IS_SDVOB(sdvo_reg)) { |
2814 | intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS"); | 2561 | intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS"); |
2815 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, | 2562 | intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, |
2816 | "SDVOB/VGA DDC BUS"); | 2563 | "SDVOB/VGA DDC BUS"); |
2817 | dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; | 2564 | dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; |
2818 | } else { | 2565 | } else { |
2819 | intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS"); | 2566 | intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS"); |
2820 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, | 2567 | intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, |
2821 | "SDVOC/VGA DDC BUS"); | 2568 | "SDVOC/VGA DDC BUS"); |
2822 | dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; | 2569 | dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; |
2823 | } | 2570 | } |
2824 | 2571 | if (intel_encoder->ddc_bus == NULL || intel_sdvo->analog_ddc_bus == NULL) | |
2825 | if (intel_encoder->ddc_bus == NULL) | ||
2826 | goto err_i2c; | 2572 | goto err_i2c; |
2827 | 2573 | ||
2828 | /* Wrap with our custom algo which switches to DDC mode */ | 2574 | /* Wrap with our custom algo which switches to DDC mode */ |
@@ -2833,53 +2579,56 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) | |||
2833 | drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs); | 2579 | drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs); |
2834 | 2580 | ||
2835 | /* In default case sdvo lvds is false */ | 2581 | /* In default case sdvo lvds is false */ |
2836 | intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps); | 2582 | if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) |
2583 | goto err_enc; | ||
2837 | 2584 | ||
2838 | if (intel_sdvo_output_setup(intel_encoder, | 2585 | if (intel_sdvo_output_setup(intel_sdvo, |
2839 | sdvo_priv->caps.output_flags) != true) { | 2586 | intel_sdvo->caps.output_flags) != true) { |
2840 | DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", | 2587 | DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", |
2841 | IS_SDVOB(sdvo_reg) ? 'B' : 'C'); | 2588 | IS_SDVOB(sdvo_reg) ? 'B' : 'C'); |
2842 | goto err_i2c; | 2589 | goto err_enc; |
2843 | } | 2590 | } |
2844 | 2591 | ||
2845 | intel_sdvo_select_ddc_bus(dev_priv, sdvo_priv, sdvo_reg); | 2592 | intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); |
2846 | 2593 | ||
2847 | /* Set the input timing to the screen. Assume always input 0. */ | 2594 | /* Set the input timing to the screen. Assume always input 0. */ |
2848 | intel_sdvo_set_target_input(intel_encoder, true, false); | 2595 | if (!intel_sdvo_set_target_input(intel_sdvo)) |
2849 | 2596 | goto err_enc; | |
2850 | intel_sdvo_get_input_pixel_clock_range(intel_encoder, | ||
2851 | &sdvo_priv->pixel_clock_min, | ||
2852 | &sdvo_priv->pixel_clock_max); | ||
2853 | 2597 | ||
2598 | if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo, | ||
2599 | &intel_sdvo->pixel_clock_min, | ||
2600 | &intel_sdvo->pixel_clock_max)) | ||
2601 | goto err_enc; | ||
2854 | 2602 | ||
2855 | DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, " | 2603 | DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, " |
2856 | "clock range %dMHz - %dMHz, " | 2604 | "clock range %dMHz - %dMHz, " |
2857 | "input 1: %c, input 2: %c, " | 2605 | "input 1: %c, input 2: %c, " |
2858 | "output 1: %c, output 2: %c\n", | 2606 | "output 1: %c, output 2: %c\n", |
2859 | SDVO_NAME(sdvo_priv), | 2607 | SDVO_NAME(intel_sdvo), |
2860 | sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id, | 2608 | intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id, |
2861 | sdvo_priv->caps.device_rev_id, | 2609 | intel_sdvo->caps.device_rev_id, |
2862 | sdvo_priv->pixel_clock_min / 1000, | 2610 | intel_sdvo->pixel_clock_min / 1000, |
2863 | sdvo_priv->pixel_clock_max / 1000, | 2611 | intel_sdvo->pixel_clock_max / 1000, |
2864 | (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N', | 2612 | (intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N', |
2865 | (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N', | 2613 | (intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N', |
2866 | /* check currently supported outputs */ | 2614 | /* check currently supported outputs */ |
2867 | sdvo_priv->caps.output_flags & | 2615 | intel_sdvo->caps.output_flags & |
2868 | (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N', | 2616 | (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N', |
2869 | sdvo_priv->caps.output_flags & | 2617 | intel_sdvo->caps.output_flags & |
2870 | (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); | 2618 | (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); |
2871 | |||
2872 | return true; | 2619 | return true; |
2873 | 2620 | ||
2621 | err_enc: | ||
2622 | drm_encoder_cleanup(&intel_encoder->enc); | ||
2874 | err_i2c: | 2623 | err_i2c: |
2875 | if (sdvo_priv->analog_ddc_bus != NULL) | 2624 | if (intel_sdvo->analog_ddc_bus != NULL) |
2876 | intel_i2c_destroy(sdvo_priv->analog_ddc_bus); | 2625 | intel_i2c_destroy(intel_sdvo->analog_ddc_bus); |
2877 | if (intel_encoder->ddc_bus != NULL) | 2626 | if (intel_encoder->ddc_bus != NULL) |
2878 | intel_i2c_destroy(intel_encoder->ddc_bus); | 2627 | intel_i2c_destroy(intel_encoder->ddc_bus); |
2879 | if (intel_encoder->i2c_bus != NULL) | 2628 | if (intel_encoder->i2c_bus != NULL) |
2880 | intel_i2c_destroy(intel_encoder->i2c_bus); | 2629 | intel_i2c_destroy(intel_encoder->i2c_bus); |
2881 | err_inteloutput: | 2630 | err_inteloutput: |
2882 | kfree(intel_encoder); | 2631 | kfree(intel_sdvo); |
2883 | 2632 | ||
2884 | return false; | 2633 | return false; |
2885 | } | 2634 | } |
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h index ba5cdf8ae40b..a386b022e538 100644 --- a/drivers/gpu/drm/i915/intel_sdvo_regs.h +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h | |||
@@ -312,7 +312,7 @@ struct intel_sdvo_set_target_input_args { | |||
312 | # define SDVO_CLOCK_RATE_MULT_4X (1 << 3) | 312 | # define SDVO_CLOCK_RATE_MULT_4X (1 << 3) |
313 | 313 | ||
314 | #define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27 | 314 | #define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27 |
315 | /** 5 bytes of bit flags for TV formats shared by all TV format functions */ | 315 | /** 6 bytes of bit flags for TV formats shared by all TV format functions */ |
316 | struct intel_sdvo_tv_format { | 316 | struct intel_sdvo_tv_format { |
317 | unsigned int ntsc_m:1; | 317 | unsigned int ntsc_m:1; |
318 | unsigned int ntsc_j:1; | 318 | unsigned int ntsc_j:1; |
@@ -596,32 +596,32 @@ struct intel_sdvo_enhancements_reply { | |||
596 | unsigned int overscan_h:1; | 596 | unsigned int overscan_h:1; |
597 | 597 | ||
598 | unsigned int overscan_v:1; | 598 | unsigned int overscan_v:1; |
599 | unsigned int position_h:1; | 599 | unsigned int hpos:1; |
600 | unsigned int position_v:1; | 600 | unsigned int vpos:1; |
601 | unsigned int sharpness:1; | 601 | unsigned int sharpness:1; |
602 | unsigned int dot_crawl:1; | 602 | unsigned int dot_crawl:1; |
603 | unsigned int dither:1; | 603 | unsigned int dither:1; |
604 | unsigned int max_tv_chroma_filter:1; | 604 | unsigned int tv_chroma_filter:1; |
605 | unsigned int max_tv_luma_filter:1; | 605 | unsigned int tv_luma_filter:1; |
606 | } __attribute__((packed)); | 606 | } __attribute__((packed)); |
607 | 607 | ||
608 | /* Picture enhancement limits below are dependent on the current TV format, | 608 | /* Picture enhancement limits below are dependent on the current TV format, |
609 | * and thus need to be queried and set after it. | 609 | * and thus need to be queried and set after it. |
610 | */ | 610 | */ |
611 | #define SDVO_CMD_GET_MAX_FLICKER_FITER 0x4d | 611 | #define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4d |
612 | #define SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FITER 0x7b | 612 | #define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE 0x7b |
613 | #define SDVO_CMD_GET_MAX_2D_FLICKER_FITER 0x52 | 613 | #define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D 0x52 |
614 | #define SDVO_CMD_GET_MAX_SATURATION 0x55 | 614 | #define SDVO_CMD_GET_MAX_SATURATION 0x55 |
615 | #define SDVO_CMD_GET_MAX_HUE 0x58 | 615 | #define SDVO_CMD_GET_MAX_HUE 0x58 |
616 | #define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b | 616 | #define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b |
617 | #define SDVO_CMD_GET_MAX_CONTRAST 0x5e | 617 | #define SDVO_CMD_GET_MAX_CONTRAST 0x5e |
618 | #define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61 | 618 | #define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61 |
619 | #define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64 | 619 | #define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64 |
620 | #define SDVO_CMD_GET_MAX_POSITION_H 0x67 | 620 | #define SDVO_CMD_GET_MAX_HPOS 0x67 |
621 | #define SDVO_CMD_GET_MAX_POSITION_V 0x6a | 621 | #define SDVO_CMD_GET_MAX_VPOS 0x6a |
622 | #define SDVO_CMD_GET_MAX_SHARPNESS_V 0x6d | 622 | #define SDVO_CMD_GET_MAX_SHARPNESS 0x6d |
623 | #define SDVO_CMD_GET_MAX_TV_CHROMA 0x74 | 623 | #define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74 |
624 | #define SDVO_CMD_GET_MAX_TV_LUMA 0x77 | 624 | #define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77 |
625 | struct intel_sdvo_enhancement_limits_reply { | 625 | struct intel_sdvo_enhancement_limits_reply { |
626 | u16 max_value; | 626 | u16 max_value; |
627 | u16 default_value; | 627 | u16 default_value; |
@@ -638,10 +638,10 @@ struct intel_sdvo_enhancement_limits_reply { | |||
638 | 638 | ||
639 | #define SDVO_CMD_GET_FLICKER_FILTER 0x4e | 639 | #define SDVO_CMD_GET_FLICKER_FILTER 0x4e |
640 | #define SDVO_CMD_SET_FLICKER_FILTER 0x4f | 640 | #define SDVO_CMD_SET_FLICKER_FILTER 0x4f |
641 | #define SDVO_CMD_GET_ADAPTIVE_FLICKER_FITER 0x50 | 641 | #define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE 0x50 |
642 | #define SDVO_CMD_SET_ADAPTIVE_FLICKER_FITER 0x51 | 642 | #define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE 0x51 |
643 | #define SDVO_CMD_GET_2D_FLICKER_FITER 0x53 | 643 | #define SDVO_CMD_GET_FLICKER_FILTER_2D 0x53 |
644 | #define SDVO_CMD_SET_2D_FLICKER_FITER 0x54 | 644 | #define SDVO_CMD_SET_FLICKER_FILTER_2D 0x54 |
645 | #define SDVO_CMD_GET_SATURATION 0x56 | 645 | #define SDVO_CMD_GET_SATURATION 0x56 |
646 | #define SDVO_CMD_SET_SATURATION 0x57 | 646 | #define SDVO_CMD_SET_SATURATION 0x57 |
647 | #define SDVO_CMD_GET_HUE 0x59 | 647 | #define SDVO_CMD_GET_HUE 0x59 |
@@ -654,16 +654,16 @@ struct intel_sdvo_enhancement_limits_reply { | |||
654 | #define SDVO_CMD_SET_OVERSCAN_H 0x63 | 654 | #define SDVO_CMD_SET_OVERSCAN_H 0x63 |
655 | #define SDVO_CMD_GET_OVERSCAN_V 0x65 | 655 | #define SDVO_CMD_GET_OVERSCAN_V 0x65 |
656 | #define SDVO_CMD_SET_OVERSCAN_V 0x66 | 656 | #define SDVO_CMD_SET_OVERSCAN_V 0x66 |
657 | #define SDVO_CMD_GET_POSITION_H 0x68 | 657 | #define SDVO_CMD_GET_HPOS 0x68 |
658 | #define SDVO_CMD_SET_POSITION_H 0x69 | 658 | #define SDVO_CMD_SET_HPOS 0x69 |
659 | #define SDVO_CMD_GET_POSITION_V 0x6b | 659 | #define SDVO_CMD_GET_VPOS 0x6b |
660 | #define SDVO_CMD_SET_POSITION_V 0x6c | 660 | #define SDVO_CMD_SET_VPOS 0x6c |
661 | #define SDVO_CMD_GET_SHARPNESS 0x6e | 661 | #define SDVO_CMD_GET_SHARPNESS 0x6e |
662 | #define SDVO_CMD_SET_SHARPNESS 0x6f | 662 | #define SDVO_CMD_SET_SHARPNESS 0x6f |
663 | #define SDVO_CMD_GET_TV_CHROMA 0x75 | 663 | #define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75 |
664 | #define SDVO_CMD_SET_TV_CHROMA 0x76 | 664 | #define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76 |
665 | #define SDVO_CMD_GET_TV_LUMA 0x78 | 665 | #define SDVO_CMD_GET_TV_LUMA_FILTER 0x78 |
666 | #define SDVO_CMD_SET_TV_LUMA 0x79 | 666 | #define SDVO_CMD_SET_TV_LUMA_FILTER 0x79 |
667 | struct intel_sdvo_enhancements_arg { | 667 | struct intel_sdvo_enhancements_arg { |
668 | u16 value; | 668 | u16 value; |
669 | }__attribute__((packed)); | 669 | }__attribute__((packed)); |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index cc3726a4a1cb..d2029efee982 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -44,7 +44,9 @@ enum tv_margin { | |||
44 | }; | 44 | }; |
45 | 45 | ||
46 | /** Private structure for the integrated TV support */ | 46 | /** Private structure for the integrated TV support */ |
47 | struct intel_tv_priv { | 47 | struct intel_tv { |
48 | struct intel_encoder base; | ||
49 | |||
48 | int type; | 50 | int type; |
49 | char *tv_format; | 51 | char *tv_format; |
50 | int margin[4]; | 52 | int margin[4]; |
@@ -896,6 +898,11 @@ static const struct tv_mode tv_modes[] = { | |||
896 | }, | 898 | }, |
897 | }; | 899 | }; |
898 | 900 | ||
901 | static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder) | ||
902 | { | ||
903 | return container_of(enc_to_intel_encoder(encoder), struct intel_tv, base); | ||
904 | } | ||
905 | |||
899 | static void | 906 | static void |
900 | intel_tv_dpms(struct drm_encoder *encoder, int mode) | 907 | intel_tv_dpms(struct drm_encoder *encoder, int mode) |
901 | { | 908 | { |
@@ -929,19 +936,17 @@ intel_tv_mode_lookup (char *tv_format) | |||
929 | } | 936 | } |
930 | 937 | ||
931 | static const struct tv_mode * | 938 | static const struct tv_mode * |
932 | intel_tv_mode_find (struct intel_encoder *intel_encoder) | 939 | intel_tv_mode_find (struct intel_tv *intel_tv) |
933 | { | 940 | { |
934 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; | 941 | return intel_tv_mode_lookup(intel_tv->tv_format); |
935 | |||
936 | return intel_tv_mode_lookup(tv_priv->tv_format); | ||
937 | } | 942 | } |
938 | 943 | ||
939 | static enum drm_mode_status | 944 | static enum drm_mode_status |
940 | intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) | 945 | intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) |
941 | { | 946 | { |
942 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 947 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
943 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 948 | struct intel_tv *intel_tv = enc_to_intel_tv(encoder); |
944 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); | 949 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); |
945 | 950 | ||
946 | /* Ensure TV refresh is close to desired refresh */ | 951 | /* Ensure TV refresh is close to desired refresh */ |
947 | if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) | 952 | if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) |
@@ -957,8 +962,8 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
957 | { | 962 | { |
958 | struct drm_device *dev = encoder->dev; | 963 | struct drm_device *dev = encoder->dev; |
959 | struct drm_mode_config *drm_config = &dev->mode_config; | 964 | struct drm_mode_config *drm_config = &dev->mode_config; |
960 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 965 | struct intel_tv *intel_tv = enc_to_intel_tv(encoder); |
961 | const struct tv_mode *tv_mode = intel_tv_mode_find (intel_encoder); | 966 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); |
962 | struct drm_encoder *other_encoder; | 967 | struct drm_encoder *other_encoder; |
963 | 968 | ||
964 | if (!tv_mode) | 969 | if (!tv_mode) |
@@ -983,9 +988,8 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
983 | struct drm_i915_private *dev_priv = dev->dev_private; | 988 | struct drm_i915_private *dev_priv = dev->dev_private; |
984 | struct drm_crtc *crtc = encoder->crtc; | 989 | struct drm_crtc *crtc = encoder->crtc; |
985 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 990 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
986 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 991 | struct intel_tv *intel_tv = enc_to_intel_tv(encoder); |
987 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; | 992 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); |
988 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); | ||
989 | u32 tv_ctl; | 993 | u32 tv_ctl; |
990 | u32 hctl1, hctl2, hctl3; | 994 | u32 hctl1, hctl2, hctl3; |
991 | u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7; | 995 | u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7; |
@@ -1001,7 +1005,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1001 | tv_ctl = I915_READ(TV_CTL); | 1005 | tv_ctl = I915_READ(TV_CTL); |
1002 | tv_ctl &= TV_CTL_SAVE; | 1006 | tv_ctl &= TV_CTL_SAVE; |
1003 | 1007 | ||
1004 | switch (tv_priv->type) { | 1008 | switch (intel_tv->type) { |
1005 | default: | 1009 | default: |
1006 | case DRM_MODE_CONNECTOR_Unknown: | 1010 | case DRM_MODE_CONNECTOR_Unknown: |
1007 | case DRM_MODE_CONNECTOR_Composite: | 1011 | case DRM_MODE_CONNECTOR_Composite: |
@@ -1154,11 +1158,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1154 | 1158 | ||
1155 | /* Wait for vblank for the disable to take effect */ | 1159 | /* Wait for vblank for the disable to take effect */ |
1156 | if (!IS_I9XX(dev)) | 1160 | if (!IS_I9XX(dev)) |
1157 | intel_wait_for_vblank(dev); | 1161 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
1158 | 1162 | ||
1159 | I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE); | 1163 | I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE); |
1160 | /* Wait for vblank for the disable to take effect. */ | 1164 | /* Wait for vblank for the disable to take effect. */ |
1161 | intel_wait_for_vblank(dev); | 1165 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
1162 | 1166 | ||
1163 | /* Filter ctl must be set before TV_WIN_SIZE */ | 1167 | /* Filter ctl must be set before TV_WIN_SIZE */ |
1164 | I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE); | 1168 | I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE); |
@@ -1168,12 +1172,12 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1168 | else | 1172 | else |
1169 | ysize = 2*tv_mode->nbr_end + 1; | 1173 | ysize = 2*tv_mode->nbr_end + 1; |
1170 | 1174 | ||
1171 | xpos += tv_priv->margin[TV_MARGIN_LEFT]; | 1175 | xpos += intel_tv->margin[TV_MARGIN_LEFT]; |
1172 | ypos += tv_priv->margin[TV_MARGIN_TOP]; | 1176 | ypos += intel_tv->margin[TV_MARGIN_TOP]; |
1173 | xsize -= (tv_priv->margin[TV_MARGIN_LEFT] + | 1177 | xsize -= (intel_tv->margin[TV_MARGIN_LEFT] + |
1174 | tv_priv->margin[TV_MARGIN_RIGHT]); | 1178 | intel_tv->margin[TV_MARGIN_RIGHT]); |
1175 | ysize -= (tv_priv->margin[TV_MARGIN_TOP] + | 1179 | ysize -= (intel_tv->margin[TV_MARGIN_TOP] + |
1176 | tv_priv->margin[TV_MARGIN_BOTTOM]); | 1180 | intel_tv->margin[TV_MARGIN_BOTTOM]); |
1177 | I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos); | 1181 | I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos); |
1178 | I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize); | 1182 | I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize); |
1179 | 1183 | ||
@@ -1222,11 +1226,12 @@ static const struct drm_display_mode reported_modes[] = { | |||
1222 | * \return false if TV is disconnected. | 1226 | * \return false if TV is disconnected. |
1223 | */ | 1227 | */ |
1224 | static int | 1228 | static int |
1225 | intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder) | 1229 | intel_tv_detect_type (struct intel_tv *intel_tv) |
1226 | { | 1230 | { |
1227 | struct drm_encoder *encoder = &intel_encoder->enc; | 1231 | struct drm_encoder *encoder = &intel_tv->base.enc; |
1228 | struct drm_device *dev = encoder->dev; | 1232 | struct drm_device *dev = encoder->dev; |
1229 | struct drm_i915_private *dev_priv = dev->dev_private; | 1233 | struct drm_i915_private *dev_priv = dev->dev_private; |
1234 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | ||
1230 | unsigned long irqflags; | 1235 | unsigned long irqflags; |
1231 | u32 tv_ctl, save_tv_ctl; | 1236 | u32 tv_ctl, save_tv_ctl; |
1232 | u32 tv_dac, save_tv_dac; | 1237 | u32 tv_dac, save_tv_dac; |
@@ -1263,11 +1268,11 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder | |||
1263 | DAC_C_0_7_V); | 1268 | DAC_C_0_7_V); |
1264 | I915_WRITE(TV_CTL, tv_ctl); | 1269 | I915_WRITE(TV_CTL, tv_ctl); |
1265 | I915_WRITE(TV_DAC, tv_dac); | 1270 | I915_WRITE(TV_DAC, tv_dac); |
1266 | intel_wait_for_vblank(dev); | 1271 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
1267 | tv_dac = I915_READ(TV_DAC); | 1272 | tv_dac = I915_READ(TV_DAC); |
1268 | I915_WRITE(TV_DAC, save_tv_dac); | 1273 | I915_WRITE(TV_DAC, save_tv_dac); |
1269 | I915_WRITE(TV_CTL, save_tv_ctl); | 1274 | I915_WRITE(TV_CTL, save_tv_ctl); |
1270 | intel_wait_for_vblank(dev); | 1275 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
1271 | /* | 1276 | /* |
1272 | * A B C | 1277 | * A B C |
1273 | * 0 1 1 Composite | 1278 | * 0 1 1 Composite |
@@ -1304,12 +1309,11 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder | |||
1304 | static void intel_tv_find_better_format(struct drm_connector *connector) | 1309 | static void intel_tv_find_better_format(struct drm_connector *connector) |
1305 | { | 1310 | { |
1306 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1311 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1307 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 1312 | struct intel_tv *intel_tv = enc_to_intel_tv(encoder); |
1308 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; | 1313 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); |
1309 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); | ||
1310 | int i; | 1314 | int i; |
1311 | 1315 | ||
1312 | if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) == | 1316 | if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) == |
1313 | tv_mode->component_only) | 1317 | tv_mode->component_only) |
1314 | return; | 1318 | return; |
1315 | 1319 | ||
@@ -1317,12 +1321,12 @@ static void intel_tv_find_better_format(struct drm_connector *connector) | |||
1317 | for (i = 0; i < sizeof(tv_modes) / sizeof(*tv_modes); i++) { | 1321 | for (i = 0; i < sizeof(tv_modes) / sizeof(*tv_modes); i++) { |
1318 | tv_mode = tv_modes + i; | 1322 | tv_mode = tv_modes + i; |
1319 | 1323 | ||
1320 | if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) == | 1324 | if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) == |
1321 | tv_mode->component_only) | 1325 | tv_mode->component_only) |
1322 | break; | 1326 | break; |
1323 | } | 1327 | } |
1324 | 1328 | ||
1325 | tv_priv->tv_format = tv_mode->name; | 1329 | intel_tv->tv_format = tv_mode->name; |
1326 | drm_connector_property_set_value(connector, | 1330 | drm_connector_property_set_value(connector, |
1327 | connector->dev->mode_config.tv_mode_property, i); | 1331 | connector->dev->mode_config.tv_mode_property, i); |
1328 | } | 1332 | } |
@@ -1336,31 +1340,31 @@ static void intel_tv_find_better_format(struct drm_connector *connector) | |||
1336 | static enum drm_connector_status | 1340 | static enum drm_connector_status |
1337 | intel_tv_detect(struct drm_connector *connector) | 1341 | intel_tv_detect(struct drm_connector *connector) |
1338 | { | 1342 | { |
1339 | struct drm_crtc *crtc; | ||
1340 | struct drm_display_mode mode; | 1343 | struct drm_display_mode mode; |
1341 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1344 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1342 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 1345 | struct intel_tv *intel_tv = enc_to_intel_tv(encoder); |
1343 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; | 1346 | int type; |
1344 | int dpms_mode; | ||
1345 | int type = tv_priv->type; | ||
1346 | 1347 | ||
1347 | mode = reported_modes[0]; | 1348 | mode = reported_modes[0]; |
1348 | drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); | 1349 | drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); |
1349 | 1350 | ||
1350 | if (encoder->crtc && encoder->crtc->enabled) { | 1351 | if (encoder->crtc && encoder->crtc->enabled) { |
1351 | type = intel_tv_detect_type(encoder->crtc, intel_encoder); | 1352 | type = intel_tv_detect_type(intel_tv); |
1352 | } else { | 1353 | } else { |
1353 | crtc = intel_get_load_detect_pipe(intel_encoder, connector, | 1354 | struct drm_crtc *crtc; |
1355 | int dpms_mode; | ||
1356 | |||
1357 | crtc = intel_get_load_detect_pipe(&intel_tv->base, connector, | ||
1354 | &mode, &dpms_mode); | 1358 | &mode, &dpms_mode); |
1355 | if (crtc) { | 1359 | if (crtc) { |
1356 | type = intel_tv_detect_type(crtc, intel_encoder); | 1360 | type = intel_tv_detect_type(intel_tv); |
1357 | intel_release_load_detect_pipe(intel_encoder, connector, | 1361 | intel_release_load_detect_pipe(&intel_tv->base, connector, |
1358 | dpms_mode); | 1362 | dpms_mode); |
1359 | } else | 1363 | } else |
1360 | type = -1; | 1364 | type = -1; |
1361 | } | 1365 | } |
1362 | 1366 | ||
1363 | tv_priv->type = type; | 1367 | intel_tv->type = type; |
1364 | 1368 | ||
1365 | if (type < 0) | 1369 | if (type < 0) |
1366 | return connector_status_disconnected; | 1370 | return connector_status_disconnected; |
@@ -1391,8 +1395,8 @@ intel_tv_chose_preferred_modes(struct drm_connector *connector, | |||
1391 | struct drm_display_mode *mode_ptr) | 1395 | struct drm_display_mode *mode_ptr) |
1392 | { | 1396 | { |
1393 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1397 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1394 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 1398 | struct intel_tv *intel_tv = enc_to_intel_tv(encoder); |
1395 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); | 1399 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); |
1396 | 1400 | ||
1397 | if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) | 1401 | if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) |
1398 | mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; | 1402 | mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; |
@@ -1417,8 +1421,8 @@ intel_tv_get_modes(struct drm_connector *connector) | |||
1417 | { | 1421 | { |
1418 | struct drm_display_mode *mode_ptr; | 1422 | struct drm_display_mode *mode_ptr; |
1419 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1423 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1420 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 1424 | struct intel_tv *intel_tv = enc_to_intel_tv(encoder); |
1421 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder); | 1425 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); |
1422 | int j, count = 0; | 1426 | int j, count = 0; |
1423 | u64 tmp; | 1427 | u64 tmp; |
1424 | 1428 | ||
@@ -1483,8 +1487,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop | |||
1483 | { | 1487 | { |
1484 | struct drm_device *dev = connector->dev; | 1488 | struct drm_device *dev = connector->dev; |
1485 | struct drm_encoder *encoder = intel_attached_encoder(connector); | 1489 | struct drm_encoder *encoder = intel_attached_encoder(connector); |
1486 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | 1490 | struct intel_tv *intel_tv = enc_to_intel_tv(encoder); |
1487 | struct intel_tv_priv *tv_priv = intel_encoder->dev_priv; | ||
1488 | struct drm_crtc *crtc = encoder->crtc; | 1491 | struct drm_crtc *crtc = encoder->crtc; |
1489 | int ret = 0; | 1492 | int ret = 0; |
1490 | bool changed = false; | 1493 | bool changed = false; |
@@ -1494,30 +1497,30 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop | |||
1494 | goto out; | 1497 | goto out; |
1495 | 1498 | ||
1496 | if (property == dev->mode_config.tv_left_margin_property && | 1499 | if (property == dev->mode_config.tv_left_margin_property && |
1497 | tv_priv->margin[TV_MARGIN_LEFT] != val) { | 1500 | intel_tv->margin[TV_MARGIN_LEFT] != val) { |
1498 | tv_priv->margin[TV_MARGIN_LEFT] = val; | 1501 | intel_tv->margin[TV_MARGIN_LEFT] = val; |
1499 | changed = true; | 1502 | changed = true; |
1500 | } else if (property == dev->mode_config.tv_right_margin_property && | 1503 | } else if (property == dev->mode_config.tv_right_margin_property && |
1501 | tv_priv->margin[TV_MARGIN_RIGHT] != val) { | 1504 | intel_tv->margin[TV_MARGIN_RIGHT] != val) { |
1502 | tv_priv->margin[TV_MARGIN_RIGHT] = val; | 1505 | intel_tv->margin[TV_MARGIN_RIGHT] = val; |
1503 | changed = true; | 1506 | changed = true; |
1504 | } else if (property == dev->mode_config.tv_top_margin_property && | 1507 | } else if (property == dev->mode_config.tv_top_margin_property && |
1505 | tv_priv->margin[TV_MARGIN_TOP] != val) { | 1508 | intel_tv->margin[TV_MARGIN_TOP] != val) { |
1506 | tv_priv->margin[TV_MARGIN_TOP] = val; | 1509 | intel_tv->margin[TV_MARGIN_TOP] = val; |
1507 | changed = true; | 1510 | changed = true; |
1508 | } else if (property == dev->mode_config.tv_bottom_margin_property && | 1511 | } else if (property == dev->mode_config.tv_bottom_margin_property && |
1509 | tv_priv->margin[TV_MARGIN_BOTTOM] != val) { | 1512 | intel_tv->margin[TV_MARGIN_BOTTOM] != val) { |
1510 | tv_priv->margin[TV_MARGIN_BOTTOM] = val; | 1513 | intel_tv->margin[TV_MARGIN_BOTTOM] = val; |
1511 | changed = true; | 1514 | changed = true; |
1512 | } else if (property == dev->mode_config.tv_mode_property) { | 1515 | } else if (property == dev->mode_config.tv_mode_property) { |
1513 | if (val >= ARRAY_SIZE(tv_modes)) { | 1516 | if (val >= ARRAY_SIZE(tv_modes)) { |
1514 | ret = -EINVAL; | 1517 | ret = -EINVAL; |
1515 | goto out; | 1518 | goto out; |
1516 | } | 1519 | } |
1517 | if (!strcmp(tv_priv->tv_format, tv_modes[val].name)) | 1520 | if (!strcmp(intel_tv->tv_format, tv_modes[val].name)) |
1518 | goto out; | 1521 | goto out; |
1519 | 1522 | ||
1520 | tv_priv->tv_format = tv_modes[val].name; | 1523 | intel_tv->tv_format = tv_modes[val].name; |
1521 | changed = true; | 1524 | changed = true; |
1522 | } else { | 1525 | } else { |
1523 | ret = -EINVAL; | 1526 | ret = -EINVAL; |
@@ -1553,16 +1556,8 @@ static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = | |||
1553 | .best_encoder = intel_attached_encoder, | 1556 | .best_encoder = intel_attached_encoder, |
1554 | }; | 1557 | }; |
1555 | 1558 | ||
1556 | static void intel_tv_enc_destroy(struct drm_encoder *encoder) | ||
1557 | { | ||
1558 | struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); | ||
1559 | |||
1560 | drm_encoder_cleanup(encoder); | ||
1561 | kfree(intel_encoder); | ||
1562 | } | ||
1563 | |||
1564 | static const struct drm_encoder_funcs intel_tv_enc_funcs = { | 1559 | static const struct drm_encoder_funcs intel_tv_enc_funcs = { |
1565 | .destroy = intel_tv_enc_destroy, | 1560 | .destroy = intel_encoder_destroy, |
1566 | }; | 1561 | }; |
1567 | 1562 | ||
1568 | /* | 1563 | /* |
@@ -1606,9 +1601,9 @@ intel_tv_init(struct drm_device *dev) | |||
1606 | { | 1601 | { |
1607 | struct drm_i915_private *dev_priv = dev->dev_private; | 1602 | struct drm_i915_private *dev_priv = dev->dev_private; |
1608 | struct drm_connector *connector; | 1603 | struct drm_connector *connector; |
1604 | struct intel_tv *intel_tv; | ||
1609 | struct intel_encoder *intel_encoder; | 1605 | struct intel_encoder *intel_encoder; |
1610 | struct intel_connector *intel_connector; | 1606 | struct intel_connector *intel_connector; |
1611 | struct intel_tv_priv *tv_priv; | ||
1612 | u32 tv_dac_on, tv_dac_off, save_tv_dac; | 1607 | u32 tv_dac_on, tv_dac_off, save_tv_dac; |
1613 | char **tv_format_names; | 1608 | char **tv_format_names; |
1614 | int i, initial_mode = 0; | 1609 | int i, initial_mode = 0; |
@@ -1647,18 +1642,18 @@ intel_tv_init(struct drm_device *dev) | |||
1647 | (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) | 1642 | (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) |
1648 | return; | 1643 | return; |
1649 | 1644 | ||
1650 | intel_encoder = kzalloc(sizeof(struct intel_encoder) + | 1645 | intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL); |
1651 | sizeof(struct intel_tv_priv), GFP_KERNEL); | 1646 | if (!intel_tv) { |
1652 | if (!intel_encoder) { | ||
1653 | return; | 1647 | return; |
1654 | } | 1648 | } |
1655 | 1649 | ||
1656 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | 1650 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
1657 | if (!intel_connector) { | 1651 | if (!intel_connector) { |
1658 | kfree(intel_encoder); | 1652 | kfree(intel_tv); |
1659 | return; | 1653 | return; |
1660 | } | 1654 | } |
1661 | 1655 | ||
1656 | intel_encoder = &intel_tv->base; | ||
1662 | connector = &intel_connector->base; | 1657 | connector = &intel_connector->base; |
1663 | 1658 | ||
1664 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, | 1659 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, |
@@ -1668,22 +1663,20 @@ intel_tv_init(struct drm_device *dev) | |||
1668 | DRM_MODE_ENCODER_TVDAC); | 1663 | DRM_MODE_ENCODER_TVDAC); |
1669 | 1664 | ||
1670 | drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc); | 1665 | drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc); |
1671 | tv_priv = (struct intel_tv_priv *)(intel_encoder + 1); | ||
1672 | intel_encoder->type = INTEL_OUTPUT_TVOUT; | 1666 | intel_encoder->type = INTEL_OUTPUT_TVOUT; |
1673 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | 1667 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
1674 | intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT); | 1668 | intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT); |
1675 | intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1)); | 1669 | intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1)); |
1676 | intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); | 1670 | intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); |
1677 | intel_encoder->dev_priv = tv_priv; | 1671 | intel_tv->type = DRM_MODE_CONNECTOR_Unknown; |
1678 | tv_priv->type = DRM_MODE_CONNECTOR_Unknown; | ||
1679 | 1672 | ||
1680 | /* BIOS margin values */ | 1673 | /* BIOS margin values */ |
1681 | tv_priv->margin[TV_MARGIN_LEFT] = 54; | 1674 | intel_tv->margin[TV_MARGIN_LEFT] = 54; |
1682 | tv_priv->margin[TV_MARGIN_TOP] = 36; | 1675 | intel_tv->margin[TV_MARGIN_TOP] = 36; |
1683 | tv_priv->margin[TV_MARGIN_RIGHT] = 46; | 1676 | intel_tv->margin[TV_MARGIN_RIGHT] = 46; |
1684 | tv_priv->margin[TV_MARGIN_BOTTOM] = 37; | 1677 | intel_tv->margin[TV_MARGIN_BOTTOM] = 37; |
1685 | 1678 | ||
1686 | tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL); | 1679 | intel_tv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL); |
1687 | 1680 | ||
1688 | drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs); | 1681 | drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs); |
1689 | drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); | 1682 | drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); |
@@ -1703,16 +1696,16 @@ intel_tv_init(struct drm_device *dev) | |||
1703 | initial_mode); | 1696 | initial_mode); |
1704 | drm_connector_attach_property(connector, | 1697 | drm_connector_attach_property(connector, |
1705 | dev->mode_config.tv_left_margin_property, | 1698 | dev->mode_config.tv_left_margin_property, |
1706 | tv_priv->margin[TV_MARGIN_LEFT]); | 1699 | intel_tv->margin[TV_MARGIN_LEFT]); |
1707 | drm_connector_attach_property(connector, | 1700 | drm_connector_attach_property(connector, |
1708 | dev->mode_config.tv_top_margin_property, | 1701 | dev->mode_config.tv_top_margin_property, |
1709 | tv_priv->margin[TV_MARGIN_TOP]); | 1702 | intel_tv->margin[TV_MARGIN_TOP]); |
1710 | drm_connector_attach_property(connector, | 1703 | drm_connector_attach_property(connector, |
1711 | dev->mode_config.tv_right_margin_property, | 1704 | dev->mode_config.tv_right_margin_property, |
1712 | tv_priv->margin[TV_MARGIN_RIGHT]); | 1705 | intel_tv->margin[TV_MARGIN_RIGHT]); |
1713 | drm_connector_attach_property(connector, | 1706 | drm_connector_attach_property(connector, |
1714 | dev->mode_config.tv_bottom_margin_property, | 1707 | dev->mode_config.tv_bottom_margin_property, |
1715 | tv_priv->margin[TV_MARGIN_BOTTOM]); | 1708 | intel_tv->margin[TV_MARGIN_BOTTOM]); |
1716 | out: | 1709 | out: |
1717 | drm_sysfs_connector_add(connector); | 1710 | drm_sysfs_connector_add(connector); |
1718 | } | 1711 | } |
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c index fff82045c427..9ce2827f8c00 100644 --- a/drivers/gpu/drm/mga/mga_state.c +++ b/drivers/gpu/drm/mga/mga_state.c | |||
@@ -1085,19 +1085,19 @@ file_priv) | |||
1085 | } | 1085 | } |
1086 | 1086 | ||
1087 | struct drm_ioctl_desc mga_ioctls[] = { | 1087 | struct drm_ioctl_desc mga_ioctls[] = { |
1088 | DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1088 | DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1089 | DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH), | 1089 | DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH), |
1090 | DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH), | 1090 | DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH), |
1091 | DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH), | 1091 | DRM_IOCTL_DEF_DRV(MGA_SWAP, mga_dma_swap, DRM_AUTH), |
1092 | DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH), | 1092 | DRM_IOCTL_DEF_DRV(MGA_CLEAR, mga_dma_clear, DRM_AUTH), |
1093 | DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH), | 1093 | DRM_IOCTL_DEF_DRV(MGA_VERTEX, mga_dma_vertex, DRM_AUTH), |
1094 | DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH), | 1094 | DRM_IOCTL_DEF_DRV(MGA_INDICES, mga_dma_indices, DRM_AUTH), |
1095 | DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH), | 1095 | DRM_IOCTL_DEF_DRV(MGA_ILOAD, mga_dma_iload, DRM_AUTH), |
1096 | DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH), | 1096 | DRM_IOCTL_DEF_DRV(MGA_BLIT, mga_dma_blit, DRM_AUTH), |
1097 | DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH), | 1097 | DRM_IOCTL_DEF_DRV(MGA_GETPARAM, mga_getparam, DRM_AUTH), |
1098 | DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH), | 1098 | DRM_IOCTL_DEF_DRV(MGA_SET_FENCE, mga_set_fence, DRM_AUTH), |
1099 | DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH), | 1099 | DRM_IOCTL_DEF_DRV(MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH), |
1100 | DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1100 | DRM_IOCTL_DEF_DRV(MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1101 | }; | 1101 | }; |
1102 | 1102 | ||
1103 | int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls); | 1103 | int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 0b69a9628c95..e4f33a4edea1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
@@ -2166,7 +2166,7 @@ peek_fb(struct drm_device *dev, struct io_mapping *fb, | |||
2166 | uint32_t val = 0; | 2166 | uint32_t val = 0; |
2167 | 2167 | ||
2168 | if (off < pci_resource_len(dev->pdev, 1)) { | 2168 | if (off < pci_resource_len(dev->pdev, 1)) { |
2169 | uint32_t __iomem *p = | 2169 | uint8_t __iomem *p = |
2170 | io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0); | 2170 | io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0); |
2171 | 2171 | ||
2172 | val = ioread32(p + (off & ~PAGE_MASK)); | 2172 | val = ioread32(p + (off & ~PAGE_MASK)); |
@@ -2182,7 +2182,7 @@ poke_fb(struct drm_device *dev, struct io_mapping *fb, | |||
2182 | uint32_t off, uint32_t val) | 2182 | uint32_t off, uint32_t val) |
2183 | { | 2183 | { |
2184 | if (off < pci_resource_len(dev->pdev, 1)) { | 2184 | if (off < pci_resource_len(dev->pdev, 1)) { |
2185 | uint32_t __iomem *p = | 2185 | uint8_t __iomem *p = |
2186 | io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0); | 2186 | io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0); |
2187 | 2187 | ||
2188 | iowrite32(val, p + (off & ~PAGE_MASK)); | 2188 | iowrite32(val, p + (off & ~PAGE_MASK)); |
@@ -4587,7 +4587,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
4587 | return 1; | 4587 | return 1; |
4588 | } | 4588 | } |
4589 | 4589 | ||
4590 | NV_TRACE(dev, "0x%04X: parsing output script 0\n", script); | 4590 | NV_DEBUG_KMS(dev, "0x%04X: parsing output script 0\n", script); |
4591 | nouveau_bios_run_init_table(dev, script, dcbent); | 4591 | nouveau_bios_run_init_table(dev, script, dcbent); |
4592 | } else | 4592 | } else |
4593 | if (pxclk == -1) { | 4593 | if (pxclk == -1) { |
@@ -4597,7 +4597,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
4597 | return 1; | 4597 | return 1; |
4598 | } | 4598 | } |
4599 | 4599 | ||
4600 | NV_TRACE(dev, "0x%04X: parsing output script 1\n", script); | 4600 | NV_DEBUG_KMS(dev, "0x%04X: parsing output script 1\n", script); |
4601 | nouveau_bios_run_init_table(dev, script, dcbent); | 4601 | nouveau_bios_run_init_table(dev, script, dcbent); |
4602 | } else | 4602 | } else |
4603 | if (pxclk == -2) { | 4603 | if (pxclk == -2) { |
@@ -4610,7 +4610,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
4610 | return 1; | 4610 | return 1; |
4611 | } | 4611 | } |
4612 | 4612 | ||
4613 | NV_TRACE(dev, "0x%04X: parsing output script 2\n", script); | 4613 | NV_DEBUG_KMS(dev, "0x%04X: parsing output script 2\n", script); |
4614 | nouveau_bios_run_init_table(dev, script, dcbent); | 4614 | nouveau_bios_run_init_table(dev, script, dcbent); |
4615 | } else | 4615 | } else |
4616 | if (pxclk > 0) { | 4616 | if (pxclk > 0) { |
@@ -4622,7 +4622,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
4622 | return 1; | 4622 | return 1; |
4623 | } | 4623 | } |
4624 | 4624 | ||
4625 | NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script); | 4625 | NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 0\n", script); |
4626 | nouveau_bios_run_init_table(dev, script, dcbent); | 4626 | nouveau_bios_run_init_table(dev, script, dcbent); |
4627 | } else | 4627 | } else |
4628 | if (pxclk < 0) { | 4628 | if (pxclk < 0) { |
@@ -4634,7 +4634,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
4634 | return 1; | 4634 | return 1; |
4635 | } | 4635 | } |
4636 | 4636 | ||
4637 | NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script); | 4637 | NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 1\n", script); |
4638 | nouveau_bios_run_init_table(dev, script, dcbent); | 4638 | nouveau_bios_run_init_table(dev, script, dcbent); |
4639 | } | 4639 | } |
4640 | 4640 | ||
@@ -5357,19 +5357,17 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios, | |||
5357 | } | 5357 | } |
5358 | 5358 | ||
5359 | tmdstableptr = ROM16(bios->data[bitentry->offset]); | 5359 | tmdstableptr = ROM16(bios->data[bitentry->offset]); |
5360 | 5360 | if (!tmdstableptr) { | |
5361 | if (tmdstableptr == 0x0) { | ||
5362 | NV_ERROR(dev, "Pointer to TMDS table invalid\n"); | 5361 | NV_ERROR(dev, "Pointer to TMDS table invalid\n"); |
5363 | return -EINVAL; | 5362 | return -EINVAL; |
5364 | } | 5363 | } |
5365 | 5364 | ||
5365 | NV_INFO(dev, "TMDS table version %d.%d\n", | ||
5366 | bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf); | ||
5367 | |||
5366 | /* nv50+ has v2.0, but we don't parse it atm */ | 5368 | /* nv50+ has v2.0, but we don't parse it atm */ |
5367 | if (bios->data[tmdstableptr] != 0x11) { | 5369 | if (bios->data[tmdstableptr] != 0x11) |
5368 | NV_WARN(dev, | ||
5369 | "TMDS table revision %d.%d not currently supported\n", | ||
5370 | bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf); | ||
5371 | return -ENOSYS; | 5370 | return -ENOSYS; |
5372 | } | ||
5373 | 5371 | ||
5374 | /* | 5372 | /* |
5375 | * These two scripts are odd: they don't seem to get run even when | 5373 | * These two scripts are odd: they don't seem to get run even when |
@@ -5809,6 +5807,22 @@ parse_dcb_gpio_table(struct nvbios *bios) | |||
5809 | gpio->line = tvdac_gpio[1] >> 4; | 5807 | gpio->line = tvdac_gpio[1] >> 4; |
5810 | gpio->invert = tvdac_gpio[0] & 2; | 5808 | gpio->invert = tvdac_gpio[0] & 2; |
5811 | } | 5809 | } |
5810 | } else { | ||
5811 | /* | ||
5812 | * No systematic way to store GPIO info on pre-v2.2 | ||
5813 | * DCBs, try to match the PCI device IDs. | ||
5814 | */ | ||
5815 | |||
5816 | /* Apple iMac G4 NV18 */ | ||
5817 | if (dev->pdev->device == 0x0189 && | ||
5818 | dev->pdev->subsystem_vendor == 0x10de && | ||
5819 | dev->pdev->subsystem_device == 0x0010) { | ||
5820 | struct dcb_gpio_entry *gpio = new_gpio_entry(bios); | ||
5821 | |||
5822 | gpio->tag = DCB_GPIO_TVDAC0; | ||
5823 | gpio->line = 4; | ||
5824 | } | ||
5825 | |||
5812 | } | 5826 | } |
5813 | 5827 | ||
5814 | if (!gpio_table_ptr) | 5828 | if (!gpio_table_ptr) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 84f85183d041..f6f44779d82f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -36,6 +36,21 @@ | |||
36 | #include <linux/log2.h> | 36 | #include <linux/log2.h> |
37 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
38 | 38 | ||
39 | int | ||
40 | nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan) | ||
41 | { | ||
42 | struct nouveau_fence *prev_fence = nvbo->bo.sync_obj; | ||
43 | int ret; | ||
44 | |||
45 | if (!prev_fence || nouveau_fence_channel(prev_fence) == chan) | ||
46 | return 0; | ||
47 | |||
48 | spin_lock(&nvbo->bo.lock); | ||
49 | ret = ttm_bo_wait(&nvbo->bo, false, false, false); | ||
50 | spin_unlock(&nvbo->bo.lock); | ||
51 | return ret; | ||
52 | } | ||
53 | |||
39 | static void | 54 | static void |
40 | nouveau_bo_del_ttm(struct ttm_buffer_object *bo) | 55 | nouveau_bo_del_ttm(struct ttm_buffer_object *bo) |
41 | { | 56 | { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 90fdcda332be..0480f064f2c1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c | |||
@@ -426,18 +426,18 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, | |||
426 | ***********************************/ | 426 | ***********************************/ |
427 | 427 | ||
428 | struct drm_ioctl_desc nouveau_ioctls[] = { | 428 | struct drm_ioctl_desc nouveau_ioctls[] = { |
429 | DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), | 429 | DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), |
430 | DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 430 | DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
431 | DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), | 431 | DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), |
432 | DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH), | 432 | DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH), |
433 | DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH), | 433 | DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH), |
434 | DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH), | 434 | DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH), |
435 | DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), | 435 | DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), |
436 | DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH), | 436 | DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH), |
437 | DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH), | 437 | DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH), |
438 | DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH), | 438 | DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH), |
439 | DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH), | 439 | DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH), |
440 | DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH), | 440 | DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH), |
441 | }; | 441 | }; |
442 | 442 | ||
443 | int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); | 443 | int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index b1b22baf1428..a1473fff06ac 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
@@ -104,7 +104,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector, | |||
104 | int i; | 104 | int i; |
105 | 105 | ||
106 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | 106 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { |
107 | struct nouveau_i2c_chan *i2c; | 107 | struct nouveau_i2c_chan *i2c = NULL; |
108 | struct nouveau_encoder *nv_encoder; | 108 | struct nouveau_encoder *nv_encoder; |
109 | struct drm_mode_object *obj; | 109 | struct drm_mode_object *obj; |
110 | int id; | 110 | int id; |
@@ -117,7 +117,9 @@ nouveau_connector_ddc_detect(struct drm_connector *connector, | |||
117 | if (!obj) | 117 | if (!obj) |
118 | continue; | 118 | continue; |
119 | nv_encoder = nouveau_encoder(obj_to_encoder(obj)); | 119 | nv_encoder = nouveau_encoder(obj_to_encoder(obj)); |
120 | i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); | 120 | |
121 | if (nv_encoder->dcb->i2c_index < 0xf) | ||
122 | i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); | ||
121 | 123 | ||
122 | if (i2c && nouveau_probe_i2c_addr(i2c, 0x50)) { | 124 | if (i2c && nouveau_probe_i2c_addr(i2c, 0x50)) { |
123 | *pnv_encoder = nv_encoder; | 125 | *pnv_encoder = nv_encoder; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index e424bf74d706..1e093a069b7b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
@@ -1165,6 +1165,7 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index); | |||
1165 | extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); | 1165 | extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); |
1166 | extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); | 1166 | extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); |
1167 | extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); | 1167 | extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); |
1168 | extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *); | ||
1168 | 1169 | ||
1169 | /* nouveau_fence.c */ | 1170 | /* nouveau_fence.c */ |
1170 | struct nouveau_fence; | 1171 | struct nouveau_fence; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 0f417ac1b696..79fc5ffff226 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
@@ -361,16 +361,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, | |||
361 | 361 | ||
362 | list_for_each_entry(nvbo, list, entry) { | 362 | list_for_each_entry(nvbo, list, entry) { |
363 | struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; | 363 | struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; |
364 | struct nouveau_fence *prev_fence = nvbo->bo.sync_obj; | ||
365 | 364 | ||
366 | if (prev_fence && nouveau_fence_channel(prev_fence) != chan) { | 365 | ret = nouveau_bo_sync_gpu(nvbo, chan); |
367 | spin_lock(&nvbo->bo.lock); | 366 | if (unlikely(ret)) { |
368 | ret = ttm_bo_wait(&nvbo->bo, false, false, false); | 367 | NV_ERROR(dev, "fail pre-validate sync\n"); |
369 | spin_unlock(&nvbo->bo.lock); | 368 | return ret; |
370 | if (unlikely(ret)) { | ||
371 | NV_ERROR(dev, "fail wait other chan\n"); | ||
372 | return ret; | ||
373 | } | ||
374 | } | 369 | } |
375 | 370 | ||
376 | ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains, | 371 | ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains, |
@@ -381,7 +376,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, | |||
381 | return ret; | 376 | return ret; |
382 | } | 377 | } |
383 | 378 | ||
384 | nvbo->channel = chan; | 379 | nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan; |
385 | ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, | 380 | ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, |
386 | false, false, false); | 381 | false, false, false); |
387 | nvbo->channel = NULL; | 382 | nvbo->channel = NULL; |
@@ -390,6 +385,12 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, | |||
390 | return ret; | 385 | return ret; |
391 | } | 386 | } |
392 | 387 | ||
388 | ret = nouveau_bo_sync_gpu(nvbo, chan); | ||
389 | if (unlikely(ret)) { | ||
390 | NV_ERROR(dev, "fail post-validate sync\n"); | ||
391 | return ret; | ||
392 | } | ||
393 | |||
393 | if (nvbo->bo.offset == b->presumed.offset && | 394 | if (nvbo->bo.offset == b->presumed.offset && |
394 | ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && | 395 | ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && |
395 | b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || | 396 | b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || |
@@ -615,6 +616,21 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
615 | 616 | ||
616 | mutex_lock(&dev->struct_mutex); | 617 | mutex_lock(&dev->struct_mutex); |
617 | 618 | ||
619 | /* Mark push buffers as being used on PFIFO, the validation code | ||
620 | * will then make sure that if the pushbuf bo moves, that they | ||
621 | * happen on the kernel channel, which will in turn cause a sync | ||
622 | * to happen before we try and submit the push buffer. | ||
623 | */ | ||
624 | for (i = 0; i < req->nr_push; i++) { | ||
625 | if (push[i].bo_index >= req->nr_buffers) { | ||
626 | NV_ERROR(dev, "push %d buffer not in list\n", i); | ||
627 | ret = -EINVAL; | ||
628 | goto out; | ||
629 | } | ||
630 | |||
631 | bo[push[i].bo_index].read_domains |= (1 << 31); | ||
632 | } | ||
633 | |||
618 | /* Validate buffer list */ | 634 | /* Validate buffer list */ |
619 | ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers, | 635 | ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers, |
620 | req->nr_buffers, &op, &do_reloc); | 636 | req->nr_buffers, &op, &do_reloc); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c index 0bd407ca3d42..84614858728b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_i2c.c +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c | |||
@@ -163,7 +163,7 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index) | |||
163 | if (entry->chan) | 163 | if (entry->chan) |
164 | return -EEXIST; | 164 | return -EEXIST; |
165 | 165 | ||
166 | if (dev_priv->card_type == NV_C0 && entry->read >= NV50_I2C_PORTS) { | 166 | if (dev_priv->card_type >= NV_50 && entry->read >= NV50_I2C_PORTS) { |
167 | NV_ERROR(dev, "unknown i2c port %d\n", entry->read); | 167 | NV_ERROR(dev, "unknown i2c port %d\n", entry->read); |
168 | return -EINVAL; | 168 | return -EINVAL; |
169 | } | 169 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 491767fe4fcf..6b9187d7f67d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c | |||
@@ -214,6 +214,7 @@ int | |||
214 | nouveau_sgdma_init(struct drm_device *dev) | 214 | nouveau_sgdma_init(struct drm_device *dev) |
215 | { | 215 | { |
216 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 216 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
217 | struct pci_dev *pdev = dev->pdev; | ||
217 | struct nouveau_gpuobj *gpuobj = NULL; | 218 | struct nouveau_gpuobj *gpuobj = NULL; |
218 | uint32_t aper_size, obj_size; | 219 | uint32_t aper_size, obj_size; |
219 | int i, ret; | 220 | int i, ret; |
@@ -239,10 +240,19 @@ nouveau_sgdma_init(struct drm_device *dev) | |||
239 | 240 | ||
240 | dev_priv->gart_info.sg_dummy_page = | 241 | dev_priv->gart_info.sg_dummy_page = |
241 | alloc_page(GFP_KERNEL|__GFP_DMA32); | 242 | alloc_page(GFP_KERNEL|__GFP_DMA32); |
243 | if (!dev_priv->gart_info.sg_dummy_page) { | ||
244 | nouveau_gpuobj_del(dev, &gpuobj); | ||
245 | return -ENOMEM; | ||
246 | } | ||
247 | |||
242 | set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags); | 248 | set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags); |
243 | dev_priv->gart_info.sg_dummy_bus = | 249 | dev_priv->gart_info.sg_dummy_bus = |
244 | pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0, | 250 | pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0, |
245 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | 251 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
252 | if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) { | ||
253 | nouveau_gpuobj_del(dev, &gpuobj); | ||
254 | return -EFAULT; | ||
255 | } | ||
246 | 256 | ||
247 | if (dev_priv->card_type < NV_50) { | 257 | if (dev_priv->card_type < NV_50) { |
248 | /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and | 258 | /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and |
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c index 44fefb0c7083..eefa5c856932 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv.c +++ b/drivers/gpu/drm/nouveau/nv17_tv.c | |||
@@ -129,6 +129,14 @@ get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask) | |||
129 | return false; | 129 | return false; |
130 | } | 130 | } |
131 | 131 | ||
132 | /* MSI nForce2 IGP */ | ||
133 | if (dev->pdev->device == 0x01f0 && | ||
134 | dev->pdev->subsystem_vendor == 0x1462 && | ||
135 | dev->pdev->subsystem_device == 0x5710) { | ||
136 | *pin_mask = 0xc; | ||
137 | return false; | ||
138 | } | ||
139 | |||
132 | return true; | 140 | return true; |
133 | } | 141 | } |
134 | 142 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 37c7b48ab24a..c95bf9b681dd 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c | |||
@@ -278,7 +278,7 @@ nv50_instmem_init(struct drm_device *dev) | |||
278 | /*XXX: incorrect, but needed to make hash func "work" */ | 278 | /*XXX: incorrect, but needed to make hash func "work" */ |
279 | dev_priv->ramht_offset = 0x10000; | 279 | dev_priv->ramht_offset = 0x10000; |
280 | dev_priv->ramht_bits = 9; | 280 | dev_priv->ramht_bits = 9; |
281 | dev_priv->ramht_size = (1 << dev_priv->ramht_bits); | 281 | dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8; |
282 | return 0; | 282 | return 0; |
283 | } | 283 | } |
284 | 284 | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c index 3ab3cdc42173..6b451f864783 100644 --- a/drivers/gpu/drm/nouveau/nvc0_instmem.c +++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c | |||
@@ -142,14 +142,16 @@ int | |||
142 | nvc0_instmem_suspend(struct drm_device *dev) | 142 | nvc0_instmem_suspend(struct drm_device *dev) |
143 | { | 143 | { |
144 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 144 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
145 | u32 *buf; | ||
145 | int i; | 146 | int i; |
146 | 147 | ||
147 | dev_priv->susres.ramin_copy = vmalloc(65536); | 148 | dev_priv->susres.ramin_copy = vmalloc(65536); |
148 | if (!dev_priv->susres.ramin_copy) | 149 | if (!dev_priv->susres.ramin_copy) |
149 | return -ENOMEM; | 150 | return -ENOMEM; |
151 | buf = dev_priv->susres.ramin_copy; | ||
150 | 152 | ||
151 | for (i = 0x700000; i < 0x710000; i += 4) | 153 | for (i = 0; i < 65536; i += 4) |
152 | dev_priv->susres.ramin_copy[i/4] = nv_rd32(dev, i); | 154 | buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i); |
153 | return 0; | 155 | return 0; |
154 | } | 156 | } |
155 | 157 | ||
@@ -157,14 +159,15 @@ void | |||
157 | nvc0_instmem_resume(struct drm_device *dev) | 159 | nvc0_instmem_resume(struct drm_device *dev) |
158 | { | 160 | { |
159 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 161 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
162 | u32 *buf = dev_priv->susres.ramin_copy; | ||
160 | u64 chan; | 163 | u64 chan; |
161 | int i; | 164 | int i; |
162 | 165 | ||
163 | chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram; | 166 | chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram; |
164 | nv_wr32(dev, 0x001700, chan >> 16); | 167 | nv_wr32(dev, 0x001700, chan >> 16); |
165 | 168 | ||
166 | for (i = 0x700000; i < 0x710000; i += 4) | 169 | for (i = 0; i < 65536; i += 4) |
167 | nv_wr32(dev, i, dev_priv->susres.ramin_copy[i/4]); | 170 | nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]); |
168 | vfree(dev_priv->susres.ramin_copy); | 171 | vfree(dev_priv->susres.ramin_copy); |
169 | dev_priv->susres.ramin_copy = NULL; | 172 | dev_priv->susres.ramin_copy = NULL; |
170 | 173 | ||
@@ -221,7 +224,7 @@ nvc0_instmem_init(struct drm_device *dev) | |||
221 | /*XXX: incorrect, but needed to make hash func "work" */ | 224 | /*XXX: incorrect, but needed to make hash func "work" */ |
222 | dev_priv->ramht_offset = 0x10000; | 225 | dev_priv->ramht_offset = 0x10000; |
223 | dev_priv->ramht_bits = 9; | 226 | dev_priv->ramht_bits = 9; |
224 | dev_priv->ramht_size = (1 << dev_priv->ramht_bits); | 227 | dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8; |
225 | return 0; | 228 | return 0; |
226 | } | 229 | } |
227 | 230 | ||
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c index 077af1f2f9b4..a9e33ce65918 100644 --- a/drivers/gpu/drm/r128/r128_state.c +++ b/drivers/gpu/drm/r128/r128_state.c | |||
@@ -1639,30 +1639,29 @@ void r128_driver_preclose(struct drm_device *dev, struct drm_file *file_priv) | |||
1639 | r128_do_cleanup_pageflip(dev); | 1639 | r128_do_cleanup_pageflip(dev); |
1640 | } | 1640 | } |
1641 | } | 1641 | } |
1642 | |||
1643 | void r128_driver_lastclose(struct drm_device *dev) | 1642 | void r128_driver_lastclose(struct drm_device *dev) |
1644 | { | 1643 | { |
1645 | r128_do_cleanup_cce(dev); | 1644 | r128_do_cleanup_cce(dev); |
1646 | } | 1645 | } |
1647 | 1646 | ||
1648 | struct drm_ioctl_desc r128_ioctls[] = { | 1647 | struct drm_ioctl_desc r128_ioctls[] = { |
1649 | DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1648 | DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1650 | DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1649 | DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1651 | DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1650 | DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1652 | DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1651 | DRM_IOCTL_DEF_DRV(R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1653 | DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH), | 1652 | DRM_IOCTL_DEF_DRV(R128_CCE_IDLE, r128_cce_idle, DRM_AUTH), |
1654 | DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH), | 1653 | DRM_IOCTL_DEF_DRV(R128_RESET, r128_engine_reset, DRM_AUTH), |
1655 | DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH), | 1654 | DRM_IOCTL_DEF_DRV(R128_FULLSCREEN, r128_fullscreen, DRM_AUTH), |
1656 | DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH), | 1655 | DRM_IOCTL_DEF_DRV(R128_SWAP, r128_cce_swap, DRM_AUTH), |
1657 | DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH), | 1656 | DRM_IOCTL_DEF_DRV(R128_FLIP, r128_cce_flip, DRM_AUTH), |
1658 | DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH), | 1657 | DRM_IOCTL_DEF_DRV(R128_CLEAR, r128_cce_clear, DRM_AUTH), |
1659 | DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH), | 1658 | DRM_IOCTL_DEF_DRV(R128_VERTEX, r128_cce_vertex, DRM_AUTH), |
1660 | DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH), | 1659 | DRM_IOCTL_DEF_DRV(R128_INDICES, r128_cce_indices, DRM_AUTH), |
1661 | DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH), | 1660 | DRM_IOCTL_DEF_DRV(R128_BLIT, r128_cce_blit, DRM_AUTH), |
1662 | DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH), | 1661 | DRM_IOCTL_DEF_DRV(R128_DEPTH, r128_cce_depth, DRM_AUTH), |
1663 | DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH), | 1662 | DRM_IOCTL_DEF_DRV(R128_STIPPLE, r128_cce_stipple, DRM_AUTH), |
1664 | DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1663 | DRM_IOCTL_DEF_DRV(R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1665 | DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH), | 1664 | DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH), |
1666 | }; | 1665 | }; |
1667 | 1666 | ||
1668 | int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls); | 1667 | int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls); |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 12ad512bd3d3..577239a24fd5 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -471,6 +471,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
471 | struct radeon_encoder *radeon_encoder = NULL; | 471 | struct radeon_encoder *radeon_encoder = NULL; |
472 | u32 adjusted_clock = mode->clock; | 472 | u32 adjusted_clock = mode->clock; |
473 | int encoder_mode = 0; | 473 | int encoder_mode = 0; |
474 | u32 dp_clock = mode->clock; | ||
475 | int bpc = 8; | ||
474 | 476 | ||
475 | /* reset the pll flags */ | 477 | /* reset the pll flags */ |
476 | pll->flags = 0; | 478 | pll->flags = 0; |
@@ -513,6 +515,17 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
513 | if (encoder->crtc == crtc) { | 515 | if (encoder->crtc == crtc) { |
514 | radeon_encoder = to_radeon_encoder(encoder); | 516 | radeon_encoder = to_radeon_encoder(encoder); |
515 | encoder_mode = atombios_get_encoder_mode(encoder); | 517 | encoder_mode = atombios_get_encoder_mode(encoder); |
518 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) { | ||
519 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
520 | if (connector) { | ||
521 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
522 | struct radeon_connector_atom_dig *dig_connector = | ||
523 | radeon_connector->con_priv; | ||
524 | |||
525 | dp_clock = dig_connector->dp_clock; | ||
526 | } | ||
527 | } | ||
528 | |||
516 | if (ASIC_IS_AVIVO(rdev)) { | 529 | if (ASIC_IS_AVIVO(rdev)) { |
517 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ | 530 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ |
518 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) | 531 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) |
@@ -555,6 +568,14 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
555 | args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); | 568 | args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); |
556 | args.v1.ucTransmitterID = radeon_encoder->encoder_id; | 569 | args.v1.ucTransmitterID = radeon_encoder->encoder_id; |
557 | args.v1.ucEncodeMode = encoder_mode; | 570 | args.v1.ucEncodeMode = encoder_mode; |
571 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { | ||
572 | /* may want to enable SS on DP eventually */ | ||
573 | /* args.v1.ucConfig |= | ||
574 | ADJUST_DISPLAY_CONFIG_SS_ENABLE;*/ | ||
575 | } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { | ||
576 | args.v1.ucConfig |= | ||
577 | ADJUST_DISPLAY_CONFIG_SS_ENABLE; | ||
578 | } | ||
558 | 579 | ||
559 | atom_execute_table(rdev->mode_info.atom_context, | 580 | atom_execute_table(rdev->mode_info.atom_context, |
560 | index, (uint32_t *)&args); | 581 | index, (uint32_t *)&args); |
@@ -568,10 +589,20 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
568 | if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | 589 | if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { |
569 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 590 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
570 | 591 | ||
571 | if (encoder_mode == ATOM_ENCODER_MODE_DP) | 592 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { |
593 | /* may want to enable SS on DP/eDP eventually */ | ||
594 | /*args.v3.sInput.ucDispPllConfig |= | ||
595 | DISPPLL_CONFIG_SS_ENABLE;*/ | ||
572 | args.v3.sInput.ucDispPllConfig |= | 596 | args.v3.sInput.ucDispPllConfig |= |
573 | DISPPLL_CONFIG_COHERENT_MODE; | 597 | DISPPLL_CONFIG_COHERENT_MODE; |
574 | else { | 598 | /* 16200 or 27000 */ |
599 | args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); | ||
600 | } else { | ||
601 | if (encoder_mode == ATOM_ENCODER_MODE_HDMI) { | ||
602 | /* deep color support */ | ||
603 | args.v3.sInput.usPixelClock = | ||
604 | cpu_to_le16((mode->clock * bpc / 8) / 10); | ||
605 | } | ||
575 | if (dig->coherent_mode) | 606 | if (dig->coherent_mode) |
576 | args.v3.sInput.ucDispPllConfig |= | 607 | args.v3.sInput.ucDispPllConfig |= |
577 | DISPPLL_CONFIG_COHERENT_MODE; | 608 | DISPPLL_CONFIG_COHERENT_MODE; |
@@ -580,13 +611,19 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
580 | DISPPLL_CONFIG_DUAL_LINK; | 611 | DISPPLL_CONFIG_DUAL_LINK; |
581 | } | 612 | } |
582 | } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 613 | } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
583 | /* may want to enable SS on DP/eDP eventually */ | 614 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { |
584 | /*args.v3.sInput.ucDispPllConfig |= | 615 | /* may want to enable SS on DP/eDP eventually */ |
585 | DISPPLL_CONFIG_SS_ENABLE;*/ | 616 | /*args.v3.sInput.ucDispPllConfig |= |
586 | if (encoder_mode == ATOM_ENCODER_MODE_DP) | 617 | DISPPLL_CONFIG_SS_ENABLE;*/ |
587 | args.v3.sInput.ucDispPllConfig |= | 618 | args.v3.sInput.ucDispPllConfig |= |
588 | DISPPLL_CONFIG_COHERENT_MODE; | 619 | DISPPLL_CONFIG_COHERENT_MODE; |
589 | else { | 620 | /* 16200 or 27000 */ |
621 | args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); | ||
622 | } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { | ||
623 | /* want to enable SS on LVDS eventually */ | ||
624 | /*args.v3.sInput.ucDispPllConfig |= | ||
625 | DISPPLL_CONFIG_SS_ENABLE;*/ | ||
626 | } else { | ||
590 | if (mode->clock > 165000) | 627 | if (mode->clock > 165000) |
591 | args.v3.sInput.ucDispPllConfig |= | 628 | args.v3.sInput.ucDispPllConfig |= |
592 | DISPPLL_CONFIG_DUAL_LINK; | 629 | DISPPLL_CONFIG_DUAL_LINK; |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 36e0d4b545e6..4e7778d44b8d 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -610,7 +610,7 @@ void dp_link_train(struct drm_encoder *encoder, | |||
610 | enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; | 610 | enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; |
611 | else | 611 | else |
612 | enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER; | 612 | enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER; |
613 | if (dig_connector->linkb) | 613 | if (dig->linkb) |
614 | enc_id |= ATOM_DP_CONFIG_LINK_B; | 614 | enc_id |= ATOM_DP_CONFIG_LINK_B; |
615 | else | 615 | else |
616 | enc_id |= ATOM_DP_CONFIG_LINK_A; | 616 | enc_id |= ATOM_DP_CONFIG_LINK_A; |
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index f40dfb77f9b1..bd2f33e5c91a 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c | |||
@@ -156,7 +156,13 @@ int radeon_agp_init(struct radeon_device *rdev) | |||
156 | } | 156 | } |
157 | 157 | ||
158 | mode.mode = info.mode; | 158 | mode.mode = info.mode; |
159 | agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode; | 159 | /* chips with the agp to pcie bridge don't have the AGP_STATUS register |
160 | * Just use the whatever mode the host sets up. | ||
161 | */ | ||
162 | if (rdev->family <= CHIP_RV350) | ||
163 | agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode; | ||
164 | else | ||
165 | agp_status = mode.mode; | ||
160 | is_v3 = !!(agp_status & RADEON_AGPv3_MODE); | 166 | is_v3 = !!(agp_status & RADEON_AGPv3_MODE); |
161 | 167 | ||
162 | if (is_v3) { | 168 | if (is_v3) { |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 646f96f97c77..a21bf88e8c2d 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -733,6 +733,7 @@ static struct radeon_asic evergreen_asic = { | |||
733 | .set_engine_clock = &radeon_atom_set_engine_clock, | 733 | .set_engine_clock = &radeon_atom_set_engine_clock, |
734 | .get_memory_clock = &radeon_atom_get_memory_clock, | 734 | .get_memory_clock = &radeon_atom_get_memory_clock, |
735 | .set_memory_clock = &radeon_atom_set_memory_clock, | 735 | .set_memory_clock = &radeon_atom_set_memory_clock, |
736 | .get_pcie_lanes = NULL, | ||
736 | .set_pcie_lanes = NULL, | 737 | .set_pcie_lanes = NULL, |
737 | .set_clock_gating = NULL, | 738 | .set_clock_gating = NULL, |
738 | .set_surface_reg = r600_set_surface_reg, | 739 | .set_surface_reg = r600_set_surface_reg, |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 6d30868744ee..61141981880d 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -32,11 +32,11 @@ | |||
32 | 32 | ||
33 | /* from radeon_encoder.c */ | 33 | /* from radeon_encoder.c */ |
34 | extern uint32_t | 34 | extern uint32_t |
35 | radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, | 35 | radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, |
36 | uint8_t dac); | 36 | uint8_t dac); |
37 | extern void radeon_link_encoder_connector(struct drm_device *dev); | 37 | extern void radeon_link_encoder_connector(struct drm_device *dev); |
38 | extern void | 38 | extern void |
39 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, | 39 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, |
40 | uint32_t supported_device); | 40 | uint32_t supported_device); |
41 | 41 | ||
42 | /* from radeon_connector.c */ | 42 | /* from radeon_connector.c */ |
@@ -46,14 +46,14 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
46 | uint32_t supported_device, | 46 | uint32_t supported_device, |
47 | int connector_type, | 47 | int connector_type, |
48 | struct radeon_i2c_bus_rec *i2c_bus, | 48 | struct radeon_i2c_bus_rec *i2c_bus, |
49 | bool linkb, uint32_t igp_lane_info, | 49 | uint32_t igp_lane_info, |
50 | uint16_t connector_object_id, | 50 | uint16_t connector_object_id, |
51 | struct radeon_hpd *hpd, | 51 | struct radeon_hpd *hpd, |
52 | struct radeon_router *router); | 52 | struct radeon_router *router); |
53 | 53 | ||
54 | /* from radeon_legacy_encoder.c */ | 54 | /* from radeon_legacy_encoder.c */ |
55 | extern void | 55 | extern void |
56 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, | 56 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, |
57 | uint32_t supported_device); | 57 | uint32_t supported_device); |
58 | 58 | ||
59 | union atom_supported_devices { | 59 | union atom_supported_devices { |
@@ -226,6 +226,8 @@ static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device | |||
226 | struct radeon_hpd hpd; | 226 | struct radeon_hpd hpd; |
227 | u32 reg; | 227 | u32 reg; |
228 | 228 | ||
229 | memset(&hpd, 0, sizeof(struct radeon_hpd)); | ||
230 | |||
229 | if (ASIC_IS_DCE4(rdev)) | 231 | if (ASIC_IS_DCE4(rdev)) |
230 | reg = EVERGREEN_DC_GPIO_HPD_A; | 232 | reg = EVERGREEN_DC_GPIO_HPD_A; |
231 | else | 233 | else |
@@ -477,7 +479,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
477 | int i, j, k, path_size, device_support; | 479 | int i, j, k, path_size, device_support; |
478 | int connector_type; | 480 | int connector_type; |
479 | u16 igp_lane_info, conn_id, connector_object_id; | 481 | u16 igp_lane_info, conn_id, connector_object_id; |
480 | bool linkb; | ||
481 | struct radeon_i2c_bus_rec ddc_bus; | 482 | struct radeon_i2c_bus_rec ddc_bus; |
482 | struct radeon_router router; | 483 | struct radeon_router router; |
483 | struct radeon_gpio_rec gpio; | 484 | struct radeon_gpio_rec gpio; |
@@ -510,7 +511,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
510 | addr += path_size; | 511 | addr += path_size; |
511 | path = (ATOM_DISPLAY_OBJECT_PATH *) addr; | 512 | path = (ATOM_DISPLAY_OBJECT_PATH *) addr; |
512 | path_size += le16_to_cpu(path->usSize); | 513 | path_size += le16_to_cpu(path->usSize); |
513 | linkb = false; | 514 | |
514 | if (device_support & le16_to_cpu(path->usDeviceTag)) { | 515 | if (device_support & le16_to_cpu(path->usDeviceTag)) { |
515 | uint8_t con_obj_id, con_obj_num, con_obj_type; | 516 | uint8_t con_obj_id, con_obj_num, con_obj_type; |
516 | 517 | ||
@@ -601,13 +602,10 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
601 | OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; | 602 | OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; |
602 | 603 | ||
603 | if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) { | 604 | if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) { |
604 | if (grph_obj_num == 2) | 605 | u16 encoder_obj = le16_to_cpu(path->usGraphicObjIds[j]); |
605 | linkb = true; | ||
606 | else | ||
607 | linkb = false; | ||
608 | 606 | ||
609 | radeon_add_atom_encoder(dev, | 607 | radeon_add_atom_encoder(dev, |
610 | grph_obj_id, | 608 | encoder_obj, |
611 | le16_to_cpu | 609 | le16_to_cpu |
612 | (path-> | 610 | (path-> |
613 | usDeviceTag)); | 611 | usDeviceTag)); |
@@ -744,7 +742,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
744 | le16_to_cpu(path-> | 742 | le16_to_cpu(path-> |
745 | usDeviceTag), | 743 | usDeviceTag), |
746 | connector_type, &ddc_bus, | 744 | connector_type, &ddc_bus, |
747 | linkb, igp_lane_info, | 745 | igp_lane_info, |
748 | connector_object_id, | 746 | connector_object_id, |
749 | &hpd, | 747 | &hpd, |
750 | &router); | 748 | &router); |
@@ -933,13 +931,13 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
933 | 931 | ||
934 | if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) | 932 | if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) |
935 | radeon_add_atom_encoder(dev, | 933 | radeon_add_atom_encoder(dev, |
936 | radeon_get_encoder_id(dev, | 934 | radeon_get_encoder_enum(dev, |
937 | (1 << i), | 935 | (1 << i), |
938 | dac), | 936 | dac), |
939 | (1 << i)); | 937 | (1 << i)); |
940 | else | 938 | else |
941 | radeon_add_legacy_encoder(dev, | 939 | radeon_add_legacy_encoder(dev, |
942 | radeon_get_encoder_id(dev, | 940 | radeon_get_encoder_enum(dev, |
943 | (1 << i), | 941 | (1 << i), |
944 | dac), | 942 | dac), |
945 | (1 << i)); | 943 | (1 << i)); |
@@ -996,7 +994,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
996 | bios_connectors[i]. | 994 | bios_connectors[i]. |
997 | connector_type, | 995 | connector_type, |
998 | &bios_connectors[i].ddc_bus, | 996 | &bios_connectors[i].ddc_bus, |
999 | false, 0, | 997 | 0, |
1000 | connector_object_id, | 998 | connector_object_id, |
1001 | &bios_connectors[i].hpd, | 999 | &bios_connectors[i].hpd, |
1002 | &router); | 1000 | &router); |
@@ -1183,7 +1181,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev) | |||
1183 | return true; | 1181 | return true; |
1184 | break; | 1182 | break; |
1185 | case 2: | 1183 | case 2: |
1186 | if (igp_info->info_2.ucMemoryType & 0x0f) | 1184 | if (igp_info->info_2.ulBootUpSidePortClock) |
1187 | return true; | 1185 | return true; |
1188 | break; | 1186 | break; |
1189 | default: | 1187 | default: |
@@ -1305,6 +1303,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
1305 | union lvds_info *lvds_info; | 1303 | union lvds_info *lvds_info; |
1306 | uint8_t frev, crev; | 1304 | uint8_t frev, crev; |
1307 | struct radeon_encoder_atom_dig *lvds = NULL; | 1305 | struct radeon_encoder_atom_dig *lvds = NULL; |
1306 | int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; | ||
1308 | 1307 | ||
1309 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, | 1308 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
1310 | &frev, &crev, &data_offset)) { | 1309 | &frev, &crev, &data_offset)) { |
@@ -1368,6 +1367,12 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
1368 | } | 1367 | } |
1369 | 1368 | ||
1370 | encoder->native_mode = lvds->native_mode; | 1369 | encoder->native_mode = lvds->native_mode; |
1370 | |||
1371 | if (encoder_enum == 2) | ||
1372 | lvds->linkb = true; | ||
1373 | else | ||
1374 | lvds->linkb = false; | ||
1375 | |||
1371 | } | 1376 | } |
1372 | return lvds; | 1377 | return lvds; |
1373 | } | 1378 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 885dcfac1838..bd74e428bd14 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -39,8 +39,8 @@ | |||
39 | 39 | ||
40 | /* from radeon_encoder.c */ | 40 | /* from radeon_encoder.c */ |
41 | extern uint32_t | 41 | extern uint32_t |
42 | radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, | 42 | radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, |
43 | uint8_t dac); | 43 | uint8_t dac); |
44 | extern void radeon_link_encoder_connector(struct drm_device *dev); | 44 | extern void radeon_link_encoder_connector(struct drm_device *dev); |
45 | 45 | ||
46 | /* from radeon_connector.c */ | 46 | /* from radeon_connector.c */ |
@@ -55,7 +55,7 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
55 | 55 | ||
56 | /* from radeon_legacy_encoder.c */ | 56 | /* from radeon_legacy_encoder.c */ |
57 | extern void | 57 | extern void |
58 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, | 58 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, |
59 | uint32_t supported_device); | 59 | uint32_t supported_device); |
60 | 60 | ||
61 | /* old legacy ATI BIOS routines */ | 61 | /* old legacy ATI BIOS routines */ |
@@ -1505,7 +1505,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1505 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); | 1505 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); |
1506 | hpd.hpd = RADEON_HPD_NONE; | 1506 | hpd.hpd = RADEON_HPD_NONE; |
1507 | radeon_add_legacy_encoder(dev, | 1507 | radeon_add_legacy_encoder(dev, |
1508 | radeon_get_encoder_id(dev, | 1508 | radeon_get_encoder_enum(dev, |
1509 | ATOM_DEVICE_CRT1_SUPPORT, | 1509 | ATOM_DEVICE_CRT1_SUPPORT, |
1510 | 1), | 1510 | 1), |
1511 | ATOM_DEVICE_CRT1_SUPPORT); | 1511 | ATOM_DEVICE_CRT1_SUPPORT); |
@@ -1520,7 +1520,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1520 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0); | 1520 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0); |
1521 | hpd.hpd = RADEON_HPD_NONE; | 1521 | hpd.hpd = RADEON_HPD_NONE; |
1522 | radeon_add_legacy_encoder(dev, | 1522 | radeon_add_legacy_encoder(dev, |
1523 | radeon_get_encoder_id(dev, | 1523 | radeon_get_encoder_enum(dev, |
1524 | ATOM_DEVICE_LCD1_SUPPORT, | 1524 | ATOM_DEVICE_LCD1_SUPPORT, |
1525 | 0), | 1525 | 0), |
1526 | ATOM_DEVICE_LCD1_SUPPORT); | 1526 | ATOM_DEVICE_LCD1_SUPPORT); |
@@ -1535,7 +1535,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1535 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); | 1535 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); |
1536 | hpd.hpd = RADEON_HPD_NONE; | 1536 | hpd.hpd = RADEON_HPD_NONE; |
1537 | radeon_add_legacy_encoder(dev, | 1537 | radeon_add_legacy_encoder(dev, |
1538 | radeon_get_encoder_id(dev, | 1538 | radeon_get_encoder_enum(dev, |
1539 | ATOM_DEVICE_CRT1_SUPPORT, | 1539 | ATOM_DEVICE_CRT1_SUPPORT, |
1540 | 1), | 1540 | 1), |
1541 | ATOM_DEVICE_CRT1_SUPPORT); | 1541 | ATOM_DEVICE_CRT1_SUPPORT); |
@@ -1550,12 +1550,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1550 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); | 1550 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); |
1551 | hpd.hpd = RADEON_HPD_1; | 1551 | hpd.hpd = RADEON_HPD_1; |
1552 | radeon_add_legacy_encoder(dev, | 1552 | radeon_add_legacy_encoder(dev, |
1553 | radeon_get_encoder_id(dev, | 1553 | radeon_get_encoder_enum(dev, |
1554 | ATOM_DEVICE_DFP1_SUPPORT, | 1554 | ATOM_DEVICE_DFP1_SUPPORT, |
1555 | 0), | 1555 | 0), |
1556 | ATOM_DEVICE_DFP1_SUPPORT); | 1556 | ATOM_DEVICE_DFP1_SUPPORT); |
1557 | radeon_add_legacy_encoder(dev, | 1557 | radeon_add_legacy_encoder(dev, |
1558 | radeon_get_encoder_id(dev, | 1558 | radeon_get_encoder_enum(dev, |
1559 | ATOM_DEVICE_CRT2_SUPPORT, | 1559 | ATOM_DEVICE_CRT2_SUPPORT, |
1560 | 2), | 1560 | 2), |
1561 | ATOM_DEVICE_CRT2_SUPPORT); | 1561 | ATOM_DEVICE_CRT2_SUPPORT); |
@@ -1571,7 +1571,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1571 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); | 1571 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); |
1572 | hpd.hpd = RADEON_HPD_NONE; | 1572 | hpd.hpd = RADEON_HPD_NONE; |
1573 | radeon_add_legacy_encoder(dev, | 1573 | radeon_add_legacy_encoder(dev, |
1574 | radeon_get_encoder_id(dev, | 1574 | radeon_get_encoder_enum(dev, |
1575 | ATOM_DEVICE_CRT1_SUPPORT, | 1575 | ATOM_DEVICE_CRT1_SUPPORT, |
1576 | 1), | 1576 | 1), |
1577 | ATOM_DEVICE_CRT1_SUPPORT); | 1577 | ATOM_DEVICE_CRT1_SUPPORT); |
@@ -1588,7 +1588,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1588 | ddc_i2c.valid = false; | 1588 | ddc_i2c.valid = false; |
1589 | hpd.hpd = RADEON_HPD_NONE; | 1589 | hpd.hpd = RADEON_HPD_NONE; |
1590 | radeon_add_legacy_encoder(dev, | 1590 | radeon_add_legacy_encoder(dev, |
1591 | radeon_get_encoder_id(dev, | 1591 | radeon_get_encoder_enum(dev, |
1592 | ATOM_DEVICE_TV1_SUPPORT, | 1592 | ATOM_DEVICE_TV1_SUPPORT, |
1593 | 2), | 1593 | 2), |
1594 | ATOM_DEVICE_TV1_SUPPORT); | 1594 | ATOM_DEVICE_TV1_SUPPORT); |
@@ -1607,7 +1607,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1607 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); | 1607 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); |
1608 | hpd.hpd = RADEON_HPD_NONE; | 1608 | hpd.hpd = RADEON_HPD_NONE; |
1609 | radeon_add_legacy_encoder(dev, | 1609 | radeon_add_legacy_encoder(dev, |
1610 | radeon_get_encoder_id(dev, | 1610 | radeon_get_encoder_enum(dev, |
1611 | ATOM_DEVICE_LCD1_SUPPORT, | 1611 | ATOM_DEVICE_LCD1_SUPPORT, |
1612 | 0), | 1612 | 0), |
1613 | ATOM_DEVICE_LCD1_SUPPORT); | 1613 | ATOM_DEVICE_LCD1_SUPPORT); |
@@ -1619,7 +1619,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1619 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); | 1619 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); |
1620 | hpd.hpd = RADEON_HPD_NONE; | 1620 | hpd.hpd = RADEON_HPD_NONE; |
1621 | radeon_add_legacy_encoder(dev, | 1621 | radeon_add_legacy_encoder(dev, |
1622 | radeon_get_encoder_id(dev, | 1622 | radeon_get_encoder_enum(dev, |
1623 | ATOM_DEVICE_CRT2_SUPPORT, | 1623 | ATOM_DEVICE_CRT2_SUPPORT, |
1624 | 2), | 1624 | 2), |
1625 | ATOM_DEVICE_CRT2_SUPPORT); | 1625 | ATOM_DEVICE_CRT2_SUPPORT); |
@@ -1631,7 +1631,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1631 | ddc_i2c.valid = false; | 1631 | ddc_i2c.valid = false; |
1632 | hpd.hpd = RADEON_HPD_NONE; | 1632 | hpd.hpd = RADEON_HPD_NONE; |
1633 | radeon_add_legacy_encoder(dev, | 1633 | radeon_add_legacy_encoder(dev, |
1634 | radeon_get_encoder_id(dev, | 1634 | radeon_get_encoder_enum(dev, |
1635 | ATOM_DEVICE_TV1_SUPPORT, | 1635 | ATOM_DEVICE_TV1_SUPPORT, |
1636 | 2), | 1636 | 2), |
1637 | ATOM_DEVICE_TV1_SUPPORT); | 1637 | ATOM_DEVICE_TV1_SUPPORT); |
@@ -1648,7 +1648,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1648 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); | 1648 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); |
1649 | hpd.hpd = RADEON_HPD_NONE; | 1649 | hpd.hpd = RADEON_HPD_NONE; |
1650 | radeon_add_legacy_encoder(dev, | 1650 | radeon_add_legacy_encoder(dev, |
1651 | radeon_get_encoder_id(dev, | 1651 | radeon_get_encoder_enum(dev, |
1652 | ATOM_DEVICE_LCD1_SUPPORT, | 1652 | ATOM_DEVICE_LCD1_SUPPORT, |
1653 | 0), | 1653 | 0), |
1654 | ATOM_DEVICE_LCD1_SUPPORT); | 1654 | ATOM_DEVICE_LCD1_SUPPORT); |
@@ -1660,12 +1660,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1660 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); | 1660 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); |
1661 | hpd.hpd = RADEON_HPD_2; /* ??? */ | 1661 | hpd.hpd = RADEON_HPD_2; /* ??? */ |
1662 | radeon_add_legacy_encoder(dev, | 1662 | radeon_add_legacy_encoder(dev, |
1663 | radeon_get_encoder_id(dev, | 1663 | radeon_get_encoder_enum(dev, |
1664 | ATOM_DEVICE_DFP2_SUPPORT, | 1664 | ATOM_DEVICE_DFP2_SUPPORT, |
1665 | 0), | 1665 | 0), |
1666 | ATOM_DEVICE_DFP2_SUPPORT); | 1666 | ATOM_DEVICE_DFP2_SUPPORT); |
1667 | radeon_add_legacy_encoder(dev, | 1667 | radeon_add_legacy_encoder(dev, |
1668 | radeon_get_encoder_id(dev, | 1668 | radeon_get_encoder_enum(dev, |
1669 | ATOM_DEVICE_CRT1_SUPPORT, | 1669 | ATOM_DEVICE_CRT1_SUPPORT, |
1670 | 1), | 1670 | 1), |
1671 | ATOM_DEVICE_CRT1_SUPPORT); | 1671 | ATOM_DEVICE_CRT1_SUPPORT); |
@@ -1680,7 +1680,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1680 | ddc_i2c.valid = false; | 1680 | ddc_i2c.valid = false; |
1681 | hpd.hpd = RADEON_HPD_NONE; | 1681 | hpd.hpd = RADEON_HPD_NONE; |
1682 | radeon_add_legacy_encoder(dev, | 1682 | radeon_add_legacy_encoder(dev, |
1683 | radeon_get_encoder_id(dev, | 1683 | radeon_get_encoder_enum(dev, |
1684 | ATOM_DEVICE_TV1_SUPPORT, | 1684 | ATOM_DEVICE_TV1_SUPPORT, |
1685 | 2), | 1685 | 2), |
1686 | ATOM_DEVICE_TV1_SUPPORT); | 1686 | ATOM_DEVICE_TV1_SUPPORT); |
@@ -1697,7 +1697,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1697 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); | 1697 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); |
1698 | hpd.hpd = RADEON_HPD_NONE; | 1698 | hpd.hpd = RADEON_HPD_NONE; |
1699 | radeon_add_legacy_encoder(dev, | 1699 | radeon_add_legacy_encoder(dev, |
1700 | radeon_get_encoder_id(dev, | 1700 | radeon_get_encoder_enum(dev, |
1701 | ATOM_DEVICE_LCD1_SUPPORT, | 1701 | ATOM_DEVICE_LCD1_SUPPORT, |
1702 | 0), | 1702 | 0), |
1703 | ATOM_DEVICE_LCD1_SUPPORT); | 1703 | ATOM_DEVICE_LCD1_SUPPORT); |
@@ -1709,12 +1709,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1709 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); | 1709 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); |
1710 | hpd.hpd = RADEON_HPD_1; /* ??? */ | 1710 | hpd.hpd = RADEON_HPD_1; /* ??? */ |
1711 | radeon_add_legacy_encoder(dev, | 1711 | radeon_add_legacy_encoder(dev, |
1712 | radeon_get_encoder_id(dev, | 1712 | radeon_get_encoder_enum(dev, |
1713 | ATOM_DEVICE_DFP1_SUPPORT, | 1713 | ATOM_DEVICE_DFP1_SUPPORT, |
1714 | 0), | 1714 | 0), |
1715 | ATOM_DEVICE_DFP1_SUPPORT); | 1715 | ATOM_DEVICE_DFP1_SUPPORT); |
1716 | radeon_add_legacy_encoder(dev, | 1716 | radeon_add_legacy_encoder(dev, |
1717 | radeon_get_encoder_id(dev, | 1717 | radeon_get_encoder_enum(dev, |
1718 | ATOM_DEVICE_CRT1_SUPPORT, | 1718 | ATOM_DEVICE_CRT1_SUPPORT, |
1719 | 1), | 1719 | 1), |
1720 | ATOM_DEVICE_CRT1_SUPPORT); | 1720 | ATOM_DEVICE_CRT1_SUPPORT); |
@@ -1728,7 +1728,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1728 | ddc_i2c.valid = false; | 1728 | ddc_i2c.valid = false; |
1729 | hpd.hpd = RADEON_HPD_NONE; | 1729 | hpd.hpd = RADEON_HPD_NONE; |
1730 | radeon_add_legacy_encoder(dev, | 1730 | radeon_add_legacy_encoder(dev, |
1731 | radeon_get_encoder_id(dev, | 1731 | radeon_get_encoder_enum(dev, |
1732 | ATOM_DEVICE_TV1_SUPPORT, | 1732 | ATOM_DEVICE_TV1_SUPPORT, |
1733 | 2), | 1733 | 2), |
1734 | ATOM_DEVICE_TV1_SUPPORT); | 1734 | ATOM_DEVICE_TV1_SUPPORT); |
@@ -1745,7 +1745,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1745 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); | 1745 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); |
1746 | hpd.hpd = RADEON_HPD_NONE; | 1746 | hpd.hpd = RADEON_HPD_NONE; |
1747 | radeon_add_legacy_encoder(dev, | 1747 | radeon_add_legacy_encoder(dev, |
1748 | radeon_get_encoder_id(dev, | 1748 | radeon_get_encoder_enum(dev, |
1749 | ATOM_DEVICE_LCD1_SUPPORT, | 1749 | ATOM_DEVICE_LCD1_SUPPORT, |
1750 | 0), | 1750 | 0), |
1751 | ATOM_DEVICE_LCD1_SUPPORT); | 1751 | ATOM_DEVICE_LCD1_SUPPORT); |
@@ -1757,7 +1757,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1757 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); | 1757 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); |
1758 | hpd.hpd = RADEON_HPD_NONE; | 1758 | hpd.hpd = RADEON_HPD_NONE; |
1759 | radeon_add_legacy_encoder(dev, | 1759 | radeon_add_legacy_encoder(dev, |
1760 | radeon_get_encoder_id(dev, | 1760 | radeon_get_encoder_enum(dev, |
1761 | ATOM_DEVICE_CRT1_SUPPORT, | 1761 | ATOM_DEVICE_CRT1_SUPPORT, |
1762 | 1), | 1762 | 1), |
1763 | ATOM_DEVICE_CRT1_SUPPORT); | 1763 | ATOM_DEVICE_CRT1_SUPPORT); |
@@ -1769,7 +1769,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1769 | ddc_i2c.valid = false; | 1769 | ddc_i2c.valid = false; |
1770 | hpd.hpd = RADEON_HPD_NONE; | 1770 | hpd.hpd = RADEON_HPD_NONE; |
1771 | radeon_add_legacy_encoder(dev, | 1771 | radeon_add_legacy_encoder(dev, |
1772 | radeon_get_encoder_id(dev, | 1772 | radeon_get_encoder_enum(dev, |
1773 | ATOM_DEVICE_TV1_SUPPORT, | 1773 | ATOM_DEVICE_TV1_SUPPORT, |
1774 | 2), | 1774 | 2), |
1775 | ATOM_DEVICE_TV1_SUPPORT); | 1775 | ATOM_DEVICE_TV1_SUPPORT); |
@@ -1786,12 +1786,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1786 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); | 1786 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); |
1787 | hpd.hpd = RADEON_HPD_2; /* ??? */ | 1787 | hpd.hpd = RADEON_HPD_2; /* ??? */ |
1788 | radeon_add_legacy_encoder(dev, | 1788 | radeon_add_legacy_encoder(dev, |
1789 | radeon_get_encoder_id(dev, | 1789 | radeon_get_encoder_enum(dev, |
1790 | ATOM_DEVICE_DFP2_SUPPORT, | 1790 | ATOM_DEVICE_DFP2_SUPPORT, |
1791 | 0), | 1791 | 0), |
1792 | ATOM_DEVICE_DFP2_SUPPORT); | 1792 | ATOM_DEVICE_DFP2_SUPPORT); |
1793 | radeon_add_legacy_encoder(dev, | 1793 | radeon_add_legacy_encoder(dev, |
1794 | radeon_get_encoder_id(dev, | 1794 | radeon_get_encoder_enum(dev, |
1795 | ATOM_DEVICE_CRT2_SUPPORT, | 1795 | ATOM_DEVICE_CRT2_SUPPORT, |
1796 | 2), | 1796 | 2), |
1797 | ATOM_DEVICE_CRT2_SUPPORT); | 1797 | ATOM_DEVICE_CRT2_SUPPORT); |
@@ -1806,7 +1806,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1806 | ddc_i2c.valid = false; | 1806 | ddc_i2c.valid = false; |
1807 | hpd.hpd = RADEON_HPD_NONE; | 1807 | hpd.hpd = RADEON_HPD_NONE; |
1808 | radeon_add_legacy_encoder(dev, | 1808 | radeon_add_legacy_encoder(dev, |
1809 | radeon_get_encoder_id(dev, | 1809 | radeon_get_encoder_enum(dev, |
1810 | ATOM_DEVICE_TV1_SUPPORT, | 1810 | ATOM_DEVICE_TV1_SUPPORT, |
1811 | 2), | 1811 | 2), |
1812 | ATOM_DEVICE_TV1_SUPPORT); | 1812 | ATOM_DEVICE_TV1_SUPPORT); |
@@ -1823,12 +1823,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1823 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); | 1823 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); |
1824 | hpd.hpd = RADEON_HPD_1; /* ??? */ | 1824 | hpd.hpd = RADEON_HPD_1; /* ??? */ |
1825 | radeon_add_legacy_encoder(dev, | 1825 | radeon_add_legacy_encoder(dev, |
1826 | radeon_get_encoder_id(dev, | 1826 | radeon_get_encoder_enum(dev, |
1827 | ATOM_DEVICE_DFP1_SUPPORT, | 1827 | ATOM_DEVICE_DFP1_SUPPORT, |
1828 | 0), | 1828 | 0), |
1829 | ATOM_DEVICE_DFP1_SUPPORT); | 1829 | ATOM_DEVICE_DFP1_SUPPORT); |
1830 | radeon_add_legacy_encoder(dev, | 1830 | radeon_add_legacy_encoder(dev, |
1831 | radeon_get_encoder_id(dev, | 1831 | radeon_get_encoder_enum(dev, |
1832 | ATOM_DEVICE_CRT2_SUPPORT, | 1832 | ATOM_DEVICE_CRT2_SUPPORT, |
1833 | 2), | 1833 | 2), |
1834 | ATOM_DEVICE_CRT2_SUPPORT); | 1834 | ATOM_DEVICE_CRT2_SUPPORT); |
@@ -1842,7 +1842,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1842 | ddc_i2c.valid = false; | 1842 | ddc_i2c.valid = false; |
1843 | hpd.hpd = RADEON_HPD_NONE; | 1843 | hpd.hpd = RADEON_HPD_NONE; |
1844 | radeon_add_legacy_encoder(dev, | 1844 | radeon_add_legacy_encoder(dev, |
1845 | radeon_get_encoder_id(dev, | 1845 | radeon_get_encoder_enum(dev, |
1846 | ATOM_DEVICE_TV1_SUPPORT, | 1846 | ATOM_DEVICE_TV1_SUPPORT, |
1847 | 2), | 1847 | 2), |
1848 | ATOM_DEVICE_TV1_SUPPORT); | 1848 | ATOM_DEVICE_TV1_SUPPORT); |
@@ -1859,7 +1859,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1859 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); | 1859 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); |
1860 | hpd.hpd = RADEON_HPD_1; /* ??? */ | 1860 | hpd.hpd = RADEON_HPD_1; /* ??? */ |
1861 | radeon_add_legacy_encoder(dev, | 1861 | radeon_add_legacy_encoder(dev, |
1862 | radeon_get_encoder_id(dev, | 1862 | radeon_get_encoder_enum(dev, |
1863 | ATOM_DEVICE_DFP1_SUPPORT, | 1863 | ATOM_DEVICE_DFP1_SUPPORT, |
1864 | 0), | 1864 | 0), |
1865 | ATOM_DEVICE_DFP1_SUPPORT); | 1865 | ATOM_DEVICE_DFP1_SUPPORT); |
@@ -1871,7 +1871,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1871 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); | 1871 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); |
1872 | hpd.hpd = RADEON_HPD_NONE; | 1872 | hpd.hpd = RADEON_HPD_NONE; |
1873 | radeon_add_legacy_encoder(dev, | 1873 | radeon_add_legacy_encoder(dev, |
1874 | radeon_get_encoder_id(dev, | 1874 | radeon_get_encoder_enum(dev, |
1875 | ATOM_DEVICE_CRT2_SUPPORT, | 1875 | ATOM_DEVICE_CRT2_SUPPORT, |
1876 | 2), | 1876 | 2), |
1877 | ATOM_DEVICE_CRT2_SUPPORT); | 1877 | ATOM_DEVICE_CRT2_SUPPORT); |
@@ -1883,7 +1883,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1883 | ddc_i2c.valid = false; | 1883 | ddc_i2c.valid = false; |
1884 | hpd.hpd = RADEON_HPD_NONE; | 1884 | hpd.hpd = RADEON_HPD_NONE; |
1885 | radeon_add_legacy_encoder(dev, | 1885 | radeon_add_legacy_encoder(dev, |
1886 | radeon_get_encoder_id(dev, | 1886 | radeon_get_encoder_enum(dev, |
1887 | ATOM_DEVICE_TV1_SUPPORT, | 1887 | ATOM_DEVICE_TV1_SUPPORT, |
1888 | 2), | 1888 | 2), |
1889 | ATOM_DEVICE_TV1_SUPPORT); | 1889 | ATOM_DEVICE_TV1_SUPPORT); |
@@ -1900,7 +1900,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1900 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); | 1900 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); |
1901 | hpd.hpd = RADEON_HPD_NONE; | 1901 | hpd.hpd = RADEON_HPD_NONE; |
1902 | radeon_add_legacy_encoder(dev, | 1902 | radeon_add_legacy_encoder(dev, |
1903 | radeon_get_encoder_id(dev, | 1903 | radeon_get_encoder_enum(dev, |
1904 | ATOM_DEVICE_CRT1_SUPPORT, | 1904 | ATOM_DEVICE_CRT1_SUPPORT, |
1905 | 1), | 1905 | 1), |
1906 | ATOM_DEVICE_CRT1_SUPPORT); | 1906 | ATOM_DEVICE_CRT1_SUPPORT); |
@@ -1912,7 +1912,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1912 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); | 1912 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); |
1913 | hpd.hpd = RADEON_HPD_NONE; | 1913 | hpd.hpd = RADEON_HPD_NONE; |
1914 | radeon_add_legacy_encoder(dev, | 1914 | radeon_add_legacy_encoder(dev, |
1915 | radeon_get_encoder_id(dev, | 1915 | radeon_get_encoder_enum(dev, |
1916 | ATOM_DEVICE_CRT2_SUPPORT, | 1916 | ATOM_DEVICE_CRT2_SUPPORT, |
1917 | 2), | 1917 | 2), |
1918 | ATOM_DEVICE_CRT2_SUPPORT); | 1918 | ATOM_DEVICE_CRT2_SUPPORT); |
@@ -1924,7 +1924,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1924 | ddc_i2c.valid = false; | 1924 | ddc_i2c.valid = false; |
1925 | hpd.hpd = RADEON_HPD_NONE; | 1925 | hpd.hpd = RADEON_HPD_NONE; |
1926 | radeon_add_legacy_encoder(dev, | 1926 | radeon_add_legacy_encoder(dev, |
1927 | radeon_get_encoder_id(dev, | 1927 | radeon_get_encoder_enum(dev, |
1928 | ATOM_DEVICE_TV1_SUPPORT, | 1928 | ATOM_DEVICE_TV1_SUPPORT, |
1929 | 2), | 1929 | 2), |
1930 | ATOM_DEVICE_TV1_SUPPORT); | 1930 | ATOM_DEVICE_TV1_SUPPORT); |
@@ -1941,7 +1941,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1941 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); | 1941 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); |
1942 | hpd.hpd = RADEON_HPD_NONE; | 1942 | hpd.hpd = RADEON_HPD_NONE; |
1943 | radeon_add_legacy_encoder(dev, | 1943 | radeon_add_legacy_encoder(dev, |
1944 | radeon_get_encoder_id(dev, | 1944 | radeon_get_encoder_enum(dev, |
1945 | ATOM_DEVICE_CRT1_SUPPORT, | 1945 | ATOM_DEVICE_CRT1_SUPPORT, |
1946 | 1), | 1946 | 1), |
1947 | ATOM_DEVICE_CRT1_SUPPORT); | 1947 | ATOM_DEVICE_CRT1_SUPPORT); |
@@ -1952,7 +1952,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1952 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); | 1952 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); |
1953 | hpd.hpd = RADEON_HPD_NONE; | 1953 | hpd.hpd = RADEON_HPD_NONE; |
1954 | radeon_add_legacy_encoder(dev, | 1954 | radeon_add_legacy_encoder(dev, |
1955 | radeon_get_encoder_id(dev, | 1955 | radeon_get_encoder_enum(dev, |
1956 | ATOM_DEVICE_CRT2_SUPPORT, | 1956 | ATOM_DEVICE_CRT2_SUPPORT, |
1957 | 2), | 1957 | 2), |
1958 | ATOM_DEVICE_CRT2_SUPPORT); | 1958 | ATOM_DEVICE_CRT2_SUPPORT); |
@@ -2109,7 +2109,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
2109 | else | 2109 | else |
2110 | devices = ATOM_DEVICE_DFP1_SUPPORT; | 2110 | devices = ATOM_DEVICE_DFP1_SUPPORT; |
2111 | radeon_add_legacy_encoder(dev, | 2111 | radeon_add_legacy_encoder(dev, |
2112 | radeon_get_encoder_id | 2112 | radeon_get_encoder_enum |
2113 | (dev, devices, 0), | 2113 | (dev, devices, 0), |
2114 | devices); | 2114 | devices); |
2115 | radeon_add_legacy_connector(dev, i, devices, | 2115 | radeon_add_legacy_connector(dev, i, devices, |
@@ -2123,7 +2123,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
2123 | if (tmp & 0x1) { | 2123 | if (tmp & 0x1) { |
2124 | devices = ATOM_DEVICE_CRT2_SUPPORT; | 2124 | devices = ATOM_DEVICE_CRT2_SUPPORT; |
2125 | radeon_add_legacy_encoder(dev, | 2125 | radeon_add_legacy_encoder(dev, |
2126 | radeon_get_encoder_id | 2126 | radeon_get_encoder_enum |
2127 | (dev, | 2127 | (dev, |
2128 | ATOM_DEVICE_CRT2_SUPPORT, | 2128 | ATOM_DEVICE_CRT2_SUPPORT, |
2129 | 2), | 2129 | 2), |
@@ -2131,7 +2131,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
2131 | } else { | 2131 | } else { |
2132 | devices = ATOM_DEVICE_CRT1_SUPPORT; | 2132 | devices = ATOM_DEVICE_CRT1_SUPPORT; |
2133 | radeon_add_legacy_encoder(dev, | 2133 | radeon_add_legacy_encoder(dev, |
2134 | radeon_get_encoder_id | 2134 | radeon_get_encoder_enum |
2135 | (dev, | 2135 | (dev, |
2136 | ATOM_DEVICE_CRT1_SUPPORT, | 2136 | ATOM_DEVICE_CRT1_SUPPORT, |
2137 | 1), | 2137 | 1), |
@@ -2151,7 +2151,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
2151 | if (tmp & 0x1) { | 2151 | if (tmp & 0x1) { |
2152 | devices |= ATOM_DEVICE_CRT2_SUPPORT; | 2152 | devices |= ATOM_DEVICE_CRT2_SUPPORT; |
2153 | radeon_add_legacy_encoder(dev, | 2153 | radeon_add_legacy_encoder(dev, |
2154 | radeon_get_encoder_id | 2154 | radeon_get_encoder_enum |
2155 | (dev, | 2155 | (dev, |
2156 | ATOM_DEVICE_CRT2_SUPPORT, | 2156 | ATOM_DEVICE_CRT2_SUPPORT, |
2157 | 2), | 2157 | 2), |
@@ -2159,7 +2159,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
2159 | } else { | 2159 | } else { |
2160 | devices |= ATOM_DEVICE_CRT1_SUPPORT; | 2160 | devices |= ATOM_DEVICE_CRT1_SUPPORT; |
2161 | radeon_add_legacy_encoder(dev, | 2161 | radeon_add_legacy_encoder(dev, |
2162 | radeon_get_encoder_id | 2162 | radeon_get_encoder_enum |
2163 | (dev, | 2163 | (dev, |
2164 | ATOM_DEVICE_CRT1_SUPPORT, | 2164 | ATOM_DEVICE_CRT1_SUPPORT, |
2165 | 1), | 2165 | 1), |
@@ -2168,7 +2168,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
2168 | if ((tmp >> 4) & 0x1) { | 2168 | if ((tmp >> 4) & 0x1) { |
2169 | devices |= ATOM_DEVICE_DFP2_SUPPORT; | 2169 | devices |= ATOM_DEVICE_DFP2_SUPPORT; |
2170 | radeon_add_legacy_encoder(dev, | 2170 | radeon_add_legacy_encoder(dev, |
2171 | radeon_get_encoder_id | 2171 | radeon_get_encoder_enum |
2172 | (dev, | 2172 | (dev, |
2173 | ATOM_DEVICE_DFP2_SUPPORT, | 2173 | ATOM_DEVICE_DFP2_SUPPORT, |
2174 | 0), | 2174 | 0), |
@@ -2177,7 +2177,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
2177 | } else { | 2177 | } else { |
2178 | devices |= ATOM_DEVICE_DFP1_SUPPORT; | 2178 | devices |= ATOM_DEVICE_DFP1_SUPPORT; |
2179 | radeon_add_legacy_encoder(dev, | 2179 | radeon_add_legacy_encoder(dev, |
2180 | radeon_get_encoder_id | 2180 | radeon_get_encoder_enum |
2181 | (dev, | 2181 | (dev, |
2182 | ATOM_DEVICE_DFP1_SUPPORT, | 2182 | ATOM_DEVICE_DFP1_SUPPORT, |
2183 | 0), | 2183 | 0), |
@@ -2202,7 +2202,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
2202 | connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; | 2202 | connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; |
2203 | } | 2203 | } |
2204 | radeon_add_legacy_encoder(dev, | 2204 | radeon_add_legacy_encoder(dev, |
2205 | radeon_get_encoder_id | 2205 | radeon_get_encoder_enum |
2206 | (dev, devices, 0), | 2206 | (dev, devices, 0), |
2207 | devices); | 2207 | devices); |
2208 | radeon_add_legacy_connector(dev, i, devices, | 2208 | radeon_add_legacy_connector(dev, i, devices, |
@@ -2215,7 +2215,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
2215 | case CONNECTOR_CTV_LEGACY: | 2215 | case CONNECTOR_CTV_LEGACY: |
2216 | case CONNECTOR_STV_LEGACY: | 2216 | case CONNECTOR_STV_LEGACY: |
2217 | radeon_add_legacy_encoder(dev, | 2217 | radeon_add_legacy_encoder(dev, |
2218 | radeon_get_encoder_id | 2218 | radeon_get_encoder_enum |
2219 | (dev, | 2219 | (dev, |
2220 | ATOM_DEVICE_TV1_SUPPORT, | 2220 | ATOM_DEVICE_TV1_SUPPORT, |
2221 | 2), | 2221 | 2), |
@@ -2242,12 +2242,12 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
2242 | DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n"); | 2242 | DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n"); |
2243 | 2243 | ||
2244 | radeon_add_legacy_encoder(dev, | 2244 | radeon_add_legacy_encoder(dev, |
2245 | radeon_get_encoder_id(dev, | 2245 | radeon_get_encoder_enum(dev, |
2246 | ATOM_DEVICE_CRT1_SUPPORT, | 2246 | ATOM_DEVICE_CRT1_SUPPORT, |
2247 | 1), | 2247 | 1), |
2248 | ATOM_DEVICE_CRT1_SUPPORT); | 2248 | ATOM_DEVICE_CRT1_SUPPORT); |
2249 | radeon_add_legacy_encoder(dev, | 2249 | radeon_add_legacy_encoder(dev, |
2250 | radeon_get_encoder_id(dev, | 2250 | radeon_get_encoder_enum(dev, |
2251 | ATOM_DEVICE_DFP1_SUPPORT, | 2251 | ATOM_DEVICE_DFP1_SUPPORT, |
2252 | 0), | 2252 | 0), |
2253 | ATOM_DEVICE_DFP1_SUPPORT); | 2253 | ATOM_DEVICE_DFP1_SUPPORT); |
@@ -2268,7 +2268,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
2268 | DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n"); | 2268 | DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n"); |
2269 | if (crt_info) { | 2269 | if (crt_info) { |
2270 | radeon_add_legacy_encoder(dev, | 2270 | radeon_add_legacy_encoder(dev, |
2271 | radeon_get_encoder_id(dev, | 2271 | radeon_get_encoder_enum(dev, |
2272 | ATOM_DEVICE_CRT1_SUPPORT, | 2272 | ATOM_DEVICE_CRT1_SUPPORT, |
2273 | 1), | 2273 | 1), |
2274 | ATOM_DEVICE_CRT1_SUPPORT); | 2274 | ATOM_DEVICE_CRT1_SUPPORT); |
@@ -2297,7 +2297,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
2297 | COMBIOS_LCD_DDC_INFO_TABLE); | 2297 | COMBIOS_LCD_DDC_INFO_TABLE); |
2298 | 2298 | ||
2299 | radeon_add_legacy_encoder(dev, | 2299 | radeon_add_legacy_encoder(dev, |
2300 | radeon_get_encoder_id(dev, | 2300 | radeon_get_encoder_enum(dev, |
2301 | ATOM_DEVICE_LCD1_SUPPORT, | 2301 | ATOM_DEVICE_LCD1_SUPPORT, |
2302 | 0), | 2302 | 0), |
2303 | ATOM_DEVICE_LCD1_SUPPORT); | 2303 | ATOM_DEVICE_LCD1_SUPPORT); |
@@ -2351,7 +2351,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
2351 | hpd.hpd = RADEON_HPD_NONE; | 2351 | hpd.hpd = RADEON_HPD_NONE; |
2352 | ddc_i2c.valid = false; | 2352 | ddc_i2c.valid = false; |
2353 | radeon_add_legacy_encoder(dev, | 2353 | radeon_add_legacy_encoder(dev, |
2354 | radeon_get_encoder_id | 2354 | radeon_get_encoder_enum |
2355 | (dev, | 2355 | (dev, |
2356 | ATOM_DEVICE_TV1_SUPPORT, | 2356 | ATOM_DEVICE_TV1_SUPPORT, |
2357 | 2), | 2357 | 2), |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 47c4b276d30c..31a09cd279ab 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -977,24 +977,25 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto | |||
977 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 977 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
978 | enum drm_connector_status ret = connector_status_disconnected; | 978 | enum drm_connector_status ret = connector_status_disconnected; |
979 | struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; | 979 | struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; |
980 | u8 sink_type; | ||
981 | 980 | ||
982 | if (radeon_connector->edid) { | 981 | if (radeon_connector->edid) { |
983 | kfree(radeon_connector->edid); | 982 | kfree(radeon_connector->edid); |
984 | radeon_connector->edid = NULL; | 983 | radeon_connector->edid = NULL; |
985 | } | 984 | } |
986 | 985 | ||
987 | sink_type = radeon_dp_getsinktype(radeon_connector); | 986 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { |
988 | if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || | 987 | /* eDP is always DP */ |
989 | (sink_type == CONNECTOR_OBJECT_ID_eDP)) { | 988 | radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; |
990 | if (radeon_dp_getdpcd(radeon_connector)) { | 989 | if (radeon_dp_getdpcd(radeon_connector)) |
991 | radeon_dig_connector->dp_sink_type = sink_type; | ||
992 | ret = connector_status_connected; | 990 | ret = connector_status_connected; |
993 | } | ||
994 | } else { | 991 | } else { |
995 | if (radeon_ddc_probe(radeon_connector)) { | 992 | radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); |
996 | radeon_dig_connector->dp_sink_type = sink_type; | 993 | if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { |
997 | ret = connector_status_connected; | 994 | if (radeon_dp_getdpcd(radeon_connector)) |
995 | ret = connector_status_connected; | ||
996 | } else { | ||
997 | if (radeon_ddc_probe(radeon_connector)) | ||
998 | ret = connector_status_connected; | ||
998 | } | 999 | } |
999 | } | 1000 | } |
1000 | 1001 | ||
@@ -1037,7 +1038,6 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1037 | uint32_t supported_device, | 1038 | uint32_t supported_device, |
1038 | int connector_type, | 1039 | int connector_type, |
1039 | struct radeon_i2c_bus_rec *i2c_bus, | 1040 | struct radeon_i2c_bus_rec *i2c_bus, |
1040 | bool linkb, | ||
1041 | uint32_t igp_lane_info, | 1041 | uint32_t igp_lane_info, |
1042 | uint16_t connector_object_id, | 1042 | uint16_t connector_object_id, |
1043 | struct radeon_hpd *hpd, | 1043 | struct radeon_hpd *hpd, |
@@ -1128,7 +1128,6 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1128 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); | 1128 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); |
1129 | if (!radeon_dig_connector) | 1129 | if (!radeon_dig_connector) |
1130 | goto failed; | 1130 | goto failed; |
1131 | radeon_dig_connector->linkb = linkb; | ||
1132 | radeon_dig_connector->igp_lane_info = igp_lane_info; | 1131 | radeon_dig_connector->igp_lane_info = igp_lane_info; |
1133 | radeon_connector->con_priv = radeon_dig_connector; | 1132 | radeon_connector->con_priv = radeon_dig_connector; |
1134 | drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); | 1133 | drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); |
@@ -1158,7 +1157,6 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1158 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); | 1157 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); |
1159 | if (!radeon_dig_connector) | 1158 | if (!radeon_dig_connector) |
1160 | goto failed; | 1159 | goto failed; |
1161 | radeon_dig_connector->linkb = linkb; | ||
1162 | radeon_dig_connector->igp_lane_info = igp_lane_info; | 1160 | radeon_dig_connector->igp_lane_info = igp_lane_info; |
1163 | radeon_connector->con_priv = radeon_dig_connector; | 1161 | radeon_connector->con_priv = radeon_dig_connector; |
1164 | drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); | 1162 | drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); |
@@ -1182,7 +1180,6 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1182 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); | 1180 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); |
1183 | if (!radeon_dig_connector) | 1181 | if (!radeon_dig_connector) |
1184 | goto failed; | 1182 | goto failed; |
1185 | radeon_dig_connector->linkb = linkb; | ||
1186 | radeon_dig_connector->igp_lane_info = igp_lane_info; | 1183 | radeon_dig_connector->igp_lane_info = igp_lane_info; |
1187 | radeon_connector->con_priv = radeon_dig_connector; | 1184 | radeon_connector->con_priv = radeon_dig_connector; |
1188 | drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); | 1185 | drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); |
@@ -1229,7 +1226,6 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1229 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); | 1226 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); |
1230 | if (!radeon_dig_connector) | 1227 | if (!radeon_dig_connector) |
1231 | goto failed; | 1228 | goto failed; |
1232 | radeon_dig_connector->linkb = linkb; | ||
1233 | radeon_dig_connector->igp_lane_info = igp_lane_info; | 1229 | radeon_dig_connector->igp_lane_info = igp_lane_info; |
1234 | radeon_connector->con_priv = radeon_dig_connector; | 1230 | radeon_connector->con_priv = radeon_dig_connector; |
1235 | drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); | 1231 | drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 4f7a170d1566..69b3c2291e92 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -199,7 +199,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 | |||
199 | mc->mc_vram_size = mc->aper_size; | 199 | mc->mc_vram_size = mc->aper_size; |
200 | } | 200 | } |
201 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; | 201 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; |
202 | if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) { | 202 | if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) { |
203 | dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); | 203 | dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); |
204 | mc->real_vram_size = mc->aper_size; | 204 | mc->real_vram_size = mc->aper_size; |
205 | mc->mc_vram_size = mc->aper_size; | 205 | mc->mc_vram_size = mc->aper_size; |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 5764f4d3b4f1..6dd434ad2429 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -1094,6 +1094,18 @@ void radeon_modeset_fini(struct radeon_device *rdev) | |||
1094 | radeon_i2c_fini(rdev); | 1094 | radeon_i2c_fini(rdev); |
1095 | } | 1095 | } |
1096 | 1096 | ||
1097 | static bool is_hdtv_mode(struct drm_display_mode *mode) | ||
1098 | { | ||
1099 | /* try and guess if this is a tv or a monitor */ | ||
1100 | if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */ | ||
1101 | (mode->vdisplay == 576) || /* 576p */ | ||
1102 | (mode->vdisplay == 720) || /* 720p */ | ||
1103 | (mode->vdisplay == 1080)) /* 1080p */ | ||
1104 | return true; | ||
1105 | else | ||
1106 | return false; | ||
1107 | } | ||
1108 | |||
1097 | bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | 1109 | bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, |
1098 | struct drm_display_mode *mode, | 1110 | struct drm_display_mode *mode, |
1099 | struct drm_display_mode *adjusted_mode) | 1111 | struct drm_display_mode *adjusted_mode) |
@@ -1141,7 +1153,8 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | |||
1141 | if (ASIC_IS_AVIVO(rdev) && | 1153 | if (ASIC_IS_AVIVO(rdev) && |
1142 | ((radeon_encoder->underscan_type == UNDERSCAN_ON) || | 1154 | ((radeon_encoder->underscan_type == UNDERSCAN_ON) || |
1143 | ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && | 1155 | ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && |
1144 | drm_detect_hdmi_monitor(radeon_connector->edid)))) { | 1156 | drm_detect_hdmi_monitor(radeon_connector->edid) && |
1157 | is_hdtv_mode(mode)))) { | ||
1145 | radeon_crtc->h_border = (mode->hdisplay >> 5) + 16; | 1158 | radeon_crtc->h_border = (mode->hdisplay >> 5) + 16; |
1146 | radeon_crtc->v_border = (mode->vdisplay >> 5) + 16; | 1159 | radeon_crtc->v_border = (mode->vdisplay >> 5) + 16; |
1147 | radeon_crtc->rmx_type = RMX_FULL; | 1160 | radeon_crtc->rmx_type = RMX_FULL; |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 263c8098d7dd..2c293e8304d6 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -81,7 +81,7 @@ void radeon_setup_encoder_clones(struct drm_device *dev) | |||
81 | } | 81 | } |
82 | 82 | ||
83 | uint32_t | 83 | uint32_t |
84 | radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac) | 84 | radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac) |
85 | { | 85 | { |
86 | struct radeon_device *rdev = dev->dev_private; | 86 | struct radeon_device *rdev = dev->dev_private; |
87 | uint32_t ret = 0; | 87 | uint32_t ret = 0; |
@@ -97,59 +97,59 @@ radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t | |||
97 | if ((rdev->family == CHIP_RS300) || | 97 | if ((rdev->family == CHIP_RS300) || |
98 | (rdev->family == CHIP_RS400) || | 98 | (rdev->family == CHIP_RS400) || |
99 | (rdev->family == CHIP_RS480)) | 99 | (rdev->family == CHIP_RS480)) |
100 | ret = ENCODER_OBJECT_ID_INTERNAL_DAC2; | 100 | ret = ENCODER_INTERNAL_DAC2_ENUM_ID1; |
101 | else if (ASIC_IS_AVIVO(rdev)) | 101 | else if (ASIC_IS_AVIVO(rdev)) |
102 | ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1; | 102 | ret = ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1; |
103 | else | 103 | else |
104 | ret = ENCODER_OBJECT_ID_INTERNAL_DAC1; | 104 | ret = ENCODER_INTERNAL_DAC1_ENUM_ID1; |
105 | break; | 105 | break; |
106 | case 2: /* dac b */ | 106 | case 2: /* dac b */ |
107 | if (ASIC_IS_AVIVO(rdev)) | 107 | if (ASIC_IS_AVIVO(rdev)) |
108 | ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2; | 108 | ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1; |
109 | else { | 109 | else { |
110 | /*if (rdev->family == CHIP_R200) | 110 | /*if (rdev->family == CHIP_R200) |
111 | ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; | 111 | ret = ENCODER_INTERNAL_DVO1_ENUM_ID1; |
112 | else*/ | 112 | else*/ |
113 | ret = ENCODER_OBJECT_ID_INTERNAL_DAC2; | 113 | ret = ENCODER_INTERNAL_DAC2_ENUM_ID1; |
114 | } | 114 | } |
115 | break; | 115 | break; |
116 | case 3: /* external dac */ | 116 | case 3: /* external dac */ |
117 | if (ASIC_IS_AVIVO(rdev)) | 117 | if (ASIC_IS_AVIVO(rdev)) |
118 | ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1; | 118 | ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1; |
119 | else | 119 | else |
120 | ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; | 120 | ret = ENCODER_INTERNAL_DVO1_ENUM_ID1; |
121 | break; | 121 | break; |
122 | } | 122 | } |
123 | break; | 123 | break; |
124 | case ATOM_DEVICE_LCD1_SUPPORT: | 124 | case ATOM_DEVICE_LCD1_SUPPORT: |
125 | if (ASIC_IS_AVIVO(rdev)) | 125 | if (ASIC_IS_AVIVO(rdev)) |
126 | ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1; | 126 | ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1; |
127 | else | 127 | else |
128 | ret = ENCODER_OBJECT_ID_INTERNAL_LVDS; | 128 | ret = ENCODER_INTERNAL_LVDS_ENUM_ID1; |
129 | break; | 129 | break; |
130 | case ATOM_DEVICE_DFP1_SUPPORT: | 130 | case ATOM_DEVICE_DFP1_SUPPORT: |
131 | if ((rdev->family == CHIP_RS300) || | 131 | if ((rdev->family == CHIP_RS300) || |
132 | (rdev->family == CHIP_RS400) || | 132 | (rdev->family == CHIP_RS400) || |
133 | (rdev->family == CHIP_RS480)) | 133 | (rdev->family == CHIP_RS480)) |
134 | ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; | 134 | ret = ENCODER_INTERNAL_DVO1_ENUM_ID1; |
135 | else if (ASIC_IS_AVIVO(rdev)) | 135 | else if (ASIC_IS_AVIVO(rdev)) |
136 | ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1; | 136 | ret = ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1; |
137 | else | 137 | else |
138 | ret = ENCODER_OBJECT_ID_INTERNAL_TMDS1; | 138 | ret = ENCODER_INTERNAL_TMDS1_ENUM_ID1; |
139 | break; | 139 | break; |
140 | case ATOM_DEVICE_LCD2_SUPPORT: | 140 | case ATOM_DEVICE_LCD2_SUPPORT: |
141 | case ATOM_DEVICE_DFP2_SUPPORT: | 141 | case ATOM_DEVICE_DFP2_SUPPORT: |
142 | if ((rdev->family == CHIP_RS600) || | 142 | if ((rdev->family == CHIP_RS600) || |
143 | (rdev->family == CHIP_RS690) || | 143 | (rdev->family == CHIP_RS690) || |
144 | (rdev->family == CHIP_RS740)) | 144 | (rdev->family == CHIP_RS740)) |
145 | ret = ENCODER_OBJECT_ID_INTERNAL_DDI; | 145 | ret = ENCODER_INTERNAL_DDI_ENUM_ID1; |
146 | else if (ASIC_IS_AVIVO(rdev)) | 146 | else if (ASIC_IS_AVIVO(rdev)) |
147 | ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1; | 147 | ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1; |
148 | else | 148 | else |
149 | ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; | 149 | ret = ENCODER_INTERNAL_DVO1_ENUM_ID1; |
150 | break; | 150 | break; |
151 | case ATOM_DEVICE_DFP3_SUPPORT: | 151 | case ATOM_DEVICE_DFP3_SUPPORT: |
152 | ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1; | 152 | ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1; |
153 | break; | 153 | break; |
154 | } | 154 | } |
155 | 155 | ||
@@ -228,32 +228,6 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder) | |||
228 | return NULL; | 228 | return NULL; |
229 | } | 229 | } |
230 | 230 | ||
231 | static struct radeon_connector_atom_dig * | ||
232 | radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder) | ||
233 | { | ||
234 | struct drm_device *dev = encoder->dev; | ||
235 | struct radeon_device *rdev = dev->dev_private; | ||
236 | struct drm_connector *connector; | ||
237 | struct radeon_connector *radeon_connector; | ||
238 | struct radeon_connector_atom_dig *dig_connector; | ||
239 | |||
240 | if (!rdev->is_atom_bios) | ||
241 | return NULL; | ||
242 | |||
243 | connector = radeon_get_connector_for_encoder(encoder); | ||
244 | if (!connector) | ||
245 | return NULL; | ||
246 | |||
247 | radeon_connector = to_radeon_connector(connector); | ||
248 | |||
249 | if (!radeon_connector->con_priv) | ||
250 | return NULL; | ||
251 | |||
252 | dig_connector = radeon_connector->con_priv; | ||
253 | |||
254 | return dig_connector; | ||
255 | } | ||
256 | |||
257 | void radeon_panel_mode_fixup(struct drm_encoder *encoder, | 231 | void radeon_panel_mode_fixup(struct drm_encoder *encoder, |
258 | struct drm_display_mode *adjusted_mode) | 232 | struct drm_display_mode *adjusted_mode) |
259 | { | 233 | { |
@@ -512,14 +486,12 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
512 | struct radeon_device *rdev = dev->dev_private; | 486 | struct radeon_device *rdev = dev->dev_private; |
513 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 487 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
514 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 488 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
515 | struct radeon_connector_atom_dig *dig_connector = | ||
516 | radeon_get_atom_connector_priv_from_encoder(encoder); | ||
517 | union lvds_encoder_control args; | 489 | union lvds_encoder_control args; |
518 | int index = 0; | 490 | int index = 0; |
519 | int hdmi_detected = 0; | 491 | int hdmi_detected = 0; |
520 | uint8_t frev, crev; | 492 | uint8_t frev, crev; |
521 | 493 | ||
522 | if (!dig || !dig_connector) | 494 | if (!dig) |
523 | return; | 495 | return; |
524 | 496 | ||
525 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) | 497 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) |
@@ -562,7 +534,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
562 | if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) | 534 | if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) |
563 | args.v1.ucMisc |= (1 << 1); | 535 | args.v1.ucMisc |= (1 << 1); |
564 | } else { | 536 | } else { |
565 | if (dig_connector->linkb) | 537 | if (dig->linkb) |
566 | args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; | 538 | args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; |
567 | if (radeon_encoder->pixel_clock > 165000) | 539 | if (radeon_encoder->pixel_clock > 165000) |
568 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; | 540 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; |
@@ -601,7 +573,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
601 | args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; | 573 | args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; |
602 | } | 574 | } |
603 | } else { | 575 | } else { |
604 | if (dig_connector->linkb) | 576 | if (dig->linkb) |
605 | args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; | 577 | args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; |
606 | if (radeon_encoder->pixel_clock > 165000) | 578 | if (radeon_encoder->pixel_clock > 165000) |
607 | args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; | 579 | args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; |
@@ -623,6 +595,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
623 | int | 595 | int |
624 | atombios_get_encoder_mode(struct drm_encoder *encoder) | 596 | atombios_get_encoder_mode(struct drm_encoder *encoder) |
625 | { | 597 | { |
598 | struct drm_device *dev = encoder->dev; | ||
599 | struct radeon_device *rdev = dev->dev_private; | ||
626 | struct drm_connector *connector; | 600 | struct drm_connector *connector; |
627 | struct radeon_connector *radeon_connector; | 601 | struct radeon_connector *radeon_connector; |
628 | struct radeon_connector_atom_dig *dig_connector; | 602 | struct radeon_connector_atom_dig *dig_connector; |
@@ -636,9 +610,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
636 | switch (connector->connector_type) { | 610 | switch (connector->connector_type) { |
637 | case DRM_MODE_CONNECTOR_DVII: | 611 | case DRM_MODE_CONNECTOR_DVII: |
638 | case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ | 612 | case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ |
639 | if (drm_detect_hdmi_monitor(radeon_connector->edid)) | 613 | if (drm_detect_hdmi_monitor(radeon_connector->edid)) { |
640 | return ATOM_ENCODER_MODE_HDMI; | 614 | /* fix me */ |
641 | else if (radeon_connector->use_digital) | 615 | if (ASIC_IS_DCE4(rdev)) |
616 | return ATOM_ENCODER_MODE_DVI; | ||
617 | else | ||
618 | return ATOM_ENCODER_MODE_HDMI; | ||
619 | } else if (radeon_connector->use_digital) | ||
642 | return ATOM_ENCODER_MODE_DVI; | 620 | return ATOM_ENCODER_MODE_DVI; |
643 | else | 621 | else |
644 | return ATOM_ENCODER_MODE_CRT; | 622 | return ATOM_ENCODER_MODE_CRT; |
@@ -646,9 +624,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
646 | case DRM_MODE_CONNECTOR_DVID: | 624 | case DRM_MODE_CONNECTOR_DVID: |
647 | case DRM_MODE_CONNECTOR_HDMIA: | 625 | case DRM_MODE_CONNECTOR_HDMIA: |
648 | default: | 626 | default: |
649 | if (drm_detect_hdmi_monitor(radeon_connector->edid)) | 627 | if (drm_detect_hdmi_monitor(radeon_connector->edid)) { |
650 | return ATOM_ENCODER_MODE_HDMI; | 628 | /* fix me */ |
651 | else | 629 | if (ASIC_IS_DCE4(rdev)) |
630 | return ATOM_ENCODER_MODE_DVI; | ||
631 | else | ||
632 | return ATOM_ENCODER_MODE_HDMI; | ||
633 | } else | ||
652 | return ATOM_ENCODER_MODE_DVI; | 634 | return ATOM_ENCODER_MODE_DVI; |
653 | break; | 635 | break; |
654 | case DRM_MODE_CONNECTOR_LVDS: | 636 | case DRM_MODE_CONNECTOR_LVDS: |
@@ -660,9 +642,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
660 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || | 642 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || |
661 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) | 643 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) |
662 | return ATOM_ENCODER_MODE_DP; | 644 | return ATOM_ENCODER_MODE_DP; |
663 | else if (drm_detect_hdmi_monitor(radeon_connector->edid)) | 645 | else if (drm_detect_hdmi_monitor(radeon_connector->edid)) { |
664 | return ATOM_ENCODER_MODE_HDMI; | 646 | /* fix me */ |
665 | else | 647 | if (ASIC_IS_DCE4(rdev)) |
648 | return ATOM_ENCODER_MODE_DVI; | ||
649 | else | ||
650 | return ATOM_ENCODER_MODE_HDMI; | ||
651 | } else | ||
666 | return ATOM_ENCODER_MODE_DVI; | 652 | return ATOM_ENCODER_MODE_DVI; |
667 | break; | 653 | break; |
668 | case DRM_MODE_CONNECTOR_DVIA: | 654 | case DRM_MODE_CONNECTOR_DVIA: |
@@ -729,13 +715,24 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) | |||
729 | struct radeon_device *rdev = dev->dev_private; | 715 | struct radeon_device *rdev = dev->dev_private; |
730 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 716 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
731 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 717 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
732 | struct radeon_connector_atom_dig *dig_connector = | 718 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
733 | radeon_get_atom_connector_priv_from_encoder(encoder); | ||
734 | union dig_encoder_control args; | 719 | union dig_encoder_control args; |
735 | int index = 0; | 720 | int index = 0; |
736 | uint8_t frev, crev; | 721 | uint8_t frev, crev; |
722 | int dp_clock = 0; | ||
723 | int dp_lane_count = 0; | ||
724 | |||
725 | if (connector) { | ||
726 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
727 | struct radeon_connector_atom_dig *dig_connector = | ||
728 | radeon_connector->con_priv; | ||
737 | 729 | ||
738 | if (!dig || !dig_connector) | 730 | dp_clock = dig_connector->dp_clock; |
731 | dp_lane_count = dig_connector->dp_lane_count; | ||
732 | } | ||
733 | |||
734 | /* no dig encoder assigned */ | ||
735 | if (dig->dig_encoder == -1) | ||
739 | return; | 736 | return; |
740 | 737 | ||
741 | memset(&args, 0, sizeof(args)); | 738 | memset(&args, 0, sizeof(args)); |
@@ -757,9 +754,9 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) | |||
757 | args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); | 754 | args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); |
758 | 755 | ||
759 | if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) { | 756 | if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) { |
760 | if (dig_connector->dp_clock == 270000) | 757 | if (dp_clock == 270000) |
761 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; | 758 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; |
762 | args.v1.ucLaneNum = dig_connector->dp_lane_count; | 759 | args.v1.ucLaneNum = dp_lane_count; |
763 | } else if (radeon_encoder->pixel_clock > 165000) | 760 | } else if (radeon_encoder->pixel_clock > 165000) |
764 | args.v1.ucLaneNum = 8; | 761 | args.v1.ucLaneNum = 8; |
765 | else | 762 | else |
@@ -781,7 +778,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) | |||
781 | args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3; | 778 | args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3; |
782 | break; | 779 | break; |
783 | } | 780 | } |
784 | if (dig_connector->linkb) | 781 | if (dig->linkb) |
785 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; | 782 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; |
786 | else | 783 | else |
787 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; | 784 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; |
@@ -804,38 +801,47 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
804 | struct radeon_device *rdev = dev->dev_private; | 801 | struct radeon_device *rdev = dev->dev_private; |
805 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 802 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
806 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 803 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
807 | struct radeon_connector_atom_dig *dig_connector = | 804 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
808 | radeon_get_atom_connector_priv_from_encoder(encoder); | ||
809 | struct drm_connector *connector; | ||
810 | struct radeon_connector *radeon_connector; | ||
811 | union dig_transmitter_control args; | 805 | union dig_transmitter_control args; |
812 | int index = 0; | 806 | int index = 0; |
813 | uint8_t frev, crev; | 807 | uint8_t frev, crev; |
814 | bool is_dp = false; | 808 | bool is_dp = false; |
815 | int pll_id = 0; | 809 | int pll_id = 0; |
810 | int dp_clock = 0; | ||
811 | int dp_lane_count = 0; | ||
812 | int connector_object_id = 0; | ||
813 | int igp_lane_info = 0; | ||
816 | 814 | ||
817 | if (!dig || !dig_connector) | 815 | if (connector) { |
818 | return; | 816 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
817 | struct radeon_connector_atom_dig *dig_connector = | ||
818 | radeon_connector->con_priv; | ||
819 | 819 | ||
820 | connector = radeon_get_connector_for_encoder(encoder); | 820 | dp_clock = dig_connector->dp_clock; |
821 | radeon_connector = to_radeon_connector(connector); | 821 | dp_lane_count = dig_connector->dp_lane_count; |
822 | connector_object_id = | ||
823 | (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; | ||
824 | igp_lane_info = dig_connector->igp_lane_info; | ||
825 | } | ||
826 | |||
827 | /* no dig encoder assigned */ | ||
828 | if (dig->dig_encoder == -1) | ||
829 | return; | ||
822 | 830 | ||
823 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) | 831 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) |
824 | is_dp = true; | 832 | is_dp = true; |
825 | 833 | ||
826 | memset(&args, 0, sizeof(args)); | 834 | memset(&args, 0, sizeof(args)); |
827 | 835 | ||
828 | if (ASIC_IS_DCE32(rdev) || ASIC_IS_DCE4(rdev)) | 836 | switch (radeon_encoder->encoder_id) { |
837 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
838 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
839 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
829 | index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); | 840 | index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); |
830 | else { | 841 | break; |
831 | switch (radeon_encoder->encoder_id) { | 842 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: |
832 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 843 | index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl); |
833 | index = GetIndexIntoMasterTable(COMMAND, DIG1TransmitterControl); | 844 | break; |
834 | break; | ||
835 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
836 | index = GetIndexIntoMasterTable(COMMAND, DIG2TransmitterControl); | ||
837 | break; | ||
838 | } | ||
839 | } | 845 | } |
840 | 846 | ||
841 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | 847 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) |
@@ -843,14 +849,14 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
843 | 849 | ||
844 | args.v1.ucAction = action; | 850 | args.v1.ucAction = action; |
845 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { | 851 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { |
846 | args.v1.usInitInfo = radeon_connector->connector_object_id; | 852 | args.v1.usInitInfo = connector_object_id; |
847 | } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { | 853 | } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { |
848 | args.v1.asMode.ucLaneSel = lane_num; | 854 | args.v1.asMode.ucLaneSel = lane_num; |
849 | args.v1.asMode.ucLaneSet = lane_set; | 855 | args.v1.asMode.ucLaneSet = lane_set; |
850 | } else { | 856 | } else { |
851 | if (is_dp) | 857 | if (is_dp) |
852 | args.v1.usPixelClock = | 858 | args.v1.usPixelClock = |
853 | cpu_to_le16(dig_connector->dp_clock / 10); | 859 | cpu_to_le16(dp_clock / 10); |
854 | else if (radeon_encoder->pixel_clock > 165000) | 860 | else if (radeon_encoder->pixel_clock > 165000) |
855 | args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); | 861 | args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); |
856 | else | 862 | else |
@@ -858,13 +864,13 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
858 | } | 864 | } |
859 | if (ASIC_IS_DCE4(rdev)) { | 865 | if (ASIC_IS_DCE4(rdev)) { |
860 | if (is_dp) | 866 | if (is_dp) |
861 | args.v3.ucLaneNum = dig_connector->dp_lane_count; | 867 | args.v3.ucLaneNum = dp_lane_count; |
862 | else if (radeon_encoder->pixel_clock > 165000) | 868 | else if (radeon_encoder->pixel_clock > 165000) |
863 | args.v3.ucLaneNum = 8; | 869 | args.v3.ucLaneNum = 8; |
864 | else | 870 | else |
865 | args.v3.ucLaneNum = 4; | 871 | args.v3.ucLaneNum = 4; |
866 | 872 | ||
867 | if (dig_connector->linkb) { | 873 | if (dig->linkb) { |
868 | args.v3.acConfig.ucLinkSel = 1; | 874 | args.v3.acConfig.ucLinkSel = 1; |
869 | args.v3.acConfig.ucEncoderSel = 1; | 875 | args.v3.acConfig.ucEncoderSel = 1; |
870 | } | 876 | } |
@@ -904,7 +910,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
904 | } | 910 | } |
905 | } else if (ASIC_IS_DCE32(rdev)) { | 911 | } else if (ASIC_IS_DCE32(rdev)) { |
906 | args.v2.acConfig.ucEncoderSel = dig->dig_encoder; | 912 | args.v2.acConfig.ucEncoderSel = dig->dig_encoder; |
907 | if (dig_connector->linkb) | 913 | if (dig->linkb) |
908 | args.v2.acConfig.ucLinkSel = 1; | 914 | args.v2.acConfig.ucLinkSel = 1; |
909 | 915 | ||
910 | switch (radeon_encoder->encoder_id) { | 916 | switch (radeon_encoder->encoder_id) { |
@@ -938,23 +944,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
938 | if ((rdev->flags & RADEON_IS_IGP) && | 944 | if ((rdev->flags & RADEON_IS_IGP) && |
939 | (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) { | 945 | (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) { |
940 | if (is_dp || (radeon_encoder->pixel_clock <= 165000)) { | 946 | if (is_dp || (radeon_encoder->pixel_clock <= 165000)) { |
941 | if (dig_connector->igp_lane_info & 0x1) | 947 | if (igp_lane_info & 0x1) |
942 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; | 948 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; |
943 | else if (dig_connector->igp_lane_info & 0x2) | 949 | else if (igp_lane_info & 0x2) |
944 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7; | 950 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7; |
945 | else if (dig_connector->igp_lane_info & 0x4) | 951 | else if (igp_lane_info & 0x4) |
946 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11; | 952 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11; |
947 | else if (dig_connector->igp_lane_info & 0x8) | 953 | else if (igp_lane_info & 0x8) |
948 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; | 954 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; |
949 | } else { | 955 | } else { |
950 | if (dig_connector->igp_lane_info & 0x3) | 956 | if (igp_lane_info & 0x3) |
951 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; | 957 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; |
952 | else if (dig_connector->igp_lane_info & 0xc) | 958 | else if (igp_lane_info & 0xc) |
953 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; | 959 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; |
954 | } | 960 | } |
955 | } | 961 | } |
956 | 962 | ||
957 | if (dig_connector->linkb) | 963 | if (dig->linkb) |
958 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB; | 964 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB; |
959 | else | 965 | else |
960 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA; | 966 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA; |
@@ -1072,8 +1078,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1072 | if (is_dig) { | 1078 | if (is_dig) { |
1073 | switch (mode) { | 1079 | switch (mode) { |
1074 | case DRM_MODE_DPMS_ON: | 1080 | case DRM_MODE_DPMS_ON: |
1075 | if (!ASIC_IS_DCE4(rdev)) | 1081 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); |
1076 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); | ||
1077 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { | 1082 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { |
1078 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 1083 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
1079 | 1084 | ||
@@ -1085,8 +1090,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1085 | case DRM_MODE_DPMS_STANDBY: | 1090 | case DRM_MODE_DPMS_STANDBY: |
1086 | case DRM_MODE_DPMS_SUSPEND: | 1091 | case DRM_MODE_DPMS_SUSPEND: |
1087 | case DRM_MODE_DPMS_OFF: | 1092 | case DRM_MODE_DPMS_OFF: |
1088 | if (!ASIC_IS_DCE4(rdev)) | 1093 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); |
1089 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); | ||
1090 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { | 1094 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { |
1091 | if (ASIC_IS_DCE4(rdev)) | 1095 | if (ASIC_IS_DCE4(rdev)) |
1092 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); | 1096 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); |
@@ -1290,24 +1294,22 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) | |||
1290 | uint32_t dig_enc_in_use = 0; | 1294 | uint32_t dig_enc_in_use = 0; |
1291 | 1295 | ||
1292 | if (ASIC_IS_DCE4(rdev)) { | 1296 | if (ASIC_IS_DCE4(rdev)) { |
1293 | struct radeon_connector_atom_dig *dig_connector = | 1297 | dig = radeon_encoder->enc_priv; |
1294 | radeon_get_atom_connector_priv_from_encoder(encoder); | ||
1295 | |||
1296 | switch (radeon_encoder->encoder_id) { | 1298 | switch (radeon_encoder->encoder_id) { |
1297 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 1299 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
1298 | if (dig_connector->linkb) | 1300 | if (dig->linkb) |
1299 | return 1; | 1301 | return 1; |
1300 | else | 1302 | else |
1301 | return 0; | 1303 | return 0; |
1302 | break; | 1304 | break; |
1303 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | 1305 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
1304 | if (dig_connector->linkb) | 1306 | if (dig->linkb) |
1305 | return 3; | 1307 | return 3; |
1306 | else | 1308 | else |
1307 | return 2; | 1309 | return 2; |
1308 | break; | 1310 | break; |
1309 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | 1311 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
1310 | if (dig_connector->linkb) | 1312 | if (dig->linkb) |
1311 | return 5; | 1313 | return 5; |
1312 | else | 1314 | else |
1313 | return 4; | 1315 | return 4; |
@@ -1641,6 +1643,7 @@ radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder) | |||
1641 | struct radeon_encoder_atom_dig * | 1643 | struct radeon_encoder_atom_dig * |
1642 | radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) | 1644 | radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) |
1643 | { | 1645 | { |
1646 | int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; | ||
1644 | struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL); | 1647 | struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL); |
1645 | 1648 | ||
1646 | if (!dig) | 1649 | if (!dig) |
@@ -1650,11 +1653,16 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) | |||
1650 | dig->coherent_mode = true; | 1653 | dig->coherent_mode = true; |
1651 | dig->dig_encoder = -1; | 1654 | dig->dig_encoder = -1; |
1652 | 1655 | ||
1656 | if (encoder_enum == 2) | ||
1657 | dig->linkb = true; | ||
1658 | else | ||
1659 | dig->linkb = false; | ||
1660 | |||
1653 | return dig; | 1661 | return dig; |
1654 | } | 1662 | } |
1655 | 1663 | ||
1656 | void | 1664 | void |
1657 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) | 1665 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device) |
1658 | { | 1666 | { |
1659 | struct radeon_device *rdev = dev->dev_private; | 1667 | struct radeon_device *rdev = dev->dev_private; |
1660 | struct drm_encoder *encoder; | 1668 | struct drm_encoder *encoder; |
@@ -1663,7 +1671,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su | |||
1663 | /* see if we already added it */ | 1671 | /* see if we already added it */ |
1664 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 1672 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
1665 | radeon_encoder = to_radeon_encoder(encoder); | 1673 | radeon_encoder = to_radeon_encoder(encoder); |
1666 | if (radeon_encoder->encoder_id == encoder_id) { | 1674 | if (radeon_encoder->encoder_enum == encoder_enum) { |
1667 | radeon_encoder->devices |= supported_device; | 1675 | radeon_encoder->devices |= supported_device; |
1668 | return; | 1676 | return; |
1669 | } | 1677 | } |
@@ -1691,7 +1699,8 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su | |||
1691 | 1699 | ||
1692 | radeon_encoder->enc_priv = NULL; | 1700 | radeon_encoder->enc_priv = NULL; |
1693 | 1701 | ||
1694 | radeon_encoder->encoder_id = encoder_id; | 1702 | radeon_encoder->encoder_enum = encoder_enum; |
1703 | radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; | ||
1695 | radeon_encoder->devices = supported_device; | 1704 | radeon_encoder->devices = supported_device; |
1696 | radeon_encoder->rmx_type = RMX_OFF; | 1705 | radeon_encoder->rmx_type = RMX_OFF; |
1697 | radeon_encoder->underscan_type = UNDERSCAN_OFF; | 1706 | radeon_encoder->underscan_type = UNDERSCAN_OFF; |
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index dbf86962bdd1..c74a8b20d941 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -118,7 +118,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, | |||
118 | aligned_size = ALIGN(size, PAGE_SIZE); | 118 | aligned_size = ALIGN(size, PAGE_SIZE); |
119 | ret = radeon_gem_object_create(rdev, aligned_size, 0, | 119 | ret = radeon_gem_object_create(rdev, aligned_size, 0, |
120 | RADEON_GEM_DOMAIN_VRAM, | 120 | RADEON_GEM_DOMAIN_VRAM, |
121 | false, ttm_bo_type_kernel, | 121 | false, true, |
122 | &gobj); | 122 | &gobj); |
123 | if (ret) { | 123 | if (ret) { |
124 | printk(KERN_ERR "failed to allocate framebuffer (%d)\n", | 124 | printk(KERN_ERR "failed to allocate framebuffer (%d)\n", |
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index bfd2ce5f5372..0416804d8f30 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
@@ -99,6 +99,13 @@ static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) | |||
99 | } | 99 | } |
100 | } | 100 | } |
101 | 101 | ||
102 | /* switch the pads to ddc mode */ | ||
103 | if (ASIC_IS_DCE3(rdev) && rec->hw_capable) { | ||
104 | temp = RREG32(rec->mask_clk_reg); | ||
105 | temp &= ~(1 << 16); | ||
106 | WREG32(rec->mask_clk_reg, temp); | ||
107 | } | ||
108 | |||
102 | /* clear the output pin values */ | 109 | /* clear the output pin values */ |
103 | temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask; | 110 | temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask; |
104 | WREG32(rec->a_clk_reg, temp); | 111 | WREG32(rec->a_clk_reg, temp); |
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 059bfa4098d7..a108c7ed14f5 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c | |||
@@ -121,11 +121,12 @@ int radeon_irq_kms_init(struct radeon_device *rdev) | |||
121 | * chips. Disable MSI on them for now. | 121 | * chips. Disable MSI on them for now. |
122 | */ | 122 | */ |
123 | if ((rdev->family >= CHIP_RV380) && | 123 | if ((rdev->family >= CHIP_RV380) && |
124 | (!(rdev->flags & RADEON_IS_IGP))) { | 124 | (!(rdev->flags & RADEON_IS_IGP)) && |
125 | (!(rdev->flags & RADEON_IS_AGP))) { | ||
125 | int ret = pci_enable_msi(rdev->pdev); | 126 | int ret = pci_enable_msi(rdev->pdev); |
126 | if (!ret) { | 127 | if (!ret) { |
127 | rdev->msi_enabled = 1; | 128 | rdev->msi_enabled = 1; |
128 | DRM_INFO("radeon: using MSI.\n"); | 129 | dev_info(rdev->dev, "radeon: using MSI.\n"); |
129 | } | 130 | } |
130 | } | 131 | } |
131 | rdev->irq.installed = true; | 132 | rdev->irq.installed = true; |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index b1c8ace5f080..5eee3c41d124 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -161,6 +161,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
161 | DRM_DEBUG_KMS("tiling config is r6xx+ only!\n"); | 161 | DRM_DEBUG_KMS("tiling config is r6xx+ only!\n"); |
162 | return -EINVAL; | 162 | return -EINVAL; |
163 | } | 163 | } |
164 | break; | ||
164 | case RADEON_INFO_WANT_HYPERZ: | 165 | case RADEON_INFO_WANT_HYPERZ: |
165 | /* The "value" here is both an input and output parameter. | 166 | /* The "value" here is both an input and output parameter. |
166 | * If the input value is 1, filp requests hyper-z access. | 167 | * If the input value is 1, filp requests hyper-z access. |
@@ -323,45 +324,45 @@ KMS_INVALID_IOCTL(radeon_surface_free_kms) | |||
323 | 324 | ||
324 | 325 | ||
325 | struct drm_ioctl_desc radeon_ioctls_kms[] = { | 326 | struct drm_ioctl_desc radeon_ioctls_kms[] = { |
326 | DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 327 | DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
327 | DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 328 | DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
328 | DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 329 | DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
329 | DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 330 | DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
330 | DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH), | 331 | DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH), |
331 | DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH), | 332 | DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH), |
332 | DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH), | 333 | DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH), |
333 | DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH), | 334 | DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH), |
334 | DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH), | 335 | DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH), |
335 | DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH), | 336 | DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH), |
336 | DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH), | 337 | DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH), |
337 | DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH), | 338 | DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH), |
338 | DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH), | 339 | DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH), |
339 | DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH), | 340 | DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH), |
340 | DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 341 | DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
341 | DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH), | 342 | DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH), |
342 | DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH), | 343 | DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH), |
343 | DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH), | 344 | DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH), |
344 | DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH), | 345 | DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH), |
345 | DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH), | 346 | DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH), |
346 | DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free_kms, DRM_AUTH), | 347 | DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH), |
347 | DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 348 | DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
348 | DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH), | 349 | DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH), |
349 | DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH), | 350 | DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH), |
350 | DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH), | 351 | DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH), |
351 | DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH), | 352 | DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH), |
352 | DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH), | 353 | DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH), |
353 | /* KMS */ | 354 | /* KMS */ |
354 | DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED), | 355 | DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED), |
355 | DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED), | 356 | DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED), |
356 | DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED), | 357 | DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED), |
357 | DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED), | 358 | DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED), |
358 | DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED), | 359 | DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED), |
359 | DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED), | 360 | DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED), |
360 | DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED), | 361 | DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED), |
361 | DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED), | 362 | DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED), |
362 | DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED), | 363 | DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED), |
363 | DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), | 364 | DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), |
364 | DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), | 365 | DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), |
365 | DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), | 366 | DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), |
366 | }; | 367 | }; |
367 | int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); | 368 | int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 989df519a1e4..305049afde15 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
@@ -272,7 +272,7 @@ static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div, | |||
272 | if (!ref_div) | 272 | if (!ref_div) |
273 | return 1; | 273 | return 1; |
274 | 274 | ||
275 | vcoFreq = ((unsigned)ref_freq & fb_div) / ref_div; | 275 | vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div; |
276 | 276 | ||
277 | /* | 277 | /* |
278 | * This is horribly crude: the VCO frequency range is divided into | 278 | * This is horribly crude: the VCO frequency range is divided into |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index b8149cbc0c70..0b8397000f4c 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
@@ -1345,7 +1345,7 @@ static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct ra | |||
1345 | } | 1345 | } |
1346 | 1346 | ||
1347 | void | 1347 | void |
1348 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) | 1348 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device) |
1349 | { | 1349 | { |
1350 | struct radeon_device *rdev = dev->dev_private; | 1350 | struct radeon_device *rdev = dev->dev_private; |
1351 | struct drm_encoder *encoder; | 1351 | struct drm_encoder *encoder; |
@@ -1354,7 +1354,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t | |||
1354 | /* see if we already added it */ | 1354 | /* see if we already added it */ |
1355 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 1355 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
1356 | radeon_encoder = to_radeon_encoder(encoder); | 1356 | radeon_encoder = to_radeon_encoder(encoder); |
1357 | if (radeon_encoder->encoder_id == encoder_id) { | 1357 | if (radeon_encoder->encoder_enum == encoder_enum) { |
1358 | radeon_encoder->devices |= supported_device; | 1358 | radeon_encoder->devices |= supported_device; |
1359 | return; | 1359 | return; |
1360 | } | 1360 | } |
@@ -1374,7 +1374,8 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t | |||
1374 | 1374 | ||
1375 | radeon_encoder->enc_priv = NULL; | 1375 | radeon_encoder->enc_priv = NULL; |
1376 | 1376 | ||
1377 | radeon_encoder->encoder_id = encoder_id; | 1377 | radeon_encoder->encoder_enum = encoder_enum; |
1378 | radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; | ||
1378 | radeon_encoder->devices = supported_device; | 1379 | radeon_encoder->devices = supported_device; |
1379 | radeon_encoder->rmx_type = RMX_OFF; | 1380 | radeon_encoder->rmx_type = RMX_OFF; |
1380 | 1381 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 5bbc086b9267..8f93e2b4b0c8 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -342,6 +342,7 @@ struct radeon_atom_ss { | |||
342 | }; | 342 | }; |
343 | 343 | ||
344 | struct radeon_encoder_atom_dig { | 344 | struct radeon_encoder_atom_dig { |
345 | bool linkb; | ||
345 | /* atom dig */ | 346 | /* atom dig */ |
346 | bool coherent_mode; | 347 | bool coherent_mode; |
347 | int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */ | 348 | int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */ |
@@ -360,6 +361,7 @@ struct radeon_encoder_atom_dac { | |||
360 | 361 | ||
361 | struct radeon_encoder { | 362 | struct radeon_encoder { |
362 | struct drm_encoder base; | 363 | struct drm_encoder base; |
364 | uint32_t encoder_enum; | ||
363 | uint32_t encoder_id; | 365 | uint32_t encoder_id; |
364 | uint32_t devices; | 366 | uint32_t devices; |
365 | uint32_t active_device; | 367 | uint32_t active_device; |
@@ -378,7 +380,6 @@ struct radeon_encoder { | |||
378 | 380 | ||
379 | struct radeon_connector_atom_dig { | 381 | struct radeon_connector_atom_dig { |
380 | uint32_t igp_lane_info; | 382 | uint32_t igp_lane_info; |
381 | bool linkb; | ||
382 | /* displayport */ | 383 | /* displayport */ |
383 | struct radeon_i2c_chan *dp_i2c_bus; | 384 | struct radeon_i2c_chan *dp_i2c_bus; |
384 | u8 dpcd[8]; | 385 | u8 dpcd[8]; |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 58038f5cab38..477ba673e1b4 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -226,6 +226,11 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) | |||
226 | { | 226 | { |
227 | int i; | 227 | int i; |
228 | 228 | ||
229 | /* no need to take locks, etc. if nothing's going to change */ | ||
230 | if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && | ||
231 | (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) | ||
232 | return; | ||
233 | |||
229 | mutex_lock(&rdev->ddev->struct_mutex); | 234 | mutex_lock(&rdev->ddev->struct_mutex); |
230 | mutex_lock(&rdev->vram_mutex); | 235 | mutex_lock(&rdev->vram_mutex); |
231 | mutex_lock(&rdev->cp.mutex); | 236 | mutex_lock(&rdev->cp.mutex); |
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index b3ba44c0a818..4ae5a3d1074e 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c | |||
@@ -3228,34 +3228,34 @@ void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) | |||
3228 | } | 3228 | } |
3229 | 3229 | ||
3230 | struct drm_ioctl_desc radeon_ioctls[] = { | 3230 | struct drm_ioctl_desc radeon_ioctls[] = { |
3231 | DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 3231 | DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
3232 | DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 3232 | DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
3233 | DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 3233 | DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
3234 | DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 3234 | DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
3235 | DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH), | 3235 | DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH), |
3236 | DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH), | 3236 | DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH), |
3237 | DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH), | 3237 | DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset, DRM_AUTH), |
3238 | DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH), | 3238 | DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH), |
3239 | DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH), | 3239 | DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap, DRM_AUTH), |
3240 | DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH), | 3240 | DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear, DRM_AUTH), |
3241 | DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH), | 3241 | DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH), |
3242 | DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH), | 3242 | DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices, DRM_AUTH), |
3243 | DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH), | 3243 | DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH), |
3244 | DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH), | 3244 | DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH), |
3245 | DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 3245 | DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
3246 | DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH), | 3246 | DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH), |
3247 | DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH), | 3247 | DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH), |
3248 | DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH), | 3248 | DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH), |
3249 | DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH), | 3249 | DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip, DRM_AUTH), |
3250 | DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH), | 3250 | DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH), |
3251 | DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH), | 3251 | DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free, DRM_AUTH), |
3252 | DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 3252 | DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
3253 | DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH), | 3253 | DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH), |
3254 | DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH), | 3254 | DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH), |
3255 | DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH), | 3255 | DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH), |
3256 | DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH), | 3256 | DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH), |
3257 | DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH), | 3257 | DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH), |
3258 | DRM_IOCTL_DEF(DRM_RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH) | 3258 | DRM_IOCTL_DEF_DRV(RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH) |
3259 | }; | 3259 | }; |
3260 | 3260 | ||
3261 | int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls); | 3261 | int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls); |
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c index 976dc8d25280..bf5f83ea14fe 100644 --- a/drivers/gpu/drm/savage/savage_bci.c +++ b/drivers/gpu/drm/savage/savage_bci.c | |||
@@ -1082,10 +1082,10 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) | |||
1082 | } | 1082 | } |
1083 | 1083 | ||
1084 | struct drm_ioctl_desc savage_ioctls[] = { | 1084 | struct drm_ioctl_desc savage_ioctls[] = { |
1085 | DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1085 | DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1086 | DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH), | 1086 | DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH), |
1087 | DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH), | 1087 | DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH), |
1088 | DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH), | 1088 | DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH), |
1089 | }; | 1089 | }; |
1090 | 1090 | ||
1091 | int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls); | 1091 | int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls); |
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c index 07d0f2979cac..7fe2b63412ce 100644 --- a/drivers/gpu/drm/sis/sis_mm.c +++ b/drivers/gpu/drm/sis/sis_mm.c | |||
@@ -320,12 +320,12 @@ void sis_reclaim_buffers_locked(struct drm_device *dev, | |||
320 | } | 320 | } |
321 | 321 | ||
322 | struct drm_ioctl_desc sis_ioctls[] = { | 322 | struct drm_ioctl_desc sis_ioctls[] = { |
323 | DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH), | 323 | DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH), |
324 | DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH), | 324 | DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH), |
325 | DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), | 325 | DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), |
326 | DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH), | 326 | DRM_IOCTL_DEF_DRV(SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH), |
327 | DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH), | 327 | DRM_IOCTL_DEF_DRV(SIS_AGP_FREE, sis_drm_free, DRM_AUTH), |
328 | DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), | 328 | DRM_IOCTL_DEF_DRV(SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), |
329 | }; | 329 | }; |
330 | 330 | ||
331 | int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls); | 331 | int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls); |
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c index 68dda74a50ae..cc0ffa9abd00 100644 --- a/drivers/gpu/drm/via/via_dma.c +++ b/drivers/gpu/drm/via/via_dma.c | |||
@@ -722,20 +722,20 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file * | |||
722 | } | 722 | } |
723 | 723 | ||
724 | struct drm_ioctl_desc via_ioctls[] = { | 724 | struct drm_ioctl_desc via_ioctls[] = { |
725 | DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH), | 725 | DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH), |
726 | DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH), | 726 | DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH), |
727 | DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER), | 727 | DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER), |
728 | DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER), | 728 | DRM_IOCTL_DEF_DRV(VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER), |
729 | DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER), | 729 | DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER), |
730 | DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH), | 730 | DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH), |
731 | DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH), | 731 | DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH), |
732 | DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH), | 732 | DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH), |
733 | DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH), | 733 | DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH), |
734 | DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH), | 734 | DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH), |
735 | DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH), | 735 | DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH), |
736 | DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH), | 736 | DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH), |
737 | DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH), | 737 | DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH), |
738 | DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH) | 738 | DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH) |
739 | }; | 739 | }; |
740 | 740 | ||
741 | int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls); | 741 | int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 9dd395b90216..72ec2e2b6e97 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -99,47 +99,47 @@ | |||
99 | */ | 99 | */ |
100 | 100 | ||
101 | #define VMW_IOCTL_DEF(ioctl, func, flags) \ | 101 | #define VMW_IOCTL_DEF(ioctl, func, flags) \ |
102 | [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func} | 102 | [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl} |
103 | 103 | ||
104 | /** | 104 | /** |
105 | * Ioctl definitions. | 105 | * Ioctl definitions. |
106 | */ | 106 | */ |
107 | 107 | ||
108 | static struct drm_ioctl_desc vmw_ioctls[] = { | 108 | static struct drm_ioctl_desc vmw_ioctls[] = { |
109 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl, | 109 | VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, |
110 | DRM_AUTH | DRM_UNLOCKED), | 110 | DRM_AUTH | DRM_UNLOCKED), |
111 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, | 111 | VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, |
112 | DRM_AUTH | DRM_UNLOCKED), | 112 | DRM_AUTH | DRM_UNLOCKED), |
113 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, | 113 | VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, |
114 | DRM_AUTH | DRM_UNLOCKED), | 114 | DRM_AUTH | DRM_UNLOCKED), |
115 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS, | 115 | VMW_IOCTL_DEF(VMW_CURSOR_BYPASS, |
116 | vmw_kms_cursor_bypass_ioctl, | 116 | vmw_kms_cursor_bypass_ioctl, |
117 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), | 117 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
118 | 118 | ||
119 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl, | 119 | VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl, |
120 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), | 120 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
121 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, | 121 | VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, |
122 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), | 122 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
123 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl, | 123 | VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl, |
124 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), | 124 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
125 | 125 | ||
126 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl, | 126 | VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, |
127 | DRM_AUTH | DRM_UNLOCKED), | 127 | DRM_AUTH | DRM_UNLOCKED), |
128 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, | 128 | VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, |
129 | DRM_AUTH | DRM_UNLOCKED), | 129 | DRM_AUTH | DRM_UNLOCKED), |
130 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl, | 130 | VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl, |
131 | DRM_AUTH | DRM_UNLOCKED), | 131 | DRM_AUTH | DRM_UNLOCKED), |
132 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, | 132 | VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, |
133 | DRM_AUTH | DRM_UNLOCKED), | 133 | DRM_AUTH | DRM_UNLOCKED), |
134 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl, | 134 | VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, |
135 | DRM_AUTH | DRM_UNLOCKED), | 135 | DRM_AUTH | DRM_UNLOCKED), |
136 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl, | 136 | VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, |
137 | DRM_AUTH | DRM_UNLOCKED), | 137 | DRM_AUTH | DRM_UNLOCKED), |
138 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, | 138 | VMW_IOCTL_DEF(VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, |
139 | DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED), | 139 | DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED), |
140 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl, | 140 | VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl, |
141 | DRM_AUTH | DRM_UNLOCKED), | 141 | DRM_AUTH | DRM_UNLOCKED), |
142 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl, | 142 | VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl, |
143 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED) | 143 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED) |
144 | }; | 144 | }; |
145 | 145 | ||
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index e635199a0cd2..0c52899be964 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -1299,6 +1299,7 @@ static const struct hid_device_id hid_blacklist[] = { | |||
1299 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) }, | 1299 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) }, |
1300 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) }, | 1300 | { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) }, |
1301 | { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) }, | 1301 | { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) }, |
1302 | { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) }, | ||
1302 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, | 1303 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, |
1303 | { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) }, | 1304 | { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) }, |
1304 | { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) }, | 1305 | { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) }, |
diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c index f44bdc084cb2..8ca7f65cf2f8 100644 --- a/drivers/hid/hid-egalax.c +++ b/drivers/hid/hid-egalax.c | |||
@@ -159,6 +159,13 @@ static int egalax_event(struct hid_device *hid, struct hid_field *field, | |||
159 | { | 159 | { |
160 | struct egalax_data *td = hid_get_drvdata(hid); | 160 | struct egalax_data *td = hid_get_drvdata(hid); |
161 | 161 | ||
162 | /* Note, eGalax has two product lines: the first is resistive and | ||
163 | * uses a standard parallel multitouch protocol (product ID == | ||
164 | * 48xx). The second is capacitive and uses an unusual "serial" | ||
165 | * protocol with a different message for each multitouch finger | ||
166 | * (product ID == 72xx). We do not yet generate a correct event | ||
167 | * sequence for the capacitive/serial protocol. | ||
168 | */ | ||
162 | if (hid->claimed & HID_CLAIMED_INPUT) { | 169 | if (hid->claimed & HID_CLAIMED_INPUT) { |
163 | struct input_dev *input = field->hidinput->input; | 170 | struct input_dev *input = field->hidinput->input; |
164 | 171 | ||
@@ -246,6 +253,8 @@ static void egalax_remove(struct hid_device *hdev) | |||
246 | static const struct hid_device_id egalax_devices[] = { | 253 | static const struct hid_device_id egalax_devices[] = { |
247 | { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, | 254 | { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, |
248 | USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) }, | 255 | USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) }, |
256 | { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, | ||
257 | USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) }, | ||
249 | { } | 258 | { } |
250 | }; | 259 | }; |
251 | MODULE_DEVICE_TABLE(hid, egalax_devices); | 260 | MODULE_DEVICE_TABLE(hid, egalax_devices); |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index d3fc13ae094d..85c6d13c9ffa 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -188,6 +188,7 @@ | |||
188 | #define USB_VENDOR_ID_DWAV 0x0eef | 188 | #define USB_VENDOR_ID_DWAV 0x0eef |
189 | #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001 | 189 | #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001 |
190 | #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d | 190 | #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d |
191 | #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1 0x720c | ||
191 | 192 | ||
192 | #define USB_VENDOR_ID_ELECOM 0x056e | 193 | #define USB_VENDOR_ID_ELECOM 0x056e |
193 | #define USB_DEVICE_ID_ELECOM_BM084 0x0061 | 194 | #define USB_DEVICE_ID_ELECOM_BM084 0x0061 |
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c index 346f0e34987e..bc2e07740628 100644 --- a/drivers/hid/hid-picolcd.c +++ b/drivers/hid/hid-picolcd.c | |||
@@ -547,11 +547,11 @@ static void picolcd_fb_destroy(struct fb_info *info) | |||
547 | ref_cnt--; | 547 | ref_cnt--; |
548 | mutex_lock(&info->lock); | 548 | mutex_lock(&info->lock); |
549 | (*ref_cnt)--; | 549 | (*ref_cnt)--; |
550 | may_release = !ref_cnt; | 550 | may_release = !*ref_cnt; |
551 | mutex_unlock(&info->lock); | 551 | mutex_unlock(&info->lock); |
552 | if (may_release) { | 552 | if (may_release) { |
553 | framebuffer_release(info); | ||
554 | vfree((u8 *)info->fix.smem_start); | 553 | vfree((u8 *)info->fix.smem_start); |
554 | framebuffer_release(info); | ||
555 | } | 555 | } |
556 | } | 556 | } |
557 | 557 | ||
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 254a003af048..0a29c51114aa 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c | |||
@@ -266,13 +266,15 @@ static int hiddev_open(struct inode *inode, struct file *file) | |||
266 | { | 266 | { |
267 | struct hiddev_list *list; | 267 | struct hiddev_list *list; |
268 | struct usb_interface *intf; | 268 | struct usb_interface *intf; |
269 | struct hid_device *hid; | ||
269 | struct hiddev *hiddev; | 270 | struct hiddev *hiddev; |
270 | int res; | 271 | int res; |
271 | 272 | ||
272 | intf = usb_find_interface(&hiddev_driver, iminor(inode)); | 273 | intf = usb_find_interface(&hiddev_driver, iminor(inode)); |
273 | if (!intf) | 274 | if (!intf) |
274 | return -ENODEV; | 275 | return -ENODEV; |
275 | hiddev = usb_get_intfdata(intf); | 276 | hid = usb_get_intfdata(intf); |
277 | hiddev = hid->hiddev; | ||
276 | 278 | ||
277 | if (!(list = kzalloc(sizeof(struct hiddev_list), GFP_KERNEL))) | 279 | if (!(list = kzalloc(sizeof(struct hiddev_list), GFP_KERNEL))) |
278 | return -ENOMEM; | 280 | return -ENOMEM; |
@@ -587,7 +589,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
587 | struct hiddev_list *list = file->private_data; | 589 | struct hiddev_list *list = file->private_data; |
588 | struct hiddev *hiddev = list->hiddev; | 590 | struct hiddev *hiddev = list->hiddev; |
589 | struct hid_device *hid = hiddev->hid; | 591 | struct hid_device *hid = hiddev->hid; |
590 | struct usb_device *dev = hid_to_usb_dev(hid); | 592 | struct usb_device *dev; |
591 | struct hiddev_collection_info cinfo; | 593 | struct hiddev_collection_info cinfo; |
592 | struct hiddev_report_info rinfo; | 594 | struct hiddev_report_info rinfo; |
593 | struct hiddev_field_info finfo; | 595 | struct hiddev_field_info finfo; |
@@ -601,9 +603,11 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
601 | /* Called without BKL by compat methods so no BKL taken */ | 603 | /* Called without BKL by compat methods so no BKL taken */ |
602 | 604 | ||
603 | /* FIXME: Who or what stop this racing with a disconnect ?? */ | 605 | /* FIXME: Who or what stop this racing with a disconnect ?? */ |
604 | if (!hiddev->exist) | 606 | if (!hiddev->exist || !hid) |
605 | return -EIO; | 607 | return -EIO; |
606 | 608 | ||
609 | dev = hid_to_usb_dev(hid); | ||
610 | |||
607 | switch (cmd) { | 611 | switch (cmd) { |
608 | 612 | ||
609 | case HIDIOCGVERSION: | 613 | case HIDIOCGVERSION: |
@@ -888,7 +892,6 @@ int hiddev_connect(struct hid_device *hid, unsigned int force) | |||
888 | hid->hiddev = hiddev; | 892 | hid->hiddev = hiddev; |
889 | hiddev->hid = hid; | 893 | hiddev->hid = hid; |
890 | hiddev->exist = 1; | 894 | hiddev->exist = 1; |
891 | usb_set_intfdata(usbhid->intf, usbhid); | ||
892 | retval = usb_register_dev(usbhid->intf, &hiddev_class); | 895 | retval = usb_register_dev(usbhid->intf, &hiddev_class); |
893 | if (retval) { | 896 | if (retval) { |
894 | err_hid("Not able to get a minor for this device."); | 897 | err_hid("Not able to get a minor for this device."); |
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 0fba82943125..4d4d09bdec0a 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
@@ -332,11 +332,11 @@ config SENSORS_F71805F | |||
332 | will be called f71805f. | 332 | will be called f71805f. |
333 | 333 | ||
334 | config SENSORS_F71882FG | 334 | config SENSORS_F71882FG |
335 | tristate "Fintek F71808E, F71858FG, F71862FG, F71882FG, F71889FG and F8000" | 335 | tristate "Fintek F71858FG, F71862FG, F71882FG, F71889FG and F8000" |
336 | depends on EXPERIMENTAL | 336 | depends on EXPERIMENTAL |
337 | help | 337 | help |
338 | If you say yes here you get support for hardware monitoring features | 338 | If you say yes here you get support for hardware monitoring |
339 | of the Fintek F71808E, F71858FG, F71862FG/71863FG, F71882FG/F71883FG, | 339 | features of the Fintek F71858FG, F71862FG/71863FG, F71882FG/F71883FG, |
340 | F71889FG and F8000 Super-I/O chips. | 340 | F71889FG and F8000 Super-I/O chips. |
341 | 341 | ||
342 | This driver can also be built as a module. If so, the module | 342 | This driver can also be built as a module. If so, the module |
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c index 6207120dcd4d..537841ef44b9 100644 --- a/drivers/hwmon/f71882fg.c +++ b/drivers/hwmon/f71882fg.c | |||
@@ -45,7 +45,6 @@ | |||
45 | #define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */ | 45 | #define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */ |
46 | 46 | ||
47 | #define SIO_FINTEK_ID 0x1934 /* Manufacturers ID */ | 47 | #define SIO_FINTEK_ID 0x1934 /* Manufacturers ID */ |
48 | #define SIO_F71808_ID 0x0901 /* Chipset ID */ | ||
49 | #define SIO_F71858_ID 0x0507 /* Chipset ID */ | 48 | #define SIO_F71858_ID 0x0507 /* Chipset ID */ |
50 | #define SIO_F71862_ID 0x0601 /* Chipset ID */ | 49 | #define SIO_F71862_ID 0x0601 /* Chipset ID */ |
51 | #define SIO_F71882_ID 0x0541 /* Chipset ID */ | 50 | #define SIO_F71882_ID 0x0541 /* Chipset ID */ |
@@ -97,10 +96,9 @@ static unsigned short force_id; | |||
97 | module_param(force_id, ushort, 0); | 96 | module_param(force_id, ushort, 0); |
98 | MODULE_PARM_DESC(force_id, "Override the detected device ID"); | 97 | MODULE_PARM_DESC(force_id, "Override the detected device ID"); |
99 | 98 | ||
100 | enum chips { f71808fg, f71858fg, f71862fg, f71882fg, f71889fg, f8000 }; | 99 | enum chips { f71858fg, f71862fg, f71882fg, f71889fg, f8000 }; |
101 | 100 | ||
102 | static const char *f71882fg_names[] = { | 101 | static const char *f71882fg_names[] = { |
103 | "f71808fg", | ||
104 | "f71858fg", | 102 | "f71858fg", |
105 | "f71862fg", | 103 | "f71862fg", |
106 | "f71882fg", | 104 | "f71882fg", |
@@ -308,8 +306,8 @@ static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = { | |||
308 | SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2), | 306 | SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2), |
309 | }; | 307 | }; |
310 | 308 | ||
311 | /* In attr common to the f71862fg, f71882fg and f71889fg */ | 309 | /* Temp and in attr common to the f71862fg, f71882fg and f71889fg */ |
312 | static struct sensor_device_attribute_2 fxxxx_in_attr[] = { | 310 | static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = { |
313 | SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0), | 311 | SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0), |
314 | SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1), | 312 | SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1), |
315 | SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2), | 313 | SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2), |
@@ -319,22 +317,6 @@ static struct sensor_device_attribute_2 fxxxx_in_attr[] = { | |||
319 | SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6), | 317 | SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6), |
320 | SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7), | 318 | SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7), |
321 | SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8), | 319 | SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8), |
322 | }; | ||
323 | |||
324 | /* In attr for the f71808fg */ | ||
325 | static struct sensor_device_attribute_2 f71808_in_attr[] = { | ||
326 | SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0), | ||
327 | SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1), | ||
328 | SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2), | ||
329 | SENSOR_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 0, 3), | ||
330 | SENSOR_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 0, 4), | ||
331 | SENSOR_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 0, 5), | ||
332 | SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 7), | ||
333 | SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 8), | ||
334 | }; | ||
335 | |||
336 | /* Temp attr common to the f71808fg, f71862fg, f71882fg and f71889fg */ | ||
337 | static struct sensor_device_attribute_2 fxxxx_temp_attr[] = { | ||
338 | SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 1), | 320 | SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 1), |
339 | SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max, | 321 | SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max, |
340 | store_temp_max, 0, 1), | 322 | store_temp_max, 0, 1), |
@@ -373,10 +355,6 @@ static struct sensor_device_attribute_2 fxxxx_temp_attr[] = { | |||
373 | store_temp_beep, 0, 6), | 355 | store_temp_beep, 0, 6), |
374 | SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 2), | 356 | SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 2), |
375 | SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 2), | 357 | SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 2), |
376 | }; | ||
377 | |||
378 | /* Temp and in attr common to the f71862fg, f71882fg and f71889fg */ | ||
379 | static struct sensor_device_attribute_2 f71862_temp_attr[] = { | ||
380 | SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 3), | 358 | SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 3), |
381 | SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max, | 359 | SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max, |
382 | store_temp_max, 0, 3), | 360 | store_temp_max, 0, 3), |
@@ -1011,11 +989,6 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev) | |||
1011 | data->temp_type[1] = 6; | 989 | data->temp_type[1] = 6; |
1012 | break; | 990 | break; |
1013 | } | 991 | } |
1014 | } else if (data->type == f71808fg) { | ||
1015 | reg = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE); | ||
1016 | data->temp_type[1] = (reg & 0x02) ? 2 : 4; | ||
1017 | data->temp_type[2] = (reg & 0x04) ? 2 : 4; | ||
1018 | |||
1019 | } else { | 992 | } else { |
1020 | reg2 = f71882fg_read8(data, F71882FG_REG_PECI); | 993 | reg2 = f71882fg_read8(data, F71882FG_REG_PECI); |
1021 | if ((reg2 & 0x03) == 0x01) | 994 | if ((reg2 & 0x03) == 0x01) |
@@ -1898,8 +1871,7 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev, | |||
1898 | 1871 | ||
1899 | val /= 1000; | 1872 | val /= 1000; |
1900 | 1873 | ||
1901 | if (data->type == f71889fg | 1874 | if (data->type == f71889fg) |
1902 | || data->type == f71808fg) | ||
1903 | val = SENSORS_LIMIT(val, -128, 127); | 1875 | val = SENSORS_LIMIT(val, -128, 127); |
1904 | else | 1876 | else |
1905 | val = SENSORS_LIMIT(val, 0, 127); | 1877 | val = SENSORS_LIMIT(val, 0, 127); |
@@ -2002,28 +1974,8 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) | |||
2002 | /* fall through! */ | 1974 | /* fall through! */ |
2003 | case f71862fg: | 1975 | case f71862fg: |
2004 | err = f71882fg_create_sysfs_files(pdev, | 1976 | err = f71882fg_create_sysfs_files(pdev, |
2005 | f71862_temp_attr, | 1977 | fxxxx_in_temp_attr, |
2006 | ARRAY_SIZE(f71862_temp_attr)); | 1978 | ARRAY_SIZE(fxxxx_in_temp_attr)); |
2007 | if (err) | ||
2008 | goto exit_unregister_sysfs; | ||
2009 | err = f71882fg_create_sysfs_files(pdev, | ||
2010 | fxxxx_in_attr, | ||
2011 | ARRAY_SIZE(fxxxx_in_attr)); | ||
2012 | if (err) | ||
2013 | goto exit_unregister_sysfs; | ||
2014 | err = f71882fg_create_sysfs_files(pdev, | ||
2015 | fxxxx_temp_attr, | ||
2016 | ARRAY_SIZE(fxxxx_temp_attr)); | ||
2017 | break; | ||
2018 | case f71808fg: | ||
2019 | err = f71882fg_create_sysfs_files(pdev, | ||
2020 | f71808_in_attr, | ||
2021 | ARRAY_SIZE(f71808_in_attr)); | ||
2022 | if (err) | ||
2023 | goto exit_unregister_sysfs; | ||
2024 | err = f71882fg_create_sysfs_files(pdev, | ||
2025 | fxxxx_temp_attr, | ||
2026 | ARRAY_SIZE(fxxxx_temp_attr)); | ||
2027 | break; | 1979 | break; |
2028 | case f8000: | 1980 | case f8000: |
2029 | err = f71882fg_create_sysfs_files(pdev, | 1981 | err = f71882fg_create_sysfs_files(pdev, |
@@ -2050,7 +2002,6 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) | |||
2050 | case f71862fg: | 2002 | case f71862fg: |
2051 | err = (data->pwm_enable & 0x15) != 0x15; | 2003 | err = (data->pwm_enable & 0x15) != 0x15; |
2052 | break; | 2004 | break; |
2053 | case f71808fg: | ||
2054 | case f71882fg: | 2005 | case f71882fg: |
2055 | case f71889fg: | 2006 | case f71889fg: |
2056 | err = 0; | 2007 | err = 0; |
@@ -2096,7 +2047,6 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) | |||
2096 | f8000_auto_pwm_attr, | 2047 | f8000_auto_pwm_attr, |
2097 | ARRAY_SIZE(f8000_auto_pwm_attr)); | 2048 | ARRAY_SIZE(f8000_auto_pwm_attr)); |
2098 | break; | 2049 | break; |
2099 | case f71808fg: | ||
2100 | case f71889fg: | 2050 | case f71889fg: |
2101 | for (i = 0; i < nr_fans; i++) { | 2051 | for (i = 0; i < nr_fans; i++) { |
2102 | data->pwm_auto_point_mapping[i] = | 2052 | data->pwm_auto_point_mapping[i] = |
@@ -2176,22 +2126,8 @@ static int f71882fg_remove(struct platform_device *pdev) | |||
2176 | /* fall through! */ | 2126 | /* fall through! */ |
2177 | case f71862fg: | 2127 | case f71862fg: |
2178 | f71882fg_remove_sysfs_files(pdev, | 2128 | f71882fg_remove_sysfs_files(pdev, |
2179 | f71862_temp_attr, | 2129 | fxxxx_in_temp_attr, |
2180 | ARRAY_SIZE(f71862_temp_attr)); | 2130 | ARRAY_SIZE(fxxxx_in_temp_attr)); |
2181 | f71882fg_remove_sysfs_files(pdev, | ||
2182 | fxxxx_in_attr, | ||
2183 | ARRAY_SIZE(fxxxx_in_attr)); | ||
2184 | f71882fg_remove_sysfs_files(pdev, | ||
2185 | fxxxx_temp_attr, | ||
2186 | ARRAY_SIZE(fxxxx_temp_attr)); | ||
2187 | break; | ||
2188 | case f71808fg: | ||
2189 | f71882fg_remove_sysfs_files(pdev, | ||
2190 | f71808_in_attr, | ||
2191 | ARRAY_SIZE(f71808_in_attr)); | ||
2192 | f71882fg_remove_sysfs_files(pdev, | ||
2193 | fxxxx_temp_attr, | ||
2194 | ARRAY_SIZE(fxxxx_temp_attr)); | ||
2195 | break; | 2131 | break; |
2196 | case f8000: | 2132 | case f8000: |
2197 | f71882fg_remove_sysfs_files(pdev, | 2133 | f71882fg_remove_sysfs_files(pdev, |
@@ -2259,9 +2195,6 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address, | |||
2259 | 2195 | ||
2260 | devid = force_id ? force_id : superio_inw(sioaddr, SIO_REG_DEVID); | 2196 | devid = force_id ? force_id : superio_inw(sioaddr, SIO_REG_DEVID); |
2261 | switch (devid) { | 2197 | switch (devid) { |
2262 | case SIO_F71808_ID: | ||
2263 | sio_data->type = f71808fg; | ||
2264 | break; | ||
2265 | case SIO_F71858_ID: | 2198 | case SIO_F71858_ID: |
2266 | sio_data->type = f71858fg; | 2199 | sio_data->type = f71858fg; |
2267 | break; | 2200 | break; |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 11567c7999a2..c148b6302154 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -2136,16 +2136,6 @@ static void sync_sbs(mddev_t * mddev, int nospares) | |||
2136 | * with the rest of the array) | 2136 | * with the rest of the array) |
2137 | */ | 2137 | */ |
2138 | mdk_rdev_t *rdev; | 2138 | mdk_rdev_t *rdev; |
2139 | |||
2140 | /* First make sure individual recovery_offsets are correct */ | ||
2141 | list_for_each_entry(rdev, &mddev->disks, same_set) { | ||
2142 | if (rdev->raid_disk >= 0 && | ||
2143 | mddev->delta_disks >= 0 && | ||
2144 | !test_bit(In_sync, &rdev->flags) && | ||
2145 | mddev->curr_resync_completed > rdev->recovery_offset) | ||
2146 | rdev->recovery_offset = mddev->curr_resync_completed; | ||
2147 | |||
2148 | } | ||
2149 | list_for_each_entry(rdev, &mddev->disks, same_set) { | 2139 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
2150 | if (rdev->sb_events == mddev->events || | 2140 | if (rdev->sb_events == mddev->events || |
2151 | (nospares && | 2141 | (nospares && |
@@ -2167,12 +2157,27 @@ static void md_update_sb(mddev_t * mddev, int force_change) | |||
2167 | int sync_req; | 2157 | int sync_req; |
2168 | int nospares = 0; | 2158 | int nospares = 0; |
2169 | 2159 | ||
2170 | mddev->utime = get_seconds(); | ||
2171 | if (mddev->external) | ||
2172 | return; | ||
2173 | repeat: | 2160 | repeat: |
2161 | /* First make sure individual recovery_offsets are correct */ | ||
2162 | list_for_each_entry(rdev, &mddev->disks, same_set) { | ||
2163 | if (rdev->raid_disk >= 0 && | ||
2164 | mddev->delta_disks >= 0 && | ||
2165 | !test_bit(In_sync, &rdev->flags) && | ||
2166 | mddev->curr_resync_completed > rdev->recovery_offset) | ||
2167 | rdev->recovery_offset = mddev->curr_resync_completed; | ||
2168 | |||
2169 | } | ||
2170 | if (mddev->external || !mddev->persistent) { | ||
2171 | clear_bit(MD_CHANGE_DEVS, &mddev->flags); | ||
2172 | clear_bit(MD_CHANGE_CLEAN, &mddev->flags); | ||
2173 | wake_up(&mddev->sb_wait); | ||
2174 | return; | ||
2175 | } | ||
2176 | |||
2174 | spin_lock_irq(&mddev->write_lock); | 2177 | spin_lock_irq(&mddev->write_lock); |
2175 | 2178 | ||
2179 | mddev->utime = get_seconds(); | ||
2180 | |||
2176 | set_bit(MD_CHANGE_PENDING, &mddev->flags); | 2181 | set_bit(MD_CHANGE_PENDING, &mddev->flags); |
2177 | if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) | 2182 | if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) |
2178 | force_change = 1; | 2183 | force_change = 1; |
@@ -2221,19 +2226,6 @@ repeat: | |||
2221 | MD_BUG(); | 2226 | MD_BUG(); |
2222 | mddev->events --; | 2227 | mddev->events --; |
2223 | } | 2228 | } |
2224 | |||
2225 | /* | ||
2226 | * do not write anything to disk if using | ||
2227 | * nonpersistent superblocks | ||
2228 | */ | ||
2229 | if (!mddev->persistent) { | ||
2230 | if (!mddev->external) | ||
2231 | clear_bit(MD_CHANGE_PENDING, &mddev->flags); | ||
2232 | |||
2233 | spin_unlock_irq(&mddev->write_lock); | ||
2234 | wake_up(&mddev->sb_wait); | ||
2235 | return; | ||
2236 | } | ||
2237 | sync_sbs(mddev, nospares); | 2229 | sync_sbs(mddev, nospares); |
2238 | spin_unlock_irq(&mddev->write_lock); | 2230 | spin_unlock_irq(&mddev->write_lock); |
2239 | 2231 | ||
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 73cc74ffc26b..ad83a4dcadc3 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -787,8 +787,8 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
787 | struct bio_list bl; | 787 | struct bio_list bl; |
788 | struct page **behind_pages = NULL; | 788 | struct page **behind_pages = NULL; |
789 | const int rw = bio_data_dir(bio); | 789 | const int rw = bio_data_dir(bio); |
790 | const bool do_sync = (bio->bi_rw & REQ_SYNC); | 790 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); |
791 | bool do_barriers; | 791 | unsigned long do_barriers; |
792 | mdk_rdev_t *blocked_rdev; | 792 | mdk_rdev_t *blocked_rdev; |
793 | 793 | ||
794 | /* | 794 | /* |
@@ -1120,6 +1120,8 @@ static int raid1_spare_active(mddev_t *mddev) | |||
1120 | { | 1120 | { |
1121 | int i; | 1121 | int i; |
1122 | conf_t *conf = mddev->private; | 1122 | conf_t *conf = mddev->private; |
1123 | int count = 0; | ||
1124 | unsigned long flags; | ||
1123 | 1125 | ||
1124 | /* | 1126 | /* |
1125 | * Find all failed disks within the RAID1 configuration | 1127 | * Find all failed disks within the RAID1 configuration |
@@ -1131,15 +1133,16 @@ static int raid1_spare_active(mddev_t *mddev) | |||
1131 | if (rdev | 1133 | if (rdev |
1132 | && !test_bit(Faulty, &rdev->flags) | 1134 | && !test_bit(Faulty, &rdev->flags) |
1133 | && !test_and_set_bit(In_sync, &rdev->flags)) { | 1135 | && !test_and_set_bit(In_sync, &rdev->flags)) { |
1134 | unsigned long flags; | 1136 | count++; |
1135 | spin_lock_irqsave(&conf->device_lock, flags); | 1137 | sysfs_notify_dirent(rdev->sysfs_state); |
1136 | mddev->degraded--; | ||
1137 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1138 | } | 1138 | } |
1139 | } | 1139 | } |
1140 | spin_lock_irqsave(&conf->device_lock, flags); | ||
1141 | mddev->degraded -= count; | ||
1142 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1140 | 1143 | ||
1141 | print_conf(conf); | 1144 | print_conf(conf); |
1142 | return 0; | 1145 | return count; |
1143 | } | 1146 | } |
1144 | 1147 | ||
1145 | 1148 | ||
@@ -1640,7 +1643,7 @@ static void raid1d(mddev_t *mddev) | |||
1640 | * We already have a nr_pending reference on these rdevs. | 1643 | * We already have a nr_pending reference on these rdevs. |
1641 | */ | 1644 | */ |
1642 | int i; | 1645 | int i; |
1643 | const bool do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC); | 1646 | const unsigned long do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC); |
1644 | clear_bit(R1BIO_BarrierRetry, &r1_bio->state); | 1647 | clear_bit(R1BIO_BarrierRetry, &r1_bio->state); |
1645 | clear_bit(R1BIO_Barrier, &r1_bio->state); | 1648 | clear_bit(R1BIO_Barrier, &r1_bio->state); |
1646 | for (i=0; i < conf->raid_disks; i++) | 1649 | for (i=0; i < conf->raid_disks; i++) |
@@ -1696,7 +1699,7 @@ static void raid1d(mddev_t *mddev) | |||
1696 | (unsigned long long)r1_bio->sector); | 1699 | (unsigned long long)r1_bio->sector); |
1697 | raid_end_bio_io(r1_bio); | 1700 | raid_end_bio_io(r1_bio); |
1698 | } else { | 1701 | } else { |
1699 | const bool do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC; | 1702 | const unsigned long do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC; |
1700 | r1_bio->bios[r1_bio->read_disk] = | 1703 | r1_bio->bios[r1_bio->read_disk] = |
1701 | mddev->ro ? IO_BLOCKED : NULL; | 1704 | mddev->ro ? IO_BLOCKED : NULL; |
1702 | r1_bio->read_disk = disk; | 1705 | r1_bio->read_disk = disk; |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index a88aeb5198c7..84718383124d 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -799,7 +799,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
799 | int i; | 799 | int i; |
800 | int chunk_sects = conf->chunk_mask + 1; | 800 | int chunk_sects = conf->chunk_mask + 1; |
801 | const int rw = bio_data_dir(bio); | 801 | const int rw = bio_data_dir(bio); |
802 | const bool do_sync = (bio->bi_rw & REQ_SYNC); | 802 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); |
803 | struct bio_list bl; | 803 | struct bio_list bl; |
804 | unsigned long flags; | 804 | unsigned long flags; |
805 | mdk_rdev_t *blocked_rdev; | 805 | mdk_rdev_t *blocked_rdev; |
@@ -1116,6 +1116,8 @@ static int raid10_spare_active(mddev_t *mddev) | |||
1116 | int i; | 1116 | int i; |
1117 | conf_t *conf = mddev->private; | 1117 | conf_t *conf = mddev->private; |
1118 | mirror_info_t *tmp; | 1118 | mirror_info_t *tmp; |
1119 | int count = 0; | ||
1120 | unsigned long flags; | ||
1119 | 1121 | ||
1120 | /* | 1122 | /* |
1121 | * Find all non-in_sync disks within the RAID10 configuration | 1123 | * Find all non-in_sync disks within the RAID10 configuration |
@@ -1126,15 +1128,16 @@ static int raid10_spare_active(mddev_t *mddev) | |||
1126 | if (tmp->rdev | 1128 | if (tmp->rdev |
1127 | && !test_bit(Faulty, &tmp->rdev->flags) | 1129 | && !test_bit(Faulty, &tmp->rdev->flags) |
1128 | && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { | 1130 | && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { |
1129 | unsigned long flags; | 1131 | count++; |
1130 | spin_lock_irqsave(&conf->device_lock, flags); | 1132 | sysfs_notify_dirent(tmp->rdev->sysfs_state); |
1131 | mddev->degraded--; | ||
1132 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1133 | } | 1133 | } |
1134 | } | 1134 | } |
1135 | spin_lock_irqsave(&conf->device_lock, flags); | ||
1136 | mddev->degraded -= count; | ||
1137 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1135 | 1138 | ||
1136 | print_conf(conf); | 1139 | print_conf(conf); |
1137 | return 0; | 1140 | return count; |
1138 | } | 1141 | } |
1139 | 1142 | ||
1140 | 1143 | ||
@@ -1734,7 +1737,7 @@ static void raid10d(mddev_t *mddev) | |||
1734 | raid_end_bio_io(r10_bio); | 1737 | raid_end_bio_io(r10_bio); |
1735 | bio_put(bio); | 1738 | bio_put(bio); |
1736 | } else { | 1739 | } else { |
1737 | const bool do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC); | 1740 | const unsigned long do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC); |
1738 | bio_put(bio); | 1741 | bio_put(bio); |
1739 | rdev = conf->mirrors[mirror].rdev; | 1742 | rdev = conf->mirrors[mirror].rdev; |
1740 | if (printk_ratelimit()) | 1743 | if (printk_ratelimit()) |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 866d4b5a144c..69b0a169e43d 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -5330,6 +5330,8 @@ static int raid5_spare_active(mddev_t *mddev) | |||
5330 | int i; | 5330 | int i; |
5331 | raid5_conf_t *conf = mddev->private; | 5331 | raid5_conf_t *conf = mddev->private; |
5332 | struct disk_info *tmp; | 5332 | struct disk_info *tmp; |
5333 | int count = 0; | ||
5334 | unsigned long flags; | ||
5333 | 5335 | ||
5334 | for (i = 0; i < conf->raid_disks; i++) { | 5336 | for (i = 0; i < conf->raid_disks; i++) { |
5335 | tmp = conf->disks + i; | 5337 | tmp = conf->disks + i; |
@@ -5337,14 +5339,15 @@ static int raid5_spare_active(mddev_t *mddev) | |||
5337 | && tmp->rdev->recovery_offset == MaxSector | 5339 | && tmp->rdev->recovery_offset == MaxSector |
5338 | && !test_bit(Faulty, &tmp->rdev->flags) | 5340 | && !test_bit(Faulty, &tmp->rdev->flags) |
5339 | && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { | 5341 | && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { |
5340 | unsigned long flags; | 5342 | count++; |
5341 | spin_lock_irqsave(&conf->device_lock, flags); | 5343 | sysfs_notify_dirent(tmp->rdev->sysfs_state); |
5342 | mddev->degraded--; | ||
5343 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
5344 | } | 5344 | } |
5345 | } | 5345 | } |
5346 | spin_lock_irqsave(&conf->device_lock, flags); | ||
5347 | mddev->degraded -= count; | ||
5348 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
5346 | print_raid5_conf(conf); | 5349 | print_raid5_conf(conf); |
5347 | return 0; | 5350 | return count; |
5348 | } | 5351 | } |
5349 | 5352 | ||
5350 | static int raid5_remove_disk(mddev_t *mddev, int number) | 5353 | static int raid5_remove_disk(mddev_t *mddev, int number) |
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 0efe631e50ca..d80cfdc8edd2 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c | |||
@@ -86,7 +86,9 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) | |||
86 | init_waitqueue_head(&host->wq); | 86 | init_waitqueue_head(&host->wq); |
87 | INIT_DELAYED_WORK(&host->detect, mmc_rescan); | 87 | INIT_DELAYED_WORK(&host->detect, mmc_rescan); |
88 | INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable); | 88 | INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable); |
89 | #ifdef CONFIG_PM | ||
89 | host->pm_notify.notifier_call = mmc_pm_notify; | 90 | host->pm_notify.notifier_call = mmc_pm_notify; |
91 | #endif | ||
90 | 92 | ||
91 | /* | 93 | /* |
92 | * By default, hosts do not support SGIO or large requests. | 94 | * By default, hosts do not support SGIO or large requests. |
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 283190bc2a40..68d12794cfd9 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig | |||
@@ -132,7 +132,7 @@ config MMC_SDHCI_CNS3XXX | |||
132 | 132 | ||
133 | config MMC_SDHCI_S3C | 133 | config MMC_SDHCI_S3C |
134 | tristate "SDHCI support on Samsung S3C SoC" | 134 | tristate "SDHCI support on Samsung S3C SoC" |
135 | depends on MMC_SDHCI && (PLAT_S3C24XX || PLAT_S3C64XX) | 135 | depends on MMC_SDHCI && PLAT_SAMSUNG |
136 | help | 136 | help |
137 | This selects the Secure Digital Host Controller Interface (SDHCI) | 137 | This selects the Secure Digital Host Controller Interface (SDHCI) |
138 | often referrered to as the HSMMC block in some of the Samsung S3C | 138 | often referrered to as the HSMMC block in some of the Samsung S3C |
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 0a7f2614c6f0..71ad4163b95e 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c | |||
@@ -242,7 +242,7 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state) | |||
242 | { | 242 | { |
243 | struct sdhci_host *host = platform_get_drvdata(dev); | 243 | struct sdhci_host *host = platform_get_drvdata(dev); |
244 | if (host) { | 244 | if (host) { |
245 | mutex_lock(&host->lock); | 245 | spin_lock(&host->lock); |
246 | if (state) { | 246 | if (state) { |
247 | dev_dbg(&dev->dev, "card inserted.\n"); | 247 | dev_dbg(&dev->dev, "card inserted.\n"); |
248 | host->flags &= ~SDHCI_DEVICE_DEAD; | 248 | host->flags &= ~SDHCI_DEVICE_DEAD; |
@@ -252,8 +252,8 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state) | |||
252 | host->flags |= SDHCI_DEVICE_DEAD; | 252 | host->flags |= SDHCI_DEVICE_DEAD; |
253 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; | 253 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; |
254 | } | 254 | } |
255 | sdhci_card_detect(host); | 255 | tasklet_schedule(&host->card_tasklet); |
256 | mutex_unlock(&host->lock); | 256 | spin_unlock(&host->lock); |
257 | } | 257 | } |
258 | } | 258 | } |
259 | 259 | ||
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 785512133b50..401527d273b5 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
@@ -1180,7 +1180,8 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
1180 | else | 1180 | else |
1181 | ctrl &= ~SDHCI_CTRL_4BITBUS; | 1181 | ctrl &= ~SDHCI_CTRL_4BITBUS; |
1182 | 1182 | ||
1183 | if (ios->timing == MMC_TIMING_SD_HS) | 1183 | if (ios->timing == MMC_TIMING_SD_HS && |
1184 | !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) | ||
1184 | ctrl |= SDHCI_CTRL_HISPD; | 1185 | ctrl |= SDHCI_CTRL_HISPD; |
1185 | else | 1186 | else |
1186 | ctrl &= ~SDHCI_CTRL_HISPD; | 1187 | ctrl &= ~SDHCI_CTRL_HISPD; |
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 036cfae76368..d316bc79b636 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h | |||
@@ -245,6 +245,8 @@ struct sdhci_host { | |||
245 | #define SDHCI_QUIRK_MISSING_CAPS (1<<27) | 245 | #define SDHCI_QUIRK_MISSING_CAPS (1<<27) |
246 | /* Controller uses Auto CMD12 command to stop the transfer */ | 246 | /* Controller uses Auto CMD12 command to stop the transfer */ |
247 | #define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28) | 247 | #define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28) |
248 | /* Controller doesn't have HISPD bit field in HI-SPEED SD card */ | ||
249 | #define SDHCI_QUIRK_NO_HISPD_BIT (1<<29) | ||
248 | 250 | ||
249 | int irq; /* Device IRQ */ | 251 | int irq; /* Device IRQ */ |
250 | void __iomem * ioaddr; /* Mapped address */ | 252 | void __iomem * ioaddr; /* Mapped address */ |
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index 00af55d7afba..fe63f6bd663c 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/mtd/partitions.h> | 22 | #include <linux/mtd/partitions.h> |
23 | #include <linux/mtd/concat.h> | 23 | #include <linux/mtd/concat.h> |
24 | #include <linux/of.h> | 24 | #include <linux/of.h> |
25 | #include <linux/of_address.h> | ||
25 | #include <linux/of_platform.h> | 26 | #include <linux/of_platform.h> |
26 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
27 | 28 | ||
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index a3c7473dd409..d551ddd9537a 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
@@ -2866,6 +2866,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | |||
2866 | */ | 2866 | */ |
2867 | if (id_data[0] == id_data[6] && id_data[1] == id_data[7] && | 2867 | if (id_data[0] == id_data[6] && id_data[1] == id_data[7] && |
2868 | id_data[0] == NAND_MFR_SAMSUNG && | 2868 | id_data[0] == NAND_MFR_SAMSUNG && |
2869 | (chip->cellinfo & NAND_CI_CELLTYPE_MSK) && | ||
2869 | id_data[5] != 0x00) { | 2870 | id_data[5] != 0x00) { |
2870 | /* Calc pagesize */ | 2871 | /* Calc pagesize */ |
2871 | mtd->writesize = 2048 << (extid & 0x03); | 2872 | mtd->writesize = 2048 << (extid & 0x03); |
@@ -2934,14 +2935,10 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | |||
2934 | chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1; | 2935 | chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1; |
2935 | 2936 | ||
2936 | /* Set the bad block position */ | 2937 | /* Set the bad block position */ |
2937 | if (!(busw & NAND_BUSWIDTH_16) && (*maf_id == NAND_MFR_STMICRO || | 2938 | if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16)) |
2938 | (*maf_id == NAND_MFR_SAMSUNG && | ||
2939 | mtd->writesize == 512) || | ||
2940 | *maf_id == NAND_MFR_AMD)) | ||
2941 | chip->badblockpos = NAND_SMALL_BADBLOCK_POS; | ||
2942 | else | ||
2943 | chip->badblockpos = NAND_LARGE_BADBLOCK_POS; | 2939 | chip->badblockpos = NAND_LARGE_BADBLOCK_POS; |
2944 | 2940 | else | |
2941 | chip->badblockpos = NAND_SMALL_BADBLOCK_POS; | ||
2945 | 2942 | ||
2946 | /* Get chip options, preserve non chip based options */ | 2943 | /* Get chip options, preserve non chip based options */ |
2947 | chip->options &= ~NAND_CHIPOPTIONS_MSK; | 2944 | chip->options &= ~NAND_CHIPOPTIONS_MSK; |
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index e02fa4f0e3c9..4d89f3780207 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c | |||
@@ -363,7 +363,7 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = { | |||
363 | #define tAR_NDTR1(r) (((r) >> 0) & 0xf) | 363 | #define tAR_NDTR1(r) (((r) >> 0) & 0xf) |
364 | 364 | ||
365 | /* convert nano-seconds to nand flash controller clock cycles */ | 365 | /* convert nano-seconds to nand flash controller clock cycles */ |
366 | #define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) - 1) | 366 | #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000) |
367 | 367 | ||
368 | /* convert nand flash controller clock cycles to nano-seconds */ | 368 | /* convert nand flash controller clock cycles to nano-seconds */ |
369 | #define cycle2ns(c, clk) ((((c) + 1) * 1000000 + clk / 500) / (clk / 1000)) | 369 | #define cycle2ns(c, clk) ((((c) + 1) * 1000000 + clk / 500) / (clk / 1000)) |
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 08e7b6abacdd..8ed30fa35d0a 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c | |||
@@ -58,6 +58,7 @@ | |||
58 | #define USB_PRODUCT_IPHONE 0x1290 | 58 | #define USB_PRODUCT_IPHONE 0x1290 |
59 | #define USB_PRODUCT_IPHONE_3G 0x1292 | 59 | #define USB_PRODUCT_IPHONE_3G 0x1292 |
60 | #define USB_PRODUCT_IPHONE_3GS 0x1294 | 60 | #define USB_PRODUCT_IPHONE_3GS 0x1294 |
61 | #define USB_PRODUCT_IPHONE_4 0x1297 | ||
61 | 62 | ||
62 | #define IPHETH_USBINTF_CLASS 255 | 63 | #define IPHETH_USBINTF_CLASS 255 |
63 | #define IPHETH_USBINTF_SUBCLASS 253 | 64 | #define IPHETH_USBINTF_SUBCLASS 253 |
@@ -92,6 +93,10 @@ static struct usb_device_id ipheth_table[] = { | |||
92 | USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS, | 93 | USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS, |
93 | IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, | 94 | IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, |
94 | IPHETH_USBINTF_PROTO) }, | 95 | IPHETH_USBINTF_PROTO) }, |
96 | { USB_DEVICE_AND_INTERFACE_INFO( | ||
97 | USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4, | ||
98 | IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, | ||
99 | IPHETH_USBINTF_PROTO) }, | ||
95 | { } | 100 | { } |
96 | }; | 101 | }; |
97 | MODULE_DEVICE_TABLE(usb, ipheth_table); | 102 | MODULE_DEVICE_TABLE(usb, ipheth_table); |
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 0d5de2574dd1..373dcfec689c 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <linux/netdevice.h> | 48 | #include <linux/netdevice.h> |
49 | #include <linux/cache.h> | 49 | #include <linux/cache.h> |
50 | #include <linux/pci.h> | 50 | #include <linux/pci.h> |
51 | #include <linux/pci-aspm.h> | ||
51 | #include <linux/ethtool.h> | 52 | #include <linux/ethtool.h> |
52 | #include <linux/uaccess.h> | 53 | #include <linux/uaccess.h> |
53 | #include <linux/slab.h> | 54 | #include <linux/slab.h> |
@@ -476,6 +477,26 @@ ath5k_pci_probe(struct pci_dev *pdev, | |||
476 | int ret; | 477 | int ret; |
477 | u8 csz; | 478 | u8 csz; |
478 | 479 | ||
480 | /* | ||
481 | * L0s needs to be disabled on all ath5k cards. | ||
482 | * | ||
483 | * For distributions shipping with CONFIG_PCIEASPM (this will be enabled | ||
484 | * by default in the future in 2.6.36) this will also mean both L1 and | ||
485 | * L0s will be disabled when a pre 1.1 PCIe device is detected. We do | ||
486 | * know L1 works correctly even for all ath5k pre 1.1 PCIe devices | ||
487 | * though but cannot currently undue the effect of a blacklist, for | ||
488 | * details you can read pcie_aspm_sanity_check() and see how it adjusts | ||
489 | * the device link capability. | ||
490 | * | ||
491 | * It may be possible in the future to implement some PCI API to allow | ||
492 | * drivers to override blacklists for pre 1.1 PCIe but for now it is | ||
493 | * best to accept that both L0s and L1 will be disabled completely for | ||
494 | * distributions shipping with CONFIG_PCIEASPM rather than having this | ||
495 | * issue present. Motivation for adding this new API will be to help | ||
496 | * with power consumption for some of these devices. | ||
497 | */ | ||
498 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S); | ||
499 | |||
479 | ret = pci_enable_device(pdev); | 500 | ret = pci_enable_device(pdev); |
480 | if (ret) { | 501 | if (ret) { |
481 | dev_err(&pdev->dev, "can't enable device\n"); | 502 | dev_err(&pdev->dev, "can't enable device\n"); |
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h index 8750c558c221..7f48df1e2903 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.h +++ b/drivers/net/wireless/ath/ath9k/eeprom.h | |||
@@ -191,6 +191,7 @@ | |||
191 | #define AR9287_EEP_NO_BACK_VER AR9287_EEP_MINOR_VER_1 | 191 | #define AR9287_EEP_NO_BACK_VER AR9287_EEP_MINOR_VER_1 |
192 | 192 | ||
193 | #define AR9287_EEP_START_LOC 128 | 193 | #define AR9287_EEP_START_LOC 128 |
194 | #define AR9287_HTC_EEP_START_LOC 256 | ||
194 | #define AR9287_NUM_2G_CAL_PIERS 3 | 195 | #define AR9287_NUM_2G_CAL_PIERS 3 |
195 | #define AR9287_NUM_2G_CCK_TARGET_POWERS 3 | 196 | #define AR9287_NUM_2G_CCK_TARGET_POWERS 3 |
196 | #define AR9287_NUM_2G_20_TARGET_POWERS 3 | 197 | #define AR9287_NUM_2G_20_TARGET_POWERS 3 |
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index 4a52cf03808b..dff2da777312 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c | |||
@@ -34,9 +34,14 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) | |||
34 | struct ar9287_eeprom *eep = &ah->eeprom.map9287; | 34 | struct ar9287_eeprom *eep = &ah->eeprom.map9287; |
35 | struct ath_common *common = ath9k_hw_common(ah); | 35 | struct ath_common *common = ath9k_hw_common(ah); |
36 | u16 *eep_data; | 36 | u16 *eep_data; |
37 | int addr, eep_start_loc = AR9287_EEP_START_LOC; | 37 | int addr, eep_start_loc; |
38 | eep_data = (u16 *)eep; | 38 | eep_data = (u16 *)eep; |
39 | 39 | ||
40 | if (ah->hw_version.devid == 0x7015) | ||
41 | eep_start_loc = AR9287_HTC_EEP_START_LOC; | ||
42 | else | ||
43 | eep_start_loc = AR9287_EEP_START_LOC; | ||
44 | |||
40 | if (!ath9k_hw_use_flash(ah)) { | 45 | if (!ath9k_hw_use_flash(ah)) { |
41 | ath_print(common, ATH_DBG_EEPROM, | 46 | ath_print(common, ATH_DBG_EEPROM, |
42 | "Reading from EEPROM, not flash\n"); | 47 | "Reading from EEPROM, not flash\n"); |
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 61c1bee3f26a..17e7a9a367e7 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c | |||
@@ -799,7 +799,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev) | |||
799 | } | 799 | } |
800 | kfree(buf); | 800 | kfree(buf); |
801 | 801 | ||
802 | if (hif_dev->device_id == 0x7010) | 802 | if ((hif_dev->device_id == 0x7010) || (hif_dev->device_id == 0x7015)) |
803 | firm_offset = AR7010_FIRMWARE_TEXT; | 803 | firm_offset = AR7010_FIRMWARE_TEXT; |
804 | else | 804 | else |
805 | firm_offset = AR9271_FIRMWARE_TEXT; | 805 | firm_offset = AR9271_FIRMWARE_TEXT; |
@@ -901,6 +901,7 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface, | |||
901 | 901 | ||
902 | switch(hif_dev->device_id) { | 902 | switch(hif_dev->device_id) { |
903 | case 0x7010: | 903 | case 0x7010: |
904 | case 0x7015: | ||
904 | case 0x9018: | 905 | case 0x9018: |
905 | if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202) | 906 | if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202) |
906 | hif_dev->fw_name = FIRMWARE_AR7010_1_1; | 907 | hif_dev->fw_name = FIRMWARE_AR7010_1_1; |
@@ -912,11 +913,6 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface, | |||
912 | break; | 913 | break; |
913 | } | 914 | } |
914 | 915 | ||
915 | if (!hif_dev->fw_name) { | ||
916 | dev_err(&udev->dev, "Can't determine firmware !\n"); | ||
917 | goto err_htc_hw_alloc; | ||
918 | } | ||
919 | |||
920 | ret = ath9k_hif_usb_dev_init(hif_dev); | 916 | ret = ath9k_hif_usb_dev_init(hif_dev); |
921 | if (ret) { | 917 | if (ret) { |
922 | ret = -EINVAL; | 918 | ret = -EINVAL; |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 148b43317fdb..2d4279191d7a 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c | |||
@@ -245,6 +245,7 @@ static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid) | |||
245 | 245 | ||
246 | switch(devid) { | 246 | switch(devid) { |
247 | case 0x7010: | 247 | case 0x7010: |
248 | case 0x7015: | ||
248 | case 0x9018: | 249 | case 0x9018: |
249 | priv->htc->credits = 45; | 250 | priv->htc->credits = 45; |
250 | break; | 251 | break; |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index ebed9d1691a5..7d09b4b17bbd 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c | |||
@@ -366,7 +366,8 @@ static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv, | |||
366 | caps = WLAN_RC_HT_FLAG; | 366 | caps = WLAN_RC_HT_FLAG; |
367 | if (sta->ht_cap.mcs.rx_mask[1]) | 367 | if (sta->ht_cap.mcs.rx_mask[1]) |
368 | caps |= WLAN_RC_DS_FLAG; | 368 | caps |= WLAN_RC_DS_FLAG; |
369 | if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) | 369 | if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) && |
370 | (conf_is_ht40(&priv->hw->conf))) | ||
370 | caps |= WLAN_RC_40_FLAG; | 371 | caps |= WLAN_RC_40_FLAG; |
371 | if (conf_is_ht40(&priv->hw->conf) && | 372 | if (conf_is_ht40(&priv->hw->conf) && |
372 | (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) | 373 | (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index bd0b4acc3ece..2a6e45a293a9 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | |||
@@ -78,18 +78,23 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) | |||
78 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); | 78 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); |
79 | struct ieee80211_sta *sta = tx_info->control.sta; | 79 | struct ieee80211_sta *sta = tx_info->control.sta; |
80 | struct ath9k_htc_sta *ista; | 80 | struct ath9k_htc_sta *ista; |
81 | struct ath9k_htc_vif *avp; | ||
82 | struct ath9k_htc_tx_ctl tx_ctl; | 81 | struct ath9k_htc_tx_ctl tx_ctl; |
83 | enum htc_endpoint_id epid; | 82 | enum htc_endpoint_id epid; |
84 | u16 qnum; | 83 | u16 qnum; |
85 | __le16 fc; | 84 | __le16 fc; |
86 | u8 *tx_fhdr; | 85 | u8 *tx_fhdr; |
87 | u8 sta_idx; | 86 | u8 sta_idx, vif_idx; |
88 | 87 | ||
89 | hdr = (struct ieee80211_hdr *) skb->data; | 88 | hdr = (struct ieee80211_hdr *) skb->data; |
90 | fc = hdr->frame_control; | 89 | fc = hdr->frame_control; |
91 | 90 | ||
92 | avp = (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv; | 91 | if (tx_info->control.vif && |
92 | (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv) | ||
93 | vif_idx = ((struct ath9k_htc_vif *) | ||
94 | tx_info->control.vif->drv_priv)->index; | ||
95 | else | ||
96 | vif_idx = priv->nvifs; | ||
97 | |||
93 | if (sta) { | 98 | if (sta) { |
94 | ista = (struct ath9k_htc_sta *) sta->drv_priv; | 99 | ista = (struct ath9k_htc_sta *) sta->drv_priv; |
95 | sta_idx = ista->index; | 100 | sta_idx = ista->index; |
@@ -106,7 +111,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) | |||
106 | memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr)); | 111 | memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr)); |
107 | 112 | ||
108 | tx_hdr.node_idx = sta_idx; | 113 | tx_hdr.node_idx = sta_idx; |
109 | tx_hdr.vif_idx = avp->index; | 114 | tx_hdr.vif_idx = vif_idx; |
110 | 115 | ||
111 | if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { | 116 | if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { |
112 | tx_ctl.type = ATH9K_HTC_AMPDU; | 117 | tx_ctl.type = ATH9K_HTC_AMPDU; |
@@ -169,7 +174,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) | |||
169 | tx_ctl.type = ATH9K_HTC_NORMAL; | 174 | tx_ctl.type = ATH9K_HTC_NORMAL; |
170 | 175 | ||
171 | mgmt_hdr.node_idx = sta_idx; | 176 | mgmt_hdr.node_idx = sta_idx; |
172 | mgmt_hdr.vif_idx = avp->index; | 177 | mgmt_hdr.vif_idx = vif_idx; |
173 | mgmt_hdr.tidno = 0; | 178 | mgmt_hdr.tidno = 0; |
174 | mgmt_hdr.flags = 0; | 179 | mgmt_hdr.flags = 0; |
175 | 180 | ||
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index 633e3d949ec0..d01c4adab8d6 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h | |||
@@ -899,6 +899,7 @@ | |||
899 | 899 | ||
900 | #define AR_DEVID_7010(_ah) \ | 900 | #define AR_DEVID_7010(_ah) \ |
901 | (((_ah)->hw_version.devid == 0x7010) || \ | 901 | (((_ah)->hw_version.devid == 0x7010) || \ |
902 | ((_ah)->hw_version.devid == 0x7015) || \ | ||
902 | ((_ah)->hw_version.devid == 0x9018)) | 903 | ((_ah)->hw_version.devid == 0x9018)) |
903 | 904 | ||
904 | #define AR_RADIO_SREV_MAJOR 0xf0 | 905 | #define AR_RADIO_SREV_MAJOR 0xf0 |
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 16bbfa3189a5..1189dbb6e2a6 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c | |||
@@ -6665,12 +6665,13 @@ static int __init ipw2100_init(void) | |||
6665 | printk(KERN_INFO DRV_NAME ": %s, %s\n", DRV_DESCRIPTION, DRV_VERSION); | 6665 | printk(KERN_INFO DRV_NAME ": %s, %s\n", DRV_DESCRIPTION, DRV_VERSION); |
6666 | printk(KERN_INFO DRV_NAME ": %s\n", DRV_COPYRIGHT); | 6666 | printk(KERN_INFO DRV_NAME ": %s\n", DRV_COPYRIGHT); |
6667 | 6667 | ||
6668 | pm_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_CPU_DMA_LATENCY, | ||
6669 | PM_QOS_DEFAULT_VALUE); | ||
6670 | |||
6668 | ret = pci_register_driver(&ipw2100_pci_driver); | 6671 | ret = pci_register_driver(&ipw2100_pci_driver); |
6669 | if (ret) | 6672 | if (ret) |
6670 | goto out; | 6673 | goto out; |
6671 | 6674 | ||
6672 | pm_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_CPU_DMA_LATENCY, | ||
6673 | PM_QOS_DEFAULT_VALUE); | ||
6674 | #ifdef CONFIG_IPW2100_DEBUG | 6675 | #ifdef CONFIG_IPW2100_DEBUG |
6675 | ipw2100_debug_level = debug; | 6676 | ipw2100_debug_level = debug; |
6676 | ret = driver_create_file(&ipw2100_pci_driver.driver, | 6677 | ret = driver_create_file(&ipw2100_pci_driver.driver, |
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.c b/drivers/net/wireless/wl12xx/wl1251_cmd.c index a37b30cef489..ce3722f4c3e3 100644 --- a/drivers/net/wireless/wl12xx/wl1251_cmd.c +++ b/drivers/net/wireless/wl12xx/wl1251_cmd.c | |||
@@ -484,7 +484,7 @@ int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout) | |||
484 | 484 | ||
485 | cmd->timeout = timeout; | 485 | cmd->timeout = timeout; |
486 | 486 | ||
487 | ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd)); | 487 | ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, cmd, sizeof(*cmd)); |
488 | if (ret < 0) { | 488 | if (ret < 0) { |
489 | wl1251_error("cmd trigger scan to failed: %d", ret); | 489 | wl1251_error("cmd trigger scan to failed: %d", ret); |
490 | goto out; | 490 | goto out; |
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c index e058c2ba2a15..ca05aefd03bf 100644 --- a/drivers/platform/x86/asus_acpi.c +++ b/drivers/platform/x86/asus_acpi.c | |||
@@ -938,10 +938,11 @@ static int set_brightness(int value) | |||
938 | /* SPLV laptop */ | 938 | /* SPLV laptop */ |
939 | if (hotk->methods->brightness_set) { | 939 | if (hotk->methods->brightness_set) { |
940 | if (!write_acpi_int(hotk->handle, hotk->methods->brightness_set, | 940 | if (!write_acpi_int(hotk->handle, hotk->methods->brightness_set, |
941 | value, NULL)) | 941 | value, NULL)) { |
942 | printk(KERN_WARNING | 942 | printk(KERN_WARNING |
943 | "Asus ACPI: Error changing brightness\n"); | 943 | "Asus ACPI: Error changing brightness\n"); |
944 | ret = -EIO; | 944 | ret = -EIO; |
945 | } | ||
945 | goto out; | 946 | goto out; |
946 | } | 947 | } |
947 | 948 | ||
@@ -953,10 +954,11 @@ static int set_brightness(int value) | |||
953 | hotk->methods->brightness_down, | 954 | hotk->methods->brightness_down, |
954 | NULL, NULL); | 955 | NULL, NULL); |
955 | (value > 0) ? value-- : value++; | 956 | (value > 0) ? value-- : value++; |
956 | if (ACPI_FAILURE(status)) | 957 | if (ACPI_FAILURE(status)) { |
957 | printk(KERN_WARNING | 958 | printk(KERN_WARNING |
958 | "Asus ACPI: Error changing brightness\n"); | 959 | "Asus ACPI: Error changing brightness\n"); |
959 | ret = -EIO; | 960 | ret = -EIO; |
961 | } | ||
960 | } | 962 | } |
961 | out: | 963 | out: |
962 | return ret; | 964 | return ret; |
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c index d071ce056322..097083cac413 100644 --- a/drivers/platform/x86/compal-laptop.c +++ b/drivers/platform/x86/compal-laptop.c | |||
@@ -841,6 +841,14 @@ static struct dmi_system_id __initdata compal_dmi_table[] = { | |||
841 | .callback = dmi_check_cb | 841 | .callback = dmi_check_cb |
842 | }, | 842 | }, |
843 | { | 843 | { |
844 | .ident = "Dell Mini 1012", | ||
845 | .matches = { | ||
846 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
847 | DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"), | ||
848 | }, | ||
849 | .callback = dmi_check_cb | ||
850 | }, | ||
851 | { | ||
844 | .ident = "Dell Inspiron 11z", | 852 | .ident = "Dell Inspiron 11z", |
845 | .matches = { | 853 | .matches = { |
846 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | 854 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
@@ -1092,5 +1100,6 @@ MODULE_ALIAS("dmi:*:rnJHL90:rvrREFERENCE:*"); | |||
1092 | MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron910:*"); | 1100 | MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron910:*"); |
1093 | MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1010:*"); | 1101 | MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1010:*"); |
1094 | MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1011:*"); | 1102 | MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1011:*"); |
1103 | MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1012:*"); | ||
1095 | MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1110:*"); | 1104 | MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1110:*"); |
1096 | MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1210:*"); | 1105 | MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1210:*"); |
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index b41ed5cab3e7..4413975912e0 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c | |||
@@ -122,6 +122,13 @@ static struct dmi_system_id __devinitdata dell_blacklist[] = { | |||
122 | }, | 122 | }, |
123 | }, | 123 | }, |
124 | { | 124 | { |
125 | .ident = "Dell Mini 1012", | ||
126 | .matches = { | ||
127 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
128 | DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"), | ||
129 | }, | ||
130 | }, | ||
131 | { | ||
125 | .ident = "Dell Inspiron 11z", | 132 | .ident = "Dell Inspiron 11z", |
126 | .matches = { | 133 | .matches = { |
127 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | 134 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c index afe82e50dfea..9024480a8228 100644 --- a/drivers/platform/x86/intel_ips.c +++ b/drivers/platform/x86/intel_ips.c | |||
@@ -1342,8 +1342,10 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips) | |||
1342 | limits = &ips_lv_limits; | 1342 | limits = &ips_lv_limits; |
1343 | else if (strstr(boot_cpu_data.x86_model_id, "CPU U")) | 1343 | else if (strstr(boot_cpu_data.x86_model_id, "CPU U")) |
1344 | limits = &ips_ulv_limits; | 1344 | limits = &ips_ulv_limits; |
1345 | else | 1345 | else { |
1346 | dev_info(&ips->dev->dev, "No CPUID match found.\n"); | 1346 | dev_info(&ips->dev->dev, "No CPUID match found.\n"); |
1347 | goto out; | ||
1348 | } | ||
1347 | 1349 | ||
1348 | rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power); | 1350 | rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power); |
1349 | tdp = turbo_power & TURBO_TDP_MASK; | 1351 | tdp = turbo_power & TURBO_TDP_MASK; |
@@ -1432,6 +1434,12 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
1432 | 1434 | ||
1433 | spin_lock_init(&ips->turbo_status_lock); | 1435 | spin_lock_init(&ips->turbo_status_lock); |
1434 | 1436 | ||
1437 | ret = pci_enable_device(dev); | ||
1438 | if (ret) { | ||
1439 | dev_err(&dev->dev, "can't enable PCI device, aborting\n"); | ||
1440 | goto error_free; | ||
1441 | } | ||
1442 | |||
1435 | if (!pci_resource_start(dev, 0)) { | 1443 | if (!pci_resource_start(dev, 0)) { |
1436 | dev_err(&dev->dev, "TBAR not assigned, aborting\n"); | 1444 | dev_err(&dev->dev, "TBAR not assigned, aborting\n"); |
1437 | ret = -ENXIO; | 1445 | ret = -ENXIO; |
@@ -1444,11 +1452,6 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
1444 | goto error_free; | 1452 | goto error_free; |
1445 | } | 1453 | } |
1446 | 1454 | ||
1447 | ret = pci_enable_device(dev); | ||
1448 | if (ret) { | ||
1449 | dev_err(&dev->dev, "can't enable PCI device, aborting\n"); | ||
1450 | goto error_free; | ||
1451 | } | ||
1452 | 1455 | ||
1453 | ips->regmap = ioremap(pci_resource_start(dev, 0), | 1456 | ips->regmap = ioremap(pci_resource_start(dev, 0), |
1454 | pci_resource_len(dev, 0)); | 1457 | pci_resource_len(dev, 0)); |
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 5d6119bed00c..e35ed128bdef 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
@@ -1911,6 +1911,17 @@ enum { /* hot key scan codes (derived from ACPI DSDT) */ | |||
1911 | TP_ACPI_HOTKEYSCAN_VOLUMEDOWN, | 1911 | TP_ACPI_HOTKEYSCAN_VOLUMEDOWN, |
1912 | TP_ACPI_HOTKEYSCAN_MUTE, | 1912 | TP_ACPI_HOTKEYSCAN_MUTE, |
1913 | TP_ACPI_HOTKEYSCAN_THINKPAD, | 1913 | TP_ACPI_HOTKEYSCAN_THINKPAD, |
1914 | TP_ACPI_HOTKEYSCAN_UNK1, | ||
1915 | TP_ACPI_HOTKEYSCAN_UNK2, | ||
1916 | TP_ACPI_HOTKEYSCAN_UNK3, | ||
1917 | TP_ACPI_HOTKEYSCAN_UNK4, | ||
1918 | TP_ACPI_HOTKEYSCAN_UNK5, | ||
1919 | TP_ACPI_HOTKEYSCAN_UNK6, | ||
1920 | TP_ACPI_HOTKEYSCAN_UNK7, | ||
1921 | TP_ACPI_HOTKEYSCAN_UNK8, | ||
1922 | |||
1923 | /* Hotkey keymap size */ | ||
1924 | TPACPI_HOTKEY_MAP_LEN | ||
1914 | }; | 1925 | }; |
1915 | 1926 | ||
1916 | enum { /* Keys/events available through NVRAM polling */ | 1927 | enum { /* Keys/events available through NVRAM polling */ |
@@ -3082,6 +3093,8 @@ static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = { | |||
3082 | TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */ | 3093 | TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */ |
3083 | }; | 3094 | }; |
3084 | 3095 | ||
3096 | typedef u16 tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN]; | ||
3097 | |||
3085 | static int __init hotkey_init(struct ibm_init_struct *iibm) | 3098 | static int __init hotkey_init(struct ibm_init_struct *iibm) |
3086 | { | 3099 | { |
3087 | /* Requirements for changing the default keymaps: | 3100 | /* Requirements for changing the default keymaps: |
@@ -3113,9 +3126,17 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) | |||
3113 | * If the above is too much to ask, don't change the keymap. | 3126 | * If the above is too much to ask, don't change the keymap. |
3114 | * Ask the thinkpad-acpi maintainer to do it, instead. | 3127 | * Ask the thinkpad-acpi maintainer to do it, instead. |
3115 | */ | 3128 | */ |
3116 | static u16 ibm_keycode_map[] __initdata = { | 3129 | |
3130 | enum keymap_index { | ||
3131 | TPACPI_KEYMAP_IBM_GENERIC = 0, | ||
3132 | TPACPI_KEYMAP_LENOVO_GENERIC, | ||
3133 | }; | ||
3134 | |||
3135 | static const tpacpi_keymap_t tpacpi_keymaps[] __initconst = { | ||
3136 | /* Generic keymap for IBM ThinkPads */ | ||
3137 | [TPACPI_KEYMAP_IBM_GENERIC] = { | ||
3117 | /* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */ | 3138 | /* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */ |
3118 | KEY_FN_F1, KEY_FN_F2, KEY_COFFEE, KEY_SLEEP, | 3139 | KEY_FN_F1, KEY_BATTERY, KEY_COFFEE, KEY_SLEEP, |
3119 | KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8, | 3140 | KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8, |
3120 | KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND, | 3141 | KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND, |
3121 | 3142 | ||
@@ -3146,11 +3167,13 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) | |||
3146 | /* (assignments unknown, please report if found) */ | 3167 | /* (assignments unknown, please report if found) */ |
3147 | KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, | 3168 | KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, |
3148 | KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, | 3169 | KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, |
3149 | }; | 3170 | }, |
3150 | static u16 lenovo_keycode_map[] __initdata = { | 3171 | |
3172 | /* Generic keymap for Lenovo ThinkPads */ | ||
3173 | [TPACPI_KEYMAP_LENOVO_GENERIC] = { | ||
3151 | /* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */ | 3174 | /* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */ |
3152 | KEY_FN_F1, KEY_COFFEE, KEY_BATTERY, KEY_SLEEP, | 3175 | KEY_FN_F1, KEY_COFFEE, KEY_BATTERY, KEY_SLEEP, |
3153 | KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8, | 3176 | KEY_WLAN, KEY_CAMERA, KEY_SWITCHVIDEOMODE, KEY_FN_F8, |
3154 | KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND, | 3177 | KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND, |
3155 | 3178 | ||
3156 | /* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */ | 3179 | /* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */ |
@@ -3189,11 +3212,25 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) | |||
3189 | /* (assignments unknown, please report if found) */ | 3212 | /* (assignments unknown, please report if found) */ |
3190 | KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, | 3213 | KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, |
3191 | KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, | 3214 | KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, |
3215 | }, | ||
3216 | }; | ||
3217 | |||
3218 | static const struct tpacpi_quirk tpacpi_keymap_qtable[] __initconst = { | ||
3219 | /* Generic maps (fallback) */ | ||
3220 | { | ||
3221 | .vendor = PCI_VENDOR_ID_IBM, | ||
3222 | .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY, | ||
3223 | .quirks = TPACPI_KEYMAP_IBM_GENERIC, | ||
3224 | }, | ||
3225 | { | ||
3226 | .vendor = PCI_VENDOR_ID_LENOVO, | ||
3227 | .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY, | ||
3228 | .quirks = TPACPI_KEYMAP_LENOVO_GENERIC, | ||
3229 | }, | ||
3192 | }; | 3230 | }; |
3193 | 3231 | ||
3194 | #define TPACPI_HOTKEY_MAP_LEN ARRAY_SIZE(ibm_keycode_map) | 3232 | #define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t) |
3195 | #define TPACPI_HOTKEY_MAP_SIZE sizeof(ibm_keycode_map) | 3233 | #define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_t[0]) |
3196 | #define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(ibm_keycode_map[0]) | ||
3197 | 3234 | ||
3198 | int res, i; | 3235 | int res, i; |
3199 | int status; | 3236 | int status; |
@@ -3202,6 +3239,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) | |||
3202 | bool tabletsw_state = false; | 3239 | bool tabletsw_state = false; |
3203 | 3240 | ||
3204 | unsigned long quirks; | 3241 | unsigned long quirks; |
3242 | unsigned long keymap_id; | ||
3205 | 3243 | ||
3206 | vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, | 3244 | vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, |
3207 | "initializing hotkey subdriver\n"); | 3245 | "initializing hotkey subdriver\n"); |
@@ -3342,7 +3380,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) | |||
3342 | goto err_exit; | 3380 | goto err_exit; |
3343 | 3381 | ||
3344 | /* Set up key map */ | 3382 | /* Set up key map */ |
3345 | |||
3346 | hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE, | 3383 | hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE, |
3347 | GFP_KERNEL); | 3384 | GFP_KERNEL); |
3348 | if (!hotkey_keycode_map) { | 3385 | if (!hotkey_keycode_map) { |
@@ -3352,17 +3389,14 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) | |||
3352 | goto err_exit; | 3389 | goto err_exit; |
3353 | } | 3390 | } |
3354 | 3391 | ||
3355 | if (tpacpi_is_lenovo()) { | 3392 | keymap_id = tpacpi_check_quirks(tpacpi_keymap_qtable, |
3356 | dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, | 3393 | ARRAY_SIZE(tpacpi_keymap_qtable)); |
3357 | "using Lenovo default hot key map\n"); | 3394 | BUG_ON(keymap_id >= ARRAY_SIZE(tpacpi_keymaps)); |
3358 | memcpy(hotkey_keycode_map, &lenovo_keycode_map, | 3395 | dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, |
3359 | TPACPI_HOTKEY_MAP_SIZE); | 3396 | "using keymap number %lu\n", keymap_id); |
3360 | } else { | 3397 | |
3361 | dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, | 3398 | memcpy(hotkey_keycode_map, &tpacpi_keymaps[keymap_id], |
3362 | "using IBM default hot key map\n"); | 3399 | TPACPI_HOTKEY_MAP_SIZE); |
3363 | memcpy(hotkey_keycode_map, &ibm_keycode_map, | ||
3364 | TPACPI_HOTKEY_MAP_SIZE); | ||
3365 | } | ||
3366 | 3400 | ||
3367 | input_set_capability(tpacpi_inputdev, EV_MSC, MSC_SCAN); | 3401 | input_set_capability(tpacpi_inputdev, EV_MSC, MSC_SCAN); |
3368 | tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE; | 3402 | tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE; |
@@ -3469,7 +3503,8 @@ static bool hotkey_notify_hotkey(const u32 hkey, | |||
3469 | *send_acpi_ev = true; | 3503 | *send_acpi_ev = true; |
3470 | *ignore_acpi_ev = false; | 3504 | *ignore_acpi_ev = false; |
3471 | 3505 | ||
3472 | if (scancode > 0 && scancode < 0x21) { | 3506 | /* HKEY event 0x1001 is scancode 0x00 */ |
3507 | if (scancode > 0 && scancode <= TPACPI_HOTKEY_MAP_LEN) { | ||
3473 | scancode--; | 3508 | scancode--; |
3474 | if (!(hotkey_source_mask & (1 << scancode))) { | 3509 | if (!(hotkey_source_mask & (1 << scancode))) { |
3475 | tpacpi_input_send_key_masked(scancode); | 3510 | tpacpi_input_send_key_masked(scancode); |
@@ -6080,13 +6115,18 @@ static struct backlight_ops ibm_backlight_data = { | |||
6080 | 6115 | ||
6081 | /* --------------------------------------------------------------------- */ | 6116 | /* --------------------------------------------------------------------- */ |
6082 | 6117 | ||
6118 | /* | ||
6119 | * Call _BCL method of video device. On some ThinkPads this will | ||
6120 | * switch the firmware to the ACPI brightness control mode. | ||
6121 | */ | ||
6122 | |||
6083 | static int __init tpacpi_query_bcl_levels(acpi_handle handle) | 6123 | static int __init tpacpi_query_bcl_levels(acpi_handle handle) |
6084 | { | 6124 | { |
6085 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | 6125 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
6086 | union acpi_object *obj; | 6126 | union acpi_object *obj; |
6087 | int rc; | 6127 | int rc; |
6088 | 6128 | ||
6089 | if (ACPI_SUCCESS(acpi_evaluate_object(handle, NULL, NULL, &buffer))) { | 6129 | if (ACPI_SUCCESS(acpi_evaluate_object(handle, "_BCL", NULL, &buffer))) { |
6090 | obj = (union acpi_object *)buffer.pointer; | 6130 | obj = (union acpi_object *)buffer.pointer; |
6091 | if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) { | 6131 | if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) { |
6092 | printk(TPACPI_ERR "Unknown _BCL data, " | 6132 | printk(TPACPI_ERR "Unknown _BCL data, " |
@@ -6103,55 +6143,22 @@ static int __init tpacpi_query_bcl_levels(acpi_handle handle) | |||
6103 | return rc; | 6143 | return rc; |
6104 | } | 6144 | } |
6105 | 6145 | ||
6106 | static acpi_status __init tpacpi_acpi_walk_find_bcl(acpi_handle handle, | ||
6107 | u32 lvl, void *context, void **rv) | ||
6108 | { | ||
6109 | char name[ACPI_PATH_SEGMENT_LENGTH]; | ||
6110 | struct acpi_buffer buffer = { sizeof(name), &name }; | ||
6111 | |||
6112 | if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)) && | ||
6113 | !strncmp("_BCL", name, sizeof(name) - 1)) { | ||
6114 | BUG_ON(!rv || !*rv); | ||
6115 | **(int **)rv = tpacpi_query_bcl_levels(handle); | ||
6116 | return AE_CTRL_TERMINATE; | ||
6117 | } else { | ||
6118 | return AE_OK; | ||
6119 | } | ||
6120 | } | ||
6121 | 6146 | ||
6122 | /* | 6147 | /* |
6123 | * Returns 0 (no ACPI _BCL or _BCL invalid), or size of brightness map | 6148 | * Returns 0 (no ACPI _BCL or _BCL invalid), or size of brightness map |
6124 | */ | 6149 | */ |
6125 | static unsigned int __init tpacpi_check_std_acpi_brightness_support(void) | 6150 | static unsigned int __init tpacpi_check_std_acpi_brightness_support(void) |
6126 | { | 6151 | { |
6127 | int status; | 6152 | acpi_handle video_device; |
6128 | int bcl_levels = 0; | 6153 | int bcl_levels = 0; |
6129 | void *bcl_ptr = &bcl_levels; | ||
6130 | |||
6131 | if (!vid_handle) | ||
6132 | TPACPI_ACPIHANDLE_INIT(vid); | ||
6133 | |||
6134 | if (!vid_handle) | ||
6135 | return 0; | ||
6136 | |||
6137 | /* | ||
6138 | * Search for a _BCL method, and execute it. This is safe on all | ||
6139 | * ThinkPads, and as a side-effect, _BCL will place a Lenovo Vista | ||
6140 | * BIOS in ACPI backlight control mode. We do NOT have to care | ||
6141 | * about calling the _BCL method in an enabled video device, any | ||
6142 | * will do for our purposes. | ||
6143 | */ | ||
6144 | 6154 | ||
6145 | status = acpi_walk_namespace(ACPI_TYPE_METHOD, vid_handle, 3, | 6155 | tpacpi_acpi_handle_locate("video", ACPI_VIDEO_HID, &video_device); |
6146 | tpacpi_acpi_walk_find_bcl, NULL, NULL, | 6156 | if (video_device) |
6147 | &bcl_ptr); | 6157 | bcl_levels = tpacpi_query_bcl_levels(video_device); |
6148 | 6158 | ||
6149 | if (ACPI_SUCCESS(status) && bcl_levels > 2) { | 6159 | tp_features.bright_acpimode = (bcl_levels > 0); |
6150 | tp_features.bright_acpimode = 1; | ||
6151 | return bcl_levels - 2; | ||
6152 | } | ||
6153 | 6160 | ||
6154 | return 0; | 6161 | return (bcl_levels > 2) ? (bcl_levels - 2) : 0; |
6155 | } | 6162 | } |
6156 | 6163 | ||
6157 | /* | 6164 | /* |
@@ -6244,28 +6251,6 @@ static int __init brightness_init(struct ibm_init_struct *iibm) | |||
6244 | if (tp_features.bright_unkfw) | 6251 | if (tp_features.bright_unkfw) |
6245 | return 1; | 6252 | return 1; |
6246 | 6253 | ||
6247 | if (tp_features.bright_acpimode) { | ||
6248 | if (acpi_video_backlight_support()) { | ||
6249 | if (brightness_enable > 1) { | ||
6250 | printk(TPACPI_NOTICE | ||
6251 | "Standard ACPI backlight interface " | ||
6252 | "available, not loading native one.\n"); | ||
6253 | return 1; | ||
6254 | } else if (brightness_enable == 1) { | ||
6255 | printk(TPACPI_NOTICE | ||
6256 | "Backlight control force enabled, even if standard " | ||
6257 | "ACPI backlight interface is available\n"); | ||
6258 | } | ||
6259 | } else { | ||
6260 | if (brightness_enable > 1) { | ||
6261 | printk(TPACPI_NOTICE | ||
6262 | "Standard ACPI backlight interface not " | ||
6263 | "available, thinkpad_acpi native " | ||
6264 | "brightness control enabled\n"); | ||
6265 | } | ||
6266 | } | ||
6267 | } | ||
6268 | |||
6269 | if (!brightness_enable) { | 6254 | if (!brightness_enable) { |
6270 | dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT, | 6255 | dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT, |
6271 | "brightness support disabled by " | 6256 | "brightness support disabled by " |
@@ -6273,6 +6258,26 @@ static int __init brightness_init(struct ibm_init_struct *iibm) | |||
6273 | return 1; | 6258 | return 1; |
6274 | } | 6259 | } |
6275 | 6260 | ||
6261 | if (acpi_video_backlight_support()) { | ||
6262 | if (brightness_enable > 1) { | ||
6263 | printk(TPACPI_INFO | ||
6264 | "Standard ACPI backlight interface " | ||
6265 | "available, not loading native one.\n"); | ||
6266 | return 1; | ||
6267 | } else if (brightness_enable == 1) { | ||
6268 | printk(TPACPI_WARN | ||
6269 | "Cannot enable backlight brightness support, " | ||
6270 | "ACPI is already handling it. Refer to the " | ||
6271 | "acpi_backlight kernel parameter\n"); | ||
6272 | return 1; | ||
6273 | } | ||
6274 | } else if (tp_features.bright_acpimode && brightness_enable > 1) { | ||
6275 | printk(TPACPI_NOTICE | ||
6276 | "Standard ACPI backlight interface not " | ||
6277 | "available, thinkpad_acpi native " | ||
6278 | "brightness control enabled\n"); | ||
6279 | } | ||
6280 | |||
6276 | /* | 6281 | /* |
6277 | * Check for module parameter bogosity, note that we | 6282 | * Check for module parameter bogosity, note that we |
6278 | * init brightness_mode to TPACPI_BRGHT_MODE_MAX in order to be | 6283 | * init brightness_mode to TPACPI_BRGHT_MODE_MAX in order to be |
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 95a895dd4f13..c8dc392edd57 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c | |||
@@ -56,6 +56,7 @@ | |||
56 | #include <linux/delay.h> | 56 | #include <linux/delay.h> |
57 | #include <linux/dma-mapping.h> | 57 | #include <linux/dma-mapping.h> |
58 | #include <linux/timer.h> | 58 | #include <linux/timer.h> |
59 | #include <linux/slab.h> | ||
59 | #include <linux/pci.h> | 60 | #include <linux/pci.h> |
60 | #include <linux/aer.h> | 61 | #include <linux/aer.h> |
61 | #include <asm/dma.h> | 62 | #include <asm/dma.h> |
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h index f065204e401b..95a26fb1626c 100644 --- a/drivers/scsi/qla4xxx/ql4_glbl.h +++ b/drivers/scsi/qla4xxx/ql4_glbl.h | |||
@@ -132,7 +132,7 @@ void qla4_8xxx_idc_unlock(struct scsi_qla_host *ha); | |||
132 | int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha); | 132 | int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha); |
133 | void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha); | 133 | void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha); |
134 | void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha); | 134 | void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha); |
135 | inline void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha); | 135 | void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha); |
136 | 136 | ||
137 | extern int ql4xextended_error_logging; | 137 | extern int ql4xextended_error_logging; |
138 | extern int ql4xdiscoverywait; | 138 | extern int ql4xdiscoverywait; |
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index e031a734836e..5d4a3822382d 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c | |||
@@ -1418,7 +1418,7 @@ static int qla4_8xxx_rcvpeg_ready(struct scsi_qla_host *ha) | |||
1418 | return QLA_SUCCESS; | 1418 | return QLA_SUCCESS; |
1419 | } | 1419 | } |
1420 | 1420 | ||
1421 | inline void | 1421 | void |
1422 | qla4_8xxx_set_drv_active(struct scsi_qla_host *ha) | 1422 | qla4_8xxx_set_drv_active(struct scsi_qla_host *ha) |
1423 | { | 1423 | { |
1424 | uint32_t drv_active; | 1424 | uint32_t drv_active; |
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c index 659a695bdad6..2af8fd113123 100644 --- a/drivers/serial/of_serial.c +++ b/drivers/serial/of_serial.c | |||
@@ -14,11 +14,10 @@ | |||
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/serial_core.h> | 15 | #include <linux/serial_core.h> |
16 | #include <linux/serial_8250.h> | 16 | #include <linux/serial_8250.h> |
17 | #include <linux/of_address.h> | ||
17 | #include <linux/of_platform.h> | 18 | #include <linux/of_platform.h> |
18 | #include <linux/nwpserial.h> | 19 | #include <linux/nwpserial.h> |
19 | 20 | ||
20 | #include <asm/prom.h> | ||
21 | |||
22 | struct of_serial_info { | 21 | struct of_serial_info { |
23 | int type; | 22 | int type; |
24 | int line; | 23 | int line; |
diff --git a/drivers/serial/suncore.c b/drivers/serial/suncore.c index 544f2e25d0e5..6381a0282ee7 100644 --- a/drivers/serial/suncore.c +++ b/drivers/serial/suncore.c | |||
@@ -55,7 +55,12 @@ EXPORT_SYMBOL(sunserial_unregister_minors); | |||
55 | int sunserial_console_match(struct console *con, struct device_node *dp, | 55 | int sunserial_console_match(struct console *con, struct device_node *dp, |
56 | struct uart_driver *drv, int line, bool ignore_line) | 56 | struct uart_driver *drv, int line, bool ignore_line) |
57 | { | 57 | { |
58 | if (!con || of_console_device != dp) | 58 | if (!con) |
59 | return 0; | ||
60 | |||
61 | drv->cons = con; | ||
62 | |||
63 | if (of_console_device != dp) | ||
59 | return 0; | 64 | return 0; |
60 | 65 | ||
61 | if (!ignore_line) { | 66 | if (!ignore_line) { |
@@ -69,12 +74,10 @@ int sunserial_console_match(struct console *con, struct device_node *dp, | |||
69 | return 0; | 74 | return 0; |
70 | } | 75 | } |
71 | 76 | ||
72 | con->index = line; | 77 | if (!console_set_on_cmdline) { |
73 | drv->cons = con; | 78 | con->index = line; |
74 | |||
75 | if (!console_set_on_cmdline) | ||
76 | add_preferred_console(con->name, line, NULL); | 79 | add_preferred_console(con->name, line, NULL); |
77 | 80 | } | |
78 | return 1; | 81 | return 1; |
79 | } | 82 | } |
80 | EXPORT_SYMBOL(sunserial_console_match); | 83 | EXPORT_SYMBOL(sunserial_console_match); |
diff --git a/drivers/spi/coldfire_qspi.c b/drivers/spi/coldfire_qspi.c index 59be3efe0636..052b3c7fa6a0 100644 --- a/drivers/spi/coldfire_qspi.c +++ b/drivers/spi/coldfire_qspi.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/errno.h> | 25 | #include <linux/errno.h> |
26 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
27 | #include <linux/sched.h> | ||
27 | #include <linux/workqueue.h> | 28 | #include <linux/workqueue.h> |
28 | #include <linux/delay.h> | 29 | #include <linux/delay.h> |
29 | #include <linux/io.h> | 30 | #include <linux/io.h> |
diff --git a/drivers/staging/pohmelfs/path_entry.c b/drivers/staging/pohmelfs/path_entry.c index cdc4dd50d638..8ec83d2dffb7 100644 --- a/drivers/staging/pohmelfs/path_entry.c +++ b/drivers/staging/pohmelfs/path_entry.c | |||
@@ -44,9 +44,9 @@ int pohmelfs_construct_path_string(struct pohmelfs_inode *pi, void *data, int le | |||
44 | return -ENOENT; | 44 | return -ENOENT; |
45 | } | 45 | } |
46 | 46 | ||
47 | read_lock(¤t->fs->lock); | 47 | spin_lock(¤t->fs->lock); |
48 | path.mnt = mntget(current->fs->root.mnt); | 48 | path.mnt = mntget(current->fs->root.mnt); |
49 | read_unlock(¤t->fs->lock); | 49 | spin_unlock(¤t->fs->lock); |
50 | 50 | ||
51 | path.dentry = d; | 51 | path.dentry = d; |
52 | 52 | ||
@@ -91,9 +91,9 @@ int pohmelfs_path_length(struct pohmelfs_inode *pi) | |||
91 | return -ENOENT; | 91 | return -ENOENT; |
92 | } | 92 | } |
93 | 93 | ||
94 | read_lock(¤t->fs->lock); | 94 | spin_lock(¤t->fs->lock); |
95 | root = dget(current->fs->root.dentry); | 95 | root = dget(current->fs->root.dentry); |
96 | read_unlock(¤t->fs->lock); | 96 | spin_unlock(¤t->fs->lock); |
97 | 97 | ||
98 | spin_lock(&dcache_lock); | 98 | spin_lock(&dcache_lock); |
99 | 99 | ||
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c index afe21e6eb544..1c2c68356ea7 100644 --- a/drivers/video/amba-clcd.c +++ b/drivers/video/amba-clcd.c | |||
@@ -80,7 +80,10 @@ static void clcdfb_disable(struct clcd_fb *fb) | |||
80 | /* | 80 | /* |
81 | * Disable CLCD clock source. | 81 | * Disable CLCD clock source. |
82 | */ | 82 | */ |
83 | clk_disable(fb->clk); | 83 | if (fb->clk_enabled) { |
84 | fb->clk_enabled = false; | ||
85 | clk_disable(fb->clk); | ||
86 | } | ||
84 | } | 87 | } |
85 | 88 | ||
86 | static void clcdfb_enable(struct clcd_fb *fb, u32 cntl) | 89 | static void clcdfb_enable(struct clcd_fb *fb, u32 cntl) |
@@ -88,7 +91,10 @@ static void clcdfb_enable(struct clcd_fb *fb, u32 cntl) | |||
88 | /* | 91 | /* |
89 | * Enable the CLCD clock source. | 92 | * Enable the CLCD clock source. |
90 | */ | 93 | */ |
91 | clk_enable(fb->clk); | 94 | if (!fb->clk_enabled) { |
95 | fb->clk_enabled = true; | ||
96 | clk_enable(fb->clk); | ||
97 | } | ||
92 | 98 | ||
93 | /* | 99 | /* |
94 | * Bring up by first enabling.. | 100 | * Bring up by first enabling.. |
diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h index f3a4e15672d9..f96a471cb1a8 100644 --- a/drivers/video/matrox/matroxfb_base.h +++ b/drivers/video/matrox/matroxfb_base.h | |||
@@ -151,13 +151,13 @@ static inline void mga_writel(vaddr_t va, unsigned int offs, u_int32_t value) { | |||
151 | static inline void mga_memcpy_toio(vaddr_t va, const void* src, int len) { | 151 | static inline void mga_memcpy_toio(vaddr_t va, const void* src, int len) { |
152 | #if defined(__alpha__) || defined(__i386__) || defined(__x86_64__) | 152 | #if defined(__alpha__) || defined(__i386__) || defined(__x86_64__) |
153 | /* | 153 | /* |
154 | * memcpy_toio works for us if: | 154 | * iowrite32_rep works for us if: |
155 | * (1) Copies data as 32bit quantities, not byte after byte, | 155 | * (1) Copies data as 32bit quantities, not byte after byte, |
156 | * (2) Performs LE ordered stores, and | 156 | * (2) Performs LE ordered stores, and |
157 | * (3) It copes with unaligned source (destination is guaranteed to be page | 157 | * (3) It copes with unaligned source (destination is guaranteed to be page |
158 | * aligned and length is guaranteed to be multiple of 4). | 158 | * aligned and length is guaranteed to be multiple of 4). |
159 | */ | 159 | */ |
160 | memcpy_toio(va.vaddr, src, len); | 160 | iowrite32_rep(va.vaddr, src, len >> 2); |
161 | #else | 161 | #else |
162 | u_int32_t __iomem* addr = va.vaddr; | 162 | u_int32_t __iomem* addr = va.vaddr; |
163 | 163 | ||
diff --git a/firmware/Makefile b/firmware/Makefile index b27f09f05d17..9c2d19452d0b 100644 --- a/firmware/Makefile +++ b/firmware/Makefile | |||
@@ -142,7 +142,7 @@ fw-shipped-$(CONFIG_YAM) += yam/1200.bin yam/9600.bin | |||
142 | fw-shipped-all := $(fw-shipped-y) $(fw-shipped-m) $(fw-shipped-) | 142 | fw-shipped-all := $(fw-shipped-y) $(fw-shipped-m) $(fw-shipped-) |
143 | 143 | ||
144 | # Directories which we _might_ need to create, so we have a rule for them. | 144 | # Directories which we _might_ need to create, so we have a rule for them. |
145 | firmware-dirs := $(sort $(patsubst %,$(objtree)/$(obj)/%/,$(dir $(fw-external-y) $(fw-shipped-all)))) | 145 | firmware-dirs := $(sort $(addprefix $(objtree)/$(obj)/,$(dir $(fw-external-y) $(fw-shipped-all)))) |
146 | 146 | ||
147 | quiet_cmd_mkdir = MKDIR $(patsubst $(objtree)/%,%,$@) | 147 | quiet_cmd_mkdir = MKDIR $(patsubst $(objtree)/%,%,$@) |
148 | cmd_mkdir = mkdir -p $@ | 148 | cmd_mkdir = mkdir -p $@ |
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index 9e60fd201716..a7528b913936 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c | |||
@@ -108,7 +108,7 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs) | |||
108 | Node *fmt; | 108 | Node *fmt; |
109 | struct file * interp_file = NULL; | 109 | struct file * interp_file = NULL; |
110 | char iname[BINPRM_BUF_SIZE]; | 110 | char iname[BINPRM_BUF_SIZE]; |
111 | char *iname_addr = iname; | 111 | const char *iname_addr = iname; |
112 | int retval; | 112 | int retval; |
113 | int fd_binary = -1; | 113 | int fd_binary = -1; |
114 | 114 | ||
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c index aca9d55afb22..396a9884591f 100644 --- a/fs/binfmt_script.c +++ b/fs/binfmt_script.c | |||
@@ -16,7 +16,8 @@ | |||
16 | 16 | ||
17 | static int load_script(struct linux_binprm *bprm,struct pt_regs *regs) | 17 | static int load_script(struct linux_binprm *bprm,struct pt_regs *regs) |
18 | { | 18 | { |
19 | char *cp, *i_name, *i_arg; | 19 | const char *i_arg, *i_name; |
20 | char *cp; | ||
20 | struct file *file; | 21 | struct file *file; |
21 | char interp[BINPRM_BUF_SIZE]; | 22 | char interp[BINPRM_BUF_SIZE]; |
22 | int retval; | 23 | int retval; |
diff --git a/fs/buffer.c b/fs/buffer.c index 50efa339e051..3e7dca279d1c 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -770,11 +770,12 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list) | |||
770 | spin_unlock(lock); | 770 | spin_unlock(lock); |
771 | /* | 771 | /* |
772 | * Ensure any pending I/O completes so that | 772 | * Ensure any pending I/O completes so that |
773 | * ll_rw_block() actually writes the current | 773 | * write_dirty_buffer() actually writes the |
774 | * contents - it is a noop if I/O is still in | 774 | * current contents - it is a noop if I/O is |
775 | * flight on potentially older contents. | 775 | * still in flight on potentially older |
776 | * contents. | ||
776 | */ | 777 | */ |
777 | ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh); | 778 | write_dirty_buffer(bh, WRITE_SYNC_PLUG); |
778 | 779 | ||
779 | /* | 780 | /* |
780 | * Kick off IO for the previous mapping. Note | 781 | * Kick off IO for the previous mapping. Note |
@@ -2912,13 +2913,6 @@ int submit_bh(int rw, struct buffer_head * bh) | |||
2912 | BUG_ON(buffer_unwritten(bh)); | 2913 | BUG_ON(buffer_unwritten(bh)); |
2913 | 2914 | ||
2914 | /* | 2915 | /* |
2915 | * Mask in barrier bit for a write (could be either a WRITE or a | ||
2916 | * WRITE_SYNC | ||
2917 | */ | ||
2918 | if (buffer_ordered(bh) && (rw & WRITE)) | ||
2919 | rw |= WRITE_BARRIER; | ||
2920 | |||
2921 | /* | ||
2922 | * Only clear out a write error when rewriting | 2916 | * Only clear out a write error when rewriting |
2923 | */ | 2917 | */ |
2924 | if (test_set_buffer_req(bh) && (rw & WRITE)) | 2918 | if (test_set_buffer_req(bh) && (rw & WRITE)) |
@@ -2956,22 +2950,21 @@ EXPORT_SYMBOL(submit_bh); | |||
2956 | 2950 | ||
2957 | /** | 2951 | /** |
2958 | * ll_rw_block: low-level access to block devices (DEPRECATED) | 2952 | * ll_rw_block: low-level access to block devices (DEPRECATED) |
2959 | * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead) | 2953 | * @rw: whether to %READ or %WRITE or maybe %READA (readahead) |
2960 | * @nr: number of &struct buffer_heads in the array | 2954 | * @nr: number of &struct buffer_heads in the array |
2961 | * @bhs: array of pointers to &struct buffer_head | 2955 | * @bhs: array of pointers to &struct buffer_head |
2962 | * | 2956 | * |
2963 | * ll_rw_block() takes an array of pointers to &struct buffer_heads, and | 2957 | * ll_rw_block() takes an array of pointers to &struct buffer_heads, and |
2964 | * requests an I/O operation on them, either a %READ or a %WRITE. The third | 2958 | * requests an I/O operation on them, either a %READ or a %WRITE. The third |
2965 | * %SWRITE is like %WRITE only we make sure that the *current* data in buffers | 2959 | * %READA option is described in the documentation for generic_make_request() |
2966 | * are sent to disk. The fourth %READA option is described in the documentation | 2960 | * which ll_rw_block() calls. |
2967 | * for generic_make_request() which ll_rw_block() calls. | ||
2968 | * | 2961 | * |
2969 | * This function drops any buffer that it cannot get a lock on (with the | 2962 | * This function drops any buffer that it cannot get a lock on (with the |
2970 | * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be | 2963 | * BH_Lock state bit), any buffer that appears to be clean when doing a write |
2971 | * clean when doing a write request, and any buffer that appears to be | 2964 | * request, and any buffer that appears to be up-to-date when doing read |
2972 | * up-to-date when doing read request. Further it marks as clean buffers that | 2965 | * request. Further it marks as clean buffers that are processed for |
2973 | * are processed for writing (the buffer cache won't assume that they are | 2966 | * writing (the buffer cache won't assume that they are actually clean |
2974 | * actually clean until the buffer gets unlocked). | 2967 | * until the buffer gets unlocked). |
2975 | * | 2968 | * |
2976 | * ll_rw_block sets b_end_io to simple completion handler that marks | 2969 | * ll_rw_block sets b_end_io to simple completion handler that marks |
2977 | * the buffer up-to-date (if approriate), unlocks the buffer and wakes | 2970 | * the buffer up-to-date (if approriate), unlocks the buffer and wakes |
@@ -2987,20 +2980,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[]) | |||
2987 | for (i = 0; i < nr; i++) { | 2980 | for (i = 0; i < nr; i++) { |
2988 | struct buffer_head *bh = bhs[i]; | 2981 | struct buffer_head *bh = bhs[i]; |
2989 | 2982 | ||
2990 | if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG) | 2983 | if (!trylock_buffer(bh)) |
2991 | lock_buffer(bh); | ||
2992 | else if (!trylock_buffer(bh)) | ||
2993 | continue; | 2984 | continue; |
2994 | 2985 | if (rw == WRITE) { | |
2995 | if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC || | ||
2996 | rw == SWRITE_SYNC_PLUG) { | ||
2997 | if (test_clear_buffer_dirty(bh)) { | 2986 | if (test_clear_buffer_dirty(bh)) { |
2998 | bh->b_end_io = end_buffer_write_sync; | 2987 | bh->b_end_io = end_buffer_write_sync; |
2999 | get_bh(bh); | 2988 | get_bh(bh); |
3000 | if (rw == SWRITE_SYNC) | 2989 | submit_bh(WRITE, bh); |
3001 | submit_bh(WRITE_SYNC, bh); | ||
3002 | else | ||
3003 | submit_bh(WRITE, bh); | ||
3004 | continue; | 2990 | continue; |
3005 | } | 2991 | } |
3006 | } else { | 2992 | } else { |
@@ -3016,12 +3002,25 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[]) | |||
3016 | } | 3002 | } |
3017 | EXPORT_SYMBOL(ll_rw_block); | 3003 | EXPORT_SYMBOL(ll_rw_block); |
3018 | 3004 | ||
3005 | void write_dirty_buffer(struct buffer_head *bh, int rw) | ||
3006 | { | ||
3007 | lock_buffer(bh); | ||
3008 | if (!test_clear_buffer_dirty(bh)) { | ||
3009 | unlock_buffer(bh); | ||
3010 | return; | ||
3011 | } | ||
3012 | bh->b_end_io = end_buffer_write_sync; | ||
3013 | get_bh(bh); | ||
3014 | submit_bh(rw, bh); | ||
3015 | } | ||
3016 | EXPORT_SYMBOL(write_dirty_buffer); | ||
3017 | |||
3019 | /* | 3018 | /* |
3020 | * For a data-integrity writeout, we need to wait upon any in-progress I/O | 3019 | * For a data-integrity writeout, we need to wait upon any in-progress I/O |
3021 | * and then start new I/O and then wait upon it. The caller must have a ref on | 3020 | * and then start new I/O and then wait upon it. The caller must have a ref on |
3022 | * the buffer_head. | 3021 | * the buffer_head. |
3023 | */ | 3022 | */ |
3024 | int sync_dirty_buffer(struct buffer_head *bh) | 3023 | int __sync_dirty_buffer(struct buffer_head *bh, int rw) |
3025 | { | 3024 | { |
3026 | int ret = 0; | 3025 | int ret = 0; |
3027 | 3026 | ||
@@ -3030,7 +3029,7 @@ int sync_dirty_buffer(struct buffer_head *bh) | |||
3030 | if (test_clear_buffer_dirty(bh)) { | 3029 | if (test_clear_buffer_dirty(bh)) { |
3031 | get_bh(bh); | 3030 | get_bh(bh); |
3032 | bh->b_end_io = end_buffer_write_sync; | 3031 | bh->b_end_io = end_buffer_write_sync; |
3033 | ret = submit_bh(WRITE_SYNC, bh); | 3032 | ret = submit_bh(rw, bh); |
3034 | wait_on_buffer(bh); | 3033 | wait_on_buffer(bh); |
3035 | if (buffer_eopnotsupp(bh)) { | 3034 | if (buffer_eopnotsupp(bh)) { |
3036 | clear_buffer_eopnotsupp(bh); | 3035 | clear_buffer_eopnotsupp(bh); |
@@ -3043,6 +3042,12 @@ int sync_dirty_buffer(struct buffer_head *bh) | |||
3043 | } | 3042 | } |
3044 | return ret; | 3043 | return ret; |
3045 | } | 3044 | } |
3045 | EXPORT_SYMBOL(__sync_dirty_buffer); | ||
3046 | |||
3047 | int sync_dirty_buffer(struct buffer_head *bh) | ||
3048 | { | ||
3049 | return __sync_dirty_buffer(bh, WRITE_SYNC); | ||
3050 | } | ||
3046 | EXPORT_SYMBOL(sync_dirty_buffer); | 3051 | EXPORT_SYMBOL(sync_dirty_buffer); |
3047 | 3052 | ||
3048 | /* | 3053 | /* |
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c index a53b130b366c..1e7a33028d33 100644 --- a/fs/cramfs/inode.c +++ b/fs/cramfs/inode.c | |||
@@ -80,7 +80,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb, | |||
80 | } | 80 | } |
81 | } else { | 81 | } else { |
82 | inode = iget_locked(sb, CRAMINO(cramfs_inode)); | 82 | inode = iget_locked(sb, CRAMINO(cramfs_inode)); |
83 | if (inode) { | 83 | if (inode && (inode->i_state & I_NEW)) { |
84 | setup_inode(inode, cramfs_inode); | 84 | setup_inode(inode, cramfs_inode); |
85 | unlock_new_inode(inode); | 85 | unlock_new_inode(inode); |
86 | } | 86 | } |
diff --git a/fs/dcache.c b/fs/dcache.c index 4d13bf50b7b1..83293be48149 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -1332,31 +1332,13 @@ EXPORT_SYMBOL(d_add_ci); | |||
1332 | * d_lookup - search for a dentry | 1332 | * d_lookup - search for a dentry |
1333 | * @parent: parent dentry | 1333 | * @parent: parent dentry |
1334 | * @name: qstr of name we wish to find | 1334 | * @name: qstr of name we wish to find |
1335 | * Returns: dentry, or NULL | ||
1335 | * | 1336 | * |
1336 | * Searches the children of the parent dentry for the name in question. If | 1337 | * d_lookup searches the children of the parent dentry for the name in |
1337 | * the dentry is found its reference count is incremented and the dentry | 1338 | * question. If the dentry is found its reference count is incremented and the |
1338 | * is returned. The caller must use dput to free the entry when it has | 1339 | * dentry is returned. The caller must use dput to free the entry when it has |
1339 | * finished using it. %NULL is returned on failure. | 1340 | * finished using it. %NULL is returned if the dentry does not exist. |
1340 | * | ||
1341 | * __d_lookup is dcache_lock free. The hash list is protected using RCU. | ||
1342 | * Memory barriers are used while updating and doing lockless traversal. | ||
1343 | * To avoid races with d_move while rename is happening, d_lock is used. | ||
1344 | * | ||
1345 | * Overflows in memcmp(), while d_move, are avoided by keeping the length | ||
1346 | * and name pointer in one structure pointed by d_qstr. | ||
1347 | * | ||
1348 | * rcu_read_lock() and rcu_read_unlock() are used to disable preemption while | ||
1349 | * lookup is going on. | ||
1350 | * | ||
1351 | * The dentry unused LRU is not updated even if lookup finds the required dentry | ||
1352 | * in there. It is updated in places such as prune_dcache, shrink_dcache_sb, | ||
1353 | * select_parent and __dget_locked. This laziness saves lookup from dcache_lock | ||
1354 | * acquisition. | ||
1355 | * | ||
1356 | * d_lookup() is protected against the concurrent renames in some unrelated | ||
1357 | * directory using the seqlockt_t rename_lock. | ||
1358 | */ | 1341 | */ |
1359 | |||
1360 | struct dentry * d_lookup(struct dentry * parent, struct qstr * name) | 1342 | struct dentry * d_lookup(struct dentry * parent, struct qstr * name) |
1361 | { | 1343 | { |
1362 | struct dentry * dentry = NULL; | 1344 | struct dentry * dentry = NULL; |
@@ -1372,6 +1354,21 @@ struct dentry * d_lookup(struct dentry * parent, struct qstr * name) | |||
1372 | } | 1354 | } |
1373 | EXPORT_SYMBOL(d_lookup); | 1355 | EXPORT_SYMBOL(d_lookup); |
1374 | 1356 | ||
1357 | /* | ||
1358 | * __d_lookup - search for a dentry (racy) | ||
1359 | * @parent: parent dentry | ||
1360 | * @name: qstr of name we wish to find | ||
1361 | * Returns: dentry, or NULL | ||
1362 | * | ||
1363 | * __d_lookup is like d_lookup, however it may (rarely) return a | ||
1364 | * false-negative result due to unrelated rename activity. | ||
1365 | * | ||
1366 | * __d_lookup is slightly faster by avoiding rename_lock read seqlock, | ||
1367 | * however it must be used carefully, eg. with a following d_lookup in | ||
1368 | * the case of failure. | ||
1369 | * | ||
1370 | * __d_lookup callers must be commented. | ||
1371 | */ | ||
1375 | struct dentry * __d_lookup(struct dentry * parent, struct qstr * name) | 1372 | struct dentry * __d_lookup(struct dentry * parent, struct qstr * name) |
1376 | { | 1373 | { |
1377 | unsigned int len = name->len; | 1374 | unsigned int len = name->len; |
@@ -1382,6 +1379,19 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name) | |||
1382 | struct hlist_node *node; | 1379 | struct hlist_node *node; |
1383 | struct dentry *dentry; | 1380 | struct dentry *dentry; |
1384 | 1381 | ||
1382 | /* | ||
1383 | * The hash list is protected using RCU. | ||
1384 | * | ||
1385 | * Take d_lock when comparing a candidate dentry, to avoid races | ||
1386 | * with d_move(). | ||
1387 | * | ||
1388 | * It is possible that concurrent renames can mess up our list | ||
1389 | * walk here and result in missing our dentry, resulting in the | ||
1390 | * false-negative result. d_lookup() protects against concurrent | ||
1391 | * renames using rename_lock seqlock. | ||
1392 | * | ||
1393 | * See Documentation/vfs/dcache-locking.txt for more details. | ||
1394 | */ | ||
1385 | rcu_read_lock(); | 1395 | rcu_read_lock(); |
1386 | 1396 | ||
1387 | hlist_for_each_entry_rcu(dentry, node, head, d_hash) { | 1397 | hlist_for_each_entry_rcu(dentry, node, head, d_hash) { |
@@ -1396,8 +1406,8 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name) | |||
1396 | 1406 | ||
1397 | /* | 1407 | /* |
1398 | * Recheck the dentry after taking the lock - d_move may have | 1408 | * Recheck the dentry after taking the lock - d_move may have |
1399 | * changed things. Don't bother checking the hash because we're | 1409 | * changed things. Don't bother checking the hash because |
1400 | * about to compare the whole name anyway. | 1410 | * we're about to compare the whole name anyway. |
1401 | */ | 1411 | */ |
1402 | if (dentry->d_parent != parent) | 1412 | if (dentry->d_parent != parent) |
1403 | goto next; | 1413 | goto next; |
@@ -1925,7 +1935,7 @@ static int prepend_path(const struct path *path, struct path *root, | |||
1925 | bool slash = false; | 1935 | bool slash = false; |
1926 | int error = 0; | 1936 | int error = 0; |
1927 | 1937 | ||
1928 | spin_lock(&vfsmount_lock); | 1938 | br_read_lock(vfsmount_lock); |
1929 | while (dentry != root->dentry || vfsmnt != root->mnt) { | 1939 | while (dentry != root->dentry || vfsmnt != root->mnt) { |
1930 | struct dentry * parent; | 1940 | struct dentry * parent; |
1931 | 1941 | ||
@@ -1954,7 +1964,7 @@ out: | |||
1954 | if (!error && !slash) | 1964 | if (!error && !slash) |
1955 | error = prepend(buffer, buflen, "/", 1); | 1965 | error = prepend(buffer, buflen, "/", 1); |
1956 | 1966 | ||
1957 | spin_unlock(&vfsmount_lock); | 1967 | br_read_unlock(vfsmount_lock); |
1958 | return error; | 1968 | return error; |
1959 | 1969 | ||
1960 | global_root: | 1970 | global_root: |
@@ -2292,11 +2302,12 @@ int path_is_under(struct path *path1, struct path *path2) | |||
2292 | struct vfsmount *mnt = path1->mnt; | 2302 | struct vfsmount *mnt = path1->mnt; |
2293 | struct dentry *dentry = path1->dentry; | 2303 | struct dentry *dentry = path1->dentry; |
2294 | int res; | 2304 | int res; |
2295 | spin_lock(&vfsmount_lock); | 2305 | |
2306 | br_read_lock(vfsmount_lock); | ||
2296 | if (mnt != path2->mnt) { | 2307 | if (mnt != path2->mnt) { |
2297 | for (;;) { | 2308 | for (;;) { |
2298 | if (mnt->mnt_parent == mnt) { | 2309 | if (mnt->mnt_parent == mnt) { |
2299 | spin_unlock(&vfsmount_lock); | 2310 | br_read_unlock(vfsmount_lock); |
2300 | return 0; | 2311 | return 0; |
2301 | } | 2312 | } |
2302 | if (mnt->mnt_parent == path2->mnt) | 2313 | if (mnt->mnt_parent == path2->mnt) |
@@ -2306,7 +2317,7 @@ int path_is_under(struct path *path1, struct path *path2) | |||
2306 | dentry = mnt->mnt_mountpoint; | 2317 | dentry = mnt->mnt_mountpoint; |
2307 | } | 2318 | } |
2308 | res = is_subdir(dentry, path2->dentry); | 2319 | res = is_subdir(dentry, path2->dentry); |
2309 | spin_unlock(&vfsmount_lock); | 2320 | br_read_unlock(vfsmount_lock); |
2310 | return res; | 2321 | return res; |
2311 | } | 2322 | } |
2312 | EXPORT_SYMBOL(path_is_under); | 2323 | EXPORT_SYMBOL(path_is_under); |
@@ -361,13 +361,13 @@ err: | |||
361 | /* | 361 | /* |
362 | * count() counts the number of strings in array ARGV. | 362 | * count() counts the number of strings in array ARGV. |
363 | */ | 363 | */ |
364 | static int count(char __user * __user * argv, int max) | 364 | static int count(const char __user * const __user * argv, int max) |
365 | { | 365 | { |
366 | int i = 0; | 366 | int i = 0; |
367 | 367 | ||
368 | if (argv != NULL) { | 368 | if (argv != NULL) { |
369 | for (;;) { | 369 | for (;;) { |
370 | char __user * p; | 370 | const char __user * p; |
371 | 371 | ||
372 | if (get_user(p, argv)) | 372 | if (get_user(p, argv)) |
373 | return -EFAULT; | 373 | return -EFAULT; |
@@ -387,7 +387,7 @@ static int count(char __user * __user * argv, int max) | |||
387 | * processes's memory to the new process's stack. The call to get_user_pages() | 387 | * processes's memory to the new process's stack. The call to get_user_pages() |
388 | * ensures the destination page is created and not swapped out. | 388 | * ensures the destination page is created and not swapped out. |
389 | */ | 389 | */ |
390 | static int copy_strings(int argc, char __user * __user * argv, | 390 | static int copy_strings(int argc, const char __user *const __user *argv, |
391 | struct linux_binprm *bprm) | 391 | struct linux_binprm *bprm) |
392 | { | 392 | { |
393 | struct page *kmapped_page = NULL; | 393 | struct page *kmapped_page = NULL; |
@@ -396,7 +396,7 @@ static int copy_strings(int argc, char __user * __user * argv, | |||
396 | int ret; | 396 | int ret; |
397 | 397 | ||
398 | while (argc-- > 0) { | 398 | while (argc-- > 0) { |
399 | char __user *str; | 399 | const char __user *str; |
400 | int len; | 400 | int len; |
401 | unsigned long pos; | 401 | unsigned long pos; |
402 | 402 | ||
@@ -470,12 +470,13 @@ out: | |||
470 | /* | 470 | /* |
471 | * Like copy_strings, but get argv and its values from kernel memory. | 471 | * Like copy_strings, but get argv and its values from kernel memory. |
472 | */ | 472 | */ |
473 | int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm) | 473 | int copy_strings_kernel(int argc, const char *const *argv, |
474 | struct linux_binprm *bprm) | ||
474 | { | 475 | { |
475 | int r; | 476 | int r; |
476 | mm_segment_t oldfs = get_fs(); | 477 | mm_segment_t oldfs = get_fs(); |
477 | set_fs(KERNEL_DS); | 478 | set_fs(KERNEL_DS); |
478 | r = copy_strings(argc, (char __user * __user *)argv, bprm); | 479 | r = copy_strings(argc, (const char __user *const __user *)argv, bprm); |
479 | set_fs(oldfs); | 480 | set_fs(oldfs); |
480 | return r; | 481 | return r; |
481 | } | 482 | } |
@@ -997,7 +998,7 @@ EXPORT_SYMBOL(flush_old_exec); | |||
997 | void setup_new_exec(struct linux_binprm * bprm) | 998 | void setup_new_exec(struct linux_binprm * bprm) |
998 | { | 999 | { |
999 | int i, ch; | 1000 | int i, ch; |
1000 | char * name; | 1001 | const char *name; |
1001 | char tcomm[sizeof(current->comm)]; | 1002 | char tcomm[sizeof(current->comm)]; |
1002 | 1003 | ||
1003 | arch_pick_mmap_layout(current->mm); | 1004 | arch_pick_mmap_layout(current->mm); |
@@ -1117,7 +1118,7 @@ int check_unsafe_exec(struct linux_binprm *bprm) | |||
1117 | bprm->unsafe = tracehook_unsafe_exec(p); | 1118 | bprm->unsafe = tracehook_unsafe_exec(p); |
1118 | 1119 | ||
1119 | n_fs = 1; | 1120 | n_fs = 1; |
1120 | write_lock(&p->fs->lock); | 1121 | spin_lock(&p->fs->lock); |
1121 | rcu_read_lock(); | 1122 | rcu_read_lock(); |
1122 | for (t = next_thread(p); t != p; t = next_thread(t)) { | 1123 | for (t = next_thread(p); t != p; t = next_thread(t)) { |
1123 | if (t->fs == p->fs) | 1124 | if (t->fs == p->fs) |
@@ -1134,7 +1135,7 @@ int check_unsafe_exec(struct linux_binprm *bprm) | |||
1134 | res = 1; | 1135 | res = 1; |
1135 | } | 1136 | } |
1136 | } | 1137 | } |
1137 | write_unlock(&p->fs->lock); | 1138 | spin_unlock(&p->fs->lock); |
1138 | 1139 | ||
1139 | return res; | 1140 | return res; |
1140 | } | 1141 | } |
@@ -1316,9 +1317,9 @@ EXPORT_SYMBOL(search_binary_handler); | |||
1316 | /* | 1317 | /* |
1317 | * sys_execve() executes a new program. | 1318 | * sys_execve() executes a new program. |
1318 | */ | 1319 | */ |
1319 | int do_execve(char * filename, | 1320 | int do_execve(const char * filename, |
1320 | char __user *__user *argv, | 1321 | const char __user *const __user *argv, |
1321 | char __user *__user *envp, | 1322 | const char __user *const __user *envp, |
1322 | struct pt_regs * regs) | 1323 | struct pt_regs * regs) |
1323 | { | 1324 | { |
1324 | struct linux_binprm *bprm; | 1325 | struct linux_binprm *bprm; |
diff --git a/fs/fat/misc.c b/fs/fat/misc.c index 1fa23f6ffba5..1736f2356388 100644 --- a/fs/fat/misc.c +++ b/fs/fat/misc.c | |||
@@ -250,7 +250,9 @@ int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs) | |||
250 | { | 250 | { |
251 | int i, err = 0; | 251 | int i, err = 0; |
252 | 252 | ||
253 | ll_rw_block(SWRITE, nr_bhs, bhs); | 253 | for (i = 0; i < nr_bhs; i++) |
254 | write_dirty_buffer(bhs[i], WRITE); | ||
255 | |||
254 | for (i = 0; i < nr_bhs; i++) { | 256 | for (i = 0; i < nr_bhs; i++) { |
255 | wait_on_buffer(bhs[i]); | 257 | wait_on_buffer(bhs[i]); |
256 | if (buffer_eopnotsupp(bhs[i])) { | 258 | if (buffer_eopnotsupp(bhs[i])) { |
diff --git a/fs/file_table.c b/fs/file_table.c index edecd36fed9b..a04bdd81c11c 100644 --- a/fs/file_table.c +++ b/fs/file_table.c | |||
@@ -20,7 +20,9 @@ | |||
20 | #include <linux/cdev.h> | 20 | #include <linux/cdev.h> |
21 | #include <linux/fsnotify.h> | 21 | #include <linux/fsnotify.h> |
22 | #include <linux/sysctl.h> | 22 | #include <linux/sysctl.h> |
23 | #include <linux/lglock.h> | ||
23 | #include <linux/percpu_counter.h> | 24 | #include <linux/percpu_counter.h> |
25 | #include <linux/percpu.h> | ||
24 | #include <linux/ima.h> | 26 | #include <linux/ima.h> |
25 | 27 | ||
26 | #include <asm/atomic.h> | 28 | #include <asm/atomic.h> |
@@ -32,8 +34,8 @@ struct files_stat_struct files_stat = { | |||
32 | .max_files = NR_FILE | 34 | .max_files = NR_FILE |
33 | }; | 35 | }; |
34 | 36 | ||
35 | /* public. Not pretty! */ | 37 | DECLARE_LGLOCK(files_lglock); |
36 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock); | 38 | DEFINE_LGLOCK(files_lglock); |
37 | 39 | ||
38 | /* SLAB cache for file structures */ | 40 | /* SLAB cache for file structures */ |
39 | static struct kmem_cache *filp_cachep __read_mostly; | 41 | static struct kmem_cache *filp_cachep __read_mostly; |
@@ -249,7 +251,7 @@ static void __fput(struct file *file) | |||
249 | cdev_put(inode->i_cdev); | 251 | cdev_put(inode->i_cdev); |
250 | fops_put(file->f_op); | 252 | fops_put(file->f_op); |
251 | put_pid(file->f_owner.pid); | 253 | put_pid(file->f_owner.pid); |
252 | file_kill(file); | 254 | file_sb_list_del(file); |
253 | if (file->f_mode & FMODE_WRITE) | 255 | if (file->f_mode & FMODE_WRITE) |
254 | drop_file_write_access(file); | 256 | drop_file_write_access(file); |
255 | file->f_path.dentry = NULL; | 257 | file->f_path.dentry = NULL; |
@@ -328,41 +330,107 @@ struct file *fget_light(unsigned int fd, int *fput_needed) | |||
328 | return file; | 330 | return file; |
329 | } | 331 | } |
330 | 332 | ||
331 | |||
332 | void put_filp(struct file *file) | 333 | void put_filp(struct file *file) |
333 | { | 334 | { |
334 | if (atomic_long_dec_and_test(&file->f_count)) { | 335 | if (atomic_long_dec_and_test(&file->f_count)) { |
335 | security_file_free(file); | 336 | security_file_free(file); |
336 | file_kill(file); | 337 | file_sb_list_del(file); |
337 | file_free(file); | 338 | file_free(file); |
338 | } | 339 | } |
339 | } | 340 | } |
340 | 341 | ||
341 | void file_move(struct file *file, struct list_head *list) | 342 | static inline int file_list_cpu(struct file *file) |
342 | { | 343 | { |
343 | if (!list) | 344 | #ifdef CONFIG_SMP |
344 | return; | 345 | return file->f_sb_list_cpu; |
345 | file_list_lock(); | 346 | #else |
346 | list_move(&file->f_u.fu_list, list); | 347 | return smp_processor_id(); |
347 | file_list_unlock(); | 348 | #endif |
349 | } | ||
350 | |||
351 | /* helper for file_sb_list_add to reduce ifdefs */ | ||
352 | static inline void __file_sb_list_add(struct file *file, struct super_block *sb) | ||
353 | { | ||
354 | struct list_head *list; | ||
355 | #ifdef CONFIG_SMP | ||
356 | int cpu; | ||
357 | cpu = smp_processor_id(); | ||
358 | file->f_sb_list_cpu = cpu; | ||
359 | list = per_cpu_ptr(sb->s_files, cpu); | ||
360 | #else | ||
361 | list = &sb->s_files; | ||
362 | #endif | ||
363 | list_add(&file->f_u.fu_list, list); | ||
348 | } | 364 | } |
349 | 365 | ||
350 | void file_kill(struct file *file) | 366 | /** |
367 | * file_sb_list_add - add a file to the sb's file list | ||
368 | * @file: file to add | ||
369 | * @sb: sb to add it to | ||
370 | * | ||
371 | * Use this function to associate a file with the superblock of the inode it | ||
372 | * refers to. | ||
373 | */ | ||
374 | void file_sb_list_add(struct file *file, struct super_block *sb) | ||
375 | { | ||
376 | lg_local_lock(files_lglock); | ||
377 | __file_sb_list_add(file, sb); | ||
378 | lg_local_unlock(files_lglock); | ||
379 | } | ||
380 | |||
381 | /** | ||
382 | * file_sb_list_del - remove a file from the sb's file list | ||
383 | * @file: file to remove | ||
384 | * @sb: sb to remove it from | ||
385 | * | ||
386 | * Use this function to remove a file from its superblock. | ||
387 | */ | ||
388 | void file_sb_list_del(struct file *file) | ||
351 | { | 389 | { |
352 | if (!list_empty(&file->f_u.fu_list)) { | 390 | if (!list_empty(&file->f_u.fu_list)) { |
353 | file_list_lock(); | 391 | lg_local_lock_cpu(files_lglock, file_list_cpu(file)); |
354 | list_del_init(&file->f_u.fu_list); | 392 | list_del_init(&file->f_u.fu_list); |
355 | file_list_unlock(); | 393 | lg_local_unlock_cpu(files_lglock, file_list_cpu(file)); |
356 | } | 394 | } |
357 | } | 395 | } |
358 | 396 | ||
397 | #ifdef CONFIG_SMP | ||
398 | |||
399 | /* | ||
400 | * These macros iterate all files on all CPUs for a given superblock. | ||
401 | * files_lglock must be held globally. | ||
402 | */ | ||
403 | #define do_file_list_for_each_entry(__sb, __file) \ | ||
404 | { \ | ||
405 | int i; \ | ||
406 | for_each_possible_cpu(i) { \ | ||
407 | struct list_head *list; \ | ||
408 | list = per_cpu_ptr((__sb)->s_files, i); \ | ||
409 | list_for_each_entry((__file), list, f_u.fu_list) | ||
410 | |||
411 | #define while_file_list_for_each_entry \ | ||
412 | } \ | ||
413 | } | ||
414 | |||
415 | #else | ||
416 | |||
417 | #define do_file_list_for_each_entry(__sb, __file) \ | ||
418 | { \ | ||
419 | struct list_head *list; \ | ||
420 | list = &(sb)->s_files; \ | ||
421 | list_for_each_entry((__file), list, f_u.fu_list) | ||
422 | |||
423 | #define while_file_list_for_each_entry \ | ||
424 | } | ||
425 | |||
426 | #endif | ||
427 | |||
359 | int fs_may_remount_ro(struct super_block *sb) | 428 | int fs_may_remount_ro(struct super_block *sb) |
360 | { | 429 | { |
361 | struct file *file; | 430 | struct file *file; |
362 | |||
363 | /* Check that no files are currently opened for writing. */ | 431 | /* Check that no files are currently opened for writing. */ |
364 | file_list_lock(); | 432 | lg_global_lock(files_lglock); |
365 | list_for_each_entry(file, &sb->s_files, f_u.fu_list) { | 433 | do_file_list_for_each_entry(sb, file) { |
366 | struct inode *inode = file->f_path.dentry->d_inode; | 434 | struct inode *inode = file->f_path.dentry->d_inode; |
367 | 435 | ||
368 | /* File with pending delete? */ | 436 | /* File with pending delete? */ |
@@ -372,11 +440,11 @@ int fs_may_remount_ro(struct super_block *sb) | |||
372 | /* Writeable file? */ | 440 | /* Writeable file? */ |
373 | if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE)) | 441 | if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE)) |
374 | goto too_bad; | 442 | goto too_bad; |
375 | } | 443 | } while_file_list_for_each_entry; |
376 | file_list_unlock(); | 444 | lg_global_unlock(files_lglock); |
377 | return 1; /* Tis' cool bro. */ | 445 | return 1; /* Tis' cool bro. */ |
378 | too_bad: | 446 | too_bad: |
379 | file_list_unlock(); | 447 | lg_global_unlock(files_lglock); |
380 | return 0; | 448 | return 0; |
381 | } | 449 | } |
382 | 450 | ||
@@ -392,8 +460,8 @@ void mark_files_ro(struct super_block *sb) | |||
392 | struct file *f; | 460 | struct file *f; |
393 | 461 | ||
394 | retry: | 462 | retry: |
395 | file_list_lock(); | 463 | lg_global_lock(files_lglock); |
396 | list_for_each_entry(f, &sb->s_files, f_u.fu_list) { | 464 | do_file_list_for_each_entry(sb, f) { |
397 | struct vfsmount *mnt; | 465 | struct vfsmount *mnt; |
398 | if (!S_ISREG(f->f_path.dentry->d_inode->i_mode)) | 466 | if (!S_ISREG(f->f_path.dentry->d_inode->i_mode)) |
399 | continue; | 467 | continue; |
@@ -408,16 +476,13 @@ retry: | |||
408 | continue; | 476 | continue; |
409 | file_release_write(f); | 477 | file_release_write(f); |
410 | mnt = mntget(f->f_path.mnt); | 478 | mnt = mntget(f->f_path.mnt); |
411 | file_list_unlock(); | 479 | /* This can sleep, so we can't hold the spinlock. */ |
412 | /* | 480 | lg_global_unlock(files_lglock); |
413 | * This can sleep, so we can't hold | ||
414 | * the file_list_lock() spinlock. | ||
415 | */ | ||
416 | mnt_drop_write(mnt); | 481 | mnt_drop_write(mnt); |
417 | mntput(mnt); | 482 | mntput(mnt); |
418 | goto retry; | 483 | goto retry; |
419 | } | 484 | } while_file_list_for_each_entry; |
420 | file_list_unlock(); | 485 | lg_global_unlock(files_lglock); |
421 | } | 486 | } |
422 | 487 | ||
423 | void __init files_init(unsigned long mempages) | 488 | void __init files_init(unsigned long mempages) |
@@ -437,5 +502,6 @@ void __init files_init(unsigned long mempages) | |||
437 | if (files_stat.max_files < NR_FILE) | 502 | if (files_stat.max_files < NR_FILE) |
438 | files_stat.max_files = NR_FILE; | 503 | files_stat.max_files = NR_FILE; |
439 | files_defer_init(); | 504 | files_defer_init(); |
505 | lg_lock_init(files_lglock); | ||
440 | percpu_counter_init(&nr_files, 0); | 506 | percpu_counter_init(&nr_files, 0); |
441 | } | 507 | } |
diff --git a/fs/fs_struct.c b/fs/fs_struct.c index 1ee40eb9a2c0..ed45a9cf5f3d 100644 --- a/fs/fs_struct.c +++ b/fs/fs_struct.c | |||
@@ -13,11 +13,11 @@ void set_fs_root(struct fs_struct *fs, struct path *path) | |||
13 | { | 13 | { |
14 | struct path old_root; | 14 | struct path old_root; |
15 | 15 | ||
16 | write_lock(&fs->lock); | 16 | spin_lock(&fs->lock); |
17 | old_root = fs->root; | 17 | old_root = fs->root; |
18 | fs->root = *path; | 18 | fs->root = *path; |
19 | path_get(path); | 19 | path_get(path); |
20 | write_unlock(&fs->lock); | 20 | spin_unlock(&fs->lock); |
21 | if (old_root.dentry) | 21 | if (old_root.dentry) |
22 | path_put(&old_root); | 22 | path_put(&old_root); |
23 | } | 23 | } |
@@ -30,11 +30,11 @@ void set_fs_pwd(struct fs_struct *fs, struct path *path) | |||
30 | { | 30 | { |
31 | struct path old_pwd; | 31 | struct path old_pwd; |
32 | 32 | ||
33 | write_lock(&fs->lock); | 33 | spin_lock(&fs->lock); |
34 | old_pwd = fs->pwd; | 34 | old_pwd = fs->pwd; |
35 | fs->pwd = *path; | 35 | fs->pwd = *path; |
36 | path_get(path); | 36 | path_get(path); |
37 | write_unlock(&fs->lock); | 37 | spin_unlock(&fs->lock); |
38 | 38 | ||
39 | if (old_pwd.dentry) | 39 | if (old_pwd.dentry) |
40 | path_put(&old_pwd); | 40 | path_put(&old_pwd); |
@@ -51,7 +51,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root) | |||
51 | task_lock(p); | 51 | task_lock(p); |
52 | fs = p->fs; | 52 | fs = p->fs; |
53 | if (fs) { | 53 | if (fs) { |
54 | write_lock(&fs->lock); | 54 | spin_lock(&fs->lock); |
55 | if (fs->root.dentry == old_root->dentry | 55 | if (fs->root.dentry == old_root->dentry |
56 | && fs->root.mnt == old_root->mnt) { | 56 | && fs->root.mnt == old_root->mnt) { |
57 | path_get(new_root); | 57 | path_get(new_root); |
@@ -64,7 +64,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root) | |||
64 | fs->pwd = *new_root; | 64 | fs->pwd = *new_root; |
65 | count++; | 65 | count++; |
66 | } | 66 | } |
67 | write_unlock(&fs->lock); | 67 | spin_unlock(&fs->lock); |
68 | } | 68 | } |
69 | task_unlock(p); | 69 | task_unlock(p); |
70 | } while_each_thread(g, p); | 70 | } while_each_thread(g, p); |
@@ -87,10 +87,10 @@ void exit_fs(struct task_struct *tsk) | |||
87 | if (fs) { | 87 | if (fs) { |
88 | int kill; | 88 | int kill; |
89 | task_lock(tsk); | 89 | task_lock(tsk); |
90 | write_lock(&fs->lock); | 90 | spin_lock(&fs->lock); |
91 | tsk->fs = NULL; | 91 | tsk->fs = NULL; |
92 | kill = !--fs->users; | 92 | kill = !--fs->users; |
93 | write_unlock(&fs->lock); | 93 | spin_unlock(&fs->lock); |
94 | task_unlock(tsk); | 94 | task_unlock(tsk); |
95 | if (kill) | 95 | if (kill) |
96 | free_fs_struct(fs); | 96 | free_fs_struct(fs); |
@@ -104,7 +104,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old) | |||
104 | if (fs) { | 104 | if (fs) { |
105 | fs->users = 1; | 105 | fs->users = 1; |
106 | fs->in_exec = 0; | 106 | fs->in_exec = 0; |
107 | rwlock_init(&fs->lock); | 107 | spin_lock_init(&fs->lock); |
108 | fs->umask = old->umask; | 108 | fs->umask = old->umask; |
109 | get_fs_root_and_pwd(old, &fs->root, &fs->pwd); | 109 | get_fs_root_and_pwd(old, &fs->root, &fs->pwd); |
110 | } | 110 | } |
@@ -121,10 +121,10 @@ int unshare_fs_struct(void) | |||
121 | return -ENOMEM; | 121 | return -ENOMEM; |
122 | 122 | ||
123 | task_lock(current); | 123 | task_lock(current); |
124 | write_lock(&fs->lock); | 124 | spin_lock(&fs->lock); |
125 | kill = !--fs->users; | 125 | kill = !--fs->users; |
126 | current->fs = new_fs; | 126 | current->fs = new_fs; |
127 | write_unlock(&fs->lock); | 127 | spin_unlock(&fs->lock); |
128 | task_unlock(current); | 128 | task_unlock(current); |
129 | 129 | ||
130 | if (kill) | 130 | if (kill) |
@@ -143,7 +143,7 @@ EXPORT_SYMBOL(current_umask); | |||
143 | /* to be mentioned only in INIT_TASK */ | 143 | /* to be mentioned only in INIT_TASK */ |
144 | struct fs_struct init_fs = { | 144 | struct fs_struct init_fs = { |
145 | .users = 1, | 145 | .users = 1, |
146 | .lock = __RW_LOCK_UNLOCKED(init_fs.lock), | 146 | .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock), |
147 | .umask = 0022, | 147 | .umask = 0022, |
148 | }; | 148 | }; |
149 | 149 | ||
@@ -156,14 +156,14 @@ void daemonize_fs_struct(void) | |||
156 | 156 | ||
157 | task_lock(current); | 157 | task_lock(current); |
158 | 158 | ||
159 | write_lock(&init_fs.lock); | 159 | spin_lock(&init_fs.lock); |
160 | init_fs.users++; | 160 | init_fs.users++; |
161 | write_unlock(&init_fs.lock); | 161 | spin_unlock(&init_fs.lock); |
162 | 162 | ||
163 | write_lock(&fs->lock); | 163 | spin_lock(&fs->lock); |
164 | current->fs = &init_fs; | 164 | current->fs = &init_fs; |
165 | kill = !--fs->users; | 165 | kill = !--fs->users; |
166 | write_unlock(&fs->lock); | 166 | spin_unlock(&fs->lock); |
167 | 167 | ||
168 | task_unlock(current); | 168 | task_unlock(current); |
169 | if (kill) | 169 | if (kill) |
diff --git a/fs/generic_acl.c b/fs/generic_acl.c index 99800e564157..6bc9e3a5a693 100644 --- a/fs/generic_acl.c +++ b/fs/generic_acl.c | |||
@@ -94,6 +94,7 @@ generic_acl_set(struct dentry *dentry, const char *name, const void *value, | |||
94 | if (error < 0) | 94 | if (error < 0) |
95 | goto failed; | 95 | goto failed; |
96 | inode->i_mode = mode; | 96 | inode->i_mode = mode; |
97 | inode->i_ctime = CURRENT_TIME; | ||
97 | if (error == 0) { | 98 | if (error == 0) { |
98 | posix_acl_release(acl); | 99 | posix_acl_release(acl); |
99 | acl = NULL; | 100 | acl = NULL; |
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index dd1e55535a4e..f7dc9b5f9ef8 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c | |||
@@ -104,7 +104,7 @@ static char *__dentry_name(struct dentry *dentry, char *name) | |||
104 | __putname(name); | 104 | __putname(name); |
105 | return NULL; | 105 | return NULL; |
106 | } | 106 | } |
107 | strncpy(name, root, PATH_MAX); | 107 | strlcpy(name, root, PATH_MAX); |
108 | if (len > p - name) { | 108 | if (len > p - name) { |
109 | __putname(name); | 109 | __putname(name); |
110 | return NULL; | 110 | return NULL; |
@@ -876,7 +876,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd) | |||
876 | char *path = dentry_name(dentry); | 876 | char *path = dentry_name(dentry); |
877 | int err = -ENOMEM; | 877 | int err = -ENOMEM; |
878 | if (path) { | 878 | if (path) { |
879 | int err = hostfs_do_readlink(path, link, PATH_MAX); | 879 | err = hostfs_do_readlink(path, link, PATH_MAX); |
880 | if (err == PATH_MAX) | 880 | if (err == PATH_MAX) |
881 | err = -E2BIG; | 881 | err = -E2BIG; |
882 | __putname(path); | 882 | __putname(path); |
diff --git a/fs/internal.h b/fs/internal.h index 6b706bc60a66..a6910e91cee8 100644 --- a/fs/internal.h +++ b/fs/internal.h | |||
@@ -9,6 +9,8 @@ | |||
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/lglock.h> | ||
13 | |||
12 | struct super_block; | 14 | struct super_block; |
13 | struct linux_binprm; | 15 | struct linux_binprm; |
14 | struct path; | 16 | struct path; |
@@ -70,7 +72,8 @@ extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int); | |||
70 | 72 | ||
71 | extern void __init mnt_init(void); | 73 | extern void __init mnt_init(void); |
72 | 74 | ||
73 | extern spinlock_t vfsmount_lock; | 75 | DECLARE_BRLOCK(vfsmount_lock); |
76 | |||
74 | 77 | ||
75 | /* | 78 | /* |
76 | * fs_struct.c | 79 | * fs_struct.c |
@@ -80,6 +83,8 @@ extern void chroot_fs_refs(struct path *, struct path *); | |||
80 | /* | 83 | /* |
81 | * file_table.c | 84 | * file_table.c |
82 | */ | 85 | */ |
86 | extern void file_sb_list_add(struct file *f, struct super_block *sb); | ||
87 | extern void file_sb_list_del(struct file *f); | ||
83 | extern void mark_files_ro(struct super_block *); | 88 | extern void mark_files_ro(struct super_block *); |
84 | extern struct file *get_empty_filp(void); | 89 | extern struct file *get_empty_filp(void); |
85 | 90 | ||
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c index b0435dd0654d..05a38b9c4c0e 100644 --- a/fs/jbd/checkpoint.c +++ b/fs/jbd/checkpoint.c | |||
@@ -254,7 +254,9 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count) | |||
254 | { | 254 | { |
255 | int i; | 255 | int i; |
256 | 256 | ||
257 | ll_rw_block(SWRITE, *batch_count, bhs); | 257 | for (i = 0; i < *batch_count; i++) |
258 | write_dirty_buffer(bhs[i], WRITE); | ||
259 | |||
258 | for (i = 0; i < *batch_count; i++) { | 260 | for (i = 0; i < *batch_count; i++) { |
259 | struct buffer_head *bh = bhs[i]; | 261 | struct buffer_head *bh = bhs[i]; |
260 | clear_buffer_jwrite(bh); | 262 | clear_buffer_jwrite(bh); |
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c index 28a9ddaa0c49..95d8c11c929e 100644 --- a/fs/jbd/commit.c +++ b/fs/jbd/commit.c | |||
@@ -119,7 +119,6 @@ static int journal_write_commit_record(journal_t *journal, | |||
119 | struct buffer_head *bh; | 119 | struct buffer_head *bh; |
120 | journal_header_t *header; | 120 | journal_header_t *header; |
121 | int ret; | 121 | int ret; |
122 | int barrier_done = 0; | ||
123 | 122 | ||
124 | if (is_journal_aborted(journal)) | 123 | if (is_journal_aborted(journal)) |
125 | return 0; | 124 | return 0; |
@@ -137,34 +136,36 @@ static int journal_write_commit_record(journal_t *journal, | |||
137 | 136 | ||
138 | JBUFFER_TRACE(descriptor, "write commit block"); | 137 | JBUFFER_TRACE(descriptor, "write commit block"); |
139 | set_buffer_dirty(bh); | 138 | set_buffer_dirty(bh); |
139 | |||
140 | if (journal->j_flags & JFS_BARRIER) { | 140 | if (journal->j_flags & JFS_BARRIER) { |
141 | set_buffer_ordered(bh); | 141 | ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_BARRIER); |
142 | barrier_done = 1; | ||
143 | } | ||
144 | ret = sync_dirty_buffer(bh); | ||
145 | if (barrier_done) | ||
146 | clear_buffer_ordered(bh); | ||
147 | /* is it possible for another commit to fail at roughly | ||
148 | * the same time as this one? If so, we don't want to | ||
149 | * trust the barrier flag in the super, but instead want | ||
150 | * to remember if we sent a barrier request | ||
151 | */ | ||
152 | if (ret == -EOPNOTSUPP && barrier_done) { | ||
153 | char b[BDEVNAME_SIZE]; | ||
154 | 142 | ||
155 | printk(KERN_WARNING | 143 | /* |
156 | "JBD: barrier-based sync failed on %s - " | 144 | * Is it possible for another commit to fail at roughly |
157 | "disabling barriers\n", | 145 | * the same time as this one? If so, we don't want to |
158 | bdevname(journal->j_dev, b)); | 146 | * trust the barrier flag in the super, but instead want |
159 | spin_lock(&journal->j_state_lock); | 147 | * to remember if we sent a barrier request |
160 | journal->j_flags &= ~JFS_BARRIER; | 148 | */ |
161 | spin_unlock(&journal->j_state_lock); | 149 | if (ret == -EOPNOTSUPP) { |
150 | char b[BDEVNAME_SIZE]; | ||
162 | 151 | ||
163 | /* And try again, without the barrier */ | 152 | printk(KERN_WARNING |
164 | set_buffer_uptodate(bh); | 153 | "JBD: barrier-based sync failed on %s - " |
165 | set_buffer_dirty(bh); | 154 | "disabling barriers\n", |
155 | bdevname(journal->j_dev, b)); | ||
156 | spin_lock(&journal->j_state_lock); | ||
157 | journal->j_flags &= ~JFS_BARRIER; | ||
158 | spin_unlock(&journal->j_state_lock); | ||
159 | |||
160 | /* And try again, without the barrier */ | ||
161 | set_buffer_uptodate(bh); | ||
162 | set_buffer_dirty(bh); | ||
163 | ret = sync_dirty_buffer(bh); | ||
164 | } | ||
165 | } else { | ||
166 | ret = sync_dirty_buffer(bh); | 166 | ret = sync_dirty_buffer(bh); |
167 | } | 167 | } |
168 | |||
168 | put_bh(bh); /* One for getblk() */ | 169 | put_bh(bh); /* One for getblk() */ |
169 | journal_put_journal_head(descriptor); | 170 | journal_put_journal_head(descriptor); |
170 | 171 | ||
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index f19ce94693d8..2c4b1f109da9 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c | |||
@@ -1024,7 +1024,7 @@ void journal_update_superblock(journal_t *journal, int wait) | |||
1024 | if (wait) | 1024 | if (wait) |
1025 | sync_dirty_buffer(bh); | 1025 | sync_dirty_buffer(bh); |
1026 | else | 1026 | else |
1027 | ll_rw_block(SWRITE, 1, &bh); | 1027 | write_dirty_buffer(bh, WRITE); |
1028 | 1028 | ||
1029 | out: | 1029 | out: |
1030 | /* If we have just flushed the log (by marking s_start==0), then | 1030 | /* If we have just flushed the log (by marking s_start==0), then |
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c index ad717328343a..d29018307e2e 100644 --- a/fs/jbd/revoke.c +++ b/fs/jbd/revoke.c | |||
@@ -617,7 +617,7 @@ static void flush_descriptor(journal_t *journal, | |||
617 | set_buffer_jwrite(bh); | 617 | set_buffer_jwrite(bh); |
618 | BUFFER_TRACE(bh, "write"); | 618 | BUFFER_TRACE(bh, "write"); |
619 | set_buffer_dirty(bh); | 619 | set_buffer_dirty(bh); |
620 | ll_rw_block((write_op == WRITE) ? SWRITE : SWRITE_SYNC_PLUG, 1, &bh); | 620 | write_dirty_buffer(bh, write_op); |
621 | } | 621 | } |
622 | #endif | 622 | #endif |
623 | 623 | ||
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index 1c23a0f4e8a3..5247e7ffdcb4 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c | |||
@@ -255,7 +255,9 @@ __flush_batch(journal_t *journal, int *batch_count) | |||
255 | { | 255 | { |
256 | int i; | 256 | int i; |
257 | 257 | ||
258 | ll_rw_block(SWRITE, *batch_count, journal->j_chkpt_bhs); | 258 | for (i = 0; i < *batch_count; i++) |
259 | write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE); | ||
260 | |||
259 | for (i = 0; i < *batch_count; i++) { | 261 | for (i = 0; i < *batch_count; i++) { |
260 | struct buffer_head *bh = journal->j_chkpt_bhs[i]; | 262 | struct buffer_head *bh = journal->j_chkpt_bhs[i]; |
261 | clear_buffer_jwrite(bh); | 263 | clear_buffer_jwrite(bh); |
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index f52e5e8049f1..7c068c189d80 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c | |||
@@ -101,7 +101,6 @@ static int journal_submit_commit_record(journal_t *journal, | |||
101 | struct commit_header *tmp; | 101 | struct commit_header *tmp; |
102 | struct buffer_head *bh; | 102 | struct buffer_head *bh; |
103 | int ret; | 103 | int ret; |
104 | int barrier_done = 0; | ||
105 | struct timespec now = current_kernel_time(); | 104 | struct timespec now = current_kernel_time(); |
106 | 105 | ||
107 | if (is_journal_aborted(journal)) | 106 | if (is_journal_aborted(journal)) |
@@ -136,30 +135,22 @@ static int journal_submit_commit_record(journal_t *journal, | |||
136 | if (journal->j_flags & JBD2_BARRIER && | 135 | if (journal->j_flags & JBD2_BARRIER && |
137 | !JBD2_HAS_INCOMPAT_FEATURE(journal, | 136 | !JBD2_HAS_INCOMPAT_FEATURE(journal, |
138 | JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) { | 137 | JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) { |
139 | set_buffer_ordered(bh); | 138 | ret = submit_bh(WRITE_SYNC_PLUG | WRITE_BARRIER, bh); |
140 | barrier_done = 1; | 139 | if (ret == -EOPNOTSUPP) { |
141 | } | 140 | printk(KERN_WARNING |
142 | ret = submit_bh(WRITE_SYNC_PLUG, bh); | 141 | "JBD2: Disabling barriers on %s, " |
143 | if (barrier_done) | 142 | "not supported by device\n", journal->j_devname); |
144 | clear_buffer_ordered(bh); | 143 | write_lock(&journal->j_state_lock); |
145 | 144 | journal->j_flags &= ~JBD2_BARRIER; | |
146 | /* is it possible for another commit to fail at roughly | 145 | write_unlock(&journal->j_state_lock); |
147 | * the same time as this one? If so, we don't want to | ||
148 | * trust the barrier flag in the super, but instead want | ||
149 | * to remember if we sent a barrier request | ||
150 | */ | ||
151 | if (ret == -EOPNOTSUPP && barrier_done) { | ||
152 | printk(KERN_WARNING | ||
153 | "JBD2: Disabling barriers on %s, " | ||
154 | "not supported by device\n", journal->j_devname); | ||
155 | write_lock(&journal->j_state_lock); | ||
156 | journal->j_flags &= ~JBD2_BARRIER; | ||
157 | write_unlock(&journal->j_state_lock); | ||
158 | 146 | ||
159 | /* And try again, without the barrier */ | 147 | /* And try again, without the barrier */ |
160 | lock_buffer(bh); | 148 | lock_buffer(bh); |
161 | set_buffer_uptodate(bh); | 149 | set_buffer_uptodate(bh); |
162 | clear_buffer_dirty(bh); | 150 | clear_buffer_dirty(bh); |
151 | ret = submit_bh(WRITE_SYNC_PLUG, bh); | ||
152 | } | ||
153 | } else { | ||
163 | ret = submit_bh(WRITE_SYNC_PLUG, bh); | 154 | ret = submit_bh(WRITE_SYNC_PLUG, bh); |
164 | } | 155 | } |
165 | *cbh = bh; | 156 | *cbh = bh; |
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index ad5866aaf0f9..0e8014ea6b94 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
@@ -1124,7 +1124,7 @@ void jbd2_journal_update_superblock(journal_t *journal, int wait) | |||
1124 | set_buffer_uptodate(bh); | 1124 | set_buffer_uptodate(bh); |
1125 | } | 1125 | } |
1126 | } else | 1126 | } else |
1127 | ll_rw_block(SWRITE, 1, &bh); | 1127 | write_dirty_buffer(bh, WRITE); |
1128 | 1128 | ||
1129 | out: | 1129 | out: |
1130 | /* If we have just flushed the log (by marking s_start==0), then | 1130 | /* If we have just flushed the log (by marking s_start==0), then |
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c index a360b06af2e3..9ad321fd63fd 100644 --- a/fs/jbd2/revoke.c +++ b/fs/jbd2/revoke.c | |||
@@ -625,7 +625,7 @@ static void flush_descriptor(journal_t *journal, | |||
625 | set_buffer_jwrite(bh); | 625 | set_buffer_jwrite(bh); |
626 | BUFFER_TRACE(bh, "write"); | 626 | BUFFER_TRACE(bh, "write"); |
627 | set_buffer_dirty(bh); | 627 | set_buffer_dirty(bh); |
628 | ll_rw_block((write_op == WRITE) ? SWRITE : SWRITE_SYNC_PLUG, 1, &bh); | 628 | write_dirty_buffer(bh, write_op); |
629 | } | 629 | } |
630 | #endif | 630 | #endif |
631 | 631 | ||
diff --git a/fs/mbcache.c b/fs/mbcache.c index cf4e6cdfd15b..93444747237b 100644 --- a/fs/mbcache.c +++ b/fs/mbcache.c | |||
@@ -80,6 +80,7 @@ struct mb_cache { | |||
80 | struct list_head c_cache_list; | 80 | struct list_head c_cache_list; |
81 | const char *c_name; | 81 | const char *c_name; |
82 | atomic_t c_entry_count; | 82 | atomic_t c_entry_count; |
83 | int c_max_entries; | ||
83 | int c_bucket_bits; | 84 | int c_bucket_bits; |
84 | struct kmem_cache *c_entry_cache; | 85 | struct kmem_cache *c_entry_cache; |
85 | struct list_head *c_block_hash; | 86 | struct list_head *c_block_hash; |
@@ -243,6 +244,12 @@ mb_cache_create(const char *name, int bucket_bits) | |||
243 | if (!cache->c_entry_cache) | 244 | if (!cache->c_entry_cache) |
244 | goto fail2; | 245 | goto fail2; |
245 | 246 | ||
247 | /* | ||
248 | * Set an upper limit on the number of cache entries so that the hash | ||
249 | * chains won't grow too long. | ||
250 | */ | ||
251 | cache->c_max_entries = bucket_count << 4; | ||
252 | |||
246 | spin_lock(&mb_cache_spinlock); | 253 | spin_lock(&mb_cache_spinlock); |
247 | list_add(&cache->c_cache_list, &mb_cache_list); | 254 | list_add(&cache->c_cache_list, &mb_cache_list); |
248 | spin_unlock(&mb_cache_spinlock); | 255 | spin_unlock(&mb_cache_spinlock); |
@@ -333,7 +340,6 @@ mb_cache_destroy(struct mb_cache *cache) | |||
333 | kfree(cache); | 340 | kfree(cache); |
334 | } | 341 | } |
335 | 342 | ||
336 | |||
337 | /* | 343 | /* |
338 | * mb_cache_entry_alloc() | 344 | * mb_cache_entry_alloc() |
339 | * | 345 | * |
@@ -345,17 +351,29 @@ mb_cache_destroy(struct mb_cache *cache) | |||
345 | struct mb_cache_entry * | 351 | struct mb_cache_entry * |
346 | mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags) | 352 | mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags) |
347 | { | 353 | { |
348 | struct mb_cache_entry *ce; | 354 | struct mb_cache_entry *ce = NULL; |
349 | 355 | ||
350 | ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags); | 356 | if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) { |
351 | if (ce) { | 357 | spin_lock(&mb_cache_spinlock); |
358 | if (!list_empty(&mb_cache_lru_list)) { | ||
359 | ce = list_entry(mb_cache_lru_list.next, | ||
360 | struct mb_cache_entry, e_lru_list); | ||
361 | list_del_init(&ce->e_lru_list); | ||
362 | __mb_cache_entry_unhash(ce); | ||
363 | } | ||
364 | spin_unlock(&mb_cache_spinlock); | ||
365 | } | ||
366 | if (!ce) { | ||
367 | ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags); | ||
368 | if (!ce) | ||
369 | return NULL; | ||
352 | atomic_inc(&cache->c_entry_count); | 370 | atomic_inc(&cache->c_entry_count); |
353 | INIT_LIST_HEAD(&ce->e_lru_list); | 371 | INIT_LIST_HEAD(&ce->e_lru_list); |
354 | INIT_LIST_HEAD(&ce->e_block_list); | 372 | INIT_LIST_HEAD(&ce->e_block_list); |
355 | ce->e_cache = cache; | 373 | ce->e_cache = cache; |
356 | ce->e_used = 1 + MB_CACHE_WRITER; | ||
357 | ce->e_queued = 0; | 374 | ce->e_queued = 0; |
358 | } | 375 | } |
376 | ce->e_used = 1 + MB_CACHE_WRITER; | ||
359 | return ce; | 377 | return ce; |
360 | } | 378 | } |
361 | 379 | ||
diff --git a/fs/namei.c b/fs/namei.c index 17ea76bf2fbe..24896e833565 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -595,15 +595,16 @@ int follow_up(struct path *path) | |||
595 | { | 595 | { |
596 | struct vfsmount *parent; | 596 | struct vfsmount *parent; |
597 | struct dentry *mountpoint; | 597 | struct dentry *mountpoint; |
598 | spin_lock(&vfsmount_lock); | 598 | |
599 | br_read_lock(vfsmount_lock); | ||
599 | parent = path->mnt->mnt_parent; | 600 | parent = path->mnt->mnt_parent; |
600 | if (parent == path->mnt) { | 601 | if (parent == path->mnt) { |
601 | spin_unlock(&vfsmount_lock); | 602 | br_read_unlock(vfsmount_lock); |
602 | return 0; | 603 | return 0; |
603 | } | 604 | } |
604 | mntget(parent); | 605 | mntget(parent); |
605 | mountpoint = dget(path->mnt->mnt_mountpoint); | 606 | mountpoint = dget(path->mnt->mnt_mountpoint); |
606 | spin_unlock(&vfsmount_lock); | 607 | br_read_unlock(vfsmount_lock); |
607 | dput(path->dentry); | 608 | dput(path->dentry); |
608 | path->dentry = mountpoint; | 609 | path->dentry = mountpoint; |
609 | mntput(path->mnt); | 610 | mntput(path->mnt); |
@@ -686,6 +687,35 @@ static __always_inline void follow_dotdot(struct nameidata *nd) | |||
686 | } | 687 | } |
687 | 688 | ||
688 | /* | 689 | /* |
690 | * Allocate a dentry with name and parent, and perform a parent | ||
691 | * directory ->lookup on it. Returns the new dentry, or ERR_PTR | ||
692 | * on error. parent->d_inode->i_mutex must be held. d_lookup must | ||
693 | * have verified that no child exists while under i_mutex. | ||
694 | */ | ||
695 | static struct dentry *d_alloc_and_lookup(struct dentry *parent, | ||
696 | struct qstr *name, struct nameidata *nd) | ||
697 | { | ||
698 | struct inode *inode = parent->d_inode; | ||
699 | struct dentry *dentry; | ||
700 | struct dentry *old; | ||
701 | |||
702 | /* Don't create child dentry for a dead directory. */ | ||
703 | if (unlikely(IS_DEADDIR(inode))) | ||
704 | return ERR_PTR(-ENOENT); | ||
705 | |||
706 | dentry = d_alloc(parent, name); | ||
707 | if (unlikely(!dentry)) | ||
708 | return ERR_PTR(-ENOMEM); | ||
709 | |||
710 | old = inode->i_op->lookup(inode, dentry, nd); | ||
711 | if (unlikely(old)) { | ||
712 | dput(dentry); | ||
713 | dentry = old; | ||
714 | } | ||
715 | return dentry; | ||
716 | } | ||
717 | |||
718 | /* | ||
689 | * It's more convoluted than I'd like it to be, but... it's still fairly | 719 | * It's more convoluted than I'd like it to be, but... it's still fairly |
690 | * small and for now I'd prefer to have fast path as straight as possible. | 720 | * small and for now I'd prefer to have fast path as straight as possible. |
691 | * It _is_ time-critical. | 721 | * It _is_ time-critical. |
@@ -706,9 +736,15 @@ static int do_lookup(struct nameidata *nd, struct qstr *name, | |||
706 | return err; | 736 | return err; |
707 | } | 737 | } |
708 | 738 | ||
739 | /* | ||
740 | * Rename seqlock is not required here because in the off chance | ||
741 | * of a false negative due to a concurrent rename, we're going to | ||
742 | * do the non-racy lookup, below. | ||
743 | */ | ||
709 | dentry = __d_lookup(nd->path.dentry, name); | 744 | dentry = __d_lookup(nd->path.dentry, name); |
710 | if (!dentry) | 745 | if (!dentry) |
711 | goto need_lookup; | 746 | goto need_lookup; |
747 | found: | ||
712 | if (dentry->d_op && dentry->d_op->d_revalidate) | 748 | if (dentry->d_op && dentry->d_op->d_revalidate) |
713 | goto need_revalidate; | 749 | goto need_revalidate; |
714 | done: | 750 | done: |
@@ -724,56 +760,28 @@ need_lookup: | |||
724 | mutex_lock(&dir->i_mutex); | 760 | mutex_lock(&dir->i_mutex); |
725 | /* | 761 | /* |
726 | * First re-do the cached lookup just in case it was created | 762 | * First re-do the cached lookup just in case it was created |
727 | * while we waited for the directory semaphore.. | 763 | * while we waited for the directory semaphore, or the first |
764 | * lookup failed due to an unrelated rename. | ||
728 | * | 765 | * |
729 | * FIXME! This could use version numbering or similar to | 766 | * This could use version numbering or similar to avoid unnecessary |
730 | * avoid unnecessary cache lookups. | 767 | * cache lookups, but then we'd have to do the first lookup in the |
731 | * | 768 | * non-racy way. However in the common case here, everything should |
732 | * The "dcache_lock" is purely to protect the RCU list walker | 769 | * be hot in cache, so would it be a big win? |
733 | * from concurrent renames at this point (we mustn't get false | ||
734 | * negatives from the RCU list walk here, unlike the optimistic | ||
735 | * fast walk). | ||
736 | * | ||
737 | * so doing d_lookup() (with seqlock), instead of lockfree __d_lookup | ||
738 | */ | 770 | */ |
739 | dentry = d_lookup(parent, name); | 771 | dentry = d_lookup(parent, name); |
740 | if (!dentry) { | 772 | if (likely(!dentry)) { |
741 | struct dentry *new; | 773 | dentry = d_alloc_and_lookup(parent, name, nd); |
742 | |||
743 | /* Don't create child dentry for a dead directory. */ | ||
744 | dentry = ERR_PTR(-ENOENT); | ||
745 | if (IS_DEADDIR(dir)) | ||
746 | goto out_unlock; | ||
747 | |||
748 | new = d_alloc(parent, name); | ||
749 | dentry = ERR_PTR(-ENOMEM); | ||
750 | if (new) { | ||
751 | dentry = dir->i_op->lookup(dir, new, nd); | ||
752 | if (dentry) | ||
753 | dput(new); | ||
754 | else | ||
755 | dentry = new; | ||
756 | } | ||
757 | out_unlock: | ||
758 | mutex_unlock(&dir->i_mutex); | 774 | mutex_unlock(&dir->i_mutex); |
759 | if (IS_ERR(dentry)) | 775 | if (IS_ERR(dentry)) |
760 | goto fail; | 776 | goto fail; |
761 | goto done; | 777 | goto done; |
762 | } | 778 | } |
763 | |||
764 | /* | 779 | /* |
765 | * Uhhuh! Nasty case: the cache was re-populated while | 780 | * Uhhuh! Nasty case: the cache was re-populated while |
766 | * we waited on the semaphore. Need to revalidate. | 781 | * we waited on the semaphore. Need to revalidate. |
767 | */ | 782 | */ |
768 | mutex_unlock(&dir->i_mutex); | 783 | mutex_unlock(&dir->i_mutex); |
769 | if (dentry->d_op && dentry->d_op->d_revalidate) { | 784 | goto found; |
770 | dentry = do_revalidate(dentry, nd); | ||
771 | if (!dentry) | ||
772 | dentry = ERR_PTR(-ENOENT); | ||
773 | } | ||
774 | if (IS_ERR(dentry)) | ||
775 | goto fail; | ||
776 | goto done; | ||
777 | 785 | ||
778 | need_revalidate: | 786 | need_revalidate: |
779 | dentry = do_revalidate(dentry, nd); | 787 | dentry = do_revalidate(dentry, nd); |
@@ -1130,35 +1138,18 @@ static struct dentry *__lookup_hash(struct qstr *name, | |||
1130 | goto out; | 1138 | goto out; |
1131 | } | 1139 | } |
1132 | 1140 | ||
1133 | dentry = __d_lookup(base, name); | 1141 | /* |
1134 | 1142 | * Don't bother with __d_lookup: callers are for creat as | |
1135 | /* lockess __d_lookup may fail due to concurrent d_move() | 1143 | * well as unlink, so a lot of the time it would cost |
1136 | * in some unrelated directory, so try with d_lookup | 1144 | * a double lookup. |
1137 | */ | 1145 | */ |
1138 | if (!dentry) | 1146 | dentry = d_lookup(base, name); |
1139 | dentry = d_lookup(base, name); | ||
1140 | 1147 | ||
1141 | if (dentry && dentry->d_op && dentry->d_op->d_revalidate) | 1148 | if (dentry && dentry->d_op && dentry->d_op->d_revalidate) |
1142 | dentry = do_revalidate(dentry, nd); | 1149 | dentry = do_revalidate(dentry, nd); |
1143 | 1150 | ||
1144 | if (!dentry) { | 1151 | if (!dentry) |
1145 | struct dentry *new; | 1152 | dentry = d_alloc_and_lookup(base, name, nd); |
1146 | |||
1147 | /* Don't create child dentry for a dead directory. */ | ||
1148 | dentry = ERR_PTR(-ENOENT); | ||
1149 | if (IS_DEADDIR(inode)) | ||
1150 | goto out; | ||
1151 | |||
1152 | new = d_alloc(base, name); | ||
1153 | dentry = ERR_PTR(-ENOMEM); | ||
1154 | if (!new) | ||
1155 | goto out; | ||
1156 | dentry = inode->i_op->lookup(inode, new, nd); | ||
1157 | if (!dentry) | ||
1158 | dentry = new; | ||
1159 | else | ||
1160 | dput(new); | ||
1161 | } | ||
1162 | out: | 1153 | out: |
1163 | return dentry; | 1154 | return dentry; |
1164 | } | 1155 | } |
diff --git a/fs/namespace.c b/fs/namespace.c index 2e10cb19c5b0..de402eb6eafb 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -11,6 +11,8 @@ | |||
11 | #include <linux/syscalls.h> | 11 | #include <linux/syscalls.h> |
12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/spinlock.h> | ||
15 | #include <linux/percpu.h> | ||
14 | #include <linux/smp_lock.h> | 16 | #include <linux/smp_lock.h> |
15 | #include <linux/init.h> | 17 | #include <linux/init.h> |
16 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
@@ -38,12 +40,10 @@ | |||
38 | #define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head)) | 40 | #define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head)) |
39 | #define HASH_SIZE (1UL << HASH_SHIFT) | 41 | #define HASH_SIZE (1UL << HASH_SHIFT) |
40 | 42 | ||
41 | /* spinlock for vfsmount related operations, inplace of dcache_lock */ | ||
42 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock); | ||
43 | |||
44 | static int event; | 43 | static int event; |
45 | static DEFINE_IDA(mnt_id_ida); | 44 | static DEFINE_IDA(mnt_id_ida); |
46 | static DEFINE_IDA(mnt_group_ida); | 45 | static DEFINE_IDA(mnt_group_ida); |
46 | static DEFINE_SPINLOCK(mnt_id_lock); | ||
47 | static int mnt_id_start = 0; | 47 | static int mnt_id_start = 0; |
48 | static int mnt_group_start = 1; | 48 | static int mnt_group_start = 1; |
49 | 49 | ||
@@ -55,6 +55,16 @@ static struct rw_semaphore namespace_sem; | |||
55 | struct kobject *fs_kobj; | 55 | struct kobject *fs_kobj; |
56 | EXPORT_SYMBOL_GPL(fs_kobj); | 56 | EXPORT_SYMBOL_GPL(fs_kobj); |
57 | 57 | ||
58 | /* | ||
59 | * vfsmount lock may be taken for read to prevent changes to the | ||
60 | * vfsmount hash, ie. during mountpoint lookups or walking back | ||
61 | * up the tree. | ||
62 | * | ||
63 | * It should be taken for write in all cases where the vfsmount | ||
64 | * tree or hash is modified or when a vfsmount structure is modified. | ||
65 | */ | ||
66 | DEFINE_BRLOCK(vfsmount_lock); | ||
67 | |||
58 | static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry) | 68 | static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry) |
59 | { | 69 | { |
60 | unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); | 70 | unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); |
@@ -65,18 +75,21 @@ static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry) | |||
65 | 75 | ||
66 | #define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16) | 76 | #define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16) |
67 | 77 | ||
68 | /* allocation is serialized by namespace_sem */ | 78 | /* |
79 | * allocation is serialized by namespace_sem, but we need the spinlock to | ||
80 | * serialize with freeing. | ||
81 | */ | ||
69 | static int mnt_alloc_id(struct vfsmount *mnt) | 82 | static int mnt_alloc_id(struct vfsmount *mnt) |
70 | { | 83 | { |
71 | int res; | 84 | int res; |
72 | 85 | ||
73 | retry: | 86 | retry: |
74 | ida_pre_get(&mnt_id_ida, GFP_KERNEL); | 87 | ida_pre_get(&mnt_id_ida, GFP_KERNEL); |
75 | spin_lock(&vfsmount_lock); | 88 | spin_lock(&mnt_id_lock); |
76 | res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id); | 89 | res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id); |
77 | if (!res) | 90 | if (!res) |
78 | mnt_id_start = mnt->mnt_id + 1; | 91 | mnt_id_start = mnt->mnt_id + 1; |
79 | spin_unlock(&vfsmount_lock); | 92 | spin_unlock(&mnt_id_lock); |
80 | if (res == -EAGAIN) | 93 | if (res == -EAGAIN) |
81 | goto retry; | 94 | goto retry; |
82 | 95 | ||
@@ -86,11 +99,11 @@ retry: | |||
86 | static void mnt_free_id(struct vfsmount *mnt) | 99 | static void mnt_free_id(struct vfsmount *mnt) |
87 | { | 100 | { |
88 | int id = mnt->mnt_id; | 101 | int id = mnt->mnt_id; |
89 | spin_lock(&vfsmount_lock); | 102 | spin_lock(&mnt_id_lock); |
90 | ida_remove(&mnt_id_ida, id); | 103 | ida_remove(&mnt_id_ida, id); |
91 | if (mnt_id_start > id) | 104 | if (mnt_id_start > id) |
92 | mnt_id_start = id; | 105 | mnt_id_start = id; |
93 | spin_unlock(&vfsmount_lock); | 106 | spin_unlock(&mnt_id_lock); |
94 | } | 107 | } |
95 | 108 | ||
96 | /* | 109 | /* |
@@ -348,7 +361,7 @@ static int mnt_make_readonly(struct vfsmount *mnt) | |||
348 | { | 361 | { |
349 | int ret = 0; | 362 | int ret = 0; |
350 | 363 | ||
351 | spin_lock(&vfsmount_lock); | 364 | br_write_lock(vfsmount_lock); |
352 | mnt->mnt_flags |= MNT_WRITE_HOLD; | 365 | mnt->mnt_flags |= MNT_WRITE_HOLD; |
353 | /* | 366 | /* |
354 | * After storing MNT_WRITE_HOLD, we'll read the counters. This store | 367 | * After storing MNT_WRITE_HOLD, we'll read the counters. This store |
@@ -382,15 +395,15 @@ static int mnt_make_readonly(struct vfsmount *mnt) | |||
382 | */ | 395 | */ |
383 | smp_wmb(); | 396 | smp_wmb(); |
384 | mnt->mnt_flags &= ~MNT_WRITE_HOLD; | 397 | mnt->mnt_flags &= ~MNT_WRITE_HOLD; |
385 | spin_unlock(&vfsmount_lock); | 398 | br_write_unlock(vfsmount_lock); |
386 | return ret; | 399 | return ret; |
387 | } | 400 | } |
388 | 401 | ||
389 | static void __mnt_unmake_readonly(struct vfsmount *mnt) | 402 | static void __mnt_unmake_readonly(struct vfsmount *mnt) |
390 | { | 403 | { |
391 | spin_lock(&vfsmount_lock); | 404 | br_write_lock(vfsmount_lock); |
392 | mnt->mnt_flags &= ~MNT_READONLY; | 405 | mnt->mnt_flags &= ~MNT_READONLY; |
393 | spin_unlock(&vfsmount_lock); | 406 | br_write_unlock(vfsmount_lock); |
394 | } | 407 | } |
395 | 408 | ||
396 | void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb) | 409 | void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb) |
@@ -414,6 +427,7 @@ void free_vfsmnt(struct vfsmount *mnt) | |||
414 | /* | 427 | /* |
415 | * find the first or last mount at @dentry on vfsmount @mnt depending on | 428 | * find the first or last mount at @dentry on vfsmount @mnt depending on |
416 | * @dir. If @dir is set return the first mount else return the last mount. | 429 | * @dir. If @dir is set return the first mount else return the last mount. |
430 | * vfsmount_lock must be held for read or write. | ||
417 | */ | 431 | */ |
418 | struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry, | 432 | struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry, |
419 | int dir) | 433 | int dir) |
@@ -443,10 +457,11 @@ struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry, | |||
443 | struct vfsmount *lookup_mnt(struct path *path) | 457 | struct vfsmount *lookup_mnt(struct path *path) |
444 | { | 458 | { |
445 | struct vfsmount *child_mnt; | 459 | struct vfsmount *child_mnt; |
446 | spin_lock(&vfsmount_lock); | 460 | |
461 | br_read_lock(vfsmount_lock); | ||
447 | if ((child_mnt = __lookup_mnt(path->mnt, path->dentry, 1))) | 462 | if ((child_mnt = __lookup_mnt(path->mnt, path->dentry, 1))) |
448 | mntget(child_mnt); | 463 | mntget(child_mnt); |
449 | spin_unlock(&vfsmount_lock); | 464 | br_read_unlock(vfsmount_lock); |
450 | return child_mnt; | 465 | return child_mnt; |
451 | } | 466 | } |
452 | 467 | ||
@@ -455,6 +470,9 @@ static inline int check_mnt(struct vfsmount *mnt) | |||
455 | return mnt->mnt_ns == current->nsproxy->mnt_ns; | 470 | return mnt->mnt_ns == current->nsproxy->mnt_ns; |
456 | } | 471 | } |
457 | 472 | ||
473 | /* | ||
474 | * vfsmount lock must be held for write | ||
475 | */ | ||
458 | static void touch_mnt_namespace(struct mnt_namespace *ns) | 476 | static void touch_mnt_namespace(struct mnt_namespace *ns) |
459 | { | 477 | { |
460 | if (ns) { | 478 | if (ns) { |
@@ -463,6 +481,9 @@ static void touch_mnt_namespace(struct mnt_namespace *ns) | |||
463 | } | 481 | } |
464 | } | 482 | } |
465 | 483 | ||
484 | /* | ||
485 | * vfsmount lock must be held for write | ||
486 | */ | ||
466 | static void __touch_mnt_namespace(struct mnt_namespace *ns) | 487 | static void __touch_mnt_namespace(struct mnt_namespace *ns) |
467 | { | 488 | { |
468 | if (ns && ns->event != event) { | 489 | if (ns && ns->event != event) { |
@@ -471,6 +492,9 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns) | |||
471 | } | 492 | } |
472 | } | 493 | } |
473 | 494 | ||
495 | /* | ||
496 | * vfsmount lock must be held for write | ||
497 | */ | ||
474 | static void detach_mnt(struct vfsmount *mnt, struct path *old_path) | 498 | static void detach_mnt(struct vfsmount *mnt, struct path *old_path) |
475 | { | 499 | { |
476 | old_path->dentry = mnt->mnt_mountpoint; | 500 | old_path->dentry = mnt->mnt_mountpoint; |
@@ -482,6 +506,9 @@ static void detach_mnt(struct vfsmount *mnt, struct path *old_path) | |||
482 | old_path->dentry->d_mounted--; | 506 | old_path->dentry->d_mounted--; |
483 | } | 507 | } |
484 | 508 | ||
509 | /* | ||
510 | * vfsmount lock must be held for write | ||
511 | */ | ||
485 | void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry, | 512 | void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry, |
486 | struct vfsmount *child_mnt) | 513 | struct vfsmount *child_mnt) |
487 | { | 514 | { |
@@ -490,6 +517,9 @@ void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry, | |||
490 | dentry->d_mounted++; | 517 | dentry->d_mounted++; |
491 | } | 518 | } |
492 | 519 | ||
520 | /* | ||
521 | * vfsmount lock must be held for write | ||
522 | */ | ||
493 | static void attach_mnt(struct vfsmount *mnt, struct path *path) | 523 | static void attach_mnt(struct vfsmount *mnt, struct path *path) |
494 | { | 524 | { |
495 | mnt_set_mountpoint(path->mnt, path->dentry, mnt); | 525 | mnt_set_mountpoint(path->mnt, path->dentry, mnt); |
@@ -499,7 +529,7 @@ static void attach_mnt(struct vfsmount *mnt, struct path *path) | |||
499 | } | 529 | } |
500 | 530 | ||
501 | /* | 531 | /* |
502 | * the caller must hold vfsmount_lock | 532 | * vfsmount lock must be held for write |
503 | */ | 533 | */ |
504 | static void commit_tree(struct vfsmount *mnt) | 534 | static void commit_tree(struct vfsmount *mnt) |
505 | { | 535 | { |
@@ -623,39 +653,43 @@ static inline void __mntput(struct vfsmount *mnt) | |||
623 | void mntput_no_expire(struct vfsmount *mnt) | 653 | void mntput_no_expire(struct vfsmount *mnt) |
624 | { | 654 | { |
625 | repeat: | 655 | repeat: |
626 | if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) { | 656 | if (atomic_add_unless(&mnt->mnt_count, -1, 1)) |
627 | if (likely(!mnt->mnt_pinned)) { | 657 | return; |
628 | spin_unlock(&vfsmount_lock); | 658 | br_write_lock(vfsmount_lock); |
629 | __mntput(mnt); | 659 | if (!atomic_dec_and_test(&mnt->mnt_count)) { |
630 | return; | 660 | br_write_unlock(vfsmount_lock); |
631 | } | 661 | return; |
632 | atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count); | 662 | } |
633 | mnt->mnt_pinned = 0; | 663 | if (likely(!mnt->mnt_pinned)) { |
634 | spin_unlock(&vfsmount_lock); | 664 | br_write_unlock(vfsmount_lock); |
635 | acct_auto_close_mnt(mnt); | 665 | __mntput(mnt); |
636 | goto repeat; | 666 | return; |
637 | } | 667 | } |
668 | atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count); | ||
669 | mnt->mnt_pinned = 0; | ||
670 | br_write_unlock(vfsmount_lock); | ||
671 | acct_auto_close_mnt(mnt); | ||
672 | goto repeat; | ||
638 | } | 673 | } |
639 | |||
640 | EXPORT_SYMBOL(mntput_no_expire); | 674 | EXPORT_SYMBOL(mntput_no_expire); |
641 | 675 | ||
642 | void mnt_pin(struct vfsmount *mnt) | 676 | void mnt_pin(struct vfsmount *mnt) |
643 | { | 677 | { |
644 | spin_lock(&vfsmount_lock); | 678 | br_write_lock(vfsmount_lock); |
645 | mnt->mnt_pinned++; | 679 | mnt->mnt_pinned++; |
646 | spin_unlock(&vfsmount_lock); | 680 | br_write_unlock(vfsmount_lock); |
647 | } | 681 | } |
648 | 682 | ||
649 | EXPORT_SYMBOL(mnt_pin); | 683 | EXPORT_SYMBOL(mnt_pin); |
650 | 684 | ||
651 | void mnt_unpin(struct vfsmount *mnt) | 685 | void mnt_unpin(struct vfsmount *mnt) |
652 | { | 686 | { |
653 | spin_lock(&vfsmount_lock); | 687 | br_write_lock(vfsmount_lock); |
654 | if (mnt->mnt_pinned) { | 688 | if (mnt->mnt_pinned) { |
655 | atomic_inc(&mnt->mnt_count); | 689 | atomic_inc(&mnt->mnt_count); |
656 | mnt->mnt_pinned--; | 690 | mnt->mnt_pinned--; |
657 | } | 691 | } |
658 | spin_unlock(&vfsmount_lock); | 692 | br_write_unlock(vfsmount_lock); |
659 | } | 693 | } |
660 | 694 | ||
661 | EXPORT_SYMBOL(mnt_unpin); | 695 | EXPORT_SYMBOL(mnt_unpin); |
@@ -746,12 +780,12 @@ int mnt_had_events(struct proc_mounts *p) | |||
746 | struct mnt_namespace *ns = p->ns; | 780 | struct mnt_namespace *ns = p->ns; |
747 | int res = 0; | 781 | int res = 0; |
748 | 782 | ||
749 | spin_lock(&vfsmount_lock); | 783 | br_read_lock(vfsmount_lock); |
750 | if (p->event != ns->event) { | 784 | if (p->event != ns->event) { |
751 | p->event = ns->event; | 785 | p->event = ns->event; |
752 | res = 1; | 786 | res = 1; |
753 | } | 787 | } |
754 | spin_unlock(&vfsmount_lock); | 788 | br_read_unlock(vfsmount_lock); |
755 | 789 | ||
756 | return res; | 790 | return res; |
757 | } | 791 | } |
@@ -952,12 +986,12 @@ int may_umount_tree(struct vfsmount *mnt) | |||
952 | int minimum_refs = 0; | 986 | int minimum_refs = 0; |
953 | struct vfsmount *p; | 987 | struct vfsmount *p; |
954 | 988 | ||
955 | spin_lock(&vfsmount_lock); | 989 | br_read_lock(vfsmount_lock); |
956 | for (p = mnt; p; p = next_mnt(p, mnt)) { | 990 | for (p = mnt; p; p = next_mnt(p, mnt)) { |
957 | actual_refs += atomic_read(&p->mnt_count); | 991 | actual_refs += atomic_read(&p->mnt_count); |
958 | minimum_refs += 2; | 992 | minimum_refs += 2; |
959 | } | 993 | } |
960 | spin_unlock(&vfsmount_lock); | 994 | br_read_unlock(vfsmount_lock); |
961 | 995 | ||
962 | if (actual_refs > minimum_refs) | 996 | if (actual_refs > minimum_refs) |
963 | return 0; | 997 | return 0; |
@@ -984,10 +1018,10 @@ int may_umount(struct vfsmount *mnt) | |||
984 | { | 1018 | { |
985 | int ret = 1; | 1019 | int ret = 1; |
986 | down_read(&namespace_sem); | 1020 | down_read(&namespace_sem); |
987 | spin_lock(&vfsmount_lock); | 1021 | br_read_lock(vfsmount_lock); |
988 | if (propagate_mount_busy(mnt, 2)) | 1022 | if (propagate_mount_busy(mnt, 2)) |
989 | ret = 0; | 1023 | ret = 0; |
990 | spin_unlock(&vfsmount_lock); | 1024 | br_read_unlock(vfsmount_lock); |
991 | up_read(&namespace_sem); | 1025 | up_read(&namespace_sem); |
992 | return ret; | 1026 | return ret; |
993 | } | 1027 | } |
@@ -1003,13 +1037,14 @@ void release_mounts(struct list_head *head) | |||
1003 | if (mnt->mnt_parent != mnt) { | 1037 | if (mnt->mnt_parent != mnt) { |
1004 | struct dentry *dentry; | 1038 | struct dentry *dentry; |
1005 | struct vfsmount *m; | 1039 | struct vfsmount *m; |
1006 | spin_lock(&vfsmount_lock); | 1040 | |
1041 | br_write_lock(vfsmount_lock); | ||
1007 | dentry = mnt->mnt_mountpoint; | 1042 | dentry = mnt->mnt_mountpoint; |
1008 | m = mnt->mnt_parent; | 1043 | m = mnt->mnt_parent; |
1009 | mnt->mnt_mountpoint = mnt->mnt_root; | 1044 | mnt->mnt_mountpoint = mnt->mnt_root; |
1010 | mnt->mnt_parent = mnt; | 1045 | mnt->mnt_parent = mnt; |
1011 | m->mnt_ghosts--; | 1046 | m->mnt_ghosts--; |
1012 | spin_unlock(&vfsmount_lock); | 1047 | br_write_unlock(vfsmount_lock); |
1013 | dput(dentry); | 1048 | dput(dentry); |
1014 | mntput(m); | 1049 | mntput(m); |
1015 | } | 1050 | } |
@@ -1017,6 +1052,10 @@ void release_mounts(struct list_head *head) | |||
1017 | } | 1052 | } |
1018 | } | 1053 | } |
1019 | 1054 | ||
1055 | /* | ||
1056 | * vfsmount lock must be held for write | ||
1057 | * namespace_sem must be held for write | ||
1058 | */ | ||
1020 | void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill) | 1059 | void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill) |
1021 | { | 1060 | { |
1022 | struct vfsmount *p; | 1061 | struct vfsmount *p; |
@@ -1107,7 +1146,7 @@ static int do_umount(struct vfsmount *mnt, int flags) | |||
1107 | } | 1146 | } |
1108 | 1147 | ||
1109 | down_write(&namespace_sem); | 1148 | down_write(&namespace_sem); |
1110 | spin_lock(&vfsmount_lock); | 1149 | br_write_lock(vfsmount_lock); |
1111 | event++; | 1150 | event++; |
1112 | 1151 | ||
1113 | if (!(flags & MNT_DETACH)) | 1152 | if (!(flags & MNT_DETACH)) |
@@ -1119,7 +1158,7 @@ static int do_umount(struct vfsmount *mnt, int flags) | |||
1119 | umount_tree(mnt, 1, &umount_list); | 1158 | umount_tree(mnt, 1, &umount_list); |
1120 | retval = 0; | 1159 | retval = 0; |
1121 | } | 1160 | } |
1122 | spin_unlock(&vfsmount_lock); | 1161 | br_write_unlock(vfsmount_lock); |
1123 | up_write(&namespace_sem); | 1162 | up_write(&namespace_sem); |
1124 | release_mounts(&umount_list); | 1163 | release_mounts(&umount_list); |
1125 | return retval; | 1164 | return retval; |
@@ -1231,19 +1270,19 @@ struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry, | |||
1231 | q = clone_mnt(p, p->mnt_root, flag); | 1270 | q = clone_mnt(p, p->mnt_root, flag); |
1232 | if (!q) | 1271 | if (!q) |
1233 | goto Enomem; | 1272 | goto Enomem; |
1234 | spin_lock(&vfsmount_lock); | 1273 | br_write_lock(vfsmount_lock); |
1235 | list_add_tail(&q->mnt_list, &res->mnt_list); | 1274 | list_add_tail(&q->mnt_list, &res->mnt_list); |
1236 | attach_mnt(q, &path); | 1275 | attach_mnt(q, &path); |
1237 | spin_unlock(&vfsmount_lock); | 1276 | br_write_unlock(vfsmount_lock); |
1238 | } | 1277 | } |
1239 | } | 1278 | } |
1240 | return res; | 1279 | return res; |
1241 | Enomem: | 1280 | Enomem: |
1242 | if (res) { | 1281 | if (res) { |
1243 | LIST_HEAD(umount_list); | 1282 | LIST_HEAD(umount_list); |
1244 | spin_lock(&vfsmount_lock); | 1283 | br_write_lock(vfsmount_lock); |
1245 | umount_tree(res, 0, &umount_list); | 1284 | umount_tree(res, 0, &umount_list); |
1246 | spin_unlock(&vfsmount_lock); | 1285 | br_write_unlock(vfsmount_lock); |
1247 | release_mounts(&umount_list); | 1286 | release_mounts(&umount_list); |
1248 | } | 1287 | } |
1249 | return NULL; | 1288 | return NULL; |
@@ -1262,9 +1301,9 @@ void drop_collected_mounts(struct vfsmount *mnt) | |||
1262 | { | 1301 | { |
1263 | LIST_HEAD(umount_list); | 1302 | LIST_HEAD(umount_list); |
1264 | down_write(&namespace_sem); | 1303 | down_write(&namespace_sem); |
1265 | spin_lock(&vfsmount_lock); | 1304 | br_write_lock(vfsmount_lock); |
1266 | umount_tree(mnt, 0, &umount_list); | 1305 | umount_tree(mnt, 0, &umount_list); |
1267 | spin_unlock(&vfsmount_lock); | 1306 | br_write_unlock(vfsmount_lock); |
1268 | up_write(&namespace_sem); | 1307 | up_write(&namespace_sem); |
1269 | release_mounts(&umount_list); | 1308 | release_mounts(&umount_list); |
1270 | } | 1309 | } |
@@ -1392,7 +1431,7 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt, | |||
1392 | if (err) | 1431 | if (err) |
1393 | goto out_cleanup_ids; | 1432 | goto out_cleanup_ids; |
1394 | 1433 | ||
1395 | spin_lock(&vfsmount_lock); | 1434 | br_write_lock(vfsmount_lock); |
1396 | 1435 | ||
1397 | if (IS_MNT_SHARED(dest_mnt)) { | 1436 | if (IS_MNT_SHARED(dest_mnt)) { |
1398 | for (p = source_mnt; p; p = next_mnt(p, source_mnt)) | 1437 | for (p = source_mnt; p; p = next_mnt(p, source_mnt)) |
@@ -1411,7 +1450,8 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt, | |||
1411 | list_del_init(&child->mnt_hash); | 1450 | list_del_init(&child->mnt_hash); |
1412 | commit_tree(child); | 1451 | commit_tree(child); |
1413 | } | 1452 | } |
1414 | spin_unlock(&vfsmount_lock); | 1453 | br_write_unlock(vfsmount_lock); |
1454 | |||
1415 | return 0; | 1455 | return 0; |
1416 | 1456 | ||
1417 | out_cleanup_ids: | 1457 | out_cleanup_ids: |
@@ -1466,10 +1506,10 @@ static int do_change_type(struct path *path, int flag) | |||
1466 | goto out_unlock; | 1506 | goto out_unlock; |
1467 | } | 1507 | } |
1468 | 1508 | ||
1469 | spin_lock(&vfsmount_lock); | 1509 | br_write_lock(vfsmount_lock); |
1470 | for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL)) | 1510 | for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL)) |
1471 | change_mnt_propagation(m, type); | 1511 | change_mnt_propagation(m, type); |
1472 | spin_unlock(&vfsmount_lock); | 1512 | br_write_unlock(vfsmount_lock); |
1473 | 1513 | ||
1474 | out_unlock: | 1514 | out_unlock: |
1475 | up_write(&namespace_sem); | 1515 | up_write(&namespace_sem); |
@@ -1513,9 +1553,10 @@ static int do_loopback(struct path *path, char *old_name, | |||
1513 | err = graft_tree(mnt, path); | 1553 | err = graft_tree(mnt, path); |
1514 | if (err) { | 1554 | if (err) { |
1515 | LIST_HEAD(umount_list); | 1555 | LIST_HEAD(umount_list); |
1516 | spin_lock(&vfsmount_lock); | 1556 | |
1557 | br_write_lock(vfsmount_lock); | ||
1517 | umount_tree(mnt, 0, &umount_list); | 1558 | umount_tree(mnt, 0, &umount_list); |
1518 | spin_unlock(&vfsmount_lock); | 1559 | br_write_unlock(vfsmount_lock); |
1519 | release_mounts(&umount_list); | 1560 | release_mounts(&umount_list); |
1520 | } | 1561 | } |
1521 | 1562 | ||
@@ -1568,16 +1609,16 @@ static int do_remount(struct path *path, int flags, int mnt_flags, | |||
1568 | else | 1609 | else |
1569 | err = do_remount_sb(sb, flags, data, 0); | 1610 | err = do_remount_sb(sb, flags, data, 0); |
1570 | if (!err) { | 1611 | if (!err) { |
1571 | spin_lock(&vfsmount_lock); | 1612 | br_write_lock(vfsmount_lock); |
1572 | mnt_flags |= path->mnt->mnt_flags & MNT_PROPAGATION_MASK; | 1613 | mnt_flags |= path->mnt->mnt_flags & MNT_PROPAGATION_MASK; |
1573 | path->mnt->mnt_flags = mnt_flags; | 1614 | path->mnt->mnt_flags = mnt_flags; |
1574 | spin_unlock(&vfsmount_lock); | 1615 | br_write_unlock(vfsmount_lock); |
1575 | } | 1616 | } |
1576 | up_write(&sb->s_umount); | 1617 | up_write(&sb->s_umount); |
1577 | if (!err) { | 1618 | if (!err) { |
1578 | spin_lock(&vfsmount_lock); | 1619 | br_write_lock(vfsmount_lock); |
1579 | touch_mnt_namespace(path->mnt->mnt_ns); | 1620 | touch_mnt_namespace(path->mnt->mnt_ns); |
1580 | spin_unlock(&vfsmount_lock); | 1621 | br_write_unlock(vfsmount_lock); |
1581 | } | 1622 | } |
1582 | return err; | 1623 | return err; |
1583 | } | 1624 | } |
@@ -1754,7 +1795,7 @@ void mark_mounts_for_expiry(struct list_head *mounts) | |||
1754 | return; | 1795 | return; |
1755 | 1796 | ||
1756 | down_write(&namespace_sem); | 1797 | down_write(&namespace_sem); |
1757 | spin_lock(&vfsmount_lock); | 1798 | br_write_lock(vfsmount_lock); |
1758 | 1799 | ||
1759 | /* extract from the expiration list every vfsmount that matches the | 1800 | /* extract from the expiration list every vfsmount that matches the |
1760 | * following criteria: | 1801 | * following criteria: |
@@ -1773,7 +1814,7 @@ void mark_mounts_for_expiry(struct list_head *mounts) | |||
1773 | touch_mnt_namespace(mnt->mnt_ns); | 1814 | touch_mnt_namespace(mnt->mnt_ns); |
1774 | umount_tree(mnt, 1, &umounts); | 1815 | umount_tree(mnt, 1, &umounts); |
1775 | } | 1816 | } |
1776 | spin_unlock(&vfsmount_lock); | 1817 | br_write_unlock(vfsmount_lock); |
1777 | up_write(&namespace_sem); | 1818 | up_write(&namespace_sem); |
1778 | 1819 | ||
1779 | release_mounts(&umounts); | 1820 | release_mounts(&umounts); |
@@ -1830,6 +1871,8 @@ resume: | |||
1830 | /* | 1871 | /* |
1831 | * process a list of expirable mountpoints with the intent of discarding any | 1872 | * process a list of expirable mountpoints with the intent of discarding any |
1832 | * submounts of a specific parent mountpoint | 1873 | * submounts of a specific parent mountpoint |
1874 | * | ||
1875 | * vfsmount_lock must be held for write | ||
1833 | */ | 1876 | */ |
1834 | static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts) | 1877 | static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts) |
1835 | { | 1878 | { |
@@ -2048,9 +2091,9 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns, | |||
2048 | kfree(new_ns); | 2091 | kfree(new_ns); |
2049 | return ERR_PTR(-ENOMEM); | 2092 | return ERR_PTR(-ENOMEM); |
2050 | } | 2093 | } |
2051 | spin_lock(&vfsmount_lock); | 2094 | br_write_lock(vfsmount_lock); |
2052 | list_add_tail(&new_ns->list, &new_ns->root->mnt_list); | 2095 | list_add_tail(&new_ns->list, &new_ns->root->mnt_list); |
2053 | spin_unlock(&vfsmount_lock); | 2096 | br_write_unlock(vfsmount_lock); |
2054 | 2097 | ||
2055 | /* | 2098 | /* |
2056 | * Second pass: switch the tsk->fs->* elements and mark new vfsmounts | 2099 | * Second pass: switch the tsk->fs->* elements and mark new vfsmounts |
@@ -2244,7 +2287,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, | |||
2244 | goto out2; /* not attached */ | 2287 | goto out2; /* not attached */ |
2245 | /* make sure we can reach put_old from new_root */ | 2288 | /* make sure we can reach put_old from new_root */ |
2246 | tmp = old.mnt; | 2289 | tmp = old.mnt; |
2247 | spin_lock(&vfsmount_lock); | 2290 | br_write_lock(vfsmount_lock); |
2248 | if (tmp != new.mnt) { | 2291 | if (tmp != new.mnt) { |
2249 | for (;;) { | 2292 | for (;;) { |
2250 | if (tmp->mnt_parent == tmp) | 2293 | if (tmp->mnt_parent == tmp) |
@@ -2264,7 +2307,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, | |||
2264 | /* mount new_root on / */ | 2307 | /* mount new_root on / */ |
2265 | attach_mnt(new.mnt, &root_parent); | 2308 | attach_mnt(new.mnt, &root_parent); |
2266 | touch_mnt_namespace(current->nsproxy->mnt_ns); | 2309 | touch_mnt_namespace(current->nsproxy->mnt_ns); |
2267 | spin_unlock(&vfsmount_lock); | 2310 | br_write_unlock(vfsmount_lock); |
2268 | chroot_fs_refs(&root, &new); | 2311 | chroot_fs_refs(&root, &new); |
2269 | error = 0; | 2312 | error = 0; |
2270 | path_put(&root_parent); | 2313 | path_put(&root_parent); |
@@ -2279,7 +2322,7 @@ out1: | |||
2279 | out0: | 2322 | out0: |
2280 | return error; | 2323 | return error; |
2281 | out3: | 2324 | out3: |
2282 | spin_unlock(&vfsmount_lock); | 2325 | br_write_unlock(vfsmount_lock); |
2283 | goto out2; | 2326 | goto out2; |
2284 | } | 2327 | } |
2285 | 2328 | ||
@@ -2326,6 +2369,8 @@ void __init mnt_init(void) | |||
2326 | for (u = 0; u < HASH_SIZE; u++) | 2369 | for (u = 0; u < HASH_SIZE; u++) |
2327 | INIT_LIST_HEAD(&mount_hashtable[u]); | 2370 | INIT_LIST_HEAD(&mount_hashtable[u]); |
2328 | 2371 | ||
2372 | br_lock_init(vfsmount_lock); | ||
2373 | |||
2329 | err = sysfs_init(); | 2374 | err = sysfs_init(); |
2330 | if (err) | 2375 | if (err) |
2331 | printk(KERN_WARNING "%s: sysfs_init error: %d\n", | 2376 | printk(KERN_WARNING "%s: sysfs_init error: %d\n", |
@@ -2344,9 +2389,9 @@ void put_mnt_ns(struct mnt_namespace *ns) | |||
2344 | if (!atomic_dec_and_test(&ns->count)) | 2389 | if (!atomic_dec_and_test(&ns->count)) |
2345 | return; | 2390 | return; |
2346 | down_write(&namespace_sem); | 2391 | down_write(&namespace_sem); |
2347 | spin_lock(&vfsmount_lock); | 2392 | br_write_lock(vfsmount_lock); |
2348 | umount_tree(ns->root, 0, &umount_list); | 2393 | umount_tree(ns->root, 0, &umount_list); |
2349 | spin_unlock(&vfsmount_lock); | 2394 | br_write_unlock(vfsmount_lock); |
2350 | up_write(&namespace_sem); | 2395 | up_write(&namespace_sem); |
2351 | release_mounts(&umount_list); | 2396 | release_mounts(&umount_list); |
2352 | kfree(ns); | 2397 | kfree(ns); |
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index 26a510a7be09..6c2aad49d731 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig | |||
@@ -63,7 +63,6 @@ config NFS_V3_ACL | |||
63 | config NFS_V4 | 63 | config NFS_V4 |
64 | bool "NFS client support for NFS version 4" | 64 | bool "NFS client support for NFS version 4" |
65 | depends on NFS_FS | 65 | depends on NFS_FS |
66 | select RPCSEC_GSS_KRB5 | ||
67 | help | 66 | help |
68 | This option enables support for version 4 of the NFS protocol | 67 | This option enables support for version 4 of the NFS protocol |
69 | (RFC 3530) in the kernel's NFS client. | 68 | (RFC 3530) in the kernel's NFS client. |
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 29539ceeb745..e257172d438c 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c | |||
@@ -140,6 +140,13 @@ nfs_opendir(struct inode *inode, struct file *filp) | |||
140 | 140 | ||
141 | /* Call generic open code in order to cache credentials */ | 141 | /* Call generic open code in order to cache credentials */ |
142 | res = nfs_open(inode, filp); | 142 | res = nfs_open(inode, filp); |
143 | if (filp->f_path.dentry == filp->f_path.mnt->mnt_root) { | ||
144 | /* This is a mountpoint, so d_revalidate will never | ||
145 | * have been called, so we need to refresh the | ||
146 | * inode (for close-open consistency) ourselves. | ||
147 | */ | ||
148 | __nfs_revalidate_inode(NFS_SERVER(inode), inode); | ||
149 | } | ||
143 | return res; | 150 | return res; |
144 | } | 151 | } |
145 | 152 | ||
@@ -1103,7 +1110,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd) | |||
1103 | if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL)) | 1110 | if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL)) |
1104 | goto no_open_dput; | 1111 | goto no_open_dput; |
1105 | /* We can't create new files, or truncate existing ones here */ | 1112 | /* We can't create new files, or truncate existing ones here */ |
1106 | openflags &= ~(O_CREAT|O_TRUNC); | 1113 | openflags &= ~(O_CREAT|O_EXCL|O_TRUNC); |
1107 | 1114 | ||
1108 | /* | 1115 | /* |
1109 | * Note: we're not holding inode->i_mutex and so may be racing with | 1116 | * Note: we're not holding inode->i_mutex and so may be racing with |
diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 2d141a74ae82..eb51bd6201da 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c | |||
@@ -323,7 +323,7 @@ nfs_file_fsync(struct file *file, int datasync) | |||
323 | have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); | 323 | have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); |
324 | if (have_error) | 324 | if (have_error) |
325 | ret = xchg(&ctx->error, 0); | 325 | ret = xchg(&ctx->error, 0); |
326 | if (!ret) | 326 | if (!ret && status < 0) |
327 | ret = status; | 327 | ret = status; |
328 | return ret; | 328 | return ret; |
329 | } | 329 | } |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 7ffbb98ddec3..089da5b5d20a 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -2036,7 +2036,8 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) | |||
2036 | struct rpc_cred *cred; | 2036 | struct rpc_cred *cred; |
2037 | struct nfs4_state *state; | 2037 | struct nfs4_state *state; |
2038 | struct dentry *res; | 2038 | struct dentry *res; |
2039 | fmode_t fmode = nd->intent.open.flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC); | 2039 | int open_flags = nd->intent.open.flags; |
2040 | fmode_t fmode = open_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC); | ||
2040 | 2041 | ||
2041 | if (nd->flags & LOOKUP_CREATE) { | 2042 | if (nd->flags & LOOKUP_CREATE) { |
2042 | attr.ia_mode = nd->intent.open.create_mode; | 2043 | attr.ia_mode = nd->intent.open.create_mode; |
@@ -2044,8 +2045,9 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) | |||
2044 | if (!IS_POSIXACL(dir)) | 2045 | if (!IS_POSIXACL(dir)) |
2045 | attr.ia_mode &= ~current_umask(); | 2046 | attr.ia_mode &= ~current_umask(); |
2046 | } else { | 2047 | } else { |
2048 | open_flags &= ~O_EXCL; | ||
2047 | attr.ia_valid = 0; | 2049 | attr.ia_valid = 0; |
2048 | BUG_ON(nd->intent.open.flags & O_CREAT); | 2050 | BUG_ON(open_flags & O_CREAT); |
2049 | } | 2051 | } |
2050 | 2052 | ||
2051 | cred = rpc_lookup_cred(); | 2053 | cred = rpc_lookup_cred(); |
@@ -2054,7 +2056,7 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) | |||
2054 | parent = dentry->d_parent; | 2056 | parent = dentry->d_parent; |
2055 | /* Protect against concurrent sillydeletes */ | 2057 | /* Protect against concurrent sillydeletes */ |
2056 | nfs_block_sillyrename(parent); | 2058 | nfs_block_sillyrename(parent); |
2057 | state = nfs4_do_open(dir, &path, fmode, nd->intent.open.flags, &attr, cred); | 2059 | state = nfs4_do_open(dir, &path, fmode, open_flags, &attr, cred); |
2058 | put_rpccred(cred); | 2060 | put_rpccred(cred); |
2059 | if (IS_ERR(state)) { | 2061 | if (IS_ERR(state)) { |
2060 | if (PTR_ERR(state) == -ENOENT) { | 2062 | if (PTR_ERR(state) == -ENOENT) { |
@@ -2273,8 +2275,7 @@ static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct | |||
2273 | out: | 2275 | out: |
2274 | if (page) | 2276 | if (page) |
2275 | __free_page(page); | 2277 | __free_page(page); |
2276 | if (locations) | 2278 | kfree(locations); |
2277 | kfree(locations); | ||
2278 | return status; | 2279 | return status; |
2279 | } | 2280 | } |
2280 | 2281 | ||
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index ee26316ad1f4..ec3966e4706b 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
@@ -655,6 +655,13 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss, | |||
655 | 655 | ||
656 | if (nfss->options & NFS_OPTION_FSCACHE) | 656 | if (nfss->options & NFS_OPTION_FSCACHE) |
657 | seq_printf(m, ",fsc"); | 657 | seq_printf(m, ",fsc"); |
658 | |||
659 | if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG) { | ||
660 | if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONE) | ||
661 | seq_printf(m, ",lookupcache=none"); | ||
662 | else | ||
663 | seq_printf(m, ",lookupcache=pos"); | ||
664 | } | ||
658 | } | 665 | } |
659 | 666 | ||
660 | /* | 667 | /* |
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig index 503b9da159a3..95932f523aef 100644 --- a/fs/nfsd/Kconfig +++ b/fs/nfsd/Kconfig | |||
@@ -69,7 +69,6 @@ config NFSD_V4 | |||
69 | depends on NFSD && PROC_FS && EXPERIMENTAL | 69 | depends on NFSD && PROC_FS && EXPERIMENTAL |
70 | select NFSD_V3 | 70 | select NFSD_V3 |
71 | select FS_POSIX_ACL | 71 | select FS_POSIX_ACL |
72 | select RPCSEC_GSS_KRB5 | ||
73 | help | 72 | help |
74 | This option enables support in your system's NFS server for | 73 | This option enables support in your system's NFS server for |
75 | version 4 of the NFS protocol (RFC 3530). | 74 | version 4 of the NFS protocol (RFC 3530). |
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 1fa86b9df73b..922263393c76 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c | |||
@@ -175,24 +175,24 @@ static int nilfs_sync_super(struct nilfs_sb_info *sbi, int flag) | |||
175 | { | 175 | { |
176 | struct the_nilfs *nilfs = sbi->s_nilfs; | 176 | struct the_nilfs *nilfs = sbi->s_nilfs; |
177 | int err; | 177 | int err; |
178 | int barrier_done = 0; | ||
179 | 178 | ||
180 | if (nilfs_test_opt(sbi, BARRIER)) { | ||
181 | set_buffer_ordered(nilfs->ns_sbh[0]); | ||
182 | barrier_done = 1; | ||
183 | } | ||
184 | retry: | 179 | retry: |
185 | set_buffer_dirty(nilfs->ns_sbh[0]); | 180 | set_buffer_dirty(nilfs->ns_sbh[0]); |
186 | err = sync_dirty_buffer(nilfs->ns_sbh[0]); | 181 | |
187 | if (err == -EOPNOTSUPP && barrier_done) { | 182 | if (nilfs_test_opt(sbi, BARRIER)) { |
188 | nilfs_warning(sbi->s_super, __func__, | 183 | err = __sync_dirty_buffer(nilfs->ns_sbh[0], |
189 | "barrier-based sync failed. " | 184 | WRITE_SYNC | WRITE_BARRIER); |
190 | "disabling barriers\n"); | 185 | if (err == -EOPNOTSUPP) { |
191 | nilfs_clear_opt(sbi, BARRIER); | 186 | nilfs_warning(sbi->s_super, __func__, |
192 | barrier_done = 0; | 187 | "barrier-based sync failed. " |
193 | clear_buffer_ordered(nilfs->ns_sbh[0]); | 188 | "disabling barriers\n"); |
194 | goto retry; | 189 | nilfs_clear_opt(sbi, BARRIER); |
190 | goto retry; | ||
191 | } | ||
192 | } else { | ||
193 | err = sync_dirty_buffer(nilfs->ns_sbh[0]); | ||
195 | } | 194 | } |
195 | |||
196 | if (unlikely(err)) { | 196 | if (unlikely(err)) { |
197 | printk(KERN_ERR | 197 | printk(KERN_ERR |
198 | "NILFS: unable to write superblock (err=%d)\n", err); | 198 | "NILFS: unable to write superblock (err=%d)\n", err); |
@@ -400,9 +400,10 @@ int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno) | |||
400 | list_add(&sbi->s_list, &nilfs->ns_supers); | 400 | list_add(&sbi->s_list, &nilfs->ns_supers); |
401 | up_write(&nilfs->ns_super_sem); | 401 | up_write(&nilfs->ns_super_sem); |
402 | 402 | ||
403 | err = -ENOMEM; | ||
403 | sbi->s_ifile = nilfs_ifile_new(sbi, nilfs->ns_inode_size); | 404 | sbi->s_ifile = nilfs_ifile_new(sbi, nilfs->ns_inode_size); |
404 | if (!sbi->s_ifile) | 405 | if (!sbi->s_ifile) |
405 | return -ENOMEM; | 406 | goto delist; |
406 | 407 | ||
407 | down_read(&nilfs->ns_segctor_sem); | 408 | down_read(&nilfs->ns_segctor_sem); |
408 | err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, cno, 0, &raw_cp, | 409 | err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, cno, 0, &raw_cp, |
@@ -433,6 +434,7 @@ int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno) | |||
433 | nilfs_mdt_destroy(sbi->s_ifile); | 434 | nilfs_mdt_destroy(sbi->s_ifile); |
434 | sbi->s_ifile = NULL; | 435 | sbi->s_ifile = NULL; |
435 | 436 | ||
437 | delist: | ||
436 | down_write(&nilfs->ns_super_sem); | 438 | down_write(&nilfs->ns_super_sem); |
437 | list_del_init(&sbi->s_list); | 439 | list_del_init(&sbi->s_list); |
438 | up_write(&nilfs->ns_super_sem); | 440 | up_write(&nilfs->ns_super_sem); |
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 37de1f062d81..4317f177ea7c 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c | |||
@@ -608,11 +608,11 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs, | |||
608 | return -EINVAL; | 608 | return -EINVAL; |
609 | } | 609 | } |
610 | 610 | ||
611 | if (swp) { | 611 | if (!valid[!swp]) |
612 | printk(KERN_WARNING "NILFS warning: broken superblock. " | 612 | printk(KERN_WARNING "NILFS warning: broken superblock. " |
613 | "using spare superblock.\n"); | 613 | "using spare superblock.\n"); |
614 | if (swp) | ||
614 | nilfs_swap_super_block(nilfs); | 615 | nilfs_swap_super_block(nilfs); |
615 | } | ||
616 | 616 | ||
617 | nilfs->ns_sbwcount = 0; | 617 | nilfs->ns_sbwcount = 0; |
618 | nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime); | 618 | nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime); |
@@ -775,6 +775,7 @@ int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump, | |||
775 | start * sects_per_block, | 775 | start * sects_per_block, |
776 | nblocks * sects_per_block, | 776 | nblocks * sects_per_block, |
777 | GFP_NOFS, | 777 | GFP_NOFS, |
778 | BLKDEV_IFL_WAIT | | ||
778 | BLKDEV_IFL_BARRIER); | 779 | BLKDEV_IFL_BARRIER); |
779 | if (ret < 0) | 780 | if (ret < 0) |
780 | return ret; | 781 | return ret; |
@@ -785,7 +786,8 @@ int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump, | |||
785 | ret = blkdev_issue_discard(nilfs->ns_bdev, | 786 | ret = blkdev_issue_discard(nilfs->ns_bdev, |
786 | start * sects_per_block, | 787 | start * sects_per_block, |
787 | nblocks * sects_per_block, | 788 | nblocks * sects_per_block, |
788 | GFP_NOFS, BLKDEV_IFL_BARRIER); | 789 | GFP_NOFS, |
790 | BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER); | ||
789 | return ret; | 791 | return ret; |
790 | } | 792 | } |
791 | 793 | ||
@@ -675,7 +675,7 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt, | |||
675 | f->f_path.mnt = mnt; | 675 | f->f_path.mnt = mnt; |
676 | f->f_pos = 0; | 676 | f->f_pos = 0; |
677 | f->f_op = fops_get(inode->i_fop); | 677 | f->f_op = fops_get(inode->i_fop); |
678 | file_move(f, &inode->i_sb->s_files); | 678 | file_sb_list_add(f, inode->i_sb); |
679 | 679 | ||
680 | error = security_dentry_open(f, cred); | 680 | error = security_dentry_open(f, cred); |
681 | if (error) | 681 | if (error) |
@@ -721,7 +721,7 @@ cleanup_all: | |||
721 | mnt_drop_write(mnt); | 721 | mnt_drop_write(mnt); |
722 | } | 722 | } |
723 | } | 723 | } |
724 | file_kill(f); | 724 | file_sb_list_del(f); |
725 | f->f_path.dentry = NULL; | 725 | f->f_path.dentry = NULL; |
726 | f->f_path.mnt = NULL; | 726 | f->f_path.mnt = NULL; |
727 | cleanup_file: | 727 | cleanup_file: |
diff --git a/fs/pnode.c b/fs/pnode.c index 5cc564a83149..8066b8dd748f 100644 --- a/fs/pnode.c +++ b/fs/pnode.c | |||
@@ -126,6 +126,9 @@ static int do_make_slave(struct vfsmount *mnt) | |||
126 | return 0; | 126 | return 0; |
127 | } | 127 | } |
128 | 128 | ||
129 | /* | ||
130 | * vfsmount lock must be held for write | ||
131 | */ | ||
129 | void change_mnt_propagation(struct vfsmount *mnt, int type) | 132 | void change_mnt_propagation(struct vfsmount *mnt, int type) |
130 | { | 133 | { |
131 | if (type == MS_SHARED) { | 134 | if (type == MS_SHARED) { |
@@ -270,12 +273,12 @@ int propagate_mnt(struct vfsmount *dest_mnt, struct dentry *dest_dentry, | |||
270 | prev_src_mnt = child; | 273 | prev_src_mnt = child; |
271 | } | 274 | } |
272 | out: | 275 | out: |
273 | spin_lock(&vfsmount_lock); | 276 | br_write_lock(vfsmount_lock); |
274 | while (!list_empty(&tmp_list)) { | 277 | while (!list_empty(&tmp_list)) { |
275 | child = list_first_entry(&tmp_list, struct vfsmount, mnt_hash); | 278 | child = list_first_entry(&tmp_list, struct vfsmount, mnt_hash); |
276 | umount_tree(child, 0, &umount_list); | 279 | umount_tree(child, 0, &umount_list); |
277 | } | 280 | } |
278 | spin_unlock(&vfsmount_lock); | 281 | br_write_unlock(vfsmount_lock); |
279 | release_mounts(&umount_list); | 282 | release_mounts(&umount_list); |
280 | return ret; | 283 | return ret; |
281 | } | 284 | } |
@@ -296,6 +299,8 @@ static inline int do_refcount_check(struct vfsmount *mnt, int count) | |||
296 | * other mounts its parent propagates to. | 299 | * other mounts its parent propagates to. |
297 | * Check if any of these mounts that **do not have submounts** | 300 | * Check if any of these mounts that **do not have submounts** |
298 | * have more references than 'refcnt'. If so return busy. | 301 | * have more references than 'refcnt'. If so return busy. |
302 | * | ||
303 | * vfsmount lock must be held for read or write | ||
299 | */ | 304 | */ |
300 | int propagate_mount_busy(struct vfsmount *mnt, int refcnt) | 305 | int propagate_mount_busy(struct vfsmount *mnt, int refcnt) |
301 | { | 306 | { |
@@ -353,6 +358,8 @@ static void __propagate_umount(struct vfsmount *mnt) | |||
353 | * collect all mounts that receive propagation from the mount in @list, | 358 | * collect all mounts that receive propagation from the mount in @list, |
354 | * and return these additional mounts in the same list. | 359 | * and return these additional mounts in the same list. |
355 | * @list: the list of mounts to be unmounted. | 360 | * @list: the list of mounts to be unmounted. |
361 | * | ||
362 | * vfsmount lock must be held for write | ||
356 | */ | 363 | */ |
357 | int propagate_umount(struct list_head *list) | 364 | int propagate_umount(struct list_head *list) |
358 | { | 365 | { |
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index ae35413dcbe1..caa758377d66 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c | |||
@@ -83,6 +83,7 @@ void reiserfs_evict_inode(struct inode *inode) | |||
83 | dquot_drop(inode); | 83 | dquot_drop(inode); |
84 | inode->i_blocks = 0; | 84 | inode->i_blocks = 0; |
85 | reiserfs_write_unlock_once(inode->i_sb, depth); | 85 | reiserfs_write_unlock_once(inode->i_sb, depth); |
86 | return; | ||
86 | 87 | ||
87 | no_delete: | 88 | no_delete: |
88 | end_writeback(inode); | 89 | end_writeback(inode); |
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 1ec952b1f036..812e2c05aa29 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c | |||
@@ -2311,7 +2311,7 @@ static int journal_read_transaction(struct super_block *sb, | |||
2311 | /* flush out the real blocks */ | 2311 | /* flush out the real blocks */ |
2312 | for (i = 0; i < get_desc_trans_len(desc); i++) { | 2312 | for (i = 0; i < get_desc_trans_len(desc); i++) { |
2313 | set_buffer_dirty(real_blocks[i]); | 2313 | set_buffer_dirty(real_blocks[i]); |
2314 | ll_rw_block(SWRITE, 1, real_blocks + i); | 2314 | write_dirty_buffer(real_blocks[i], WRITE); |
2315 | } | 2315 | } |
2316 | for (i = 0; i < get_desc_trans_len(desc); i++) { | 2316 | for (i = 0; i < get_desc_trans_len(desc); i++) { |
2317 | wait_on_buffer(real_blocks[i]); | 2317 | wait_on_buffer(real_blocks[i]); |
diff --git a/fs/super.c b/fs/super.c index 9674ab2c8718..8819e3a7ff20 100644 --- a/fs/super.c +++ b/fs/super.c | |||
@@ -54,7 +54,22 @@ static struct super_block *alloc_super(struct file_system_type *type) | |||
54 | s = NULL; | 54 | s = NULL; |
55 | goto out; | 55 | goto out; |
56 | } | 56 | } |
57 | #ifdef CONFIG_SMP | ||
58 | s->s_files = alloc_percpu(struct list_head); | ||
59 | if (!s->s_files) { | ||
60 | security_sb_free(s); | ||
61 | kfree(s); | ||
62 | s = NULL; | ||
63 | goto out; | ||
64 | } else { | ||
65 | int i; | ||
66 | |||
67 | for_each_possible_cpu(i) | ||
68 | INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i)); | ||
69 | } | ||
70 | #else | ||
57 | INIT_LIST_HEAD(&s->s_files); | 71 | INIT_LIST_HEAD(&s->s_files); |
72 | #endif | ||
58 | INIT_LIST_HEAD(&s->s_instances); | 73 | INIT_LIST_HEAD(&s->s_instances); |
59 | INIT_HLIST_HEAD(&s->s_anon); | 74 | INIT_HLIST_HEAD(&s->s_anon); |
60 | INIT_LIST_HEAD(&s->s_inodes); | 75 | INIT_LIST_HEAD(&s->s_inodes); |
@@ -108,6 +123,9 @@ out: | |||
108 | */ | 123 | */ |
109 | static inline void destroy_super(struct super_block *s) | 124 | static inline void destroy_super(struct super_block *s) |
110 | { | 125 | { |
126 | #ifdef CONFIG_SMP | ||
127 | free_percpu(s->s_files); | ||
128 | #endif | ||
111 | security_sb_free(s); | 129 | security_sb_free(s); |
112 | kfree(s->s_subtype); | 130 | kfree(s->s_subtype); |
113 | kfree(s->s_options); | 131 | kfree(s->s_options); |
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index 048484fb10d2..46f7a807bbc1 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c | |||
@@ -114,10 +114,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count) | |||
114 | 114 | ||
115 | ubh_mark_buffer_dirty (USPI_UBH(uspi)); | 115 | ubh_mark_buffer_dirty (USPI_UBH(uspi)); |
116 | ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); | 116 | ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); |
117 | if (sb->s_flags & MS_SYNCHRONOUS) { | 117 | if (sb->s_flags & MS_SYNCHRONOUS) |
118 | ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi)); | 118 | ubh_sync_block(UCPI_UBH(ucpi)); |
119 | ubh_wait_on_buffer (UCPI_UBH(ucpi)); | ||
120 | } | ||
121 | sb->s_dirt = 1; | 119 | sb->s_dirt = 1; |
122 | 120 | ||
123 | unlock_super (sb); | 121 | unlock_super (sb); |
@@ -207,10 +205,8 @@ do_more: | |||
207 | 205 | ||
208 | ubh_mark_buffer_dirty (USPI_UBH(uspi)); | 206 | ubh_mark_buffer_dirty (USPI_UBH(uspi)); |
209 | ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); | 207 | ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); |
210 | if (sb->s_flags & MS_SYNCHRONOUS) { | 208 | if (sb->s_flags & MS_SYNCHRONOUS) |
211 | ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi)); | 209 | ubh_sync_block(UCPI_UBH(ucpi)); |
212 | ubh_wait_on_buffer (UCPI_UBH(ucpi)); | ||
213 | } | ||
214 | 210 | ||
215 | if (overflow) { | 211 | if (overflow) { |
216 | fragment += count; | 212 | fragment += count; |
@@ -558,10 +554,8 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment, | |||
558 | 554 | ||
559 | ubh_mark_buffer_dirty (USPI_UBH(uspi)); | 555 | ubh_mark_buffer_dirty (USPI_UBH(uspi)); |
560 | ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); | 556 | ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); |
561 | if (sb->s_flags & MS_SYNCHRONOUS) { | 557 | if (sb->s_flags & MS_SYNCHRONOUS) |
562 | ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi)); | 558 | ubh_sync_block(UCPI_UBH(ucpi)); |
563 | ubh_wait_on_buffer (UCPI_UBH(ucpi)); | ||
564 | } | ||
565 | sb->s_dirt = 1; | 559 | sb->s_dirt = 1; |
566 | 560 | ||
567 | UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment); | 561 | UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment); |
@@ -680,10 +674,8 @@ cg_found: | |||
680 | succed: | 674 | succed: |
681 | ubh_mark_buffer_dirty (USPI_UBH(uspi)); | 675 | ubh_mark_buffer_dirty (USPI_UBH(uspi)); |
682 | ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); | 676 | ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); |
683 | if (sb->s_flags & MS_SYNCHRONOUS) { | 677 | if (sb->s_flags & MS_SYNCHRONOUS) |
684 | ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi)); | 678 | ubh_sync_block(UCPI_UBH(ucpi)); |
685 | ubh_wait_on_buffer (UCPI_UBH(ucpi)); | ||
686 | } | ||
687 | sb->s_dirt = 1; | 679 | sb->s_dirt = 1; |
688 | 680 | ||
689 | result += cgno * uspi->s_fpg; | 681 | result += cgno * uspi->s_fpg; |
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c index 428017e018fe..2eabf04af3de 100644 --- a/fs/ufs/ialloc.c +++ b/fs/ufs/ialloc.c | |||
@@ -113,10 +113,8 @@ void ufs_free_inode (struct inode * inode) | |||
113 | 113 | ||
114 | ubh_mark_buffer_dirty (USPI_UBH(uspi)); | 114 | ubh_mark_buffer_dirty (USPI_UBH(uspi)); |
115 | ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); | 115 | ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); |
116 | if (sb->s_flags & MS_SYNCHRONOUS) { | 116 | if (sb->s_flags & MS_SYNCHRONOUS) |
117 | ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi)); | 117 | ubh_sync_block(UCPI_UBH(ucpi)); |
118 | ubh_wait_on_buffer (UCPI_UBH(ucpi)); | ||
119 | } | ||
120 | 118 | ||
121 | sb->s_dirt = 1; | 119 | sb->s_dirt = 1; |
122 | unlock_super (sb); | 120 | unlock_super (sb); |
@@ -156,10 +154,8 @@ static void ufs2_init_inodes_chunk(struct super_block *sb, | |||
156 | 154 | ||
157 | fs32_add(sb, &ucg->cg_u.cg_u2.cg_initediblk, uspi->s_inopb); | 155 | fs32_add(sb, &ucg->cg_u.cg_u2.cg_initediblk, uspi->s_inopb); |
158 | ubh_mark_buffer_dirty(UCPI_UBH(ucpi)); | 156 | ubh_mark_buffer_dirty(UCPI_UBH(ucpi)); |
159 | if (sb->s_flags & MS_SYNCHRONOUS) { | 157 | if (sb->s_flags & MS_SYNCHRONOUS) |
160 | ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi)); | 158 | ubh_sync_block(UCPI_UBH(ucpi)); |
161 | ubh_wait_on_buffer(UCPI_UBH(ucpi)); | ||
162 | } | ||
163 | 159 | ||
164 | UFSD("EXIT\n"); | 160 | UFSD("EXIT\n"); |
165 | } | 161 | } |
@@ -290,10 +286,8 @@ cg_found: | |||
290 | } | 286 | } |
291 | ubh_mark_buffer_dirty (USPI_UBH(uspi)); | 287 | ubh_mark_buffer_dirty (USPI_UBH(uspi)); |
292 | ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); | 288 | ubh_mark_buffer_dirty (UCPI_UBH(ucpi)); |
293 | if (sb->s_flags & MS_SYNCHRONOUS) { | 289 | if (sb->s_flags & MS_SYNCHRONOUS) |
294 | ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi)); | 290 | ubh_sync_block(UCPI_UBH(ucpi)); |
295 | ubh_wait_on_buffer (UCPI_UBH(ucpi)); | ||
296 | } | ||
297 | sb->s_dirt = 1; | 291 | sb->s_dirt = 1; |
298 | 292 | ||
299 | inode->i_ino = cg * uspi->s_ipg + bit; | 293 | inode->i_ino = cg * uspi->s_ipg + bit; |
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c index 34d5cb135320..a58f9155fc9a 100644 --- a/fs/ufs/truncate.c +++ b/fs/ufs/truncate.c | |||
@@ -243,10 +243,8 @@ static int ufs_trunc_indirect(struct inode *inode, u64 offset, void *p) | |||
243 | ubh_bforget(ind_ubh); | 243 | ubh_bforget(ind_ubh); |
244 | ind_ubh = NULL; | 244 | ind_ubh = NULL; |
245 | } | 245 | } |
246 | if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) { | 246 | if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) |
247 | ubh_ll_rw_block(SWRITE, ind_ubh); | 247 | ubh_sync_block(ind_ubh); |
248 | ubh_wait_on_buffer (ind_ubh); | ||
249 | } | ||
250 | ubh_brelse (ind_ubh); | 248 | ubh_brelse (ind_ubh); |
251 | 249 | ||
252 | UFSD("EXIT: ino %lu\n", inode->i_ino); | 250 | UFSD("EXIT: ino %lu\n", inode->i_ino); |
@@ -307,10 +305,8 @@ static int ufs_trunc_dindirect(struct inode *inode, u64 offset, void *p) | |||
307 | ubh_bforget(dind_bh); | 305 | ubh_bforget(dind_bh); |
308 | dind_bh = NULL; | 306 | dind_bh = NULL; |
309 | } | 307 | } |
310 | if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) { | 308 | if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) |
311 | ubh_ll_rw_block(SWRITE, dind_bh); | 309 | ubh_sync_block(dind_bh); |
312 | ubh_wait_on_buffer (dind_bh); | ||
313 | } | ||
314 | ubh_brelse (dind_bh); | 310 | ubh_brelse (dind_bh); |
315 | 311 | ||
316 | UFSD("EXIT: ino %lu\n", inode->i_ino); | 312 | UFSD("EXIT: ino %lu\n", inode->i_ino); |
@@ -367,10 +363,8 @@ static int ufs_trunc_tindirect(struct inode *inode) | |||
367 | ubh_bforget(tind_bh); | 363 | ubh_bforget(tind_bh); |
368 | tind_bh = NULL; | 364 | tind_bh = NULL; |
369 | } | 365 | } |
370 | if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) { | 366 | if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) |
371 | ubh_ll_rw_block(SWRITE, tind_bh); | 367 | ubh_sync_block(tind_bh); |
372 | ubh_wait_on_buffer (tind_bh); | ||
373 | } | ||
374 | ubh_brelse (tind_bh); | 368 | ubh_brelse (tind_bh); |
375 | 369 | ||
376 | UFSD("EXIT: ino %lu\n", inode->i_ino); | 370 | UFSD("EXIT: ino %lu\n", inode->i_ino); |
diff --git a/fs/ufs/util.c b/fs/ufs/util.c index 85a7fc9e4a4e..d2c36d53fe66 100644 --- a/fs/ufs/util.c +++ b/fs/ufs/util.c | |||
@@ -113,21 +113,17 @@ void ubh_mark_buffer_uptodate (struct ufs_buffer_head * ubh, int flag) | |||
113 | } | 113 | } |
114 | } | 114 | } |
115 | 115 | ||
116 | void ubh_ll_rw_block(int rw, struct ufs_buffer_head *ubh) | 116 | void ubh_sync_block(struct ufs_buffer_head *ubh) |
117 | { | 117 | { |
118 | if (!ubh) | 118 | if (ubh) { |
119 | return; | 119 | unsigned i; |
120 | 120 | ||
121 | ll_rw_block(rw, ubh->count, ubh->bh); | 121 | for (i = 0; i < ubh->count; i++) |
122 | } | 122 | write_dirty_buffer(ubh->bh[i], WRITE); |
123 | 123 | ||
124 | void ubh_wait_on_buffer (struct ufs_buffer_head * ubh) | 124 | for (i = 0; i < ubh->count; i++) |
125 | { | 125 | wait_on_buffer(ubh->bh[i]); |
126 | unsigned i; | 126 | } |
127 | if (!ubh) | ||
128 | return; | ||
129 | for ( i = 0; i < ubh->count; i++ ) | ||
130 | wait_on_buffer (ubh->bh[i]); | ||
131 | } | 127 | } |
132 | 128 | ||
133 | void ubh_bforget (struct ufs_buffer_head * ubh) | 129 | void ubh_bforget (struct ufs_buffer_head * ubh) |
diff --git a/fs/ufs/util.h b/fs/ufs/util.h index 0466036912f1..9f8775ce381c 100644 --- a/fs/ufs/util.h +++ b/fs/ufs/util.h | |||
@@ -269,8 +269,7 @@ extern void ubh_brelse (struct ufs_buffer_head *); | |||
269 | extern void ubh_brelse_uspi (struct ufs_sb_private_info *); | 269 | extern void ubh_brelse_uspi (struct ufs_sb_private_info *); |
270 | extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *); | 270 | extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *); |
271 | extern void ubh_mark_buffer_uptodate (struct ufs_buffer_head *, int); | 271 | extern void ubh_mark_buffer_uptodate (struct ufs_buffer_head *, int); |
272 | extern void ubh_ll_rw_block(int, struct ufs_buffer_head *); | 272 | extern void ubh_sync_block(struct ufs_buffer_head *); |
273 | extern void ubh_wait_on_buffer (struct ufs_buffer_head *); | ||
274 | extern void ubh_bforget (struct ufs_buffer_head *); | 273 | extern void ubh_bforget (struct ufs_buffer_head *); |
275 | extern int ubh_buffer_dirty (struct ufs_buffer_head *); | 274 | extern int ubh_buffer_dirty (struct ufs_buffer_head *); |
276 | #define ubh_ubhcpymem(mem,ubh,size) _ubh_ubhcpymem_(uspi,mem,ubh,size) | 275 | #define ubh_ubhcpymem(mem,ubh,size) _ubh_ubhcpymem_(uspi,mem,ubh,size) |
diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h index df84e3b04555..d89dec864d42 100644 --- a/include/asm-generic/syscalls.h +++ b/include/asm-generic/syscalls.h | |||
@@ -23,8 +23,10 @@ asmlinkage long sys_vfork(struct pt_regs *regs); | |||
23 | #endif | 23 | #endif |
24 | 24 | ||
25 | #ifndef sys_execve | 25 | #ifndef sys_execve |
26 | asmlinkage long sys_execve(char __user *filename, char __user * __user *argv, | 26 | asmlinkage long sys_execve(const char __user *filename, |
27 | char __user * __user *envp, struct pt_regs *regs); | 27 | const char __user *const __user *argv, |
28 | const char __user *const __user *envp, | ||
29 | struct pt_regs *regs); | ||
28 | #endif | 30 | #endif |
29 | 31 | ||
30 | #ifndef sys_mmap2 | 32 | #ifndef sys_mmap2 |
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 2a512bc0d4ab..7809d230adee 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
@@ -305,14 +305,16 @@ struct drm_ioctl_desc { | |||
305 | unsigned int cmd; | 305 | unsigned int cmd; |
306 | int flags; | 306 | int flags; |
307 | drm_ioctl_t *func; | 307 | drm_ioctl_t *func; |
308 | unsigned int cmd_drv; | ||
308 | }; | 309 | }; |
309 | 310 | ||
310 | /** | 311 | /** |
311 | * Creates a driver or general drm_ioctl_desc array entry for the given | 312 | * Creates a driver or general drm_ioctl_desc array entry for the given |
312 | * ioctl, for use by drm_ioctl(). | 313 | * ioctl, for use by drm_ioctl(). |
313 | */ | 314 | */ |
314 | #define DRM_IOCTL_DEF(ioctl, _func, _flags) \ | 315 | |
315 | [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags} | 316 | #define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ |
317 | [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl} | ||
316 | 318 | ||
317 | struct drm_magic_entry { | 319 | struct drm_magic_entry { |
318 | struct list_head head; | 320 | struct list_head head; |
diff --git a/include/drm/i830_drm.h b/include/drm/i830_drm.h index 4b00d2dd4f68..61315c29b8f3 100644 --- a/include/drm/i830_drm.h +++ b/include/drm/i830_drm.h | |||
@@ -264,20 +264,20 @@ typedef struct _drm_i830_sarea { | |||
264 | #define DRM_I830_GETPARAM 0x0c | 264 | #define DRM_I830_GETPARAM 0x0c |
265 | #define DRM_I830_SETPARAM 0x0d | 265 | #define DRM_I830_SETPARAM 0x0d |
266 | 266 | ||
267 | #define DRM_IOCTL_I830_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_INIT, drm_i830_init_t) | 267 | #define DRM_IOCTL_I830_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I830_INIT, drm_i830_init_t) |
268 | #define DRM_IOCTL_I830_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_VERTEX, drm_i830_vertex_t) | 268 | #define DRM_IOCTL_I830_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_I830_VERTEX, drm_i830_vertex_t) |
269 | #define DRM_IOCTL_I830_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_CLEAR, drm_i830_clear_t) | 269 | #define DRM_IOCTL_I830_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_I830_CLEAR, drm_i830_clear_t) |
270 | #define DRM_IOCTL_I830_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_FLUSH) | 270 | #define DRM_IOCTL_I830_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLUSH) |
271 | #define DRM_IOCTL_I830_GETAGE DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_GETAGE) | 271 | #define DRM_IOCTL_I830_GETAGE DRM_IO ( DRM_COMMAND_BASE + DRM_I830_GETAGE) |
272 | #define DRM_IOCTL_I830_GETBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_GETBUF, drm_i830_dma_t) | 272 | #define DRM_IOCTL_I830_GETBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETBUF, drm_i830_dma_t) |
273 | #define DRM_IOCTL_I830_SWAP DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_SWAP) | 273 | #define DRM_IOCTL_I830_SWAP DRM_IO ( DRM_COMMAND_BASE + DRM_I830_SWAP) |
274 | #define DRM_IOCTL_I830_COPY DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_COPY, drm_i830_copy_t) | 274 | #define DRM_IOCTL_I830_COPY DRM_IOW( DRM_COMMAND_BASE + DRM_I830_COPY, drm_i830_copy_t) |
275 | #define DRM_IOCTL_I830_DOCOPY DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_DOCOPY) | 275 | #define DRM_IOCTL_I830_DOCOPY DRM_IO ( DRM_COMMAND_BASE + DRM_I830_DOCOPY) |
276 | #define DRM_IOCTL_I830_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_FLIP) | 276 | #define DRM_IOCTL_I830_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLIP) |
277 | #define DRM_IOCTL_I830_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_IRQ_EMIT, drm_i830_irq_emit_t) | 277 | #define DRM_IOCTL_I830_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_IRQ_EMIT, drm_i830_irq_emit_t) |
278 | #define DRM_IOCTL_I830_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_IRQ_WAIT, drm_i830_irq_wait_t) | 278 | #define DRM_IOCTL_I830_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I830_IRQ_WAIT, drm_i830_irq_wait_t) |
279 | #define DRM_IOCTL_I830_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_GETPARAM, drm_i830_getparam_t) | 279 | #define DRM_IOCTL_I830_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETPARAM, drm_i830_getparam_t) |
280 | #define DRM_IOCTL_I830_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_SETPARAM, drm_i830_setparam_t) | 280 | #define DRM_IOCTL_I830_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_SETPARAM, drm_i830_setparam_t) |
281 | 281 | ||
282 | typedef struct _drm_i830_clear { | 282 | typedef struct _drm_i830_clear { |
283 | int clear_color; | 283 | int clear_color; |
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 8f8b072c4c7b..e41c74facb6a 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h | |||
@@ -215,6 +215,7 @@ typedef struct _drm_i915_sarea { | |||
215 | #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) | 215 | #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) |
216 | #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) | 216 | #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) |
217 | #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) | 217 | #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) |
218 | #define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init) | ||
218 | #define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) | 219 | #define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) |
219 | #define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) | 220 | #define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) |
220 | #define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2) | 221 | #define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2) |
diff --git a/include/drm/mga_drm.h b/include/drm/mga_drm.h index 3ffbc4798afa..c16097f99be0 100644 --- a/include/drm/mga_drm.h +++ b/include/drm/mga_drm.h | |||
@@ -248,7 +248,7 @@ typedef struct _drm_mga_sarea { | |||
248 | #define DRM_MGA_DMA_BOOTSTRAP 0x0c | 248 | #define DRM_MGA_DMA_BOOTSTRAP 0x0c |
249 | 249 | ||
250 | #define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t) | 250 | #define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t) |
251 | #define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t) | 251 | #define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, struct drm_lock) |
252 | #define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET) | 252 | #define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET) |
253 | #define DRM_IOCTL_MGA_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MGA_SWAP) | 253 | #define DRM_IOCTL_MGA_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MGA_SWAP) |
254 | #define DRM_IOCTL_MGA_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t) | 254 | #define DRM_IOCTL_MGA_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t) |
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h index fe917dee723a..01a714119506 100644 --- a/include/drm/nouveau_drm.h +++ b/include/drm/nouveau_drm.h | |||
@@ -197,4 +197,17 @@ struct drm_nouveau_sarea { | |||
197 | #define DRM_NOUVEAU_GEM_CPU_FINI 0x43 | 197 | #define DRM_NOUVEAU_GEM_CPU_FINI 0x43 |
198 | #define DRM_NOUVEAU_GEM_INFO 0x44 | 198 | #define DRM_NOUVEAU_GEM_INFO 0x44 |
199 | 199 | ||
200 | #define DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam) | ||
201 | #define DRM_IOCTL_NOUVEAU_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SETPARAM, struct drm_nouveau_setparam) | ||
202 | #define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc) | ||
203 | #define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free) | ||
204 | #define DRM_IOCTL_NOUVEAU_GROBJ_ALLOC DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GROBJ_ALLOC, struct drm_nouveau_grobj_alloc) | ||
205 | #define DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, struct drm_nouveau_notifierobj_alloc) | ||
206 | #define DRM_IOCTL_NOUVEAU_GPUOBJ_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GPUOBJ_FREE, struct drm_nouveau_gpuobj_free) | ||
207 | #define DRM_IOCTL_NOUVEAU_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_NEW, struct drm_nouveau_gem_new) | ||
208 | #define DRM_IOCTL_NOUVEAU_GEM_PUSHBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_PUSHBUF, struct drm_nouveau_gem_pushbuf) | ||
209 | #define DRM_IOCTL_NOUVEAU_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_PREP, struct drm_nouveau_gem_cpu_prep) | ||
210 | #define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini) | ||
211 | #define DRM_IOCTL_NOUVEAU_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info) | ||
212 | |||
200 | #endif /* __NOUVEAU_DRM_H__ */ | 213 | #endif /* __NOUVEAU_DRM_H__ */ |
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h index 0acaf8f91437..10f8b53bdd40 100644 --- a/include/drm/radeon_drm.h +++ b/include/drm/radeon_drm.h | |||
@@ -547,8 +547,8 @@ typedef struct { | |||
547 | #define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle) | 547 | #define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle) |
548 | #define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs) | 548 | #define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs) |
549 | #define DRM_IOCTL_RADEON_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info) | 549 | #define DRM_IOCTL_RADEON_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info) |
550 | #define DRM_IOCTL_RADEON_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling) | 550 | #define DRM_IOCTL_RADEON_GEM_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling) |
551 | #define DRM_IOCTL_RADEON_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling) | 551 | #define DRM_IOCTL_RADEON_GEM_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling) |
552 | #define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy) | 552 | #define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy) |
553 | 553 | ||
554 | typedef struct drm_radeon_init { | 554 | typedef struct drm_radeon_init { |
diff --git a/include/drm/savage_drm.h b/include/drm/savage_drm.h index 8a576ef01821..4863cf6bf96f 100644 --- a/include/drm/savage_drm.h +++ b/include/drm/savage_drm.h | |||
@@ -63,10 +63,10 @@ typedef struct _drm_savage_sarea { | |||
63 | #define DRM_SAVAGE_BCI_EVENT_EMIT 0x02 | 63 | #define DRM_SAVAGE_BCI_EVENT_EMIT 0x02 |
64 | #define DRM_SAVAGE_BCI_EVENT_WAIT 0x03 | 64 | #define DRM_SAVAGE_BCI_EVENT_WAIT 0x03 |
65 | 65 | ||
66 | #define DRM_IOCTL_SAVAGE_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t) | 66 | #define DRM_IOCTL_SAVAGE_BCI_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t) |
67 | #define DRM_IOCTL_SAVAGE_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t) | 67 | #define DRM_IOCTL_SAVAGE_BCI_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t) |
68 | #define DRM_IOCTL_SAVAGE_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t) | 68 | #define DRM_IOCTL_SAVAGE_BCI_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t) |
69 | #define DRM_IOCTL_SAVAGE_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t) | 69 | #define DRM_IOCTL_SAVAGE_BCI_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t) |
70 | 70 | ||
71 | #define SAVAGE_DMA_PCI 1 | 71 | #define SAVAGE_DMA_PCI 1 |
72 | #define SAVAGE_DMA_AGP 3 | 72 | #define SAVAGE_DMA_AGP 3 |
diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h index ca16c3801a1e..be33b3affc8a 100644 --- a/include/linux/amba/clcd.h +++ b/include/linux/amba/clcd.h | |||
@@ -150,6 +150,7 @@ struct clcd_fb { | |||
150 | u16 off_cntl; | 150 | u16 off_cntl; |
151 | u32 clcd_cntl; | 151 | u32 clcd_cntl; |
152 | u32 cmap[16]; | 152 | u32 cmap[16]; |
153 | bool clk_enabled; | ||
153 | }; | 154 | }; |
154 | 155 | ||
155 | static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs) | 156 | static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs) |
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index c809e286d213..a065612fc928 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h | |||
@@ -50,8 +50,8 @@ struct linux_binprm{ | |||
50 | int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */ | 50 | int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */ |
51 | unsigned int per_clear; /* bits to clear in current->personality */ | 51 | unsigned int per_clear; /* bits to clear in current->personality */ |
52 | int argc, envc; | 52 | int argc, envc; |
53 | char * filename; /* Name of binary as seen by procps */ | 53 | const char * filename; /* Name of binary as seen by procps */ |
54 | char * interp; /* Name of the binary really executed. Most | 54 | const char * interp; /* Name of the binary really executed. Most |
55 | of the time same as filename, but could be | 55 | of the time same as filename, but could be |
56 | different for binfmt_{misc,script} */ | 56 | different for binfmt_{misc,script} */ |
57 | unsigned interp_flags; | 57 | unsigned interp_flags; |
@@ -126,7 +126,8 @@ extern int setup_arg_pages(struct linux_binprm * bprm, | |||
126 | unsigned long stack_top, | 126 | unsigned long stack_top, |
127 | int executable_stack); | 127 | int executable_stack); |
128 | extern int bprm_mm_init(struct linux_binprm *bprm); | 128 | extern int bprm_mm_init(struct linux_binprm *bprm); |
129 | extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm); | 129 | extern int copy_strings_kernel(int argc, const char *const *argv, |
130 | struct linux_binprm *bprm); | ||
130 | extern int prepare_bprm_creds(struct linux_binprm *bprm); | 131 | extern int prepare_bprm_creds(struct linux_binprm *bprm); |
131 | extern void install_exec_creds(struct linux_binprm *bprm); | 132 | extern void install_exec_creds(struct linux_binprm *bprm); |
132 | extern void do_coredump(long signr, int exit_code, struct pt_regs *regs); | 133 | extern void do_coredump(long signr, int exit_code, struct pt_regs *regs); |
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 43e649a72529..ec94c12f21da 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h | |||
@@ -32,7 +32,6 @@ enum bh_state_bits { | |||
32 | BH_Delay, /* Buffer is not yet allocated on disk */ | 32 | BH_Delay, /* Buffer is not yet allocated on disk */ |
33 | BH_Boundary, /* Block is followed by a discontiguity */ | 33 | BH_Boundary, /* Block is followed by a discontiguity */ |
34 | BH_Write_EIO, /* I/O error on write */ | 34 | BH_Write_EIO, /* I/O error on write */ |
35 | BH_Ordered, /* ordered write */ | ||
36 | BH_Eopnotsupp, /* operation not supported (barrier) */ | 35 | BH_Eopnotsupp, /* operation not supported (barrier) */ |
37 | BH_Unwritten, /* Buffer is allocated on disk but not written */ | 36 | BH_Unwritten, /* Buffer is allocated on disk but not written */ |
38 | BH_Quiet, /* Buffer Error Prinks to be quiet */ | 37 | BH_Quiet, /* Buffer Error Prinks to be quiet */ |
@@ -125,7 +124,6 @@ BUFFER_FNS(Async_Write, async_write) | |||
125 | BUFFER_FNS(Delay, delay) | 124 | BUFFER_FNS(Delay, delay) |
126 | BUFFER_FNS(Boundary, boundary) | 125 | BUFFER_FNS(Boundary, boundary) |
127 | BUFFER_FNS(Write_EIO, write_io_error) | 126 | BUFFER_FNS(Write_EIO, write_io_error) |
128 | BUFFER_FNS(Ordered, ordered) | ||
129 | BUFFER_FNS(Eopnotsupp, eopnotsupp) | 127 | BUFFER_FNS(Eopnotsupp, eopnotsupp) |
130 | BUFFER_FNS(Unwritten, unwritten) | 128 | BUFFER_FNS(Unwritten, unwritten) |
131 | 129 | ||
@@ -183,6 +181,8 @@ void unlock_buffer(struct buffer_head *bh); | |||
183 | void __lock_buffer(struct buffer_head *bh); | 181 | void __lock_buffer(struct buffer_head *bh); |
184 | void ll_rw_block(int, int, struct buffer_head * bh[]); | 182 | void ll_rw_block(int, int, struct buffer_head * bh[]); |
185 | int sync_dirty_buffer(struct buffer_head *bh); | 183 | int sync_dirty_buffer(struct buffer_head *bh); |
184 | int __sync_dirty_buffer(struct buffer_head *bh, int rw); | ||
185 | void write_dirty_buffer(struct buffer_head *bh, int rw); | ||
186 | int submit_bh(int, struct buffer_head *); | 186 | int submit_bh(int, struct buffer_head *); |
187 | void write_boundary_block(struct block_device *bdev, | 187 | void write_boundary_block(struct block_device *bdev, |
188 | sector_t bblock, unsigned blocksize); | 188 | sector_t bblock, unsigned blocksize); |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 9a96b4d83fc1..76041b614758 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -125,9 +125,6 @@ struct inodes_stat_t { | |||
125 | * block layer could (in theory) choose to ignore this | 125 | * block layer could (in theory) choose to ignore this |
126 | * request if it runs into resource problems. | 126 | * request if it runs into resource problems. |
127 | * WRITE A normal async write. Device will be plugged. | 127 | * WRITE A normal async write. Device will be plugged. |
128 | * SWRITE Like WRITE, but a special case for ll_rw_block() that | ||
129 | * tells it to lock the buffer first. Normally a buffer | ||
130 | * must be locked before doing IO. | ||
131 | * WRITE_SYNC_PLUG Synchronous write. Identical to WRITE, but passes down | 128 | * WRITE_SYNC_PLUG Synchronous write. Identical to WRITE, but passes down |
132 | * the hint that someone will be waiting on this IO | 129 | * the hint that someone will be waiting on this IO |
133 | * shortly. The device must still be unplugged explicitly, | 130 | * shortly. The device must still be unplugged explicitly, |
@@ -138,9 +135,6 @@ struct inodes_stat_t { | |||
138 | * immediately after submission. The write equivalent | 135 | * immediately after submission. The write equivalent |
139 | * of READ_SYNC. | 136 | * of READ_SYNC. |
140 | * WRITE_ODIRECT_PLUG Special case write for O_DIRECT only. | 137 | * WRITE_ODIRECT_PLUG Special case write for O_DIRECT only. |
141 | * SWRITE_SYNC | ||
142 | * SWRITE_SYNC_PLUG Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer. | ||
143 | * See SWRITE. | ||
144 | * WRITE_BARRIER Like WRITE_SYNC, but tells the block layer that all | 138 | * WRITE_BARRIER Like WRITE_SYNC, but tells the block layer that all |
145 | * previously submitted writes must be safely on storage | 139 | * previously submitted writes must be safely on storage |
146 | * before this one is started. Also guarantees that when | 140 | * before this one is started. Also guarantees that when |
@@ -155,7 +149,6 @@ struct inodes_stat_t { | |||
155 | #define READ 0 | 149 | #define READ 0 |
156 | #define WRITE RW_MASK | 150 | #define WRITE RW_MASK |
157 | #define READA RWA_MASK | 151 | #define READA RWA_MASK |
158 | #define SWRITE (WRITE | READA) | ||
159 | 152 | ||
160 | #define READ_SYNC (READ | REQ_SYNC | REQ_UNPLUG) | 153 | #define READ_SYNC (READ | REQ_SYNC | REQ_UNPLUG) |
161 | #define READ_META (READ | REQ_META) | 154 | #define READ_META (READ | REQ_META) |
@@ -165,8 +158,6 @@ struct inodes_stat_t { | |||
165 | #define WRITE_META (WRITE | REQ_META) | 158 | #define WRITE_META (WRITE | REQ_META) |
166 | #define WRITE_BARRIER (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \ | 159 | #define WRITE_BARRIER (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \ |
167 | REQ_HARDBARRIER) | 160 | REQ_HARDBARRIER) |
168 | #define SWRITE_SYNC_PLUG (SWRITE | REQ_SYNC | REQ_NOIDLE) | ||
169 | #define SWRITE_SYNC (SWRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG) | ||
170 | 161 | ||
171 | /* | 162 | /* |
172 | * These aren't really reads or writes, they pass down information about | 163 | * These aren't really reads or writes, they pass down information about |
@@ -929,6 +920,9 @@ struct file { | |||
929 | #define f_vfsmnt f_path.mnt | 920 | #define f_vfsmnt f_path.mnt |
930 | const struct file_operations *f_op; | 921 | const struct file_operations *f_op; |
931 | spinlock_t f_lock; /* f_ep_links, f_flags, no IRQ */ | 922 | spinlock_t f_lock; /* f_ep_links, f_flags, no IRQ */ |
923 | #ifdef CONFIG_SMP | ||
924 | int f_sb_list_cpu; | ||
925 | #endif | ||
932 | atomic_long_t f_count; | 926 | atomic_long_t f_count; |
933 | unsigned int f_flags; | 927 | unsigned int f_flags; |
934 | fmode_t f_mode; | 928 | fmode_t f_mode; |
@@ -953,9 +947,6 @@ struct file { | |||
953 | unsigned long f_mnt_write_state; | 947 | unsigned long f_mnt_write_state; |
954 | #endif | 948 | #endif |
955 | }; | 949 | }; |
956 | extern spinlock_t files_lock; | ||
957 | #define file_list_lock() spin_lock(&files_lock); | ||
958 | #define file_list_unlock() spin_unlock(&files_lock); | ||
959 | 950 | ||
960 | #define get_file(x) atomic_long_inc(&(x)->f_count) | 951 | #define get_file(x) atomic_long_inc(&(x)->f_count) |
961 | #define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1) | 952 | #define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1) |
@@ -1346,7 +1337,11 @@ struct super_block { | |||
1346 | 1337 | ||
1347 | struct list_head s_inodes; /* all inodes */ | 1338 | struct list_head s_inodes; /* all inodes */ |
1348 | struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */ | 1339 | struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */ |
1340 | #ifdef CONFIG_SMP | ||
1341 | struct list_head __percpu *s_files; | ||
1342 | #else | ||
1349 | struct list_head s_files; | 1343 | struct list_head s_files; |
1344 | #endif | ||
1350 | /* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */ | 1345 | /* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */ |
1351 | struct list_head s_dentry_lru; /* unused dentry lru */ | 1346 | struct list_head s_dentry_lru; /* unused dentry lru */ |
1352 | int s_nr_dentry_unused; /* # of dentry on lru */ | 1347 | int s_nr_dentry_unused; /* # of dentry on lru */ |
@@ -2197,8 +2192,6 @@ static inline void insert_inode_hash(struct inode *inode) { | |||
2197 | __insert_inode_hash(inode, inode->i_ino); | 2192 | __insert_inode_hash(inode, inode->i_ino); |
2198 | } | 2193 | } |
2199 | 2194 | ||
2200 | extern void file_move(struct file *f, struct list_head *list); | ||
2201 | extern void file_kill(struct file *f); | ||
2202 | #ifdef CONFIG_BLOCK | 2195 | #ifdef CONFIG_BLOCK |
2203 | extern void submit_bio(int, struct bio *); | 2196 | extern void submit_bio(int, struct bio *); |
2204 | extern int bdev_read_only(struct block_device *); | 2197 | extern int bdev_read_only(struct block_device *); |
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h index eca3d5202138..a42b5bf02f8b 100644 --- a/include/linux/fs_struct.h +++ b/include/linux/fs_struct.h | |||
@@ -5,7 +5,7 @@ | |||
5 | 5 | ||
6 | struct fs_struct { | 6 | struct fs_struct { |
7 | int users; | 7 | int users; |
8 | rwlock_t lock; | 8 | spinlock_t lock; |
9 | int umask; | 9 | int umask; |
10 | int in_exec; | 10 | int in_exec; |
11 | struct path root, pwd; | 11 | struct path root, pwd; |
@@ -23,29 +23,29 @@ extern int unshare_fs_struct(void); | |||
23 | 23 | ||
24 | static inline void get_fs_root(struct fs_struct *fs, struct path *root) | 24 | static inline void get_fs_root(struct fs_struct *fs, struct path *root) |
25 | { | 25 | { |
26 | read_lock(&fs->lock); | 26 | spin_lock(&fs->lock); |
27 | *root = fs->root; | 27 | *root = fs->root; |
28 | path_get(root); | 28 | path_get(root); |
29 | read_unlock(&fs->lock); | 29 | spin_unlock(&fs->lock); |
30 | } | 30 | } |
31 | 31 | ||
32 | static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd) | 32 | static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd) |
33 | { | 33 | { |
34 | read_lock(&fs->lock); | 34 | spin_lock(&fs->lock); |
35 | *pwd = fs->pwd; | 35 | *pwd = fs->pwd; |
36 | path_get(pwd); | 36 | path_get(pwd); |
37 | read_unlock(&fs->lock); | 37 | spin_unlock(&fs->lock); |
38 | } | 38 | } |
39 | 39 | ||
40 | static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root, | 40 | static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root, |
41 | struct path *pwd) | 41 | struct path *pwd) |
42 | { | 42 | { |
43 | read_lock(&fs->lock); | 43 | spin_lock(&fs->lock); |
44 | *root = fs->root; | 44 | *root = fs->root; |
45 | path_get(root); | 45 | path_get(root); |
46 | *pwd = fs->pwd; | 46 | *pwd = fs->pwd; |
47 | path_get(pwd); | 47 | path_get(pwd); |
48 | read_unlock(&fs->lock); | 48 | spin_unlock(&fs->lock); |
49 | } | 49 | } |
50 | 50 | ||
51 | #endif /* _LINUX_FS_STRUCT_H */ | 51 | #endif /* _LINUX_FS_STRUCT_H */ |
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index 311f8753d713..4aa95f203f3e 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h | |||
@@ -836,6 +836,8 @@ extern void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize); | |||
836 | 836 | ||
837 | extern unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize); | 837 | extern unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize); |
838 | 838 | ||
839 | extern void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize); | ||
840 | |||
839 | extern unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, | 841 | extern unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, |
840 | void *buf, unsigned int len, size_t recsize); | 842 | void *buf, unsigned int len, size_t recsize); |
841 | 843 | ||
diff --git a/include/linux/lglock.h b/include/linux/lglock.h new file mode 100644 index 000000000000..b288cb713b90 --- /dev/null +++ b/include/linux/lglock.h | |||
@@ -0,0 +1,172 @@ | |||
1 | /* | ||
2 | * Specialised local-global spinlock. Can only be declared as global variables | ||
3 | * to avoid overhead and keep things simple (and we don't want to start using | ||
4 | * these inside dynamically allocated structures). | ||
5 | * | ||
6 | * "local/global locks" (lglocks) can be used to: | ||
7 | * | ||
8 | * - Provide fast exclusive access to per-CPU data, with exclusive access to | ||
9 | * another CPU's data allowed but possibly subject to contention, and to | ||
10 | * provide very slow exclusive access to all per-CPU data. | ||
11 | * - Or to provide very fast and scalable read serialisation, and to provide | ||
12 | * very slow exclusive serialisation of data (not necessarily per-CPU data). | ||
13 | * | ||
14 | * Brlocks are also implemented as a short-hand notation for the latter use | ||
15 | * case. | ||
16 | * | ||
17 | * Copyright 2009, 2010, Nick Piggin, Novell Inc. | ||
18 | */ | ||
19 | #ifndef __LINUX_LGLOCK_H | ||
20 | #define __LINUX_LGLOCK_H | ||
21 | |||
22 | #include <linux/spinlock.h> | ||
23 | #include <linux/lockdep.h> | ||
24 | #include <linux/percpu.h> | ||
25 | |||
26 | /* can make br locks by using local lock for read side, global lock for write */ | ||
27 | #define br_lock_init(name) name##_lock_init() | ||
28 | #define br_read_lock(name) name##_local_lock() | ||
29 | #define br_read_unlock(name) name##_local_unlock() | ||
30 | #define br_write_lock(name) name##_global_lock_online() | ||
31 | #define br_write_unlock(name) name##_global_unlock_online() | ||
32 | |||
33 | #define DECLARE_BRLOCK(name) DECLARE_LGLOCK(name) | ||
34 | #define DEFINE_BRLOCK(name) DEFINE_LGLOCK(name) | ||
35 | |||
36 | |||
37 | #define lg_lock_init(name) name##_lock_init() | ||
38 | #define lg_local_lock(name) name##_local_lock() | ||
39 | #define lg_local_unlock(name) name##_local_unlock() | ||
40 | #define lg_local_lock_cpu(name, cpu) name##_local_lock_cpu(cpu) | ||
41 | #define lg_local_unlock_cpu(name, cpu) name##_local_unlock_cpu(cpu) | ||
42 | #define lg_global_lock(name) name##_global_lock() | ||
43 | #define lg_global_unlock(name) name##_global_unlock() | ||
44 | #define lg_global_lock_online(name) name##_global_lock_online() | ||
45 | #define lg_global_unlock_online(name) name##_global_unlock_online() | ||
46 | |||
47 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
48 | #define LOCKDEP_INIT_MAP lockdep_init_map | ||
49 | |||
50 | #define DEFINE_LGLOCK_LOCKDEP(name) \ | ||
51 | struct lock_class_key name##_lock_key; \ | ||
52 | struct lockdep_map name##_lock_dep_map; \ | ||
53 | EXPORT_SYMBOL(name##_lock_dep_map) | ||
54 | |||
55 | #else | ||
56 | #define LOCKDEP_INIT_MAP(a, b, c, d) | ||
57 | |||
58 | #define DEFINE_LGLOCK_LOCKDEP(name) | ||
59 | #endif | ||
60 | |||
61 | |||
62 | #define DECLARE_LGLOCK(name) \ | ||
63 | extern void name##_lock_init(void); \ | ||
64 | extern void name##_local_lock(void); \ | ||
65 | extern void name##_local_unlock(void); \ | ||
66 | extern void name##_local_lock_cpu(int cpu); \ | ||
67 | extern void name##_local_unlock_cpu(int cpu); \ | ||
68 | extern void name##_global_lock(void); \ | ||
69 | extern void name##_global_unlock(void); \ | ||
70 | extern void name##_global_lock_online(void); \ | ||
71 | extern void name##_global_unlock_online(void); \ | ||
72 | |||
73 | #define DEFINE_LGLOCK(name) \ | ||
74 | \ | ||
75 | DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \ | ||
76 | DEFINE_LGLOCK_LOCKDEP(name); \ | ||
77 | \ | ||
78 | void name##_lock_init(void) { \ | ||
79 | int i; \ | ||
80 | LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \ | ||
81 | for_each_possible_cpu(i) { \ | ||
82 | arch_spinlock_t *lock; \ | ||
83 | lock = &per_cpu(name##_lock, i); \ | ||
84 | *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \ | ||
85 | } \ | ||
86 | } \ | ||
87 | EXPORT_SYMBOL(name##_lock_init); \ | ||
88 | \ | ||
89 | void name##_local_lock(void) { \ | ||
90 | arch_spinlock_t *lock; \ | ||
91 | preempt_disable(); \ | ||
92 | rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \ | ||
93 | lock = &__get_cpu_var(name##_lock); \ | ||
94 | arch_spin_lock(lock); \ | ||
95 | } \ | ||
96 | EXPORT_SYMBOL(name##_local_lock); \ | ||
97 | \ | ||
98 | void name##_local_unlock(void) { \ | ||
99 | arch_spinlock_t *lock; \ | ||
100 | rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \ | ||
101 | lock = &__get_cpu_var(name##_lock); \ | ||
102 | arch_spin_unlock(lock); \ | ||
103 | preempt_enable(); \ | ||
104 | } \ | ||
105 | EXPORT_SYMBOL(name##_local_unlock); \ | ||
106 | \ | ||
107 | void name##_local_lock_cpu(int cpu) { \ | ||
108 | arch_spinlock_t *lock; \ | ||
109 | preempt_disable(); \ | ||
110 | rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \ | ||
111 | lock = &per_cpu(name##_lock, cpu); \ | ||
112 | arch_spin_lock(lock); \ | ||
113 | } \ | ||
114 | EXPORT_SYMBOL(name##_local_lock_cpu); \ | ||
115 | \ | ||
116 | void name##_local_unlock_cpu(int cpu) { \ | ||
117 | arch_spinlock_t *lock; \ | ||
118 | rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \ | ||
119 | lock = &per_cpu(name##_lock, cpu); \ | ||
120 | arch_spin_unlock(lock); \ | ||
121 | preempt_enable(); \ | ||
122 | } \ | ||
123 | EXPORT_SYMBOL(name##_local_unlock_cpu); \ | ||
124 | \ | ||
125 | void name##_global_lock_online(void) { \ | ||
126 | int i; \ | ||
127 | preempt_disable(); \ | ||
128 | rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ | ||
129 | for_each_online_cpu(i) { \ | ||
130 | arch_spinlock_t *lock; \ | ||
131 | lock = &per_cpu(name##_lock, i); \ | ||
132 | arch_spin_lock(lock); \ | ||
133 | } \ | ||
134 | } \ | ||
135 | EXPORT_SYMBOL(name##_global_lock_online); \ | ||
136 | \ | ||
137 | void name##_global_unlock_online(void) { \ | ||
138 | int i; \ | ||
139 | rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ | ||
140 | for_each_online_cpu(i) { \ | ||
141 | arch_spinlock_t *lock; \ | ||
142 | lock = &per_cpu(name##_lock, i); \ | ||
143 | arch_spin_unlock(lock); \ | ||
144 | } \ | ||
145 | preempt_enable(); \ | ||
146 | } \ | ||
147 | EXPORT_SYMBOL(name##_global_unlock_online); \ | ||
148 | \ | ||
149 | void name##_global_lock(void) { \ | ||
150 | int i; \ | ||
151 | preempt_disable(); \ | ||
152 | rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ | ||
153 | for_each_online_cpu(i) { \ | ||
154 | arch_spinlock_t *lock; \ | ||
155 | lock = &per_cpu(name##_lock, i); \ | ||
156 | arch_spin_lock(lock); \ | ||
157 | } \ | ||
158 | } \ | ||
159 | EXPORT_SYMBOL(name##_global_lock); \ | ||
160 | \ | ||
161 | void name##_global_unlock(void) { \ | ||
162 | int i; \ | ||
163 | rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ | ||
164 | for_each_online_cpu(i) { \ | ||
165 | arch_spinlock_t *lock; \ | ||
166 | lock = &per_cpu(name##_lock, i); \ | ||
167 | arch_spin_unlock(lock); \ | ||
168 | } \ | ||
169 | preempt_enable(); \ | ||
170 | } \ | ||
171 | EXPORT_SYMBOL(name##_global_unlock); | ||
172 | #endif | ||
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index b8bb9a6a1f37..ee7e258627f9 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -134,7 +134,7 @@ struct vm_area_struct { | |||
134 | within vm_mm. */ | 134 | within vm_mm. */ |
135 | 135 | ||
136 | /* linked list of VM areas per task, sorted by address */ | 136 | /* linked list of VM areas per task, sorted by address */ |
137 | struct vm_area_struct *vm_next; | 137 | struct vm_area_struct *vm_next, *vm_prev; |
138 | 138 | ||
139 | pgprot_t vm_page_prot; /* Access permissions of this VMA. */ | 139 | pgprot_t vm_page_prot; /* Access permissions of this VMA. */ |
140 | unsigned long vm_flags; /* Flags, see mm.h. */ | 140 | unsigned long vm_flags; /* Flags, see mm.h. */ |
diff --git a/include/linux/sched.h b/include/linux/sched.h index ce160d68f5e7..1e2a6db2d7dd 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -2109,7 +2109,9 @@ extern void daemonize(const char *, ...); | |||
2109 | extern int allow_signal(int); | 2109 | extern int allow_signal(int); |
2110 | extern int disallow_signal(int); | 2110 | extern int disallow_signal(int); |
2111 | 2111 | ||
2112 | extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *); | 2112 | extern int do_execve(const char *, |
2113 | const char __user * const __user *, | ||
2114 | const char __user * const __user *, struct pt_regs *); | ||
2113 | extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); | 2115 | extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); |
2114 | struct task_struct *fork_idle(int); | 2116 | struct task_struct *fork_idle(int); |
2115 | 2117 | ||
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 6d14409c4d9a..9f63538928c0 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
@@ -68,7 +68,7 @@ struct kmem_cache_order_objects { | |||
68 | * Slab cache management. | 68 | * Slab cache management. |
69 | */ | 69 | */ |
70 | struct kmem_cache { | 70 | struct kmem_cache { |
71 | struct kmem_cache_cpu *cpu_slab; | 71 | struct kmem_cache_cpu __percpu *cpu_slab; |
72 | /* Used for retriving partial slabs etc */ | 72 | /* Used for retriving partial slabs etc */ |
73 | unsigned long flags; | 73 | unsigned long flags; |
74 | int size; /* The size of an object including meta data */ | 74 | int size; /* The size of an object including meta data */ |
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index ae0a5286f558..92e52a1e6af3 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
@@ -213,6 +213,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) | |||
213 | * @dma_alignment: SPI controller constraint on DMA buffers alignment. | 213 | * @dma_alignment: SPI controller constraint on DMA buffers alignment. |
214 | * @mode_bits: flags understood by this controller driver | 214 | * @mode_bits: flags understood by this controller driver |
215 | * @flags: other constraints relevant to this driver | 215 | * @flags: other constraints relevant to this driver |
216 | * @bus_lock_spinlock: spinlock for SPI bus locking | ||
217 | * @bus_lock_mutex: mutex for SPI bus locking | ||
218 | * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use | ||
216 | * @setup: updates the device mode and clocking records used by a | 219 | * @setup: updates the device mode and clocking records used by a |
217 | * device's SPI controller; protocol code may call this. This | 220 | * device's SPI controller; protocol code may call this. This |
218 | * must fail if an unrecognized or unsupported mode is requested. | 221 | * must fail if an unrecognized or unsupported mode is requested. |
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 6e5d19788634..e6319d18a55d 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
@@ -820,7 +820,7 @@ asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags, | |||
820 | u64 mask, int fd, | 820 | u64 mask, int fd, |
821 | const char __user *pathname); | 821 | const char __user *pathname); |
822 | 822 | ||
823 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]); | 823 | int kernel_execve(const char *filename, const char *const argv[], const char *const envp[]); |
824 | 824 | ||
825 | 825 | ||
826 | asmlinkage long sys_perf_event_open( | 826 | asmlinkage long sys_perf_event_open( |
diff --git a/include/linux/tty.h b/include/linux/tty.h index 1437da3ddc62..67d64e6efe7a 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
@@ -329,6 +329,13 @@ struct tty_struct { | |||
329 | struct tty_port *port; | 329 | struct tty_port *port; |
330 | }; | 330 | }; |
331 | 331 | ||
332 | /* Each of a tty's open files has private_data pointing to tty_file_private */ | ||
333 | struct tty_file_private { | ||
334 | struct tty_struct *tty; | ||
335 | struct file *file; | ||
336 | struct list_head list; | ||
337 | }; | ||
338 | |||
332 | /* tty magic number */ | 339 | /* tty magic number */ |
333 | #define TTY_MAGIC 0x5401 | 340 | #define TTY_MAGIC 0x5401 |
334 | 341 | ||
@@ -458,6 +465,7 @@ extern void proc_clear_tty(struct task_struct *p); | |||
458 | extern struct tty_struct *get_current_tty(void); | 465 | extern struct tty_struct *get_current_tty(void); |
459 | extern void tty_default_fops(struct file_operations *fops); | 466 | extern void tty_default_fops(struct file_operations *fops); |
460 | extern struct tty_struct *alloc_tty_struct(void); | 467 | extern struct tty_struct *alloc_tty_struct(void); |
468 | extern void tty_add_file(struct tty_struct *tty, struct file *file); | ||
461 | extern void free_tty_struct(struct tty_struct *tty); | 469 | extern void free_tty_struct(struct tty_struct *tty); |
462 | extern void initialize_tty_struct(struct tty_struct *tty, | 470 | extern void initialize_tty_struct(struct tty_struct *tty, |
463 | struct tty_driver *driver, int idx); | 471 | struct tty_driver *driver, int idx); |
@@ -470,6 +478,7 @@ extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty); | |||
470 | extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty); | 478 | extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty); |
471 | 479 | ||
472 | extern struct mutex tty_mutex; | 480 | extern struct mutex tty_mutex; |
481 | extern spinlock_t tty_files_lock; | ||
473 | 482 | ||
474 | extern void tty_write_unlock(struct tty_struct *tty); | 483 | extern void tty_write_unlock(struct tty_struct *tty); |
475 | extern int tty_write_lock(struct tty_struct *tty, int ndelay); | 484 | extern int tty_write_lock(struct tty_struct *tty, int ndelay); |
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h index 6a664c3f7c1e..7dc97d12253c 100644 --- a/include/sound/emu10k1.h +++ b/include/sound/emu10k1.h | |||
@@ -1707,6 +1707,7 @@ struct snd_emu10k1 { | |||
1707 | unsigned int card_type; /* EMU10K1_CARD_* */ | 1707 | unsigned int card_type; /* EMU10K1_CARD_* */ |
1708 | unsigned int ecard_ctrl; /* ecard control bits */ | 1708 | unsigned int ecard_ctrl; /* ecard control bits */ |
1709 | unsigned long dma_mask; /* PCI DMA mask */ | 1709 | unsigned long dma_mask; /* PCI DMA mask */ |
1710 | unsigned int delay_pcm_irq; /* in samples */ | ||
1710 | int max_cache_pages; /* max memory size / PAGE_SIZE */ | 1711 | int max_cache_pages; /* max memory size / PAGE_SIZE */ |
1711 | struct snd_dma_buffer silent_page; /* silent page */ | 1712 | struct snd_dma_buffer silent_page; /* silent page */ |
1712 | struct snd_dma_buffer ptb_pages; /* page table pages */ | 1713 | struct snd_dma_buffer ptb_pages; /* page table pages */ |
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h new file mode 100644 index 000000000000..49682d7e9d60 --- /dev/null +++ b/include/trace/events/workqueue.h | |||
@@ -0,0 +1,62 @@ | |||
1 | #undef TRACE_SYSTEM | ||
2 | #define TRACE_SYSTEM workqueue | ||
3 | |||
4 | #if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ) | ||
5 | #define _TRACE_WORKQUEUE_H | ||
6 | |||
7 | #include <linux/tracepoint.h> | ||
8 | #include <linux/workqueue.h> | ||
9 | |||
10 | /** | ||
11 | * workqueue_execute_start - called immediately before the workqueue callback | ||
12 | * @work: pointer to struct work_struct | ||
13 | * | ||
14 | * Allows to track workqueue execution. | ||
15 | */ | ||
16 | TRACE_EVENT(workqueue_execute_start, | ||
17 | |||
18 | TP_PROTO(struct work_struct *work), | ||
19 | |||
20 | TP_ARGS(work), | ||
21 | |||
22 | TP_STRUCT__entry( | ||
23 | __field( void *, work ) | ||
24 | __field( void *, function) | ||
25 | ), | ||
26 | |||
27 | TP_fast_assign( | ||
28 | __entry->work = work; | ||
29 | __entry->function = work->func; | ||
30 | ), | ||
31 | |||
32 | TP_printk("work struct %p: function %pf", __entry->work, __entry->function) | ||
33 | ); | ||
34 | |||
35 | /** | ||
36 | * workqueue_execute_end - called immediately before the workqueue callback | ||
37 | * @work: pointer to struct work_struct | ||
38 | * | ||
39 | * Allows to track workqueue execution. | ||
40 | */ | ||
41 | TRACE_EVENT(workqueue_execute_end, | ||
42 | |||
43 | TP_PROTO(struct work_struct *work), | ||
44 | |||
45 | TP_ARGS(work), | ||
46 | |||
47 | TP_STRUCT__entry( | ||
48 | __field( void *, work ) | ||
49 | ), | ||
50 | |||
51 | TP_fast_assign( | ||
52 | __entry->work = work; | ||
53 | ), | ||
54 | |||
55 | TP_printk("work struct %p", __entry->work) | ||
56 | ); | ||
57 | |||
58 | |||
59 | #endif /* _TRACE_WORKQUEUE_H */ | ||
60 | |||
61 | /* This part must be outside protection */ | ||
62 | #include <trace/define_trace.h> | ||
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c index 2b108538d0d9..3098a38f3ae1 100644 --- a/init/do_mounts_initrd.c +++ b/init/do_mounts_initrd.c | |||
@@ -24,10 +24,11 @@ static int __init no_initrd(char *str) | |||
24 | 24 | ||
25 | __setup("noinitrd", no_initrd); | 25 | __setup("noinitrd", no_initrd); |
26 | 26 | ||
27 | static int __init do_linuxrc(void * shell) | 27 | static int __init do_linuxrc(void *_shell) |
28 | { | 28 | { |
29 | static char *argv[] = { "linuxrc", NULL, }; | 29 | static const char *argv[] = { "linuxrc", NULL, }; |
30 | extern char * envp_init[]; | 30 | extern const char *envp_init[]; |
31 | const char *shell = _shell; | ||
31 | 32 | ||
32 | sys_close(old_fd);sys_close(root_fd); | 33 | sys_close(old_fd);sys_close(root_fd); |
33 | sys_setsid(); | 34 | sys_setsid(); |
diff --git a/init/main.c b/init/main.c index 22d61cb06f98..94ab488039aa 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -197,8 +197,8 @@ static int __init set_reset_devices(char *str) | |||
197 | 197 | ||
198 | __setup("reset_devices", set_reset_devices); | 198 | __setup("reset_devices", set_reset_devices); |
199 | 199 | ||
200 | static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, }; | 200 | static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, }; |
201 | char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, }; | 201 | const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, }; |
202 | static const char *panic_later, *panic_param; | 202 | static const char *panic_later, *panic_param; |
203 | 203 | ||
204 | extern const struct obs_kernel_param __setup_start[], __setup_end[]; | 204 | extern const struct obs_kernel_param __setup_start[], __setup_end[]; |
@@ -809,7 +809,7 @@ static void __init do_pre_smp_initcalls(void) | |||
809 | do_one_initcall(*fn); | 809 | do_one_initcall(*fn); |
810 | } | 810 | } |
811 | 811 | ||
812 | static void run_init_process(char *init_filename) | 812 | static void run_init_process(const char *init_filename) |
813 | { | 813 | { |
814 | argv_init[0] = init_filename; | 814 | argv_init[0] = init_filename; |
815 | kernel_execve(init_filename, argv_init, envp_init); | 815 | kernel_execve(init_filename, argv_init, envp_init); |
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h index c438f545a321..be775f7e81e0 100644 --- a/kernel/debug/kdb/kdb_private.h +++ b/kernel/debug/kdb/kdb_private.h | |||
@@ -255,7 +255,14 @@ extern void kdb_ps1(const struct task_struct *p); | |||
255 | extern void kdb_print_nameval(const char *name, unsigned long val); | 255 | extern void kdb_print_nameval(const char *name, unsigned long val); |
256 | extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info); | 256 | extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info); |
257 | extern void kdb_meminfo_proc_show(void); | 257 | extern void kdb_meminfo_proc_show(void); |
258 | #ifdef CONFIG_KALLSYMS | ||
258 | extern const char *kdb_walk_kallsyms(loff_t *pos); | 259 | extern const char *kdb_walk_kallsyms(loff_t *pos); |
260 | #else /* ! CONFIG_KALLSYMS */ | ||
261 | static inline const char *kdb_walk_kallsyms(loff_t *pos) | ||
262 | { | ||
263 | return NULL; | ||
264 | } | ||
265 | #endif /* ! CONFIG_KALLSYMS */ | ||
259 | extern char *kdb_getstr(char *, size_t, char *); | 266 | extern char *kdb_getstr(char *, size_t, char *); |
260 | 267 | ||
261 | /* Defines for kdb_symbol_print */ | 268 | /* Defines for kdb_symbol_print */ |
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c index 45344d5c53dd..6b2485dcb050 100644 --- a/kernel/debug/kdb/kdb_support.c +++ b/kernel/debug/kdb/kdb_support.c | |||
@@ -82,8 +82,8 @@ static char *kdb_name_table[100]; /* arbitrary size */ | |||
82 | int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab) | 82 | int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab) |
83 | { | 83 | { |
84 | int ret = 0; | 84 | int ret = 0; |
85 | unsigned long symbolsize; | 85 | unsigned long symbolsize = 0; |
86 | unsigned long offset; | 86 | unsigned long offset = 0; |
87 | #define knt1_size 128 /* must be >= kallsyms table size */ | 87 | #define knt1_size 128 /* must be >= kallsyms table size */ |
88 | char *knt1 = NULL; | 88 | char *knt1 = NULL; |
89 | 89 | ||
diff --git a/kernel/exit.c b/kernel/exit.c index 671ed56e0a49..03120229db28 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -1386,8 +1386,7 @@ static int wait_task_stopped(struct wait_opts *wo, | |||
1386 | if (!unlikely(wo->wo_flags & WNOWAIT)) | 1386 | if (!unlikely(wo->wo_flags & WNOWAIT)) |
1387 | *p_code = 0; | 1387 | *p_code = 0; |
1388 | 1388 | ||
1389 | /* don't need the RCU readlock here as we're holding a spinlock */ | 1389 | uid = task_uid(p); |
1390 | uid = __task_cred(p)->uid; | ||
1391 | unlock_sig: | 1390 | unlock_sig: |
1392 | spin_unlock_irq(&p->sighand->siglock); | 1391 | spin_unlock_irq(&p->sighand->siglock); |
1393 | if (!exit_code) | 1392 | if (!exit_code) |
@@ -1460,7 +1459,7 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p) | |||
1460 | } | 1459 | } |
1461 | if (!unlikely(wo->wo_flags & WNOWAIT)) | 1460 | if (!unlikely(wo->wo_flags & WNOWAIT)) |
1462 | p->signal->flags &= ~SIGNAL_STOP_CONTINUED; | 1461 | p->signal->flags &= ~SIGNAL_STOP_CONTINUED; |
1463 | uid = __task_cred(p)->uid; | 1462 | uid = task_uid(p); |
1464 | spin_unlock_irq(&p->sighand->siglock); | 1463 | spin_unlock_irq(&p->sighand->siglock); |
1465 | 1464 | ||
1466 | pid = task_pid_vnr(p); | 1465 | pid = task_pid_vnr(p); |
diff --git a/kernel/fork.c b/kernel/fork.c index 98b450876f93..b7e9d60a675d 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -300,7 +300,7 @@ out: | |||
300 | #ifdef CONFIG_MMU | 300 | #ifdef CONFIG_MMU |
301 | static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) | 301 | static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) |
302 | { | 302 | { |
303 | struct vm_area_struct *mpnt, *tmp, **pprev; | 303 | struct vm_area_struct *mpnt, *tmp, *prev, **pprev; |
304 | struct rb_node **rb_link, *rb_parent; | 304 | struct rb_node **rb_link, *rb_parent; |
305 | int retval; | 305 | int retval; |
306 | unsigned long charge; | 306 | unsigned long charge; |
@@ -328,6 +328,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) | |||
328 | if (retval) | 328 | if (retval) |
329 | goto out; | 329 | goto out; |
330 | 330 | ||
331 | prev = NULL; | ||
331 | for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { | 332 | for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { |
332 | struct file *file; | 333 | struct file *file; |
333 | 334 | ||
@@ -359,7 +360,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) | |||
359 | goto fail_nomem_anon_vma_fork; | 360 | goto fail_nomem_anon_vma_fork; |
360 | tmp->vm_flags &= ~VM_LOCKED; | 361 | tmp->vm_flags &= ~VM_LOCKED; |
361 | tmp->vm_mm = mm; | 362 | tmp->vm_mm = mm; |
362 | tmp->vm_next = NULL; | 363 | tmp->vm_next = tmp->vm_prev = NULL; |
363 | file = tmp->vm_file; | 364 | file = tmp->vm_file; |
364 | if (file) { | 365 | if (file) { |
365 | struct inode *inode = file->f_path.dentry->d_inode; | 366 | struct inode *inode = file->f_path.dentry->d_inode; |
@@ -392,6 +393,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) | |||
392 | */ | 393 | */ |
393 | *pprev = tmp; | 394 | *pprev = tmp; |
394 | pprev = &tmp->vm_next; | 395 | pprev = &tmp->vm_next; |
396 | tmp->vm_prev = prev; | ||
397 | prev = tmp; | ||
395 | 398 | ||
396 | __vma_link_rb(mm, tmp, rb_link, rb_parent); | 399 | __vma_link_rb(mm, tmp, rb_link, rb_parent); |
397 | rb_link = &tmp->vm_rb.rb_right; | 400 | rb_link = &tmp->vm_rb.rb_right; |
@@ -752,13 +755,13 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) | |||
752 | struct fs_struct *fs = current->fs; | 755 | struct fs_struct *fs = current->fs; |
753 | if (clone_flags & CLONE_FS) { | 756 | if (clone_flags & CLONE_FS) { |
754 | /* tsk->fs is already what we want */ | 757 | /* tsk->fs is already what we want */ |
755 | write_lock(&fs->lock); | 758 | spin_lock(&fs->lock); |
756 | if (fs->in_exec) { | 759 | if (fs->in_exec) { |
757 | write_unlock(&fs->lock); | 760 | spin_unlock(&fs->lock); |
758 | return -EAGAIN; | 761 | return -EAGAIN; |
759 | } | 762 | } |
760 | fs->users++; | 763 | fs->users++; |
761 | write_unlock(&fs->lock); | 764 | spin_unlock(&fs->lock); |
762 | return 0; | 765 | return 0; |
763 | } | 766 | } |
764 | tsk->fs = copy_fs_struct(fs); | 767 | tsk->fs = copy_fs_struct(fs); |
@@ -1676,13 +1679,13 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) | |||
1676 | 1679 | ||
1677 | if (new_fs) { | 1680 | if (new_fs) { |
1678 | fs = current->fs; | 1681 | fs = current->fs; |
1679 | write_lock(&fs->lock); | 1682 | spin_lock(&fs->lock); |
1680 | current->fs = new_fs; | 1683 | current->fs = new_fs; |
1681 | if (--fs->users) | 1684 | if (--fs->users) |
1682 | new_fs = NULL; | 1685 | new_fs = NULL; |
1683 | else | 1686 | else |
1684 | new_fs = fs; | 1687 | new_fs = fs; |
1685 | write_unlock(&fs->lock); | 1688 | spin_unlock(&fs->lock); |
1686 | } | 1689 | } |
1687 | 1690 | ||
1688 | if (new_mm) { | 1691 | if (new_mm) { |
diff --git a/kernel/kfifo.c b/kernel/kfifo.c index 4502604ecadf..6b5580c57644 100644 --- a/kernel/kfifo.c +++ b/kernel/kfifo.c | |||
@@ -503,6 +503,15 @@ unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf, | |||
503 | } | 503 | } |
504 | EXPORT_SYMBOL(__kfifo_out_r); | 504 | EXPORT_SYMBOL(__kfifo_out_r); |
505 | 505 | ||
506 | void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize) | ||
507 | { | ||
508 | unsigned int n; | ||
509 | |||
510 | n = __kfifo_peek_n(fifo, recsize); | ||
511 | fifo->out += n + recsize; | ||
512 | } | ||
513 | EXPORT_SYMBOL(__kfifo_skip_r); | ||
514 | |||
506 | int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from, | 515 | int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from, |
507 | unsigned long len, unsigned int *copied, size_t recsize) | 516 | unsigned long len, unsigned int *copied, size_t recsize) |
508 | { | 517 | { |
diff --git a/kernel/kmod.c b/kernel/kmod.c index 6e9b19667a8d..9cd0591c96a2 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c | |||
@@ -153,7 +153,9 @@ static int ____call_usermodehelper(void *data) | |||
153 | goto fail; | 153 | goto fail; |
154 | } | 154 | } |
155 | 155 | ||
156 | retval = kernel_execve(sub_info->path, sub_info->argv, sub_info->envp); | 156 | retval = kernel_execve(sub_info->path, |
157 | (const char *const *)sub_info->argv, | ||
158 | (const char *const *)sub_info->envp); | ||
157 | 159 | ||
158 | /* Exec failed? */ | 160 | /* Exec failed? */ |
159 | fail: | 161 | fail: |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 3632ce87674f..19cccc3c3028 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -3846,6 +3846,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer, | |||
3846 | rpos = reader->read; | 3846 | rpos = reader->read; |
3847 | pos += size; | 3847 | pos += size; |
3848 | 3848 | ||
3849 | if (rpos >= commit) | ||
3850 | break; | ||
3851 | |||
3849 | event = rb_reader_event(cpu_buffer); | 3852 | event = rb_reader_event(cpu_buffer); |
3850 | size = rb_event_length(event); | 3853 | size = rb_event_length(event); |
3851 | } while (len > size); | 3854 | } while (len > size); |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index ba14a22be4cc..9ec59f541156 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -3463,6 +3463,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
3463 | size_t cnt, loff_t *fpos) | 3463 | size_t cnt, loff_t *fpos) |
3464 | { | 3464 | { |
3465 | char *buf; | 3465 | char *buf; |
3466 | size_t written; | ||
3466 | 3467 | ||
3467 | if (tracing_disabled) | 3468 | if (tracing_disabled) |
3468 | return -EINVAL; | 3469 | return -EINVAL; |
@@ -3484,11 +3485,15 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
3484 | } else | 3485 | } else |
3485 | buf[cnt] = '\0'; | 3486 | buf[cnt] = '\0'; |
3486 | 3487 | ||
3487 | cnt = mark_printk("%s", buf); | 3488 | written = mark_printk("%s", buf); |
3488 | kfree(buf); | 3489 | kfree(buf); |
3489 | *fpos += cnt; | 3490 | *fpos += written; |
3490 | 3491 | ||
3491 | return cnt; | 3492 | /* don't tell userspace we wrote more - it might confuse them */ |
3493 | if (written > cnt) | ||
3494 | written = cnt; | ||
3495 | |||
3496 | return written; | ||
3492 | } | 3497 | } |
3493 | 3498 | ||
3494 | static int tracing_clock_show(struct seq_file *m, void *v) | 3499 | static int tracing_clock_show(struct seq_file *m, void *v) |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 09b4fa6e4d3b..4c758f146328 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -598,88 +598,165 @@ out: | |||
598 | return ret; | 598 | return ret; |
599 | } | 599 | } |
600 | 600 | ||
601 | static void print_event_fields(struct trace_seq *s, struct list_head *head) | 601 | enum { |
602 | FORMAT_HEADER = 1, | ||
603 | FORMAT_PRINTFMT = 2, | ||
604 | }; | ||
605 | |||
606 | static void *f_next(struct seq_file *m, void *v, loff_t *pos) | ||
602 | { | 607 | { |
608 | struct ftrace_event_call *call = m->private; | ||
603 | struct ftrace_event_field *field; | 609 | struct ftrace_event_field *field; |
610 | struct list_head *head; | ||
604 | 611 | ||
605 | list_for_each_entry_reverse(field, head, link) { | 612 | (*pos)++; |
606 | /* | ||
607 | * Smartly shows the array type(except dynamic array). | ||
608 | * Normal: | ||
609 | * field:TYPE VAR | ||
610 | * If TYPE := TYPE[LEN], it is shown: | ||
611 | * field:TYPE VAR[LEN] | ||
612 | */ | ||
613 | const char *array_descriptor = strchr(field->type, '['); | ||
614 | 613 | ||
615 | if (!strncmp(field->type, "__data_loc", 10)) | 614 | switch ((unsigned long)v) { |
616 | array_descriptor = NULL; | 615 | case FORMAT_HEADER: |
616 | head = &ftrace_common_fields; | ||
617 | 617 | ||
618 | if (!array_descriptor) { | 618 | if (unlikely(list_empty(head))) |
619 | trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;" | 619 | return NULL; |
620 | "\tsize:%u;\tsigned:%d;\n", | 620 | |
621 | field->type, field->name, field->offset, | 621 | field = list_entry(head->prev, struct ftrace_event_field, link); |
622 | field->size, !!field->is_signed); | 622 | return field; |
623 | } else { | 623 | |
624 | trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;" | 624 | case FORMAT_PRINTFMT: |
625 | "\tsize:%u;\tsigned:%d;\n", | 625 | /* all done */ |
626 | (int)(array_descriptor - field->type), | 626 | return NULL; |
627 | field->type, field->name, | 627 | } |
628 | array_descriptor, field->offset, | 628 | |
629 | field->size, !!field->is_signed); | 629 | head = trace_get_fields(call); |
630 | } | 630 | |
631 | /* | ||
632 | * To separate common fields from event fields, the | ||
633 | * LSB is set on the first event field. Clear it in case. | ||
634 | */ | ||
635 | v = (void *)((unsigned long)v & ~1L); | ||
636 | |||
637 | field = v; | ||
638 | /* | ||
639 | * If this is a common field, and at the end of the list, then | ||
640 | * continue with main list. | ||
641 | */ | ||
642 | if (field->link.prev == &ftrace_common_fields) { | ||
643 | if (unlikely(list_empty(head))) | ||
644 | return NULL; | ||
645 | field = list_entry(head->prev, struct ftrace_event_field, link); | ||
646 | /* Set the LSB to notify f_show to print an extra newline */ | ||
647 | field = (struct ftrace_event_field *) | ||
648 | ((unsigned long)field | 1); | ||
649 | return field; | ||
631 | } | 650 | } |
651 | |||
652 | /* If we are done tell f_show to print the format */ | ||
653 | if (field->link.prev == head) | ||
654 | return (void *)FORMAT_PRINTFMT; | ||
655 | |||
656 | field = list_entry(field->link.prev, struct ftrace_event_field, link); | ||
657 | |||
658 | return field; | ||
632 | } | 659 | } |
633 | 660 | ||
634 | static ssize_t | 661 | static void *f_start(struct seq_file *m, loff_t *pos) |
635 | event_format_read(struct file *filp, char __user *ubuf, size_t cnt, | ||
636 | loff_t *ppos) | ||
637 | { | 662 | { |
638 | struct ftrace_event_call *call = filp->private_data; | 663 | loff_t l = 0; |
639 | struct list_head *head; | 664 | void *p; |
640 | struct trace_seq *s; | ||
641 | char *buf; | ||
642 | int r; | ||
643 | 665 | ||
644 | if (*ppos) | 666 | /* Start by showing the header */ |
667 | if (!*pos) | ||
668 | return (void *)FORMAT_HEADER; | ||
669 | |||
670 | p = (void *)FORMAT_HEADER; | ||
671 | do { | ||
672 | p = f_next(m, p, &l); | ||
673 | } while (p && l < *pos); | ||
674 | |||
675 | return p; | ||
676 | } | ||
677 | |||
678 | static int f_show(struct seq_file *m, void *v) | ||
679 | { | ||
680 | struct ftrace_event_call *call = m->private; | ||
681 | struct ftrace_event_field *field; | ||
682 | const char *array_descriptor; | ||
683 | |||
684 | switch ((unsigned long)v) { | ||
685 | case FORMAT_HEADER: | ||
686 | seq_printf(m, "name: %s\n", call->name); | ||
687 | seq_printf(m, "ID: %d\n", call->event.type); | ||
688 | seq_printf(m, "format:\n"); | ||
645 | return 0; | 689 | return 0; |
646 | 690 | ||
647 | s = kmalloc(sizeof(*s), GFP_KERNEL); | 691 | case FORMAT_PRINTFMT: |
648 | if (!s) | 692 | seq_printf(m, "\nprint fmt: %s\n", |
649 | return -ENOMEM; | 693 | call->print_fmt); |
694 | return 0; | ||
695 | } | ||
650 | 696 | ||
651 | trace_seq_init(s); | 697 | /* |
698 | * To separate common fields from event fields, the | ||
699 | * LSB is set on the first event field. Clear it and | ||
700 | * print a newline if it is set. | ||
701 | */ | ||
702 | if ((unsigned long)v & 1) { | ||
703 | seq_putc(m, '\n'); | ||
704 | v = (void *)((unsigned long)v & ~1L); | ||
705 | } | ||
652 | 706 | ||
653 | trace_seq_printf(s, "name: %s\n", call->name); | 707 | field = v; |
654 | trace_seq_printf(s, "ID: %d\n", call->event.type); | ||
655 | trace_seq_printf(s, "format:\n"); | ||
656 | 708 | ||
657 | /* print common fields */ | 709 | /* |
658 | print_event_fields(s, &ftrace_common_fields); | 710 | * Smartly shows the array type(except dynamic array). |
711 | * Normal: | ||
712 | * field:TYPE VAR | ||
713 | * If TYPE := TYPE[LEN], it is shown: | ||
714 | * field:TYPE VAR[LEN] | ||
715 | */ | ||
716 | array_descriptor = strchr(field->type, '['); | ||
659 | 717 | ||
660 | trace_seq_putc(s, '\n'); | 718 | if (!strncmp(field->type, "__data_loc", 10)) |
719 | array_descriptor = NULL; | ||
661 | 720 | ||
662 | /* print event specific fields */ | 721 | if (!array_descriptor) |
663 | head = trace_get_fields(call); | 722 | seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n", |
664 | print_event_fields(s, head); | 723 | field->type, field->name, field->offset, |
724 | field->size, !!field->is_signed); | ||
725 | else | ||
726 | seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n", | ||
727 | (int)(array_descriptor - field->type), | ||
728 | field->type, field->name, | ||
729 | array_descriptor, field->offset, | ||
730 | field->size, !!field->is_signed); | ||
665 | 731 | ||
666 | r = trace_seq_printf(s, "\nprint fmt: %s\n", call->print_fmt); | 732 | return 0; |
733 | } | ||
667 | 734 | ||
668 | if (!r) { | 735 | static void f_stop(struct seq_file *m, void *p) |
669 | /* | 736 | { |
670 | * ug! The format output is bigger than a PAGE!! | 737 | } |
671 | */ | ||
672 | buf = "FORMAT TOO BIG\n"; | ||
673 | r = simple_read_from_buffer(ubuf, cnt, ppos, | ||
674 | buf, strlen(buf)); | ||
675 | goto out; | ||
676 | } | ||
677 | 738 | ||
678 | r = simple_read_from_buffer(ubuf, cnt, ppos, | 739 | static const struct seq_operations trace_format_seq_ops = { |
679 | s->buffer, s->len); | 740 | .start = f_start, |
680 | out: | 741 | .next = f_next, |
681 | kfree(s); | 742 | .stop = f_stop, |
682 | return r; | 743 | .show = f_show, |
744 | }; | ||
745 | |||
746 | static int trace_format_open(struct inode *inode, struct file *file) | ||
747 | { | ||
748 | struct ftrace_event_call *call = inode->i_private; | ||
749 | struct seq_file *m; | ||
750 | int ret; | ||
751 | |||
752 | ret = seq_open(file, &trace_format_seq_ops); | ||
753 | if (ret < 0) | ||
754 | return ret; | ||
755 | |||
756 | m = file->private_data; | ||
757 | m->private = call; | ||
758 | |||
759 | return 0; | ||
683 | } | 760 | } |
684 | 761 | ||
685 | static ssize_t | 762 | static ssize_t |
@@ -877,8 +954,10 @@ static const struct file_operations ftrace_enable_fops = { | |||
877 | }; | 954 | }; |
878 | 955 | ||
879 | static const struct file_operations ftrace_event_format_fops = { | 956 | static const struct file_operations ftrace_event_format_fops = { |
880 | .open = tracing_open_generic, | 957 | .open = trace_format_open, |
881 | .read = event_format_read, | 958 | .read = seq_read, |
959 | .llseek = seq_lseek, | ||
960 | .release = seq_release, | ||
882 | }; | 961 | }; |
883 | 962 | ||
884 | static const struct file_operations ftrace_event_id_fops = { | 963 | static const struct file_operations ftrace_event_id_fops = { |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 6bff23625781..6f233698518e 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -507,7 +507,15 @@ get_return_for_leaf(struct trace_iterator *iter, | |||
507 | * if the output fails. | 507 | * if the output fails. |
508 | */ | 508 | */ |
509 | data->ent = *curr; | 509 | data->ent = *curr; |
510 | data->ret = *next; | 510 | /* |
511 | * If the next event is not a return type, then | ||
512 | * we only care about what type it is. Otherwise we can | ||
513 | * safely copy the entire event. | ||
514 | */ | ||
515 | if (next->ent.type == TRACE_GRAPH_RET) | ||
516 | data->ret = *next; | ||
517 | else | ||
518 | data->ret.ent.type = next->ent.type; | ||
511 | } | 519 | } |
512 | } | 520 | } |
513 | 521 | ||
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 2994a0e3a61c..8bd600c020e5 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -35,6 +35,9 @@ | |||
35 | #include <linux/lockdep.h> | 35 | #include <linux/lockdep.h> |
36 | #include <linux/idr.h> | 36 | #include <linux/idr.h> |
37 | 37 | ||
38 | #define CREATE_TRACE_POINTS | ||
39 | #include <trace/events/workqueue.h> | ||
40 | |||
38 | #include "workqueue_sched.h" | 41 | #include "workqueue_sched.h" |
39 | 42 | ||
40 | enum { | 43 | enum { |
@@ -1790,7 +1793,13 @@ static void process_one_work(struct worker *worker, struct work_struct *work) | |||
1790 | work_clear_pending(work); | 1793 | work_clear_pending(work); |
1791 | lock_map_acquire(&cwq->wq->lockdep_map); | 1794 | lock_map_acquire(&cwq->wq->lockdep_map); |
1792 | lock_map_acquire(&lockdep_map); | 1795 | lock_map_acquire(&lockdep_map); |
1796 | trace_workqueue_execute_start(work); | ||
1793 | f(work); | 1797 | f(work); |
1798 | /* | ||
1799 | * While we must be careful to not use "work" after this, the trace | ||
1800 | * point will only record its address. | ||
1801 | */ | ||
1802 | trace_workqueue_execute_end(work); | ||
1794 | lock_map_release(&lockdep_map); | 1803 | lock_map_release(&lockdep_map); |
1795 | lock_map_release(&cwq->wq->lockdep_map); | 1804 | lock_map_release(&cwq->wq->lockdep_map); |
1796 | 1805 | ||
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 9e06b7f5ecf1..1b4afd2e6ca0 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -994,13 +994,16 @@ config FAULT_INJECTION_STACKTRACE_FILTER | |||
994 | 994 | ||
995 | config LATENCYTOP | 995 | config LATENCYTOP |
996 | bool "Latency measuring infrastructure" | 996 | bool "Latency measuring infrastructure" |
997 | depends on HAVE_LATENCYTOP_SUPPORT | ||
998 | depends on DEBUG_KERNEL | ||
999 | depends on STACKTRACE_SUPPORT | ||
1000 | depends on PROC_FS | ||
997 | select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE | 1001 | select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE |
998 | select KALLSYMS | 1002 | select KALLSYMS |
999 | select KALLSYMS_ALL | 1003 | select KALLSYMS_ALL |
1000 | select STACKTRACE | 1004 | select STACKTRACE |
1001 | select SCHEDSTATS | 1005 | select SCHEDSTATS |
1002 | select SCHED_DEBUG | 1006 | select SCHED_DEBUG |
1003 | depends on HAVE_LATENCYTOP_SUPPORT | ||
1004 | help | 1007 | help |
1005 | Enable this option if you want to use the LatencyTOP tool | 1008 | Enable this option if you want to use the LatencyTOP tool |
1006 | to find out which userspace is blocking on what kernel operations. | 1009 | to find out which userspace is blocking on what kernel operations. |
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index e907858498a6..efd16fa80b1c 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
@@ -174,14 +174,16 @@ static void radix_tree_node_rcu_free(struct rcu_head *head) | |||
174 | { | 174 | { |
175 | struct radix_tree_node *node = | 175 | struct radix_tree_node *node = |
176 | container_of(head, struct radix_tree_node, rcu_head); | 176 | container_of(head, struct radix_tree_node, rcu_head); |
177 | int i; | ||
177 | 178 | ||
178 | /* | 179 | /* |
179 | * must only free zeroed nodes into the slab. radix_tree_shrink | 180 | * must only free zeroed nodes into the slab. radix_tree_shrink |
180 | * can leave us with a non-NULL entry in the first slot, so clear | 181 | * can leave us with a non-NULL entry in the first slot, so clear |
181 | * that here to make sure. | 182 | * that here to make sure. |
182 | */ | 183 | */ |
183 | tag_clear(node, 0, 0); | 184 | for (i = 0; i < RADIX_TREE_MAX_TAGS; i++) |
184 | tag_clear(node, 1, 0); | 185 | tag_clear(node, i, 0); |
186 | |||
185 | node->slots[0] = NULL; | 187 | node->slots[0] = NULL; |
186 | node->count = 0; | 188 | node->count = 0; |
187 | 189 | ||
@@ -623,17 +625,30 @@ EXPORT_SYMBOL(radix_tree_tag_get); | |||
623 | * also settag. The function stops either after tagging nr_to_tag items or | 625 | * also settag. The function stops either after tagging nr_to_tag items or |
624 | * after reaching last_index. | 626 | * after reaching last_index. |
625 | * | 627 | * |
628 | * The tags must be set from the leaf level only and propagated back up the | ||
629 | * path to the root. We must do this so that we resolve the full path before | ||
630 | * setting any tags on intermediate nodes. If we set tags as we descend, then | ||
631 | * we can get to the leaf node and find that the index that has the iftag | ||
632 | * set is outside the range we are scanning. This reults in dangling tags and | ||
633 | * can lead to problems with later tag operations (e.g. livelocks on lookups). | ||
634 | * | ||
626 | * The function returns number of leaves where the tag was set and sets | 635 | * The function returns number of leaves where the tag was set and sets |
627 | * *first_indexp to the first unscanned index. | 636 | * *first_indexp to the first unscanned index. |
637 | * WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must | ||
638 | * be prepared to handle that. | ||
628 | */ | 639 | */ |
629 | unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, | 640 | unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, |
630 | unsigned long *first_indexp, unsigned long last_index, | 641 | unsigned long *first_indexp, unsigned long last_index, |
631 | unsigned long nr_to_tag, | 642 | unsigned long nr_to_tag, |
632 | unsigned int iftag, unsigned int settag) | 643 | unsigned int iftag, unsigned int settag) |
633 | { | 644 | { |
634 | unsigned int height = root->height, shift; | 645 | unsigned int height = root->height; |
635 | unsigned long tagged = 0, index = *first_indexp; | 646 | struct radix_tree_path path[height]; |
636 | struct radix_tree_node *open_slots[height], *slot; | 647 | struct radix_tree_path *pathp = path; |
648 | struct radix_tree_node *slot; | ||
649 | unsigned int shift; | ||
650 | unsigned long tagged = 0; | ||
651 | unsigned long index = *first_indexp; | ||
637 | 652 | ||
638 | last_index = min(last_index, radix_tree_maxindex(height)); | 653 | last_index = min(last_index, radix_tree_maxindex(height)); |
639 | if (index > last_index) | 654 | if (index > last_index) |
@@ -653,6 +668,13 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, | |||
653 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; | 668 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; |
654 | slot = radix_tree_indirect_to_ptr(root->rnode); | 669 | slot = radix_tree_indirect_to_ptr(root->rnode); |
655 | 670 | ||
671 | /* | ||
672 | * we fill the path from (root->height - 2) to 0, leaving the index at | ||
673 | * (root->height - 1) as a terminator. Zero the node in the terminator | ||
674 | * so that we can use this to end walk loops back up the path. | ||
675 | */ | ||
676 | path[height - 1].node = NULL; | ||
677 | |||
656 | for (;;) { | 678 | for (;;) { |
657 | int offset; | 679 | int offset; |
658 | 680 | ||
@@ -661,21 +683,35 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, | |||
661 | goto next; | 683 | goto next; |
662 | if (!tag_get(slot, iftag, offset)) | 684 | if (!tag_get(slot, iftag, offset)) |
663 | goto next; | 685 | goto next; |
686 | if (height > 1) { | ||
687 | /* Go down one level */ | ||
688 | height--; | ||
689 | shift -= RADIX_TREE_MAP_SHIFT; | ||
690 | path[height - 1].node = slot; | ||
691 | path[height - 1].offset = offset; | ||
692 | slot = slot->slots[offset]; | ||
693 | continue; | ||
694 | } | ||
695 | |||
696 | /* tag the leaf */ | ||
697 | tagged++; | ||
664 | tag_set(slot, settag, offset); | 698 | tag_set(slot, settag, offset); |
665 | if (height == 1) { | 699 | |
666 | tagged++; | 700 | /* walk back up the path tagging interior nodes */ |
667 | goto next; | 701 | pathp = &path[0]; |
702 | while (pathp->node) { | ||
703 | /* stop if we find a node with the tag already set */ | ||
704 | if (tag_get(pathp->node, settag, pathp->offset)) | ||
705 | break; | ||
706 | tag_set(pathp->node, settag, pathp->offset); | ||
707 | pathp++; | ||
668 | } | 708 | } |
669 | /* Go down one level */ | 709 | |
670 | height--; | ||
671 | shift -= RADIX_TREE_MAP_SHIFT; | ||
672 | open_slots[height] = slot; | ||
673 | slot = slot->slots[offset]; | ||
674 | continue; | ||
675 | next: | 710 | next: |
676 | /* Go to next item at level determined by 'shift' */ | 711 | /* Go to next item at level determined by 'shift' */ |
677 | index = ((index >> shift) + 1) << shift; | 712 | index = ((index >> shift) + 1) << shift; |
678 | if (index > last_index) | 713 | /* Overflow can happen when last_index is ~0UL... */ |
714 | if (index > last_index || !index) | ||
679 | break; | 715 | break; |
680 | if (tagged >= nr_to_tag) | 716 | if (tagged >= nr_to_tag) |
681 | break; | 717 | break; |
@@ -685,7 +721,7 @@ next: | |||
685 | * last_index is guaranteed to be in the tree, what | 721 | * last_index is guaranteed to be in the tree, what |
686 | * we do below cannot wander astray. | 722 | * we do below cannot wander astray. |
687 | */ | 723 | */ |
688 | slot = open_slots[height]; | 724 | slot = path[height - 1].node; |
689 | height++; | 725 | height++; |
690 | shift += RADIX_TREE_MAP_SHIFT; | 726 | shift += RADIX_TREE_MAP_SHIFT; |
691 | } | 727 | } |
diff --git a/mm/memory.c b/mm/memory.c index b6e5fd23cc5a..2ed2267439df 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -2770,11 +2770,18 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo | |||
2770 | { | 2770 | { |
2771 | address &= PAGE_MASK; | 2771 | address &= PAGE_MASK; |
2772 | if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { | 2772 | if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { |
2773 | address -= PAGE_SIZE; | 2773 | struct vm_area_struct *prev = vma->vm_prev; |
2774 | if (find_vma(vma->vm_mm, address) != vma) | 2774 | |
2775 | return -ENOMEM; | 2775 | /* |
2776 | * Is there a mapping abutting this one below? | ||
2777 | * | ||
2778 | * That's only ok if it's the same stack mapping | ||
2779 | * that has gotten split.. | ||
2780 | */ | ||
2781 | if (prev && prev->vm_end == address) | ||
2782 | return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; | ||
2776 | 2783 | ||
2777 | expand_stack(vma, address); | 2784 | expand_stack(vma, address - PAGE_SIZE); |
2778 | } | 2785 | } |
2779 | return 0; | 2786 | return 0; |
2780 | } | 2787 | } |
diff --git a/mm/mlock.c b/mm/mlock.c index 49e5e4cb8232..cbae7c5b9568 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
@@ -135,6 +135,19 @@ void munlock_vma_page(struct page *page) | |||
135 | } | 135 | } |
136 | } | 136 | } |
137 | 137 | ||
138 | /* Is the vma a continuation of the stack vma above it? */ | ||
139 | static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr) | ||
140 | { | ||
141 | return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); | ||
142 | } | ||
143 | |||
144 | static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) | ||
145 | { | ||
146 | return (vma->vm_flags & VM_GROWSDOWN) && | ||
147 | (vma->vm_start == addr) && | ||
148 | !vma_stack_continue(vma->vm_prev, addr); | ||
149 | } | ||
150 | |||
138 | /** | 151 | /** |
139 | * __mlock_vma_pages_range() - mlock a range of pages in the vma. | 152 | * __mlock_vma_pages_range() - mlock a range of pages in the vma. |
140 | * @vma: target vma | 153 | * @vma: target vma |
@@ -168,11 +181,9 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma, | |||
168 | gup_flags |= FOLL_WRITE; | 181 | gup_flags |= FOLL_WRITE; |
169 | 182 | ||
170 | /* We don't try to access the guard page of a stack vma */ | 183 | /* We don't try to access the guard page of a stack vma */ |
171 | if (vma->vm_flags & VM_GROWSDOWN) { | 184 | if (stack_guard_page(vma, start)) { |
172 | if (start == vma->vm_start) { | 185 | addr += PAGE_SIZE; |
173 | start += PAGE_SIZE; | 186 | nr_pages--; |
174 | nr_pages--; | ||
175 | } | ||
176 | } | 187 | } |
177 | 188 | ||
178 | while (nr_pages > 0) { | 189 | while (nr_pages > 0) { |
@@ -388,17 +388,23 @@ static inline void | |||
388 | __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, | 388 | __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, |
389 | struct vm_area_struct *prev, struct rb_node *rb_parent) | 389 | struct vm_area_struct *prev, struct rb_node *rb_parent) |
390 | { | 390 | { |
391 | struct vm_area_struct *next; | ||
392 | |||
393 | vma->vm_prev = prev; | ||
391 | if (prev) { | 394 | if (prev) { |
392 | vma->vm_next = prev->vm_next; | 395 | next = prev->vm_next; |
393 | prev->vm_next = vma; | 396 | prev->vm_next = vma; |
394 | } else { | 397 | } else { |
395 | mm->mmap = vma; | 398 | mm->mmap = vma; |
396 | if (rb_parent) | 399 | if (rb_parent) |
397 | vma->vm_next = rb_entry(rb_parent, | 400 | next = rb_entry(rb_parent, |
398 | struct vm_area_struct, vm_rb); | 401 | struct vm_area_struct, vm_rb); |
399 | else | 402 | else |
400 | vma->vm_next = NULL; | 403 | next = NULL; |
401 | } | 404 | } |
405 | vma->vm_next = next; | ||
406 | if (next) | ||
407 | next->vm_prev = vma; | ||
402 | } | 408 | } |
403 | 409 | ||
404 | void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, | 410 | void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, |
@@ -483,7 +489,11 @@ static inline void | |||
483 | __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma, | 489 | __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma, |
484 | struct vm_area_struct *prev) | 490 | struct vm_area_struct *prev) |
485 | { | 491 | { |
486 | prev->vm_next = vma->vm_next; | 492 | struct vm_area_struct *next = vma->vm_next; |
493 | |||
494 | prev->vm_next = next; | ||
495 | if (next) | ||
496 | next->vm_prev = prev; | ||
487 | rb_erase(&vma->vm_rb, &mm->mm_rb); | 497 | rb_erase(&vma->vm_rb, &mm->mm_rb); |
488 | if (mm->mmap_cache == vma) | 498 | if (mm->mmap_cache == vma) |
489 | mm->mmap_cache = prev; | 499 | mm->mmap_cache = prev; |
@@ -1915,6 +1925,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1915 | unsigned long addr; | 1925 | unsigned long addr; |
1916 | 1926 | ||
1917 | insertion_point = (prev ? &prev->vm_next : &mm->mmap); | 1927 | insertion_point = (prev ? &prev->vm_next : &mm->mmap); |
1928 | vma->vm_prev = NULL; | ||
1918 | do { | 1929 | do { |
1919 | rb_erase(&vma->vm_rb, &mm->mm_rb); | 1930 | rb_erase(&vma->vm_rb, &mm->mm_rb); |
1920 | mm->map_count--; | 1931 | mm->map_count--; |
@@ -1922,6 +1933,8 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1922 | vma = vma->vm_next; | 1933 | vma = vma->vm_next; |
1923 | } while (vma && vma->vm_start < end); | 1934 | } while (vma && vma->vm_start < end); |
1924 | *insertion_point = vma; | 1935 | *insertion_point = vma; |
1936 | if (vma) | ||
1937 | vma->vm_prev = prev; | ||
1925 | tail_vma->vm_next = NULL; | 1938 | tail_vma->vm_next = NULL; |
1926 | if (mm->unmap_area == arch_unmap_area) | 1939 | if (mm->unmap_area == arch_unmap_area) |
1927 | addr = prev ? prev->vm_end : mm->mmap_base; | 1940 | addr = prev ? prev->vm_end : mm->mmap_base; |
diff --git a/mm/nommu.c b/mm/nommu.c index efa9a380335e..88ff091eb07a 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -604,7 +604,7 @@ static void protect_vma(struct vm_area_struct *vma, unsigned long flags) | |||
604 | */ | 604 | */ |
605 | static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) | 605 | static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) |
606 | { | 606 | { |
607 | struct vm_area_struct *pvma, **pp; | 607 | struct vm_area_struct *pvma, **pp, *next; |
608 | struct address_space *mapping; | 608 | struct address_space *mapping; |
609 | struct rb_node **p, *parent; | 609 | struct rb_node **p, *parent; |
610 | 610 | ||
@@ -664,8 +664,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) | |||
664 | break; | 664 | break; |
665 | } | 665 | } |
666 | 666 | ||
667 | vma->vm_next = *pp; | 667 | next = *pp; |
668 | *pp = vma; | 668 | *pp = vma; |
669 | vma->vm_next = next; | ||
670 | if (next) | ||
671 | next->vm_prev = vma; | ||
669 | } | 672 | } |
670 | 673 | ||
671 | /* | 674 | /* |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 5014e50644d1..fc81cb22869e 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
@@ -372,7 +372,7 @@ static void dump_tasks(const struct mem_cgroup *mem) | |||
372 | } | 372 | } |
373 | 373 | ||
374 | pr_info("[%5d] %5d %5d %8lu %8lu %3u %3d %5d %s\n", | 374 | pr_info("[%5d] %5d %5d %8lu %8lu %3u %3d %5d %s\n", |
375 | task->pid, __task_cred(task)->uid, task->tgid, | 375 | task->pid, task_uid(task), task->tgid, |
376 | task->mm->total_vm, get_mm_rss(task->mm), | 376 | task->mm->total_vm, get_mm_rss(task->mm), |
377 | task_cpu(task), task->signal->oom_adj, | 377 | task_cpu(task), task->signal->oom_adj, |
378 | task->signal->oom_score_adj, task->comm); | 378 | task->signal->oom_score_adj, task->comm); |
@@ -401,10 +401,9 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, | |||
401 | static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem) | 401 | static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem) |
402 | { | 402 | { |
403 | p = find_lock_task_mm(p); | 403 | p = find_lock_task_mm(p); |
404 | if (!p) { | 404 | if (!p) |
405 | task_unlock(p); | ||
406 | return 1; | 405 | return 1; |
407 | } | 406 | |
408 | pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n", | 407 | pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n", |
409 | task_pid_nr(p), p->comm, K(p->mm->total_vm), | 408 | task_pid_nr(p), p->comm, K(p->mm->total_vm), |
410 | K(get_mm_counter(p->mm, MM_ANONPAGES)), | 409 | K(get_mm_counter(p->mm, MM_ANONPAGES)), |
@@ -647,6 +646,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, | |||
647 | unsigned long freed = 0; | 646 | unsigned long freed = 0; |
648 | unsigned int points; | 647 | unsigned int points; |
649 | enum oom_constraint constraint = CONSTRAINT_NONE; | 648 | enum oom_constraint constraint = CONSTRAINT_NONE; |
649 | int killed = 0; | ||
650 | 650 | ||
651 | blocking_notifier_call_chain(&oom_notify_list, 0, &freed); | 651 | blocking_notifier_call_chain(&oom_notify_list, 0, &freed); |
652 | if (freed > 0) | 652 | if (freed > 0) |
@@ -684,7 +684,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, | |||
684 | if (!oom_kill_process(current, gfp_mask, order, 0, totalpages, | 684 | if (!oom_kill_process(current, gfp_mask, order, 0, totalpages, |
685 | NULL, nodemask, | 685 | NULL, nodemask, |
686 | "Out of memory (oom_kill_allocating_task)")) | 686 | "Out of memory (oom_kill_allocating_task)")) |
687 | return; | 687 | goto out; |
688 | } | 688 | } |
689 | 689 | ||
690 | retry: | 690 | retry: |
@@ -692,7 +692,7 @@ retry: | |||
692 | constraint == CONSTRAINT_MEMORY_POLICY ? nodemask : | 692 | constraint == CONSTRAINT_MEMORY_POLICY ? nodemask : |
693 | NULL); | 693 | NULL); |
694 | if (PTR_ERR(p) == -1UL) | 694 | if (PTR_ERR(p) == -1UL) |
695 | return; | 695 | goto out; |
696 | 696 | ||
697 | /* Found nothing?!?! Either we hang forever, or we panic. */ | 697 | /* Found nothing?!?! Either we hang forever, or we panic. */ |
698 | if (!p) { | 698 | if (!p) { |
@@ -704,13 +704,15 @@ retry: | |||
704 | if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL, | 704 | if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL, |
705 | nodemask, "Out of memory")) | 705 | nodemask, "Out of memory")) |
706 | goto retry; | 706 | goto retry; |
707 | killed = 1; | ||
708 | out: | ||
707 | read_unlock(&tasklist_lock); | 709 | read_unlock(&tasklist_lock); |
708 | 710 | ||
709 | /* | 711 | /* |
710 | * Give "p" a good chance of killing itself before we | 712 | * Give "p" a good chance of killing itself before we |
711 | * retry to allocate memory unless "p" is current | 713 | * retry to allocate memory unless "p" is current |
712 | */ | 714 | */ |
713 | if (!test_thread_flag(TIF_MEMDIE)) | 715 | if (killed && !test_thread_flag(TIF_MEMDIE)) |
714 | schedule_timeout_uninterruptible(1); | 716 | schedule_timeout_uninterruptible(1); |
715 | } | 717 | } |
716 | 718 | ||
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 7262aacea8a2..c09ef5219cbe 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -836,7 +836,8 @@ void tag_pages_for_writeback(struct address_space *mapping, | |||
836 | spin_unlock_irq(&mapping->tree_lock); | 836 | spin_unlock_irq(&mapping->tree_lock); |
837 | WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH); | 837 | WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH); |
838 | cond_resched(); | 838 | cond_resched(); |
839 | } while (tagged >= WRITEBACK_TAG_BATCH); | 839 | /* We check 'start' to handle wrapping when end == ~0UL */ |
840 | } while (tagged >= WRITEBACK_TAG_BATCH && start); | ||
840 | } | 841 | } |
841 | EXPORT_SYMBOL(tag_pages_for_writeback); | 842 | EXPORT_SYMBOL(tag_pages_for_writeback); |
842 | 843 | ||
diff --git a/mm/shmem.c b/mm/shmem.c index dfaa0f4e9789..080b09a57a8f 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -2325,7 +2325,10 @@ static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs) | |||
2325 | 2325 | ||
2326 | static void shmem_put_super(struct super_block *sb) | 2326 | static void shmem_put_super(struct super_block *sb) |
2327 | { | 2327 | { |
2328 | kfree(sb->s_fs_info); | 2328 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); |
2329 | |||
2330 | percpu_counter_destroy(&sbinfo->used_blocks); | ||
2331 | kfree(sbinfo); | ||
2329 | sb->s_fs_info = NULL; | 2332 | sb->s_fs_info = NULL; |
2330 | } | 2333 | } |
2331 | 2334 | ||
@@ -2367,7 +2370,8 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent) | |||
2367 | #endif | 2370 | #endif |
2368 | 2371 | ||
2369 | spin_lock_init(&sbinfo->stat_lock); | 2372 | spin_lock_init(&sbinfo->stat_lock); |
2370 | percpu_counter_init(&sbinfo->used_blocks, 0); | 2373 | if (percpu_counter_init(&sbinfo->used_blocks, 0)) |
2374 | goto failed; | ||
2371 | sbinfo->free_inodes = sbinfo->max_inodes; | 2375 | sbinfo->free_inodes = sbinfo->max_inodes; |
2372 | 2376 | ||
2373 | sb->s_maxbytes = SHMEM_MAX_BYTES; | 2377 | sb->s_maxbytes = SHMEM_MAX_BYTES; |
@@ -2330,8 +2330,8 @@ kmem_cache_create (const char *name, size_t size, size_t align, | |||
2330 | } | 2330 | } |
2331 | #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) | 2331 | #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) |
2332 | if (size >= malloc_sizes[INDEX_L3 + 1].cs_size | 2332 | if (size >= malloc_sizes[INDEX_L3 + 1].cs_size |
2333 | && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) { | 2333 | && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) { |
2334 | cachep->obj_offset += PAGE_SIZE - size; | 2334 | cachep->obj_offset += PAGE_SIZE - ALIGN(size, align); |
2335 | size = PAGE_SIZE; | 2335 | size = PAGE_SIZE; |
2336 | } | 2336 | } |
2337 | #endif | 2337 | #endif |
diff --git a/net/core/dev.c b/net/core/dev.c index 1ae654391442..3721fbb9a83c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -3143,7 +3143,7 @@ pull: | |||
3143 | put_page(skb_shinfo(skb)->frags[0].page); | 3143 | put_page(skb_shinfo(skb)->frags[0].page); |
3144 | memmove(skb_shinfo(skb)->frags, | 3144 | memmove(skb_shinfo(skb)->frags, |
3145 | skb_shinfo(skb)->frags + 1, | 3145 | skb_shinfo(skb)->frags + 1, |
3146 | --skb_shinfo(skb)->nr_frags); | 3146 | --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); |
3147 | } | 3147 | } |
3148 | } | 3148 | } |
3149 | 3149 | ||
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 6bccba31d132..51d6c3167975 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c | |||
@@ -735,6 +735,7 @@ static void get_counters(const struct xt_table_info *t, | |||
735 | if (cpu == curcpu) | 735 | if (cpu == curcpu) |
736 | continue; | 736 | continue; |
737 | i = 0; | 737 | i = 0; |
738 | local_bh_disable(); | ||
738 | xt_info_wrlock(cpu); | 739 | xt_info_wrlock(cpu); |
739 | xt_entry_foreach(iter, t->entries[cpu], t->size) { | 740 | xt_entry_foreach(iter, t->entries[cpu], t->size) { |
740 | ADD_COUNTER(counters[i], iter->counters.bcnt, | 741 | ADD_COUNTER(counters[i], iter->counters.bcnt, |
@@ -742,6 +743,7 @@ static void get_counters(const struct xt_table_info *t, | |||
742 | ++i; | 743 | ++i; |
743 | } | 744 | } |
744 | xt_info_wrunlock(cpu); | 745 | xt_info_wrunlock(cpu); |
746 | local_bh_enable(); | ||
745 | } | 747 | } |
746 | put_cpu(); | 748 | put_cpu(); |
747 | } | 749 | } |
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index c439721b165a..97b64b22c412 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -909,6 +909,7 @@ get_counters(const struct xt_table_info *t, | |||
909 | if (cpu == curcpu) | 909 | if (cpu == curcpu) |
910 | continue; | 910 | continue; |
911 | i = 0; | 911 | i = 0; |
912 | local_bh_disable(); | ||
912 | xt_info_wrlock(cpu); | 913 | xt_info_wrlock(cpu); |
913 | xt_entry_foreach(iter, t->entries[cpu], t->size) { | 914 | xt_entry_foreach(iter, t->entries[cpu], t->size) { |
914 | ADD_COUNTER(counters[i], iter->counters.bcnt, | 915 | ADD_COUNTER(counters[i], iter->counters.bcnt, |
@@ -916,6 +917,7 @@ get_counters(const struct xt_table_info *t, | |||
916 | ++i; /* macro does multi eval of i */ | 917 | ++i; /* macro does multi eval of i */ |
917 | } | 918 | } |
918 | xt_info_wrunlock(cpu); | 919 | xt_info_wrunlock(cpu); |
920 | local_bh_enable(); | ||
919 | } | 921 | } |
920 | put_cpu(); | 922 | put_cpu(); |
921 | } | 923 | } |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 5359ef4daac5..29a7bca29e3f 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -922,6 +922,7 @@ get_counters(const struct xt_table_info *t, | |||
922 | if (cpu == curcpu) | 922 | if (cpu == curcpu) |
923 | continue; | 923 | continue; |
924 | i = 0; | 924 | i = 0; |
925 | local_bh_disable(); | ||
925 | xt_info_wrlock(cpu); | 926 | xt_info_wrlock(cpu); |
926 | xt_entry_foreach(iter, t->entries[cpu], t->size) { | 927 | xt_entry_foreach(iter, t->entries[cpu], t->size) { |
927 | ADD_COUNTER(counters[i], iter->counters.bcnt, | 928 | ADD_COUNTER(counters[i], iter->counters.bcnt, |
@@ -929,6 +930,7 @@ get_counters(const struct xt_table_info *t, | |||
929 | ++i; | 930 | ++i; |
930 | } | 931 | } |
931 | xt_info_wrunlock(cpu); | 932 | xt_info_wrunlock(cpu); |
933 | local_bh_enable(); | ||
932 | } | 934 | } |
933 | put_cpu(); | 935 | put_cpu(); |
934 | } | 936 | } |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 8f2d0400cf8a..d126365ac046 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -2580,7 +2580,7 @@ ctl_table ipv6_route_table_template[] = { | |||
2580 | .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity, | 2580 | .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity, |
2581 | .maxlen = sizeof(int), | 2581 | .maxlen = sizeof(int), |
2582 | .mode = 0644, | 2582 | .mode = 0644, |
2583 | .proc_handler = proc_dointvec_jiffies, | 2583 | .proc_handler = proc_dointvec, |
2584 | }, | 2584 | }, |
2585 | { | 2585 | { |
2586 | .procname = "mtu_expires", | 2586 | .procname = "mtu_expires", |
@@ -2594,7 +2594,7 @@ ctl_table ipv6_route_table_template[] = { | |||
2594 | .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss, | 2594 | .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss, |
2595 | .maxlen = sizeof(int), | 2595 | .maxlen = sizeof(int), |
2596 | .mode = 0644, | 2596 | .mode = 0644, |
2597 | .proc_handler = proc_dointvec_jiffies, | 2597 | .proc_handler = proc_dointvec, |
2598 | }, | 2598 | }, |
2599 | { | 2599 | { |
2600 | .procname = "gc_min_interval_ms", | 2600 | .procname = "gc_min_interval_ms", |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 2cbf380377d5..8648a9922aab 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -1406,7 +1406,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
1406 | struct netlink_sock *nlk = nlk_sk(sk); | 1406 | struct netlink_sock *nlk = nlk_sk(sk); |
1407 | int noblock = flags&MSG_DONTWAIT; | 1407 | int noblock = flags&MSG_DONTWAIT; |
1408 | size_t copied; | 1408 | size_t copied; |
1409 | struct sk_buff *skb; | 1409 | struct sk_buff *skb, *frag __maybe_unused = NULL; |
1410 | int err; | 1410 | int err; |
1411 | 1411 | ||
1412 | if (flags&MSG_OOB) | 1412 | if (flags&MSG_OOB) |
@@ -1441,21 +1441,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
1441 | kfree_skb(skb); | 1441 | kfree_skb(skb); |
1442 | skb = compskb; | 1442 | skb = compskb; |
1443 | } else { | 1443 | } else { |
1444 | /* | 1444 | frag = skb_shinfo(skb)->frag_list; |
1445 | * Before setting frag_list to NULL, we must get a | ||
1446 | * private copy of skb if shared (because of MSG_PEEK) | ||
1447 | */ | ||
1448 | if (skb_shared(skb)) { | ||
1449 | struct sk_buff *nskb; | ||
1450 | |||
1451 | nskb = pskb_copy(skb, GFP_KERNEL); | ||
1452 | kfree_skb(skb); | ||
1453 | skb = nskb; | ||
1454 | err = -ENOMEM; | ||
1455 | if (!skb) | ||
1456 | goto out; | ||
1457 | } | ||
1458 | kfree_skb(skb_shinfo(skb)->frag_list); | ||
1459 | skb_shinfo(skb)->frag_list = NULL; | 1445 | skb_shinfo(skb)->frag_list = NULL; |
1460 | } | 1446 | } |
1461 | } | 1447 | } |
@@ -1492,6 +1478,10 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
1492 | if (flags & MSG_TRUNC) | 1478 | if (flags & MSG_TRUNC) |
1493 | copied = skb->len; | 1479 | copied = skb->len; |
1494 | 1480 | ||
1481 | #ifdef CONFIG_COMPAT_NETLINK_MESSAGES | ||
1482 | skb_shinfo(skb)->frag_list = frag; | ||
1483 | #endif | ||
1484 | |||
1495 | skb_free_datagram(sk, skb); | 1485 | skb_free_datagram(sk, skb); |
1496 | 1486 | ||
1497 | if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) | 1487 | if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) |
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 8406c6654990..c2ed90a4c0b4 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c | |||
@@ -152,21 +152,24 @@ static int tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result | |||
152 | static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) | 152 | static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) |
153 | { | 153 | { |
154 | unsigned char *b = skb_tail_pointer(skb); | 154 | unsigned char *b = skb_tail_pointer(skb); |
155 | struct tc_gact opt; | ||
156 | struct tcf_gact *gact = a->priv; | 155 | struct tcf_gact *gact = a->priv; |
156 | struct tc_gact opt = { | ||
157 | .index = gact->tcf_index, | ||
158 | .refcnt = gact->tcf_refcnt - ref, | ||
159 | .bindcnt = gact->tcf_bindcnt - bind, | ||
160 | .action = gact->tcf_action, | ||
161 | }; | ||
157 | struct tcf_t t; | 162 | struct tcf_t t; |
158 | 163 | ||
159 | opt.index = gact->tcf_index; | ||
160 | opt.refcnt = gact->tcf_refcnt - ref; | ||
161 | opt.bindcnt = gact->tcf_bindcnt - bind; | ||
162 | opt.action = gact->tcf_action; | ||
163 | NLA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt); | 164 | NLA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt); |
164 | #ifdef CONFIG_GACT_PROB | 165 | #ifdef CONFIG_GACT_PROB |
165 | if (gact->tcfg_ptype) { | 166 | if (gact->tcfg_ptype) { |
166 | struct tc_gact_p p_opt; | 167 | struct tc_gact_p p_opt = { |
167 | p_opt.paction = gact->tcfg_paction; | 168 | .paction = gact->tcfg_paction, |
168 | p_opt.pval = gact->tcfg_pval; | 169 | .pval = gact->tcfg_pval, |
169 | p_opt.ptype = gact->tcfg_ptype; | 170 | .ptype = gact->tcfg_ptype, |
171 | }; | ||
172 | |||
170 | NLA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt); | 173 | NLA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt); |
171 | } | 174 | } |
172 | #endif | 175 | #endif |
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 11f195af2da0..0c311be92827 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c | |||
@@ -219,15 +219,16 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, i | |||
219 | { | 219 | { |
220 | unsigned char *b = skb_tail_pointer(skb); | 220 | unsigned char *b = skb_tail_pointer(skb); |
221 | struct tcf_mirred *m = a->priv; | 221 | struct tcf_mirred *m = a->priv; |
222 | struct tc_mirred opt; | 222 | struct tc_mirred opt = { |
223 | .index = m->tcf_index, | ||
224 | .action = m->tcf_action, | ||
225 | .refcnt = m->tcf_refcnt - ref, | ||
226 | .bindcnt = m->tcf_bindcnt - bind, | ||
227 | .eaction = m->tcfm_eaction, | ||
228 | .ifindex = m->tcfm_ifindex, | ||
229 | }; | ||
223 | struct tcf_t t; | 230 | struct tcf_t t; |
224 | 231 | ||
225 | opt.index = m->tcf_index; | ||
226 | opt.action = m->tcf_action; | ||
227 | opt.refcnt = m->tcf_refcnt - ref; | ||
228 | opt.bindcnt = m->tcf_bindcnt - bind; | ||
229 | opt.eaction = m->tcfm_eaction; | ||
230 | opt.ifindex = m->tcfm_ifindex; | ||
231 | NLA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt); | 232 | NLA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt); |
232 | t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install); | 233 | t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install); |
233 | t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse); | 234 | t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse); |
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 509a2d53a99d..186eb837e600 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c | |||
@@ -272,19 +272,19 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a, | |||
272 | { | 272 | { |
273 | unsigned char *b = skb_tail_pointer(skb); | 273 | unsigned char *b = skb_tail_pointer(skb); |
274 | struct tcf_nat *p = a->priv; | 274 | struct tcf_nat *p = a->priv; |
275 | struct tc_nat opt; | 275 | struct tc_nat opt = { |
276 | .old_addr = p->old_addr, | ||
277 | .new_addr = p->new_addr, | ||
278 | .mask = p->mask, | ||
279 | .flags = p->flags, | ||
280 | |||
281 | .index = p->tcf_index, | ||
282 | .action = p->tcf_action, | ||
283 | .refcnt = p->tcf_refcnt - ref, | ||
284 | .bindcnt = p->tcf_bindcnt - bind, | ||
285 | }; | ||
276 | struct tcf_t t; | 286 | struct tcf_t t; |
277 | 287 | ||
278 | opt.old_addr = p->old_addr; | ||
279 | opt.new_addr = p->new_addr; | ||
280 | opt.mask = p->mask; | ||
281 | opt.flags = p->flags; | ||
282 | |||
283 | opt.index = p->tcf_index; | ||
284 | opt.action = p->tcf_action; | ||
285 | opt.refcnt = p->tcf_refcnt - ref; | ||
286 | opt.bindcnt = p->tcf_bindcnt - bind; | ||
287 | |||
288 | NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt); | 288 | NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt); |
289 | t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); | 289 | t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); |
290 | t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); | 290 | t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); |
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 4a1d640b0cf1..97e84f3ee775 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c | |||
@@ -164,13 +164,14 @@ static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, | |||
164 | { | 164 | { |
165 | unsigned char *b = skb_tail_pointer(skb); | 165 | unsigned char *b = skb_tail_pointer(skb); |
166 | struct tcf_defact *d = a->priv; | 166 | struct tcf_defact *d = a->priv; |
167 | struct tc_defact opt; | 167 | struct tc_defact opt = { |
168 | .index = d->tcf_index, | ||
169 | .refcnt = d->tcf_refcnt - ref, | ||
170 | .bindcnt = d->tcf_bindcnt - bind, | ||
171 | .action = d->tcf_action, | ||
172 | }; | ||
168 | struct tcf_t t; | 173 | struct tcf_t t; |
169 | 174 | ||
170 | opt.index = d->tcf_index; | ||
171 | opt.refcnt = d->tcf_refcnt - ref; | ||
172 | opt.bindcnt = d->tcf_bindcnt - bind; | ||
173 | opt.action = d->tcf_action; | ||
174 | NLA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt); | 175 | NLA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt); |
175 | NLA_PUT_STRING(skb, TCA_DEF_DATA, d->tcfd_defdata); | 176 | NLA_PUT_STRING(skb, TCA_DEF_DATA, d->tcfd_defdata); |
176 | t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); | 177 | t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); |
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index e9607fe55b58..66cbf4eb8855 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c | |||
@@ -159,13 +159,14 @@ static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, | |||
159 | { | 159 | { |
160 | unsigned char *b = skb_tail_pointer(skb); | 160 | unsigned char *b = skb_tail_pointer(skb); |
161 | struct tcf_skbedit *d = a->priv; | 161 | struct tcf_skbedit *d = a->priv; |
162 | struct tc_skbedit opt; | 162 | struct tc_skbedit opt = { |
163 | .index = d->tcf_index, | ||
164 | .refcnt = d->tcf_refcnt - ref, | ||
165 | .bindcnt = d->tcf_bindcnt - bind, | ||
166 | .action = d->tcf_action, | ||
167 | }; | ||
163 | struct tcf_t t; | 168 | struct tcf_t t; |
164 | 169 | ||
165 | opt.index = d->tcf_index; | ||
166 | opt.refcnt = d->tcf_refcnt - ref; | ||
167 | opt.bindcnt = d->tcf_bindcnt - bind; | ||
168 | opt.action = d->tcf_action; | ||
169 | NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt); | 170 | NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt); |
170 | if (d->flags & SKBEDIT_F_PRIORITY) | 171 | if (d->flags & SKBEDIT_F_PRIORITY) |
171 | NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority), | 172 | NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority), |
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig index 443c161eb8bd..3376d7657185 100644 --- a/net/sunrpc/Kconfig +++ b/net/sunrpc/Kconfig | |||
@@ -18,10 +18,11 @@ config SUNRPC_XPRT_RDMA | |||
18 | If unsure, say N. | 18 | If unsure, say N. |
19 | 19 | ||
20 | config RPCSEC_GSS_KRB5 | 20 | config RPCSEC_GSS_KRB5 |
21 | tristate "Secure RPC: Kerberos V mechanism (EXPERIMENTAL)" | 21 | tristate |
22 | depends on SUNRPC && EXPERIMENTAL | 22 | depends on SUNRPC && CRYPTO |
23 | prompt "Secure RPC: Kerberos V mechanism" if !(NFS_V4 || NFSD_V4) | ||
24 | default y | ||
23 | select SUNRPC_GSS | 25 | select SUNRPC_GSS |
24 | select CRYPTO | ||
25 | select CRYPTO_MD5 | 26 | select CRYPTO_MD5 |
26 | select CRYPTO_DES | 27 | select CRYPTO_DES |
27 | select CRYPTO_CBC | 28 | select CRYPTO_CBC |
@@ -34,7 +35,7 @@ config RPCSEC_GSS_KRB5 | |||
34 | available from http://linux-nfs.org/. In addition, user-space | 35 | available from http://linux-nfs.org/. In addition, user-space |
35 | Kerberos support should be installed. | 36 | Kerberos support should be installed. |
36 | 37 | ||
37 | If unsure, say N. | 38 | If unsure, say Y. |
38 | 39 | ||
39 | config RPCSEC_GSS_SPKM3 | 40 | config RPCSEC_GSS_SPKM3 |
40 | tristate "Secure RPC: SPKM3 mechanism (EXPERIMENTAL)" | 41 | tristate "Secure RPC: SPKM3 mechanism (EXPERIMENTAL)" |
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index e5e28d1946a4..2ac3f6e8adff 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c | |||
@@ -249,6 +249,8 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target, | |||
249 | req->rl_nchunks = nchunks; | 249 | req->rl_nchunks = nchunks; |
250 | 250 | ||
251 | BUG_ON(nchunks == 0); | 251 | BUG_ON(nchunks == 0); |
252 | BUG_ON((r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR) | ||
253 | && (nchunks > 3)); | ||
252 | 254 | ||
253 | /* | 255 | /* |
254 | * finish off header. If write, marshal discrim and nchunks. | 256 | * finish off header. If write, marshal discrim and nchunks. |
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 27015c6d8eb5..5f4c7b3bc711 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
@@ -650,10 +650,22 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, | |||
650 | ep->rep_attr.cap.max_send_wr = cdata->max_requests; | 650 | ep->rep_attr.cap.max_send_wr = cdata->max_requests; |
651 | switch (ia->ri_memreg_strategy) { | 651 | switch (ia->ri_memreg_strategy) { |
652 | case RPCRDMA_FRMR: | 652 | case RPCRDMA_FRMR: |
653 | /* Add room for frmr register and invalidate WRs */ | 653 | /* Add room for frmr register and invalidate WRs. |
654 | ep->rep_attr.cap.max_send_wr *= 3; | 654 | * 1. FRMR reg WR for head |
655 | if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr) | 655 | * 2. FRMR invalidate WR for head |
656 | return -EINVAL; | 656 | * 3. FRMR reg WR for pagelist |
657 | * 4. FRMR invalidate WR for pagelist | ||
658 | * 5. FRMR reg WR for tail | ||
659 | * 6. FRMR invalidate WR for tail | ||
660 | * 7. The RDMA_SEND WR | ||
661 | */ | ||
662 | ep->rep_attr.cap.max_send_wr *= 7; | ||
663 | if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr) { | ||
664 | cdata->max_requests = devattr.max_qp_wr / 7; | ||
665 | if (!cdata->max_requests) | ||
666 | return -EINVAL; | ||
667 | ep->rep_attr.cap.max_send_wr = cdata->max_requests * 7; | ||
668 | } | ||
657 | break; | 669 | break; |
658 | case RPCRDMA_MEMWINDOWS_ASYNC: | 670 | case RPCRDMA_MEMWINDOWS_ASYNC: |
659 | case RPCRDMA_MEMWINDOWS: | 671 | case RPCRDMA_MEMWINDOWS: |
@@ -1490,7 +1502,7 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg, | |||
1490 | memset(&frmr_wr, 0, sizeof frmr_wr); | 1502 | memset(&frmr_wr, 0, sizeof frmr_wr); |
1491 | frmr_wr.opcode = IB_WR_FAST_REG_MR; | 1503 | frmr_wr.opcode = IB_WR_FAST_REG_MR; |
1492 | frmr_wr.send_flags = 0; /* unsignaled */ | 1504 | frmr_wr.send_flags = 0; /* unsignaled */ |
1493 | frmr_wr.wr.fast_reg.iova_start = (unsigned long)seg1->mr_dma; | 1505 | frmr_wr.wr.fast_reg.iova_start = seg1->mr_dma; |
1494 | frmr_wr.wr.fast_reg.page_list = seg1->mr_chunk.rl_mw->r.frmr.fr_pgl; | 1506 | frmr_wr.wr.fast_reg.page_list = seg1->mr_chunk.rl_mw->r.frmr.fr_pgl; |
1495 | frmr_wr.wr.fast_reg.page_list_len = i; | 1507 | frmr_wr.wr.fast_reg.page_list_len = i; |
1496 | frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT; | 1508 | frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT; |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 49a62f0c4b87..b6309db56226 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -1305,10 +1305,11 @@ static void xs_tcp_state_change(struct sock *sk) | |||
1305 | if (!(xprt = xprt_from_sock(sk))) | 1305 | if (!(xprt = xprt_from_sock(sk))) |
1306 | goto out; | 1306 | goto out; |
1307 | dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); | 1307 | dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); |
1308 | dprintk("RPC: state %x conn %d dead %d zapped %d\n", | 1308 | dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n", |
1309 | sk->sk_state, xprt_connected(xprt), | 1309 | sk->sk_state, xprt_connected(xprt), |
1310 | sock_flag(sk, SOCK_DEAD), | 1310 | sock_flag(sk, SOCK_DEAD), |
1311 | sock_flag(sk, SOCK_ZAPPED)); | 1311 | sock_flag(sk, SOCK_ZAPPED), |
1312 | sk->sk_shutdown); | ||
1312 | 1313 | ||
1313 | switch (sk->sk_state) { | 1314 | switch (sk->sk_state) { |
1314 | case TCP_ESTABLISHED: | 1315 | case TCP_ESTABLISHED: |
@@ -1779,10 +1780,25 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt, struct sock_xprt *tra | |||
1779 | { | 1780 | { |
1780 | unsigned int state = transport->inet->sk_state; | 1781 | unsigned int state = transport->inet->sk_state; |
1781 | 1782 | ||
1782 | if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED) | 1783 | if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED) { |
1783 | return; | 1784 | /* we don't need to abort the connection if the socket |
1784 | if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT)) | 1785 | * hasn't undergone a shutdown |
1785 | return; | 1786 | */ |
1787 | if (transport->inet->sk_shutdown == 0) | ||
1788 | return; | ||
1789 | dprintk("RPC: %s: TCP_CLOSEd and sk_shutdown set to %d\n", | ||
1790 | __func__, transport->inet->sk_shutdown); | ||
1791 | } | ||
1792 | if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT)) { | ||
1793 | /* we don't need to abort the connection if the socket | ||
1794 | * hasn't undergone a shutdown | ||
1795 | */ | ||
1796 | if (transport->inet->sk_shutdown == 0) | ||
1797 | return; | ||
1798 | dprintk("RPC: %s: ESTABLISHED/SYN_SENT " | ||
1799 | "sk_shutdown set to %d\n", | ||
1800 | __func__, transport->inet->sk_shutdown); | ||
1801 | } | ||
1786 | xs_abort_connection(xprt, transport); | 1802 | xs_abort_connection(xprt, transport); |
1787 | } | 1803 | } |
1788 | 1804 | ||
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index ba59983aaffe..b14ed4b1f27c 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -2504,7 +2504,7 @@ static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt, | |||
2504 | if (p->dir > XFRM_POLICY_OUT) | 2504 | if (p->dir > XFRM_POLICY_OUT) |
2505 | return NULL; | 2505 | return NULL; |
2506 | 2506 | ||
2507 | xp = xfrm_policy_alloc(net, GFP_KERNEL); | 2507 | xp = xfrm_policy_alloc(net, GFP_ATOMIC); |
2508 | if (xp == NULL) { | 2508 | if (xp == NULL) { |
2509 | *dir = -ENOBUFS; | 2509 | *dir = -ENOBUFS; |
2510 | return NULL; | 2510 | return NULL; |
diff --git a/samples/kfifo/bytestream-example.c b/samples/kfifo/bytestream-example.c index 642eef3f6336..178061e87ffe 100644 --- a/samples/kfifo/bytestream-example.c +++ b/samples/kfifo/bytestream-example.c | |||
@@ -44,10 +44,17 @@ static struct kfifo test; | |||
44 | static DECLARE_KFIFO(test, unsigned char, FIFO_SIZE); | 44 | static DECLARE_KFIFO(test, unsigned char, FIFO_SIZE); |
45 | #endif | 45 | #endif |
46 | 46 | ||
47 | static const unsigned char expected_result[FIFO_SIZE] = { | ||
48 | 3, 4, 5, 6, 7, 8, 9, 0, | ||
49 | 1, 20, 21, 22, 23, 24, 25, 26, | ||
50 | 27, 28, 29, 30, 31, 32, 33, 34, | ||
51 | 35, 36, 37, 38, 39, 40, 41, 42, | ||
52 | }; | ||
53 | |||
47 | static int __init testfunc(void) | 54 | static int __init testfunc(void) |
48 | { | 55 | { |
49 | unsigned char buf[6]; | 56 | unsigned char buf[6]; |
50 | unsigned char i; | 57 | unsigned char i, j; |
51 | unsigned int ret; | 58 | unsigned int ret; |
52 | 59 | ||
53 | printk(KERN_INFO "byte stream fifo test start\n"); | 60 | printk(KERN_INFO "byte stream fifo test start\n"); |
@@ -73,16 +80,34 @@ static int __init testfunc(void) | |||
73 | ret = kfifo_in(&test, buf, ret); | 80 | ret = kfifo_in(&test, buf, ret); |
74 | printk(KERN_INFO "ret: %d\n", ret); | 81 | printk(KERN_INFO "ret: %d\n", ret); |
75 | 82 | ||
83 | /* skip first element of the fifo */ | ||
84 | printk(KERN_INFO "skip 1st element\n"); | ||
85 | kfifo_skip(&test); | ||
86 | |||
76 | /* put values into the fifo until is full */ | 87 | /* put values into the fifo until is full */ |
77 | for (i = 20; kfifo_put(&test, &i); i++) | 88 | for (i = 20; kfifo_put(&test, &i); i++) |
78 | ; | 89 | ; |
79 | 90 | ||
80 | printk(KERN_INFO "queue len: %u\n", kfifo_len(&test)); | 91 | printk(KERN_INFO "queue len: %u\n", kfifo_len(&test)); |
81 | 92 | ||
82 | /* print out all values in the fifo */ | 93 | /* show the first value without removing from the fifo */ |
83 | while (kfifo_get(&test, &i)) | 94 | if (kfifo_peek(&test, &i)) |
84 | printk("%d ", i); | 95 | printk(KERN_INFO "%d\n", i); |
85 | printk("\n"); | 96 | |
97 | /* check the correctness of all values in the fifo */ | ||
98 | j = 0; | ||
99 | while (kfifo_get(&test, &i)) { | ||
100 | printk(KERN_INFO "item = %d\n", i); | ||
101 | if (i != expected_result[j++]) { | ||
102 | printk(KERN_WARNING "value mismatch: test failed\n"); | ||
103 | return -EIO; | ||
104 | } | ||
105 | } | ||
106 | if (j != ARRAY_SIZE(expected_result)) { | ||
107 | printk(KERN_WARNING "size mismatch: test failed\n"); | ||
108 | return -EIO; | ||
109 | } | ||
110 | printk(KERN_INFO "test passed\n"); | ||
86 | 111 | ||
87 | return 0; | 112 | return 0; |
88 | } | 113 | } |
@@ -138,7 +163,12 @@ static int __init example_init(void) | |||
138 | #else | 163 | #else |
139 | INIT_KFIFO(test); | 164 | INIT_KFIFO(test); |
140 | #endif | 165 | #endif |
141 | testfunc(); | 166 | if (testfunc() < 0) { |
167 | #ifdef DYNAMIC | ||
168 | kfifo_free(&test); | ||
169 | #endif | ||
170 | return -EIO; | ||
171 | } | ||
142 | 172 | ||
143 | if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) { | 173 | if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) { |
144 | #ifdef DYNAMIC | 174 | #ifdef DYNAMIC |
diff --git a/samples/kfifo/dma-example.c b/samples/kfifo/dma-example.c index b9482c28b41a..ee03a4f0b64f 100644 --- a/samples/kfifo/dma-example.c +++ b/samples/kfifo/dma-example.c | |||
@@ -29,8 +29,8 @@ static int __init example_init(void) | |||
29 | printk(KERN_INFO "DMA fifo test start\n"); | 29 | printk(KERN_INFO "DMA fifo test start\n"); |
30 | 30 | ||
31 | if (kfifo_alloc(&fifo, FIFO_SIZE, GFP_KERNEL)) { | 31 | if (kfifo_alloc(&fifo, FIFO_SIZE, GFP_KERNEL)) { |
32 | printk(KERN_ERR "error kfifo_alloc\n"); | 32 | printk(KERN_WARNING "error kfifo_alloc\n"); |
33 | return 1; | 33 | return -ENOMEM; |
34 | } | 34 | } |
35 | 35 | ||
36 | printk(KERN_INFO "queue size: %u\n", kfifo_size(&fifo)); | 36 | printk(KERN_INFO "queue size: %u\n", kfifo_size(&fifo)); |
@@ -41,72 +41,99 @@ static int __init example_init(void) | |||
41 | kfifo_put(&fifo, &i); | 41 | kfifo_put(&fifo, &i); |
42 | 42 | ||
43 | /* kick away first byte */ | 43 | /* kick away first byte */ |
44 | ret = kfifo_get(&fifo, &i); | 44 | kfifo_skip(&fifo); |
45 | 45 | ||
46 | printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo)); | 46 | printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo)); |
47 | 47 | ||
48 | /* | ||
49 | * Configure the kfifo buffer to receive data from DMA input. | ||
50 | * | ||
51 | * .--------------------------------------. | ||
52 | * | 0 | 1 | 2 | ... | 12 | 13 | ... | 31 | | ||
53 | * |---|------------------|---------------| | ||
54 | * \_/ \________________/ \_____________/ | ||
55 | * \ \ \ | ||
56 | * \ \_allocated data \ | ||
57 | * \_*free space* \_*free space* | ||
58 | * | ||
59 | * We need two different SG entries: one for the free space area at the | ||
60 | * end of the kfifo buffer (19 bytes) and another for the first free | ||
61 | * byte at the beginning, after the kfifo_skip(). | ||
62 | */ | ||
63 | sg_init_table(sg, ARRAY_SIZE(sg)); | ||
48 | ret = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE); | 64 | ret = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE); |
49 | printk(KERN_INFO "DMA sgl entries: %d\n", ret); | 65 | printk(KERN_INFO "DMA sgl entries: %d\n", ret); |
66 | if (!ret) { | ||
67 | /* fifo is full and no sgl was created */ | ||
68 | printk(KERN_WARNING "error kfifo_dma_in_prepare\n"); | ||
69 | return -EIO; | ||
70 | } | ||
50 | 71 | ||
51 | /* if 0 was returned, fifo is full and no sgl was created */ | 72 | /* receive data */ |
52 | if (ret) { | 73 | printk(KERN_INFO "scatterlist for receive:\n"); |
53 | printk(KERN_INFO "scatterlist for receive:\n"); | 74 | for (i = 0; i < ARRAY_SIZE(sg); i++) { |
54 | for (i = 0; i < ARRAY_SIZE(sg); i++) { | 75 | printk(KERN_INFO |
55 | printk(KERN_INFO | 76 | "sg[%d] -> " |
56 | "sg[%d] -> " | 77 | "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n", |
57 | "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n", | 78 | i, sg[i].page_link, sg[i].offset, sg[i].length); |
58 | i, sg[i].page_link, sg[i].offset, sg[i].length); | ||
59 | 79 | ||
60 | if (sg_is_last(&sg[i])) | 80 | if (sg_is_last(&sg[i])) |
61 | break; | 81 | break; |
62 | } | 82 | } |
63 | 83 | ||
64 | /* but here your code to setup and exectute the dma operation */ | 84 | /* put here your code to setup and exectute the dma operation */ |
65 | /* ... */ | 85 | /* ... */ |
66 | 86 | ||
67 | /* example: zero bytes received */ | 87 | /* example: zero bytes received */ |
68 | ret = 0; | 88 | ret = 0; |
69 | 89 | ||
70 | /* finish the dma operation and update the received data */ | 90 | /* finish the dma operation and update the received data */ |
71 | kfifo_dma_in_finish(&fifo, ret); | 91 | kfifo_dma_in_finish(&fifo, ret); |
72 | } | ||
73 | 92 | ||
93 | /* Prepare to transmit data, example: 8 bytes */ | ||
74 | ret = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8); | 94 | ret = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8); |
75 | printk(KERN_INFO "DMA sgl entries: %d\n", ret); | 95 | printk(KERN_INFO "DMA sgl entries: %d\n", ret); |
96 | if (!ret) { | ||
97 | /* no data was available and no sgl was created */ | ||
98 | printk(KERN_WARNING "error kfifo_dma_out_prepare\n"); | ||
99 | return -EIO; | ||
100 | } | ||
76 | 101 | ||
77 | /* if 0 was returned, no data was available and no sgl was created */ | 102 | printk(KERN_INFO "scatterlist for transmit:\n"); |
78 | if (ret) { | 103 | for (i = 0; i < ARRAY_SIZE(sg); i++) { |
79 | printk(KERN_INFO "scatterlist for transmit:\n"); | 104 | printk(KERN_INFO |
80 | for (i = 0; i < ARRAY_SIZE(sg); i++) { | 105 | "sg[%d] -> " |
81 | printk(KERN_INFO | 106 | "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n", |
82 | "sg[%d] -> " | 107 | i, sg[i].page_link, sg[i].offset, sg[i].length); |
83 | "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n", | ||
84 | i, sg[i].page_link, sg[i].offset, sg[i].length); | ||
85 | 108 | ||
86 | if (sg_is_last(&sg[i])) | 109 | if (sg_is_last(&sg[i])) |
87 | break; | 110 | break; |
88 | } | 111 | } |
89 | 112 | ||
90 | /* but here your code to setup and exectute the dma operation */ | 113 | /* put here your code to setup and exectute the dma operation */ |
91 | /* ... */ | 114 | /* ... */ |
92 | 115 | ||
93 | /* example: 5 bytes transmitted */ | 116 | /* example: 5 bytes transmitted */ |
94 | ret = 5; | 117 | ret = 5; |
95 | 118 | ||
96 | /* finish the dma operation and update the transmitted data */ | 119 | /* finish the dma operation and update the transmitted data */ |
97 | kfifo_dma_out_finish(&fifo, ret); | 120 | kfifo_dma_out_finish(&fifo, ret); |
98 | } | ||
99 | 121 | ||
122 | ret = kfifo_len(&fifo); | ||
100 | printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo)); | 123 | printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo)); |
101 | 124 | ||
125 | if (ret != 7) { | ||
126 | printk(KERN_WARNING "size mismatch: test failed"); | ||
127 | return -EIO; | ||
128 | } | ||
129 | printk(KERN_INFO "test passed\n"); | ||
130 | |||
102 | return 0; | 131 | return 0; |
103 | } | 132 | } |
104 | 133 | ||
105 | static void __exit example_exit(void) | 134 | static void __exit example_exit(void) |
106 | { | 135 | { |
107 | #ifdef DYNAMIC | 136 | kfifo_free(&fifo); |
108 | kfifo_free(&test); | ||
109 | #endif | ||
110 | } | 137 | } |
111 | 138 | ||
112 | module_init(example_init); | 139 | module_init(example_init); |
diff --git a/samples/kfifo/inttype-example.c b/samples/kfifo/inttype-example.c index d6c5b7d9df64..71b2aabca96a 100644 --- a/samples/kfifo/inttype-example.c +++ b/samples/kfifo/inttype-example.c | |||
@@ -44,10 +44,17 @@ static DECLARE_KFIFO_PTR(test, int); | |||
44 | static DEFINE_KFIFO(test, int, FIFO_SIZE); | 44 | static DEFINE_KFIFO(test, int, FIFO_SIZE); |
45 | #endif | 45 | #endif |
46 | 46 | ||
47 | static const int expected_result[FIFO_SIZE] = { | ||
48 | 3, 4, 5, 6, 7, 8, 9, 0, | ||
49 | 1, 20, 21, 22, 23, 24, 25, 26, | ||
50 | 27, 28, 29, 30, 31, 32, 33, 34, | ||
51 | 35, 36, 37, 38, 39, 40, 41, 42, | ||
52 | }; | ||
53 | |||
47 | static int __init testfunc(void) | 54 | static int __init testfunc(void) |
48 | { | 55 | { |
49 | int buf[6]; | 56 | int buf[6]; |
50 | int i; | 57 | int i, j; |
51 | unsigned int ret; | 58 | unsigned int ret; |
52 | 59 | ||
53 | printk(KERN_INFO "int fifo test start\n"); | 60 | printk(KERN_INFO "int fifo test start\n"); |
@@ -66,8 +73,13 @@ static int __init testfunc(void) | |||
66 | ret = kfifo_in(&test, buf, ret); | 73 | ret = kfifo_in(&test, buf, ret); |
67 | printk(KERN_INFO "ret: %d\n", ret); | 74 | printk(KERN_INFO "ret: %d\n", ret); |
68 | 75 | ||
69 | for (i = 20; i != 30; i++) | 76 | /* skip first element of the fifo */ |
70 | kfifo_put(&test, &i); | 77 | printk(KERN_INFO "skip 1st element\n"); |
78 | kfifo_skip(&test); | ||
79 | |||
80 | /* put values into the fifo until is full */ | ||
81 | for (i = 20; kfifo_put(&test, &i); i++) | ||
82 | ; | ||
71 | 83 | ||
72 | printk(KERN_INFO "queue len: %u\n", kfifo_len(&test)); | 84 | printk(KERN_INFO "queue len: %u\n", kfifo_len(&test)); |
73 | 85 | ||
@@ -75,10 +87,20 @@ static int __init testfunc(void) | |||
75 | if (kfifo_peek(&test, &i)) | 87 | if (kfifo_peek(&test, &i)) |
76 | printk(KERN_INFO "%d\n", i); | 88 | printk(KERN_INFO "%d\n", i); |
77 | 89 | ||
78 | /* print out all values in the fifo */ | 90 | /* check the correctness of all values in the fifo */ |
79 | while (kfifo_get(&test, &i)) | 91 | j = 0; |
80 | printk("%d ", i); | 92 | while (kfifo_get(&test, &i)) { |
81 | printk("\n"); | 93 | printk(KERN_INFO "item = %d\n", i); |
94 | if (i != expected_result[j++]) { | ||
95 | printk(KERN_WARNING "value mismatch: test failed\n"); | ||
96 | return -EIO; | ||
97 | } | ||
98 | } | ||
99 | if (j != ARRAY_SIZE(expected_result)) { | ||
100 | printk(KERN_WARNING "size mismatch: test failed\n"); | ||
101 | return -EIO; | ||
102 | } | ||
103 | printk(KERN_INFO "test passed\n"); | ||
82 | 104 | ||
83 | return 0; | 105 | return 0; |
84 | } | 106 | } |
@@ -132,7 +154,12 @@ static int __init example_init(void) | |||
132 | return ret; | 154 | return ret; |
133 | } | 155 | } |
134 | #endif | 156 | #endif |
135 | testfunc(); | 157 | if (testfunc() < 0) { |
158 | #ifdef DYNAMIC | ||
159 | kfifo_free(&test); | ||
160 | #endif | ||
161 | return -EIO; | ||
162 | } | ||
136 | 163 | ||
137 | if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) { | 164 | if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) { |
138 | #ifdef DYNAMIC | 165 | #ifdef DYNAMIC |
diff --git a/samples/kfifo/record-example.c b/samples/kfifo/record-example.c index 32c6e0bda744..e68bd16a5da4 100644 --- a/samples/kfifo/record-example.c +++ b/samples/kfifo/record-example.c | |||
@@ -55,6 +55,19 @@ typedef STRUCT_KFIFO_REC_1(FIFO_SIZE) mytest; | |||
55 | static mytest test; | 55 | static mytest test; |
56 | #endif | 56 | #endif |
57 | 57 | ||
58 | static const char *expected_result[] = { | ||
59 | "a", | ||
60 | "bb", | ||
61 | "ccc", | ||
62 | "dddd", | ||
63 | "eeeee", | ||
64 | "ffffff", | ||
65 | "ggggggg", | ||
66 | "hhhhhhhh", | ||
67 | "iiiiiiiii", | ||
68 | "jjjjjjjjjj", | ||
69 | }; | ||
70 | |||
58 | static int __init testfunc(void) | 71 | static int __init testfunc(void) |
59 | { | 72 | { |
60 | char buf[100]; | 73 | char buf[100]; |
@@ -75,6 +88,10 @@ static int __init testfunc(void) | |||
75 | kfifo_in(&test, buf, i + 1); | 88 | kfifo_in(&test, buf, i + 1); |
76 | } | 89 | } |
77 | 90 | ||
91 | /* skip first element of the fifo */ | ||
92 | printk(KERN_INFO "skip 1st element\n"); | ||
93 | kfifo_skip(&test); | ||
94 | |||
78 | printk(KERN_INFO "fifo len: %u\n", kfifo_len(&test)); | 95 | printk(KERN_INFO "fifo len: %u\n", kfifo_len(&test)); |
79 | 96 | ||
80 | /* show the first record without removing from the fifo */ | 97 | /* show the first record without removing from the fifo */ |
@@ -82,11 +99,22 @@ static int __init testfunc(void) | |||
82 | if (ret) | 99 | if (ret) |
83 | printk(KERN_INFO "%.*s\n", ret, buf); | 100 | printk(KERN_INFO "%.*s\n", ret, buf); |
84 | 101 | ||
85 | /* print out all records in the fifo */ | 102 | /* check the correctness of all values in the fifo */ |
103 | i = 0; | ||
86 | while (!kfifo_is_empty(&test)) { | 104 | while (!kfifo_is_empty(&test)) { |
87 | ret = kfifo_out(&test, buf, sizeof(buf)); | 105 | ret = kfifo_out(&test, buf, sizeof(buf)); |
88 | printk(KERN_INFO "%.*s\n", ret, buf); | 106 | buf[ret] = '\0'; |
107 | printk(KERN_INFO "item = %.*s\n", ret, buf); | ||
108 | if (strcmp(buf, expected_result[i++])) { | ||
109 | printk(KERN_WARNING "value mismatch: test failed\n"); | ||
110 | return -EIO; | ||
111 | } | ||
112 | } | ||
113 | if (i != ARRAY_SIZE(expected_result)) { | ||
114 | printk(KERN_WARNING "size mismatch: test failed\n"); | ||
115 | return -EIO; | ||
89 | } | 116 | } |
117 | printk(KERN_INFO "test passed\n"); | ||
90 | 118 | ||
91 | return 0; | 119 | return 0; |
92 | } | 120 | } |
@@ -142,7 +170,12 @@ static int __init example_init(void) | |||
142 | #else | 170 | #else |
143 | INIT_KFIFO(test); | 171 | INIT_KFIFO(test); |
144 | #endif | 172 | #endif |
145 | testfunc(); | 173 | if (testfunc() < 0) { |
174 | #ifdef DYNAMIC | ||
175 | kfifo_free(&test); | ||
176 | #endif | ||
177 | return -EIO; | ||
178 | } | ||
146 | 179 | ||
147 | if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) { | 180 | if (proc_create(PROC_FIFO, 0, NULL, &fifo_fops) == NULL) { |
148 | #ifdef DYNAMIC | 181 | #ifdef DYNAMIC |
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index c39327e60ea4..515253fe46cf 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c | |||
@@ -497,7 +497,9 @@ int conf_write_defconfig(const char *filename) | |||
497 | /* | 497 | /* |
498 | * If symbol is a choice value and equals to the | 498 | * If symbol is a choice value and equals to the |
499 | * default for a choice - skip. | 499 | * default for a choice - skip. |
500 | * But only if value is bool and equal to "y" . | 500 | * But only if value is bool and equal to "y" and |
501 | * choice is not "optional". | ||
502 | * (If choice is "optional" then all values can be "n") | ||
501 | */ | 503 | */ |
502 | if (sym_is_choice_value(sym)) { | 504 | if (sym_is_choice_value(sym)) { |
503 | struct symbol *cs; | 505 | struct symbol *cs; |
@@ -505,7 +507,7 @@ int conf_write_defconfig(const char *filename) | |||
505 | 507 | ||
506 | cs = prop_get_symbol(sym_get_choice_prop(sym)); | 508 | cs = prop_get_symbol(sym_get_choice_prop(sym)); |
507 | ds = sym_choice_default(cs); | 509 | ds = sym_choice_default(cs); |
508 | if (sym == ds) { | 510 | if (!sym_is_optional(cs) && sym == ds) { |
509 | if ((sym->type == S_BOOLEAN) && | 511 | if ((sym->type == S_BOOLEAN) && |
510 | sym_get_tristate_value(sym) == yes) | 512 | sym_get_tristate_value(sym) == yes) |
511 | goto next_menu; | 513 | goto next_menu; |
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c index e95718fea355..943712ca6c0a 100644 --- a/scripts/kconfig/symbol.c +++ b/scripts/kconfig/symbol.c | |||
@@ -937,6 +937,8 @@ static void sym_check_print_recursive(struct symbol *last_sym) | |||
937 | sym = stack->sym; | 937 | sym = stack->sym; |
938 | next_sym = stack->next ? stack->next->sym : last_sym; | 938 | next_sym = stack->next ? stack->next->sym : last_sym; |
939 | prop = stack->prop; | 939 | prop = stack->prop; |
940 | if (prop == NULL) | ||
941 | prop = stack->sym->prop; | ||
940 | 942 | ||
941 | /* for choice values find the menu entry (used below) */ | 943 | /* for choice values find the menu entry (used below) */ |
942 | if (sym_is_choice(sym) || sym_is_choice_value(sym)) { | 944 | if (sym_is_choice(sym) || sym_is_choice_value(sym)) { |
diff --git a/scripts/mkmakefile b/scripts/mkmakefile index 67d59c7a18dc..5325423ceab4 100644 --- a/scripts/mkmakefile +++ b/scripts/mkmakefile | |||
@@ -44,7 +44,9 @@ all: | |||
44 | 44 | ||
45 | Makefile:; | 45 | Makefile:; |
46 | 46 | ||
47 | \$(all) %/: all | 47 | \$(all): all |
48 | @: | 48 | @: |
49 | 49 | ||
50 | %/: all | ||
51 | @: | ||
50 | EOF | 52 | EOF |
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index 0171060b5fd6..e67f05486087 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl | |||
@@ -159,6 +159,7 @@ my $section_regex; # Find the start of a section | |||
159 | my $function_regex; # Find the name of a function | 159 | my $function_regex; # Find the name of a function |
160 | # (return offset and func name) | 160 | # (return offset and func name) |
161 | my $mcount_regex; # Find the call site to mcount (return offset) | 161 | my $mcount_regex; # Find the call site to mcount (return offset) |
162 | my $mcount_adjust; # Address adjustment to mcount offset | ||
162 | my $alignment; # The .align value to use for $mcount_section | 163 | my $alignment; # The .align value to use for $mcount_section |
163 | my $section_type; # Section header plus possible alignment command | 164 | my $section_type; # Section header plus possible alignment command |
164 | my $can_use_local = 0; # If we can use local function references | 165 | my $can_use_local = 0; # If we can use local function references |
@@ -213,6 +214,7 @@ $section_regex = "Disassembly of section\\s+(\\S+):"; | |||
213 | $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:"; | 214 | $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:"; |
214 | $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$"; | 215 | $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$"; |
215 | $section_type = '@progbits'; | 216 | $section_type = '@progbits'; |
217 | $mcount_adjust = 0; | ||
216 | $type = ".long"; | 218 | $type = ".long"; |
217 | 219 | ||
218 | if ($arch eq "x86_64") { | 220 | if ($arch eq "x86_64") { |
@@ -351,6 +353,9 @@ if ($arch eq "x86_64") { | |||
351 | } elsif ($arch eq "microblaze") { | 353 | } elsif ($arch eq "microblaze") { |
352 | # Microblaze calls '_mcount' instead of plain 'mcount'. | 354 | # Microblaze calls '_mcount' instead of plain 'mcount'. |
353 | $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$"; | 355 | $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$"; |
356 | } elsif ($arch eq "blackfin") { | ||
357 | $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s__mcount\$"; | ||
358 | $mcount_adjust = -4; | ||
354 | } else { | 359 | } else { |
355 | die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD"; | 360 | die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD"; |
356 | } | 361 | } |
@@ -511,7 +516,7 @@ while (<IN>) { | |||
511 | } | 516 | } |
512 | # is this a call site to mcount? If so, record it to print later | 517 | # is this a call site to mcount? If so, record it to print later |
513 | if ($text_found && /$mcount_regex/) { | 518 | if ($text_found && /$mcount_regex/) { |
514 | push(@offsets, hex $1); | 519 | push(@offsets, (hex $1) + $mcount_adjust); |
515 | } | 520 | } |
516 | } | 521 | } |
517 | 522 | ||
diff --git a/scripts/setlocalversion b/scripts/setlocalversion index e90a91cc5185..057b6b3c5dfb 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion | |||
@@ -43,7 +43,7 @@ scm_version() | |||
43 | fi | 43 | fi |
44 | 44 | ||
45 | # Check for git and a git repo. | 45 | # Check for git and a git repo. |
46 | if head=`git rev-parse --verify --short HEAD 2>/dev/null`; then | 46 | if test -d .git && head=`git rev-parse --verify --short HEAD 2>/dev/null`; then |
47 | 47 | ||
48 | # If we are at a tagged commit (like "v2.6.30-rc6"), we ignore | 48 | # If we are at a tagged commit (like "v2.6.30-rc6"), we ignore |
49 | # it, because this version is defined in the top level Makefile. | 49 | # it, because this version is defined in the top level Makefile. |
@@ -85,7 +85,7 @@ scm_version() | |||
85 | fi | 85 | fi |
86 | 86 | ||
87 | # Check for mercurial and a mercurial repo. | 87 | # Check for mercurial and a mercurial repo. |
88 | if hgid=`hg id 2>/dev/null`; then | 88 | if test -d .hg && hgid=`hg id 2>/dev/null`; then |
89 | tag=`printf '%s' "$hgid" | cut -s -d' ' -f2` | 89 | tag=`printf '%s' "$hgid" | cut -s -d' ' -f2` |
90 | 90 | ||
91 | # Do we have an untagged version? | 91 | # Do we have an untagged version? |
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index d5666d3cc21b..f73e2c204218 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c | |||
@@ -607,8 +607,8 @@ static int apparmor_setprocattr(struct task_struct *task, char *name, | |||
607 | return error; | 607 | return error; |
608 | } | 608 | } |
609 | 609 | ||
610 | static int apparmor_task_setrlimit(unsigned int resource, | 610 | static int apparmor_task_setrlimit(struct task_struct *task, |
611 | struct rlimit *new_rlim) | 611 | unsigned int resource, struct rlimit *new_rlim) |
612 | { | 612 | { |
613 | struct aa_profile *profile = aa_current_profile(); | 613 | struct aa_profile *profile = aa_current_profile(); |
614 | int error = 0; | 614 | int error = 0; |
diff --git a/security/apparmor/path.c b/security/apparmor/path.c index 96bab9469d48..19358dc14605 100644 --- a/security/apparmor/path.c +++ b/security/apparmor/path.c | |||
@@ -62,19 +62,14 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, | |||
62 | int deleted, connected; | 62 | int deleted, connected; |
63 | int error = 0; | 63 | int error = 0; |
64 | 64 | ||
65 | /* Get the root we want to resolve too */ | 65 | /* Get the root we want to resolve too, released below */ |
66 | if (flags & PATH_CHROOT_REL) { | 66 | if (flags & PATH_CHROOT_REL) { |
67 | /* resolve paths relative to chroot */ | 67 | /* resolve paths relative to chroot */ |
68 | read_lock(¤t->fs->lock); | 68 | get_fs_root(current->fs, &root); |
69 | root = current->fs->root; | ||
70 | /* released below */ | ||
71 | path_get(&root); | ||
72 | read_unlock(¤t->fs->lock); | ||
73 | } else { | 69 | } else { |
74 | /* resolve paths relative to namespace */ | 70 | /* resolve paths relative to namespace */ |
75 | root.mnt = current->nsproxy->mnt_ns->root; | 71 | root.mnt = current->nsproxy->mnt_ns->root; |
76 | root.dentry = root.mnt->mnt_root; | 72 | root.dentry = root.mnt->mnt_root; |
77 | /* released below */ | ||
78 | path_get(&root); | 73 | path_get(&root); |
79 | } | 74 | } |
80 | 75 | ||
diff --git a/security/commoncap.c b/security/commoncap.c index 4e015996dd4d..9d172e6e330c 100644 --- a/security/commoncap.c +++ b/security/commoncap.c | |||
@@ -40,7 +40,7 @@ | |||
40 | * | 40 | * |
41 | * Warn if that happens, once per boot. | 41 | * Warn if that happens, once per boot. |
42 | */ | 42 | */ |
43 | static void warn_setuid_and_fcaps_mixed(char *fname) | 43 | static void warn_setuid_and_fcaps_mixed(const char *fname) |
44 | { | 44 | { |
45 | static int warned; | 45 | static int warned; |
46 | if (!warned) { | 46 | if (!warned) { |
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 42043f96e54f..4796ddd4e721 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
@@ -2170,8 +2170,9 @@ static inline void flush_unauthorized_files(const struct cred *cred, | |||
2170 | 2170 | ||
2171 | tty = get_current_tty(); | 2171 | tty = get_current_tty(); |
2172 | if (tty) { | 2172 | if (tty) { |
2173 | file_list_lock(); | 2173 | spin_lock(&tty_files_lock); |
2174 | if (!list_empty(&tty->tty_files)) { | 2174 | if (!list_empty(&tty->tty_files)) { |
2175 | struct tty_file_private *file_priv; | ||
2175 | struct inode *inode; | 2176 | struct inode *inode; |
2176 | 2177 | ||
2177 | /* Revalidate access to controlling tty. | 2178 | /* Revalidate access to controlling tty. |
@@ -2179,14 +2180,16 @@ static inline void flush_unauthorized_files(const struct cred *cred, | |||
2179 | than using file_has_perm, as this particular open | 2180 | than using file_has_perm, as this particular open |
2180 | file may belong to another process and we are only | 2181 | file may belong to another process and we are only |
2181 | interested in the inode-based check here. */ | 2182 | interested in the inode-based check here. */ |
2182 | file = list_first_entry(&tty->tty_files, struct file, f_u.fu_list); | 2183 | file_priv = list_first_entry(&tty->tty_files, |
2184 | struct tty_file_private, list); | ||
2185 | file = file_priv->file; | ||
2183 | inode = file->f_path.dentry->d_inode; | 2186 | inode = file->f_path.dentry->d_inode; |
2184 | if (inode_has_perm(cred, inode, | 2187 | if (inode_has_perm(cred, inode, |
2185 | FILE__READ | FILE__WRITE, NULL)) { | 2188 | FILE__READ | FILE__WRITE, NULL)) { |
2186 | drop_tty = 1; | 2189 | drop_tty = 1; |
2187 | } | 2190 | } |
2188 | } | 2191 | } |
2189 | file_list_unlock(); | 2192 | spin_unlock(&tty_files_lock); |
2190 | tty_kref_put(tty); | 2193 | tty_kref_put(tty); |
2191 | } | 2194 | } |
2192 | /* Reset controlling tty. */ | 2195 | /* Reset controlling tty. */ |
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index a3b2a6479246..134fc6c2e08d 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c | |||
@@ -978,6 +978,10 @@ static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push) | |||
978 | { | 978 | { |
979 | if (substream->runtime->trigger_master != substream) | 979 | if (substream->runtime->trigger_master != substream) |
980 | return 0; | 980 | return 0; |
981 | /* some drivers might use hw_ptr to recover from the pause - | ||
982 | update the hw_ptr now */ | ||
983 | if (push) | ||
984 | snd_pcm_update_hw_ptr(substream); | ||
981 | /* The jiffies check in snd_pcm_update_hw_ptr*() is done by | 985 | /* The jiffies check in snd_pcm_update_hw_ptr*() is done by |
982 | * a delta betwen the current jiffies, this gives a large enough | 986 | * a delta betwen the current jiffies, this gives a large enough |
983 | * delta, effectively to skip the check once. | 987 | * delta, effectively to skip the check once. |
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c index 4203782d7cb7..aff8387c45cf 100644 --- a/sound/pci/emu10k1/emu10k1.c +++ b/sound/pci/emu10k1/emu10k1.c | |||
@@ -52,6 +52,7 @@ static int max_synth_voices[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 64}; | |||
52 | static int max_buffer_size[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 128}; | 52 | static int max_buffer_size[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 128}; |
53 | static int enable_ir[SNDRV_CARDS]; | 53 | static int enable_ir[SNDRV_CARDS]; |
54 | static uint subsystem[SNDRV_CARDS]; /* Force card subsystem model */ | 54 | static uint subsystem[SNDRV_CARDS]; /* Force card subsystem model */ |
55 | static uint delay_pcm_irq[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 2}; | ||
55 | 56 | ||
56 | module_param_array(index, int, NULL, 0444); | 57 | module_param_array(index, int, NULL, 0444); |
57 | MODULE_PARM_DESC(index, "Index value for the EMU10K1 soundcard."); | 58 | MODULE_PARM_DESC(index, "Index value for the EMU10K1 soundcard."); |
@@ -73,6 +74,8 @@ module_param_array(enable_ir, bool, NULL, 0444); | |||
73 | MODULE_PARM_DESC(enable_ir, "Enable IR."); | 74 | MODULE_PARM_DESC(enable_ir, "Enable IR."); |
74 | module_param_array(subsystem, uint, NULL, 0444); | 75 | module_param_array(subsystem, uint, NULL, 0444); |
75 | MODULE_PARM_DESC(subsystem, "Force card subsystem model."); | 76 | MODULE_PARM_DESC(subsystem, "Force card subsystem model."); |
77 | module_param_array(delay_pcm_irq, uint, NULL, 0444); | ||
78 | MODULE_PARM_DESC(delay_pcm_irq, "Delay PCM interrupt by specified number of samples (default 0)."); | ||
76 | /* | 79 | /* |
77 | * Class 0401: 1102:0008 (rev 00) Subsystem: 1102:1001 -> Audigy2 Value Model:SB0400 | 80 | * Class 0401: 1102:0008 (rev 00) Subsystem: 1102:1001 -> Audigy2 Value Model:SB0400 |
78 | */ | 81 | */ |
@@ -127,6 +130,7 @@ static int __devinit snd_card_emu10k1_probe(struct pci_dev *pci, | |||
127 | &emu)) < 0) | 130 | &emu)) < 0) |
128 | goto error; | 131 | goto error; |
129 | card->private_data = emu; | 132 | card->private_data = emu; |
133 | emu->delay_pcm_irq = delay_pcm_irq[dev] & 0x1f; | ||
130 | if ((err = snd_emu10k1_pcm(emu, 0, NULL)) < 0) | 134 | if ((err = snd_emu10k1_pcm(emu, 0, NULL)) < 0) |
131 | goto error; | 135 | goto error; |
132 | if ((err = snd_emu10k1_pcm_mic(emu, 1, NULL)) < 0) | 136 | if ((err = snd_emu10k1_pcm_mic(emu, 1, NULL)) < 0) |
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c index 55b83ef73c63..622bace148e3 100644 --- a/sound/pci/emu10k1/emupcm.c +++ b/sound/pci/emu10k1/emupcm.c | |||
@@ -332,7 +332,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu, | |||
332 | evoice->epcm->ccca_start_addr = start_addr + ccis; | 332 | evoice->epcm->ccca_start_addr = start_addr + ccis; |
333 | if (extra) { | 333 | if (extra) { |
334 | start_addr += ccis; | 334 | start_addr += ccis; |
335 | end_addr += ccis; | 335 | end_addr += ccis + emu->delay_pcm_irq; |
336 | } | 336 | } |
337 | if (stereo && !extra) { | 337 | if (stereo && !extra) { |
338 | snd_emu10k1_ptr_write(emu, CPF, voice, CPF_STEREO_MASK); | 338 | snd_emu10k1_ptr_write(emu, CPF, voice, CPF_STEREO_MASK); |
@@ -360,7 +360,9 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu, | |||
360 | /* Assumption that PT is already 0 so no harm overwriting */ | 360 | /* Assumption that PT is already 0 so no harm overwriting */ |
361 | snd_emu10k1_ptr_write(emu, PTRX, voice, (send_amount[0] << 8) | send_amount[1]); | 361 | snd_emu10k1_ptr_write(emu, PTRX, voice, (send_amount[0] << 8) | send_amount[1]); |
362 | snd_emu10k1_ptr_write(emu, DSL, voice, end_addr | (send_amount[3] << 24)); | 362 | snd_emu10k1_ptr_write(emu, DSL, voice, end_addr | (send_amount[3] << 24)); |
363 | snd_emu10k1_ptr_write(emu, PSST, voice, start_addr | (send_amount[2] << 24)); | 363 | snd_emu10k1_ptr_write(emu, PSST, voice, |
364 | (start_addr + (extra ? emu->delay_pcm_irq : 0)) | | ||
365 | (send_amount[2] << 24)); | ||
364 | if (emu->card_capabilities->emu_model) | 366 | if (emu->card_capabilities->emu_model) |
365 | pitch_target = PITCH_48000; /* Disable interpolators on emu1010 card */ | 367 | pitch_target = PITCH_48000; /* Disable interpolators on emu1010 card */ |
366 | else | 368 | else |
@@ -732,6 +734,23 @@ static void snd_emu10k1_playback_stop_voice(struct snd_emu10k1 *emu, struct snd_ | |||
732 | snd_emu10k1_ptr_write(emu, IP, voice, 0); | 734 | snd_emu10k1_ptr_write(emu, IP, voice, 0); |
733 | } | 735 | } |
734 | 736 | ||
737 | static inline void snd_emu10k1_playback_mangle_extra(struct snd_emu10k1 *emu, | ||
738 | struct snd_emu10k1_pcm *epcm, | ||
739 | struct snd_pcm_substream *substream, | ||
740 | struct snd_pcm_runtime *runtime) | ||
741 | { | ||
742 | unsigned int ptr, period_pos; | ||
743 | |||
744 | /* try to sychronize the current position for the interrupt | ||
745 | source voice */ | ||
746 | period_pos = runtime->status->hw_ptr - runtime->hw_ptr_interrupt; | ||
747 | period_pos %= runtime->period_size; | ||
748 | ptr = snd_emu10k1_ptr_read(emu, CCCA, epcm->extra->number); | ||
749 | ptr &= ~0x00ffffff; | ||
750 | ptr |= epcm->ccca_start_addr + period_pos; | ||
751 | snd_emu10k1_ptr_write(emu, CCCA, epcm->extra->number, ptr); | ||
752 | } | ||
753 | |||
735 | static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream, | 754 | static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream, |
736 | int cmd) | 755 | int cmd) |
737 | { | 756 | { |
@@ -753,6 +772,8 @@ static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream, | |||
753 | /* follow thru */ | 772 | /* follow thru */ |
754 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: | 773 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: |
755 | case SNDRV_PCM_TRIGGER_RESUME: | 774 | case SNDRV_PCM_TRIGGER_RESUME: |
775 | if (cmd == SNDRV_PCM_TRIGGER_PAUSE_RELEASE) | ||
776 | snd_emu10k1_playback_mangle_extra(emu, epcm, substream, runtime); | ||
756 | mix = &emu->pcm_mixer[substream->number]; | 777 | mix = &emu->pcm_mixer[substream->number]; |
757 | snd_emu10k1_playback_prepare_voice(emu, epcm->voices[0], 1, 0, mix); | 778 | snd_emu10k1_playback_prepare_voice(emu, epcm->voices[0], 1, 0, mix); |
758 | snd_emu10k1_playback_prepare_voice(emu, epcm->voices[1], 0, 0, mix); | 779 | snd_emu10k1_playback_prepare_voice(emu, epcm->voices[1], 0, 0, mix); |
@@ -869,8 +890,9 @@ static snd_pcm_uframes_t snd_emu10k1_playback_pointer(struct snd_pcm_substream * | |||
869 | #endif | 890 | #endif |
870 | /* | 891 | /* |
871 | printk(KERN_DEBUG | 892 | printk(KERN_DEBUG |
872 | "ptr = 0x%x, buffer_size = 0x%x, period_size = 0x%x\n", | 893 | "ptr = 0x%lx, buffer_size = 0x%lx, period_size = 0x%lx\n", |
873 | ptr, runtime->buffer_size, runtime->period_size); | 894 | (long)ptr, (long)runtime->buffer_size, |
895 | (long)runtime->period_size); | ||
874 | */ | 896 | */ |
875 | return ptr; | 897 | return ptr; |
876 | } | 898 | } |
diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c index ffb1ddb8dc28..957a311514c8 100644 --- a/sound/pci/emu10k1/memory.c +++ b/sound/pci/emu10k1/memory.c | |||
@@ -310,8 +310,10 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst | |||
310 | if (snd_BUG_ON(!hdr)) | 310 | if (snd_BUG_ON(!hdr)) |
311 | return NULL; | 311 | return NULL; |
312 | 312 | ||
313 | idx = runtime->period_size >= runtime->buffer_size ? | ||
314 | (emu->delay_pcm_irq * 2) : 0; | ||
313 | mutex_lock(&hdr->block_mutex); | 315 | mutex_lock(&hdr->block_mutex); |
314 | blk = search_empty(emu, runtime->dma_bytes); | 316 | blk = search_empty(emu, runtime->dma_bytes + idx); |
315 | if (blk == NULL) { | 317 | if (blk == NULL) { |
316 | mutex_unlock(&hdr->block_mutex); | 318 | mutex_unlock(&hdr->block_mutex); |
317 | return NULL; | 319 | return NULL; |
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index dd8fb86c842b..3827092cc1d2 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c | |||
@@ -589,6 +589,7 @@ int /*__devinit*/ snd_hda_bus_new(struct snd_card *card, | |||
589 | bus->ops = temp->ops; | 589 | bus->ops = temp->ops; |
590 | 590 | ||
591 | mutex_init(&bus->cmd_mutex); | 591 | mutex_init(&bus->cmd_mutex); |
592 | mutex_init(&bus->prepare_mutex); | ||
592 | INIT_LIST_HEAD(&bus->codec_list); | 593 | INIT_LIST_HEAD(&bus->codec_list); |
593 | 594 | ||
594 | snprintf(bus->workq_name, sizeof(bus->workq_name), | 595 | snprintf(bus->workq_name, sizeof(bus->workq_name), |
@@ -1068,7 +1069,6 @@ int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus, | |||
1068 | codec->addr = codec_addr; | 1069 | codec->addr = codec_addr; |
1069 | mutex_init(&codec->spdif_mutex); | 1070 | mutex_init(&codec->spdif_mutex); |
1070 | mutex_init(&codec->control_mutex); | 1071 | mutex_init(&codec->control_mutex); |
1071 | mutex_init(&codec->prepare_mutex); | ||
1072 | init_hda_cache(&codec->amp_cache, sizeof(struct hda_amp_info)); | 1072 | init_hda_cache(&codec->amp_cache, sizeof(struct hda_amp_info)); |
1073 | init_hda_cache(&codec->cmd_cache, sizeof(struct hda_cache_head)); | 1073 | init_hda_cache(&codec->cmd_cache, sizeof(struct hda_cache_head)); |
1074 | snd_array_init(&codec->mixers, sizeof(struct hda_nid_item), 32); | 1074 | snd_array_init(&codec->mixers, sizeof(struct hda_nid_item), 32); |
@@ -1213,6 +1213,7 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid, | |||
1213 | u32 stream_tag, | 1213 | u32 stream_tag, |
1214 | int channel_id, int format) | 1214 | int channel_id, int format) |
1215 | { | 1215 | { |
1216 | struct hda_codec *c; | ||
1216 | struct hda_cvt_setup *p; | 1217 | struct hda_cvt_setup *p; |
1217 | unsigned int oldval, newval; | 1218 | unsigned int oldval, newval; |
1218 | int i; | 1219 | int i; |
@@ -1253,10 +1254,12 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid, | |||
1253 | p->dirty = 0; | 1254 | p->dirty = 0; |
1254 | 1255 | ||
1255 | /* make other inactive cvts with the same stream-tag dirty */ | 1256 | /* make other inactive cvts with the same stream-tag dirty */ |
1256 | for (i = 0; i < codec->cvt_setups.used; i++) { | 1257 | list_for_each_entry(c, &codec->bus->codec_list, list) { |
1257 | p = snd_array_elem(&codec->cvt_setups, i); | 1258 | for (i = 0; i < c->cvt_setups.used; i++) { |
1258 | if (!p->active && p->stream_tag == stream_tag) | 1259 | p = snd_array_elem(&c->cvt_setups, i); |
1259 | p->dirty = 1; | 1260 | if (!p->active && p->stream_tag == stream_tag) |
1261 | p->dirty = 1; | ||
1262 | } | ||
1260 | } | 1263 | } |
1261 | } | 1264 | } |
1262 | EXPORT_SYMBOL_HDA(snd_hda_codec_setup_stream); | 1265 | EXPORT_SYMBOL_HDA(snd_hda_codec_setup_stream); |
@@ -1306,12 +1309,16 @@ static void really_cleanup_stream(struct hda_codec *codec, | |||
1306 | /* clean up the all conflicting obsolete streams */ | 1309 | /* clean up the all conflicting obsolete streams */ |
1307 | static void purify_inactive_streams(struct hda_codec *codec) | 1310 | static void purify_inactive_streams(struct hda_codec *codec) |
1308 | { | 1311 | { |
1312 | struct hda_codec *c; | ||
1309 | int i; | 1313 | int i; |
1310 | 1314 | ||
1311 | for (i = 0; i < codec->cvt_setups.used; i++) { | 1315 | list_for_each_entry(c, &codec->bus->codec_list, list) { |
1312 | struct hda_cvt_setup *p = snd_array_elem(&codec->cvt_setups, i); | 1316 | for (i = 0; i < c->cvt_setups.used; i++) { |
1313 | if (p->dirty) | 1317 | struct hda_cvt_setup *p; |
1314 | really_cleanup_stream(codec, p); | 1318 | p = snd_array_elem(&c->cvt_setups, i); |
1319 | if (p->dirty) | ||
1320 | really_cleanup_stream(c, p); | ||
1321 | } | ||
1315 | } | 1322 | } |
1316 | } | 1323 | } |
1317 | 1324 | ||
@@ -3502,11 +3509,11 @@ int snd_hda_codec_prepare(struct hda_codec *codec, | |||
3502 | struct snd_pcm_substream *substream) | 3509 | struct snd_pcm_substream *substream) |
3503 | { | 3510 | { |
3504 | int ret; | 3511 | int ret; |
3505 | mutex_lock(&codec->prepare_mutex); | 3512 | mutex_lock(&codec->bus->prepare_mutex); |
3506 | ret = hinfo->ops.prepare(hinfo, codec, stream, format, substream); | 3513 | ret = hinfo->ops.prepare(hinfo, codec, stream, format, substream); |
3507 | if (ret >= 0) | 3514 | if (ret >= 0) |
3508 | purify_inactive_streams(codec); | 3515 | purify_inactive_streams(codec); |
3509 | mutex_unlock(&codec->prepare_mutex); | 3516 | mutex_unlock(&codec->bus->prepare_mutex); |
3510 | return ret; | 3517 | return ret; |
3511 | } | 3518 | } |
3512 | EXPORT_SYMBOL_HDA(snd_hda_codec_prepare); | 3519 | EXPORT_SYMBOL_HDA(snd_hda_codec_prepare); |
@@ -3515,9 +3522,9 @@ void snd_hda_codec_cleanup(struct hda_codec *codec, | |||
3515 | struct hda_pcm_stream *hinfo, | 3522 | struct hda_pcm_stream *hinfo, |
3516 | struct snd_pcm_substream *substream) | 3523 | struct snd_pcm_substream *substream) |
3517 | { | 3524 | { |
3518 | mutex_lock(&codec->prepare_mutex); | 3525 | mutex_lock(&codec->bus->prepare_mutex); |
3519 | hinfo->ops.cleanup(hinfo, codec, substream); | 3526 | hinfo->ops.cleanup(hinfo, codec, substream); |
3520 | mutex_unlock(&codec->prepare_mutex); | 3527 | mutex_unlock(&codec->bus->prepare_mutex); |
3521 | } | 3528 | } |
3522 | EXPORT_SYMBOL_HDA(snd_hda_codec_cleanup); | 3529 | EXPORT_SYMBOL_HDA(snd_hda_codec_cleanup); |
3523 | 3530 | ||
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h index 4303353feda9..62c702240108 100644 --- a/sound/pci/hda/hda_codec.h +++ b/sound/pci/hda/hda_codec.h | |||
@@ -648,6 +648,7 @@ struct hda_bus { | |||
648 | struct hda_codec *caddr_tbl[HDA_MAX_CODEC_ADDRESS + 1]; | 648 | struct hda_codec *caddr_tbl[HDA_MAX_CODEC_ADDRESS + 1]; |
649 | 649 | ||
650 | struct mutex cmd_mutex; | 650 | struct mutex cmd_mutex; |
651 | struct mutex prepare_mutex; | ||
651 | 652 | ||
652 | /* unsolicited event queue */ | 653 | /* unsolicited event queue */ |
653 | struct hda_bus_unsolicited *unsol; | 654 | struct hda_bus_unsolicited *unsol; |
@@ -826,7 +827,6 @@ struct hda_codec { | |||
826 | 827 | ||
827 | struct mutex spdif_mutex; | 828 | struct mutex spdif_mutex; |
828 | struct mutex control_mutex; | 829 | struct mutex control_mutex; |
829 | struct mutex prepare_mutex; | ||
830 | unsigned int spdif_status; /* IEC958 status bits */ | 830 | unsigned int spdif_status; /* IEC958 status bits */ |
831 | unsigned short spdif_ctls; /* SPDIF control bits */ | 831 | unsigned short spdif_ctls; /* SPDIF control bits */ |
832 | unsigned int spdif_in_enable; /* SPDIF input enable? */ | 832 | unsigned int spdif_in_enable; /* SPDIF input enable? */ |
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c index 803b298f7411..26c3ade73583 100644 --- a/sound/pci/hda/hda_eld.c +++ b/sound/pci/hda/hda_eld.c | |||
@@ -596,6 +596,8 @@ void snd_hda_eld_proc_free(struct hda_codec *codec, struct hdmi_eld *eld) | |||
596 | } | 596 | } |
597 | EXPORT_SYMBOL_HDA(snd_hda_eld_proc_free); | 597 | EXPORT_SYMBOL_HDA(snd_hda_eld_proc_free); |
598 | 598 | ||
599 | #endif /* CONFIG_PROC_FS */ | ||
600 | |||
599 | /* update PCM info based on ELD */ | 601 | /* update PCM info based on ELD */ |
600 | void hdmi_eld_update_pcm_info(struct hdmi_eld *eld, struct hda_pcm_stream *pcm, | 602 | void hdmi_eld_update_pcm_info(struct hdmi_eld *eld, struct hda_pcm_stream *pcm, |
601 | struct hda_pcm_stream *codec_pars) | 603 | struct hda_pcm_stream *codec_pars) |
@@ -644,5 +646,3 @@ void hdmi_eld_update_pcm_info(struct hdmi_eld *eld, struct hda_pcm_stream *pcm, | |||
644 | pcm->maxbps = min(pcm->maxbps, codec_pars->maxbps); | 646 | pcm->maxbps = min(pcm->maxbps, codec_pars->maxbps); |
645 | } | 647 | } |
646 | EXPORT_SYMBOL_HDA(hdmi_eld_update_pcm_info); | 648 | EXPORT_SYMBOL_HDA(hdmi_eld_update_pcm_info); |
647 | |||
648 | #endif /* CONFIG_PROC_FS */ | ||
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 31b5d9eeba68..5cdb80edbd7f 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -3049,6 +3049,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { | |||
3049 | SND_PCI_QUIRK(0x1028, 0x02f5, "Dell", | 3049 | SND_PCI_QUIRK(0x1028, 0x02f5, "Dell", |
3050 | CXT5066_DELL_LAPTOP), | 3050 | CXT5066_DELL_LAPTOP), |
3051 | SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5), | 3051 | SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5), |
3052 | SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO), | ||
3052 | SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO), | 3053 | SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO), |
3053 | SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), | 3054 | SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), |
3054 | SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), | 3055 | SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), |
@@ -3058,6 +3059,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { | |||
3058 | SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD), | 3059 | SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD), |
3059 | SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), | 3060 | SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), |
3060 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G series", CXT5066_IDEAPAD), | 3061 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G series", CXT5066_IDEAPAD), |
3062 | SND_PCI_QUIRK(0x17aa, 0x390a, "Lenovo S10-3t", CXT5066_IDEAPAD), | ||
3061 | SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G series (AMD)", CXT5066_IDEAPAD), | 3063 | SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G series (AMD)", CXT5066_IDEAPAD), |
3062 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "ideapad", CXT5066_IDEAPAD), | 3064 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "ideapad", CXT5066_IDEAPAD), |
3063 | {} | 3065 | {} |
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 2bc0f07cf33f..afd6022a96a7 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c | |||
@@ -707,8 +707,6 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t nid, | |||
707 | u32 stream_tag, int format) | 707 | u32 stream_tag, int format) |
708 | { | 708 | { |
709 | struct hdmi_spec *spec = codec->spec; | 709 | struct hdmi_spec *spec = codec->spec; |
710 | int tag; | ||
711 | int fmt; | ||
712 | int pinctl; | 710 | int pinctl; |
713 | int new_pinctl = 0; | 711 | int new_pinctl = 0; |
714 | int i; | 712 | int i; |
@@ -745,24 +743,7 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t nid, | |||
745 | return -EINVAL; | 743 | return -EINVAL; |
746 | } | 744 | } |
747 | 745 | ||
748 | tag = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0) >> 4; | 746 | snd_hda_codec_setup_stream(codec, nid, stream_tag, 0, format); |
749 | fmt = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_STREAM_FORMAT, 0); | ||
750 | |||
751 | snd_printdd("hdmi_setup_stream: " | ||
752 | "NID=0x%x, %sstream=0x%x, %sformat=0x%x\n", | ||
753 | nid, | ||
754 | tag == stream_tag ? "" : "new-", | ||
755 | stream_tag, | ||
756 | fmt == format ? "" : "new-", | ||
757 | format); | ||
758 | |||
759 | if (tag != stream_tag) | ||
760 | snd_hda_codec_write(codec, nid, 0, | ||
761 | AC_VERB_SET_CHANNEL_STREAMID, | ||
762 | stream_tag << 4); | ||
763 | if (fmt != format) | ||
764 | snd_hda_codec_write(codec, nid, 0, | ||
765 | AC_VERB_SET_STREAM_FORMAT, format); | ||
766 | return 0; | 747 | return 0; |
767 | } | 748 | } |
768 | 749 | ||
diff --git a/sound/pci/hda/patch_intelhdmi.c b/sound/pci/hda/patch_intelhdmi.c index d382d3c81c0f..36a9b83a6174 100644 --- a/sound/pci/hda/patch_intelhdmi.c +++ b/sound/pci/hda/patch_intelhdmi.c | |||
@@ -69,20 +69,12 @@ static int intel_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo, | |||
69 | return hdmi_setup_stream(codec, hinfo->nid, stream_tag, format); | 69 | return hdmi_setup_stream(codec, hinfo->nid, stream_tag, format); |
70 | } | 70 | } |
71 | 71 | ||
72 | static int intel_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, | ||
73 | struct hda_codec *codec, | ||
74 | struct snd_pcm_substream *substream) | ||
75 | { | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | static struct hda_pcm_stream intel_hdmi_pcm_playback = { | 72 | static struct hda_pcm_stream intel_hdmi_pcm_playback = { |
80 | .substreams = 1, | 73 | .substreams = 1, |
81 | .channels_min = 2, | 74 | .channels_min = 2, |
82 | .ops = { | 75 | .ops = { |
83 | .open = hdmi_pcm_open, | 76 | .open = hdmi_pcm_open, |
84 | .prepare = intel_hdmi_playback_pcm_prepare, | 77 | .prepare = intel_hdmi_playback_pcm_prepare, |
85 | .cleanup = intel_hdmi_playback_pcm_cleanup, | ||
86 | }, | 78 | }, |
87 | }; | 79 | }; |
88 | 80 | ||
diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c index f636870dc718..69b950d527c3 100644 --- a/sound/pci/hda/patch_nvhdmi.c +++ b/sound/pci/hda/patch_nvhdmi.c | |||
@@ -326,13 +326,6 @@ static int nvhdmi_dig_playback_pcm_prepare_8ch(struct hda_pcm_stream *hinfo, | |||
326 | return 0; | 326 | return 0; |
327 | } | 327 | } |
328 | 328 | ||
329 | static int nvhdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, | ||
330 | struct hda_codec *codec, | ||
331 | struct snd_pcm_substream *substream) | ||
332 | { | ||
333 | return 0; | ||
334 | } | ||
335 | |||
336 | static int nvhdmi_dig_playback_pcm_prepare_2ch(struct hda_pcm_stream *hinfo, | 329 | static int nvhdmi_dig_playback_pcm_prepare_2ch(struct hda_pcm_stream *hinfo, |
337 | struct hda_codec *codec, | 330 | struct hda_codec *codec, |
338 | unsigned int stream_tag, | 331 | unsigned int stream_tag, |
@@ -350,7 +343,6 @@ static struct hda_pcm_stream nvhdmi_pcm_digital_playback_8ch_89 = { | |||
350 | .ops = { | 343 | .ops = { |
351 | .open = hdmi_pcm_open, | 344 | .open = hdmi_pcm_open, |
352 | .prepare = nvhdmi_dig_playback_pcm_prepare_8ch_89, | 345 | .prepare = nvhdmi_dig_playback_pcm_prepare_8ch_89, |
353 | .cleanup = nvhdmi_playback_pcm_cleanup, | ||
354 | }, | 346 | }, |
355 | }; | 347 | }; |
356 | 348 | ||
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 2cd1ae809e46..a4dd04524e43 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -19030,6 +19030,7 @@ static int patch_alc888(struct hda_codec *codec) | |||
19030 | /* | 19030 | /* |
19031 | * ALC680 support | 19031 | * ALC680 support |
19032 | */ | 19032 | */ |
19033 | #define ALC680_DIGIN_NID ALC880_DIGIN_NID | ||
19033 | #define ALC680_DIGOUT_NID ALC880_DIGOUT_NID | 19034 | #define ALC680_DIGOUT_NID ALC880_DIGOUT_NID |
19034 | #define alc680_modes alc260_modes | 19035 | #define alc680_modes alc260_modes |
19035 | 19036 | ||
@@ -19044,23 +19045,93 @@ static hda_nid_t alc680_adc_nids[3] = { | |||
19044 | 0x07, 0x08, 0x09 | 19045 | 0x07, 0x08, 0x09 |
19045 | }; | 19046 | }; |
19046 | 19047 | ||
19048 | /* | ||
19049 | * Analog capture ADC cgange | ||
19050 | */ | ||
19051 | static int alc680_capture_pcm_prepare(struct hda_pcm_stream *hinfo, | ||
19052 | struct hda_codec *codec, | ||
19053 | unsigned int stream_tag, | ||
19054 | unsigned int format, | ||
19055 | struct snd_pcm_substream *substream) | ||
19056 | { | ||
19057 | struct alc_spec *spec = codec->spec; | ||
19058 | struct auto_pin_cfg *cfg = &spec->autocfg; | ||
19059 | unsigned int pre_mic, pre_line; | ||
19060 | |||
19061 | pre_mic = snd_hda_jack_detect(codec, cfg->input_pins[AUTO_PIN_MIC]); | ||
19062 | pre_line = snd_hda_jack_detect(codec, cfg->input_pins[AUTO_PIN_LINE]); | ||
19063 | |||
19064 | spec->cur_adc_stream_tag = stream_tag; | ||
19065 | spec->cur_adc_format = format; | ||
19066 | |||
19067 | if (pre_mic || pre_line) { | ||
19068 | if (pre_mic) | ||
19069 | snd_hda_codec_setup_stream(codec, 0x08, stream_tag, 0, | ||
19070 | format); | ||
19071 | else | ||
19072 | snd_hda_codec_setup_stream(codec, 0x09, stream_tag, 0, | ||
19073 | format); | ||
19074 | } else | ||
19075 | snd_hda_codec_setup_stream(codec, 0x07, stream_tag, 0, format); | ||
19076 | return 0; | ||
19077 | } | ||
19078 | |||
19079 | static int alc680_capture_pcm_cleanup(struct hda_pcm_stream *hinfo, | ||
19080 | struct hda_codec *codec, | ||
19081 | struct snd_pcm_substream *substream) | ||
19082 | { | ||
19083 | snd_hda_codec_cleanup_stream(codec, 0x07); | ||
19084 | snd_hda_codec_cleanup_stream(codec, 0x08); | ||
19085 | snd_hda_codec_cleanup_stream(codec, 0x09); | ||
19086 | return 0; | ||
19087 | } | ||
19088 | |||
19089 | static struct hda_pcm_stream alc680_pcm_analog_auto_capture = { | ||
19090 | .substreams = 1, /* can be overridden */ | ||
19091 | .channels_min = 2, | ||
19092 | .channels_max = 2, | ||
19093 | /* NID is set in alc_build_pcms */ | ||
19094 | .ops = { | ||
19095 | .prepare = alc680_capture_pcm_prepare, | ||
19096 | .cleanup = alc680_capture_pcm_cleanup | ||
19097 | }, | ||
19098 | }; | ||
19099 | |||
19047 | static struct snd_kcontrol_new alc680_base_mixer[] = { | 19100 | static struct snd_kcontrol_new alc680_base_mixer[] = { |
19048 | /* output mixer control */ | 19101 | /* output mixer control */ |
19049 | HDA_CODEC_VOLUME("Front Playback Volume", 0x2, 0x0, HDA_OUTPUT), | 19102 | HDA_CODEC_VOLUME("Front Playback Volume", 0x2, 0x0, HDA_OUTPUT), |
19050 | HDA_CODEC_MUTE("Front Playback Switch", 0x14, 0x0, HDA_OUTPUT), | 19103 | HDA_CODEC_MUTE("Front Playback Switch", 0x14, 0x0, HDA_OUTPUT), |
19051 | HDA_CODEC_VOLUME("Headphone Playback Volume", 0x4, 0x0, HDA_OUTPUT), | 19104 | HDA_CODEC_VOLUME("Headphone Playback Volume", 0x4, 0x0, HDA_OUTPUT), |
19052 | HDA_CODEC_MUTE("Headphone Playback Switch", 0x16, 0x0, HDA_OUTPUT), | 19105 | HDA_CODEC_MUTE("Headphone Playback Switch", 0x16, 0x0, HDA_OUTPUT), |
19106 | HDA_CODEC_VOLUME("Int Mic Boost", 0x12, 0, HDA_INPUT), | ||
19053 | HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), | 19107 | HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), |
19108 | HDA_CODEC_VOLUME("Line In Boost", 0x19, 0, HDA_INPUT), | ||
19054 | { } | 19109 | { } |
19055 | }; | 19110 | }; |
19056 | 19111 | ||
19057 | static struct snd_kcontrol_new alc680_capture_mixer[] = { | 19112 | static struct hda_bind_ctls alc680_bind_cap_vol = { |
19058 | HDA_CODEC_VOLUME("Capture Volume", 0x07, 0x0, HDA_INPUT), | 19113 | .ops = &snd_hda_bind_vol, |
19059 | HDA_CODEC_MUTE("Capture Switch", 0x07, 0x0, HDA_INPUT), | 19114 | .values = { |
19060 | HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x08, 0x0, HDA_INPUT), | 19115 | HDA_COMPOSE_AMP_VAL(0x07, 3, 0, HDA_INPUT), |
19061 | HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x08, 0x0, HDA_INPUT), | 19116 | HDA_COMPOSE_AMP_VAL(0x08, 3, 0, HDA_INPUT), |
19062 | HDA_CODEC_VOLUME_IDX("Capture Volume", 2, 0x09, 0x0, HDA_INPUT), | 19117 | HDA_COMPOSE_AMP_VAL(0x09, 3, 0, HDA_INPUT), |
19063 | HDA_CODEC_MUTE_IDX("Capture Switch", 2, 0x09, 0x0, HDA_INPUT), | 19118 | 0 |
19119 | }, | ||
19120 | }; | ||
19121 | |||
19122 | static struct hda_bind_ctls alc680_bind_cap_switch = { | ||
19123 | .ops = &snd_hda_bind_sw, | ||
19124 | .values = { | ||
19125 | HDA_COMPOSE_AMP_VAL(0x07, 3, 0, HDA_INPUT), | ||
19126 | HDA_COMPOSE_AMP_VAL(0x08, 3, 0, HDA_INPUT), | ||
19127 | HDA_COMPOSE_AMP_VAL(0x09, 3, 0, HDA_INPUT), | ||
19128 | 0 | ||
19129 | }, | ||
19130 | }; | ||
19131 | |||
19132 | static struct snd_kcontrol_new alc680_master_capture_mixer[] = { | ||
19133 | HDA_BIND_VOL("Capture Volume", &alc680_bind_cap_vol), | ||
19134 | HDA_BIND_SW("Capture Switch", &alc680_bind_cap_switch), | ||
19064 | { } /* end */ | 19135 | { } /* end */ |
19065 | }; | 19136 | }; |
19066 | 19137 | ||
@@ -19068,25 +19139,73 @@ static struct snd_kcontrol_new alc680_capture_mixer[] = { | |||
19068 | * generic initialization of ADC, input mixers and output mixers | 19139 | * generic initialization of ADC, input mixers and output mixers |
19069 | */ | 19140 | */ |
19070 | static struct hda_verb alc680_init_verbs[] = { | 19141 | static struct hda_verb alc680_init_verbs[] = { |
19071 | /* Unmute DAC0-1 and set vol = 0 */ | 19142 | {0x02, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, |
19072 | {0x02, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO}, | 19143 | {0x03, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, |
19073 | {0x03, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO}, | 19144 | {0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, |
19074 | {0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO}, | ||
19075 | 19145 | ||
19076 | {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40}, | 19146 | {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, |
19077 | {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40}, | 19147 | {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT}, |
19078 | {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0}, | 19148 | {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT}, |
19079 | {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24}, | 19149 | {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP}, |
19080 | {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20}, | 19150 | {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80}, |
19151 | {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, | ||
19081 | 19152 | ||
19082 | {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE}, | 19153 | {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE}, |
19083 | {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE}, | 19154 | {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE}, |
19084 | {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE}, | 19155 | {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE}, |
19085 | {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE}, | 19156 | {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE}, |
19086 | {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE}, | 19157 | {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE}, |
19158 | |||
19159 | {0x16, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN}, | ||
19160 | {0x18, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_MIC_EVENT | AC_USRSP_EN}, | ||
19161 | |||
19087 | { } | 19162 | { } |
19088 | }; | 19163 | }; |
19089 | 19164 | ||
19165 | /* toggle speaker-output according to the hp-jack state */ | ||
19166 | static void alc680_base_setup(struct hda_codec *codec) | ||
19167 | { | ||
19168 | struct alc_spec *spec = codec->spec; | ||
19169 | |||
19170 | spec->autocfg.hp_pins[0] = 0x16; | ||
19171 | spec->autocfg.speaker_pins[0] = 0x14; | ||
19172 | spec->autocfg.speaker_pins[1] = 0x15; | ||
19173 | spec->autocfg.input_pins[AUTO_PIN_MIC] = 0x18; | ||
19174 | spec->autocfg.input_pins[AUTO_PIN_LINE] = 0x19; | ||
19175 | } | ||
19176 | |||
19177 | static void alc680_rec_autoswitch(struct hda_codec *codec) | ||
19178 | { | ||
19179 | struct alc_spec *spec = codec->spec; | ||
19180 | struct auto_pin_cfg *cfg = &spec->autocfg; | ||
19181 | unsigned int present; | ||
19182 | hda_nid_t new_adc; | ||
19183 | |||
19184 | present = snd_hda_jack_detect(codec, cfg->input_pins[AUTO_PIN_MIC]); | ||
19185 | |||
19186 | new_adc = present ? 0x8 : 0x7; | ||
19187 | __snd_hda_codec_cleanup_stream(codec, !present ? 0x8 : 0x7, 1); | ||
19188 | snd_hda_codec_setup_stream(codec, new_adc, | ||
19189 | spec->cur_adc_stream_tag, 0, | ||
19190 | spec->cur_adc_format); | ||
19191 | |||
19192 | } | ||
19193 | |||
19194 | static void alc680_unsol_event(struct hda_codec *codec, | ||
19195 | unsigned int res) | ||
19196 | { | ||
19197 | if ((res >> 26) == ALC880_HP_EVENT) | ||
19198 | alc_automute_amp(codec); | ||
19199 | if ((res >> 26) == ALC880_MIC_EVENT) | ||
19200 | alc680_rec_autoswitch(codec); | ||
19201 | } | ||
19202 | |||
19203 | static void alc680_inithook(struct hda_codec *codec) | ||
19204 | { | ||
19205 | alc_automute_amp(codec); | ||
19206 | alc680_rec_autoswitch(codec); | ||
19207 | } | ||
19208 | |||
19090 | /* create input playback/capture controls for the given pin */ | 19209 | /* create input playback/capture controls for the given pin */ |
19091 | static int alc680_new_analog_output(struct alc_spec *spec, hda_nid_t nid, | 19210 | static int alc680_new_analog_output(struct alc_spec *spec, hda_nid_t nid, |
19092 | const char *ctlname, int idx) | 19211 | const char *ctlname, int idx) |
@@ -19197,13 +19316,7 @@ static void alc680_auto_init_hp_out(struct hda_codec *codec) | |||
19197 | #define alc680_pcm_analog_capture alc880_pcm_analog_capture | 19316 | #define alc680_pcm_analog_capture alc880_pcm_analog_capture |
19198 | #define alc680_pcm_analog_alt_capture alc880_pcm_analog_alt_capture | 19317 | #define alc680_pcm_analog_alt_capture alc880_pcm_analog_alt_capture |
19199 | #define alc680_pcm_digital_playback alc880_pcm_digital_playback | 19318 | #define alc680_pcm_digital_playback alc880_pcm_digital_playback |
19200 | 19319 | #define alc680_pcm_digital_capture alc880_pcm_digital_capture | |
19201 | static struct hda_input_mux alc680_capture_source = { | ||
19202 | .num_items = 1, | ||
19203 | .items = { | ||
19204 | { "Mic", 0x0 }, | ||
19205 | }, | ||
19206 | }; | ||
19207 | 19320 | ||
19208 | /* | 19321 | /* |
19209 | * BIOS auto configuration | 19322 | * BIOS auto configuration |
@@ -19218,6 +19331,7 @@ static int alc680_parse_auto_config(struct hda_codec *codec) | |||
19218 | alc680_ignore); | 19331 | alc680_ignore); |
19219 | if (err < 0) | 19332 | if (err < 0) |
19220 | return err; | 19333 | return err; |
19334 | |||
19221 | if (!spec->autocfg.line_outs) { | 19335 | if (!spec->autocfg.line_outs) { |
19222 | if (spec->autocfg.dig_outs || spec->autocfg.dig_in_pin) { | 19336 | if (spec->autocfg.dig_outs || spec->autocfg.dig_in_pin) { |
19223 | spec->multiout.max_channels = 2; | 19337 | spec->multiout.max_channels = 2; |
@@ -19239,8 +19353,6 @@ static int alc680_parse_auto_config(struct hda_codec *codec) | |||
19239 | add_mixer(spec, spec->kctls.list); | 19353 | add_mixer(spec, spec->kctls.list); |
19240 | 19354 | ||
19241 | add_verb(spec, alc680_init_verbs); | 19355 | add_verb(spec, alc680_init_verbs); |
19242 | spec->num_mux_defs = 1; | ||
19243 | spec->input_mux = &alc680_capture_source; | ||
19244 | 19356 | ||
19245 | err = alc_auto_add_mic_boost(codec); | 19357 | err = alc_auto_add_mic_boost(codec); |
19246 | if (err < 0) | 19358 | if (err < 0) |
@@ -19279,17 +19391,17 @@ static struct snd_pci_quirk alc680_cfg_tbl[] = { | |||
19279 | static struct alc_config_preset alc680_presets[] = { | 19391 | static struct alc_config_preset alc680_presets[] = { |
19280 | [ALC680_BASE] = { | 19392 | [ALC680_BASE] = { |
19281 | .mixers = { alc680_base_mixer }, | 19393 | .mixers = { alc680_base_mixer }, |
19282 | .cap_mixer = alc680_capture_mixer, | 19394 | .cap_mixer = alc680_master_capture_mixer, |
19283 | .init_verbs = { alc680_init_verbs }, | 19395 | .init_verbs = { alc680_init_verbs }, |
19284 | .num_dacs = ARRAY_SIZE(alc680_dac_nids), | 19396 | .num_dacs = ARRAY_SIZE(alc680_dac_nids), |
19285 | .dac_nids = alc680_dac_nids, | 19397 | .dac_nids = alc680_dac_nids, |
19286 | .num_adc_nids = ARRAY_SIZE(alc680_adc_nids), | ||
19287 | .adc_nids = alc680_adc_nids, | ||
19288 | .hp_nid = 0x04, | ||
19289 | .dig_out_nid = ALC680_DIGOUT_NID, | 19398 | .dig_out_nid = ALC680_DIGOUT_NID, |
19290 | .num_channel_mode = ARRAY_SIZE(alc680_modes), | 19399 | .num_channel_mode = ARRAY_SIZE(alc680_modes), |
19291 | .channel_mode = alc680_modes, | 19400 | .channel_mode = alc680_modes, |
19292 | .input_mux = &alc680_capture_source, | 19401 | .unsol_event = alc680_unsol_event, |
19402 | .setup = alc680_base_setup, | ||
19403 | .init_hook = alc680_inithook, | ||
19404 | |||
19293 | }, | 19405 | }, |
19294 | }; | 19406 | }; |
19295 | 19407 | ||
@@ -19333,9 +19445,9 @@ static int patch_alc680(struct hda_codec *codec) | |||
19333 | setup_preset(codec, &alc680_presets[board_config]); | 19445 | setup_preset(codec, &alc680_presets[board_config]); |
19334 | 19446 | ||
19335 | spec->stream_analog_playback = &alc680_pcm_analog_playback; | 19447 | spec->stream_analog_playback = &alc680_pcm_analog_playback; |
19336 | spec->stream_analog_capture = &alc680_pcm_analog_capture; | 19448 | spec->stream_analog_capture = &alc680_pcm_analog_auto_capture; |
19337 | spec->stream_analog_alt_capture = &alc680_pcm_analog_alt_capture; | ||
19338 | spec->stream_digital_playback = &alc680_pcm_digital_playback; | 19449 | spec->stream_digital_playback = &alc680_pcm_digital_playback; |
19450 | spec->stream_digital_capture = &alc680_pcm_digital_capture; | ||
19339 | 19451 | ||
19340 | if (!spec->adc_nids) { | 19452 | if (!spec->adc_nids) { |
19341 | spec->adc_nids = alc680_adc_nids; | 19453 | spec->adc_nids = alc680_adc_nids; |
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index f3f861bd1bf8..95148e58026c 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
@@ -6303,6 +6303,21 @@ static struct hda_codec_preset snd_hda_preset_sigmatel[] = { | |||
6303 | { .id = 0x111d76b5, .name = "92HD71B6X", .patch = patch_stac92hd71bxx }, | 6303 | { .id = 0x111d76b5, .name = "92HD71B6X", .patch = patch_stac92hd71bxx }, |
6304 | { .id = 0x111d76b6, .name = "92HD71B5X", .patch = patch_stac92hd71bxx }, | 6304 | { .id = 0x111d76b6, .name = "92HD71B5X", .patch = patch_stac92hd71bxx }, |
6305 | { .id = 0x111d76b7, .name = "92HD71B5X", .patch = patch_stac92hd71bxx }, | 6305 | { .id = 0x111d76b7, .name = "92HD71B5X", .patch = patch_stac92hd71bxx }, |
6306 | { .id = 0x111d76c0, .name = "92HD89C3", .patch = patch_stac92hd73xx }, | ||
6307 | { .id = 0x111d76c1, .name = "92HD89C2", .patch = patch_stac92hd73xx }, | ||
6308 | { .id = 0x111d76c2, .name = "92HD89C1", .patch = patch_stac92hd73xx }, | ||
6309 | { .id = 0x111d76c3, .name = "92HD89B3", .patch = patch_stac92hd73xx }, | ||
6310 | { .id = 0x111d76c4, .name = "92HD89B2", .patch = patch_stac92hd73xx }, | ||
6311 | { .id = 0x111d76c5, .name = "92HD89B1", .patch = patch_stac92hd73xx }, | ||
6312 | { .id = 0x111d76c6, .name = "92HD89E3", .patch = patch_stac92hd73xx }, | ||
6313 | { .id = 0x111d76c7, .name = "92HD89E2", .patch = patch_stac92hd73xx }, | ||
6314 | { .id = 0x111d76c8, .name = "92HD89E1", .patch = patch_stac92hd73xx }, | ||
6315 | { .id = 0x111d76c9, .name = "92HD89D3", .patch = patch_stac92hd73xx }, | ||
6316 | { .id = 0x111d76ca, .name = "92HD89D2", .patch = patch_stac92hd73xx }, | ||
6317 | { .id = 0x111d76cb, .name = "92HD89D1", .patch = patch_stac92hd73xx }, | ||
6318 | { .id = 0x111d76cc, .name = "92HD89F3", .patch = patch_stac92hd73xx }, | ||
6319 | { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx }, | ||
6320 | { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx }, | ||
6306 | {} /* terminator */ | 6321 | {} /* terminator */ |
6307 | }; | 6322 | }; |
6308 | 6323 | ||
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c index 6433e65c9507..467749249576 100644 --- a/sound/pci/intel8x0.c +++ b/sound/pci/intel8x0.c | |||
@@ -1776,6 +1776,12 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = { | |||
1776 | }, | 1776 | }, |
1777 | { | 1777 | { |
1778 | .subvendor = 0x1014, | 1778 | .subvendor = 0x1014, |
1779 | .subdevice = 0x0534, | ||
1780 | .name = "ThinkPad X31", | ||
1781 | .type = AC97_TUNE_INV_EAPD | ||
1782 | }, | ||
1783 | { | ||
1784 | .subvendor = 0x1014, | ||
1779 | .subdevice = 0x1f00, | 1785 | .subdevice = 0x1f00, |
1780 | .name = "MS-9128", | 1786 | .name = "MS-9128", |
1781 | .type = AC97_TUNE_ALC_JACK | 1787 | .type = AC97_TUNE_ALC_JACK |
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c index f64fb7d988cb..ad5202efd7a9 100644 --- a/sound/pci/riptide/riptide.c +++ b/sound/pci/riptide/riptide.c | |||
@@ -1224,15 +1224,14 @@ static int try_to_load_firmware(struct cmdif *cif, struct snd_riptide *chip) | |||
1224 | firmware.firmware.ASIC, firmware.firmware.CODEC, | 1224 | firmware.firmware.ASIC, firmware.firmware.CODEC, |
1225 | firmware.firmware.AUXDSP, firmware.firmware.PROG); | 1225 | firmware.firmware.AUXDSP, firmware.firmware.PROG); |
1226 | 1226 | ||
1227 | if (!chip) | ||
1228 | return 1; | ||
1229 | |||
1227 | for (i = 0; i < FIRMWARE_VERSIONS; i++) { | 1230 | for (i = 0; i < FIRMWARE_VERSIONS; i++) { |
1228 | if (!memcmp(&firmware_versions[i], &firmware, sizeof(firmware))) | 1231 | if (!memcmp(&firmware_versions[i], &firmware, sizeof(firmware))) |
1229 | break; | 1232 | return 1; /* OK */ |
1230 | } | ||
1231 | if (i >= FIRMWARE_VERSIONS) | ||
1232 | return 0; /* no match */ | ||
1233 | 1233 | ||
1234 | if (!chip) | 1234 | } |
1235 | return 1; /* OK */ | ||
1236 | 1235 | ||
1237 | snd_printdd("Writing Firmware\n"); | 1236 | snd_printdd("Writing Firmware\n"); |
1238 | if (!chip->fw_entry) { | 1237 | if (!chip->fw_entry) { |
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c index 4e212ed62ea6..f8154e661524 100644 --- a/sound/soc/codecs/wm8776.c +++ b/sound/soc/codecs/wm8776.c | |||
@@ -178,13 +178,6 @@ static int wm8776_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) | |||
178 | case SND_SOC_DAIFMT_LEFT_J: | 178 | case SND_SOC_DAIFMT_LEFT_J: |
179 | iface |= 0x0001; | 179 | iface |= 0x0001; |
180 | break; | 180 | break; |
181 | /* FIXME: CHECK A/B */ | ||
182 | case SND_SOC_DAIFMT_DSP_A: | ||
183 | iface |= 0x0003; | ||
184 | break; | ||
185 | case SND_SOC_DAIFMT_DSP_B: | ||
186 | iface |= 0x0007; | ||
187 | break; | ||
188 | default: | 181 | default: |
189 | return -EINVAL; | 182 | return -EINVAL; |
190 | } | 183 | } |
diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c index a11daa1e905b..c81da05a4f11 100644 --- a/sound/soc/imx/imx-ssi.c +++ b/sound/soc/imx/imx-ssi.c | |||
@@ -254,6 +254,9 @@ static int imx_ssi_hw_params(struct snd_pcm_substream *substream, | |||
254 | dma_data = &ssi->dma_params_rx; | 254 | dma_data = &ssi->dma_params_rx; |
255 | } | 255 | } |
256 | 256 | ||
257 | if (ssi->flags & IMX_SSI_SYN) | ||
258 | reg = SSI_STCCR; | ||
259 | |||
257 | snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data); | 260 | snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data); |
258 | 261 | ||
259 | sccr = readl(ssi->base + reg) & ~SSI_STCCR_WL_MASK; | 262 | sccr = readl(ssi->base + reg) & ~SSI_STCCR_WL_MASK; |
diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 41abb90df50d..4f1fa77c1feb 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile | |||
@@ -5,6 +5,12 @@ endif | |||
5 | # The default target of this Makefile is... | 5 | # The default target of this Makefile is... |
6 | all:: | 6 | all:: |
7 | 7 | ||
8 | ifneq ($(OUTPUT),) | ||
9 | # check that the output directory actually exists | ||
10 | OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd) | ||
11 | $(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist)) | ||
12 | endif | ||
13 | |||
8 | # Define V=1 to have a more verbose compile. | 14 | # Define V=1 to have a more verbose compile. |
9 | # Define V=2 to have an even more verbose compile. | 15 | # Define V=2 to have an even more verbose compile. |
10 | # | 16 | # |
@@ -157,10 +163,6 @@ all:: | |||
157 | # | 163 | # |
158 | # Define NO_DWARF if you do not want debug-info analysis feature at all. | 164 | # Define NO_DWARF if you do not want debug-info analysis feature at all. |
159 | 165 | ||
160 | $(shell sh -c 'mkdir -p $(OUTPUT)scripts/{perl,python}/Perf-Trace-Util/' 2> /dev/null) | ||
161 | $(shell sh -c 'mkdir -p $(OUTPUT)util/{ui/browsers,scripting-engines}/' 2> /dev/null) | ||
162 | $(shell sh -c 'mkdir $(OUTPUT)bench' 2> /dev/null) | ||
163 | |||
164 | $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE | 166 | $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE |
165 | @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT) | 167 | @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT) |
166 | -include $(OUTPUT)PERF-VERSION-FILE | 168 | -include $(OUTPUT)PERF-VERSION-FILE |
@@ -186,8 +188,6 @@ ifeq ($(ARCH),x86_64) | |||
186 | ARCH := x86 | 188 | ARCH := x86 |
187 | endif | 189 | endif |
188 | 190 | ||
189 | $(shell sh -c 'mkdir -p $(OUTPUT)arch/$(ARCH)/util/' 2> /dev/null) | ||
190 | |||
191 | # CFLAGS and LDFLAGS are for the users to override from the command line. | 191 | # CFLAGS and LDFLAGS are for the users to override from the command line. |
192 | 192 | ||
193 | # | 193 | # |
@@ -268,6 +268,7 @@ export prefix bindir sharedir sysconfdir | |||
268 | CC = $(CROSS_COMPILE)gcc | 268 | CC = $(CROSS_COMPILE)gcc |
269 | AR = $(CROSS_COMPILE)ar | 269 | AR = $(CROSS_COMPILE)ar |
270 | RM = rm -f | 270 | RM = rm -f |
271 | MKDIR = mkdir | ||
271 | TAR = tar | 272 | TAR = tar |
272 | FIND = find | 273 | FIND = find |
273 | INSTALL = install | 274 | INSTALL = install |
@@ -838,6 +839,7 @@ ifndef V | |||
838 | QUIET_CC = @echo ' ' CC $@; | 839 | QUIET_CC = @echo ' ' CC $@; |
839 | QUIET_AR = @echo ' ' AR $@; | 840 | QUIET_AR = @echo ' ' AR $@; |
840 | QUIET_LINK = @echo ' ' LINK $@; | 841 | QUIET_LINK = @echo ' ' LINK $@; |
842 | QUIET_MKDIR = @echo ' ' MKDIR $@; | ||
841 | QUIET_BUILT_IN = @echo ' ' BUILTIN $@; | 843 | QUIET_BUILT_IN = @echo ' ' BUILTIN $@; |
842 | QUIET_GEN = @echo ' ' GEN $@; | 844 | QUIET_GEN = @echo ' ' GEN $@; |
843 | QUIET_SUBDIR0 = +@subdir= | 845 | QUIET_SUBDIR0 = +@subdir= |
@@ -935,15 +937,15 @@ $(OUTPUT)common-cmds.h: $(wildcard Documentation/perf-*.txt) | |||
935 | $(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@ | 937 | $(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@ |
936 | 938 | ||
937 | $(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh | 939 | $(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh |
938 | $(QUIET_GEN)$(RM) $@ $@+ && \ | 940 | $(QUIET_GEN)$(RM) $(OUTPUT)$@ $(OUTPUT)$@+ && \ |
939 | sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \ | 941 | sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \ |
940 | -e 's|@SHELL_PATH@|$(SHELL_PATH_SQ)|' \ | 942 | -e 's|@SHELL_PATH@|$(SHELL_PATH_SQ)|' \ |
941 | -e 's|@@PERL@@|$(PERL_PATH_SQ)|g' \ | 943 | -e 's|@@PERL@@|$(PERL_PATH_SQ)|g' \ |
942 | -e 's/@@PERF_VERSION@@/$(PERF_VERSION)/g' \ | 944 | -e 's/@@PERF_VERSION@@/$(PERF_VERSION)/g' \ |
943 | -e 's/@@NO_CURL@@/$(NO_CURL)/g' \ | 945 | -e 's/@@NO_CURL@@/$(NO_CURL)/g' \ |
944 | $@.sh >$@+ && \ | 946 | $@.sh > $(OUTPUT)$@+ && \ |
945 | chmod +x $@+ && \ | 947 | chmod +x $(OUTPUT)$@+ && \ |
946 | mv $@+ $(OUTPUT)$@ | 948 | mv $(OUTPUT)$@+ $(OUTPUT)$@ |
947 | 949 | ||
948 | configure: configure.ac | 950 | configure: configure.ac |
949 | $(QUIET_GEN)$(RM) $@ $<+ && \ | 951 | $(QUIET_GEN)$(RM) $@ $<+ && \ |
@@ -1012,6 +1014,14 @@ $(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H) | |||
1012 | $(patsubst perf-%$X,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h) | 1014 | $(patsubst perf-%$X,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h) |
1013 | builtin-revert.o wt-status.o: wt-status.h | 1015 | builtin-revert.o wt-status.o: wt-status.h |
1014 | 1016 | ||
1017 | # we compile into subdirectories. if the target directory is not the source directory, they might not exists. So | ||
1018 | # we depend the various files onto their directories. | ||
1019 | DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h | ||
1020 | $(DIRECTORY_DEPS): $(sort $(dir $(DIRECTORY_DEPS))) | ||
1021 | # In the second step, we make a rule to actually create these directories | ||
1022 | $(sort $(dir $(DIRECTORY_DEPS))): | ||
1023 | $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null | ||
1024 | |||
1015 | $(LIB_FILE): $(LIB_OBJS) | 1025 | $(LIB_FILE): $(LIB_OBJS) |
1016 | $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS) | 1026 | $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS) |
1017 | 1027 | ||
diff --git a/tools/perf/feature-tests.mak b/tools/perf/feature-tests.mak index ddb68e601f0e..7a7b60859053 100644 --- a/tools/perf/feature-tests.mak +++ b/tools/perf/feature-tests.mak | |||
@@ -113,7 +113,7 @@ endef | |||
113 | # try-cc | 113 | # try-cc |
114 | # Usage: option = $(call try-cc, source-to-build, cc-options) | 114 | # Usage: option = $(call try-cc, source-to-build, cc-options) |
115 | try-cc = $(shell sh -c \ | 115 | try-cc = $(shell sh -c \ |
116 | 'TMP="$(TMPOUT).$$$$"; \ | 116 | 'TMP="$(OUTPUT)$(TMPOUT).$$$$"; \ |
117 | echo "$(1)" | \ | 117 | echo "$(1)" | \ |
118 | $(CC) -x c - $(2) -o "$$TMP" > /dev/null 2>&1 && echo y; \ | 118 | $(CC) -x c - $(2) -o "$$TMP" > /dev/null 2>&1 && echo y; \ |
119 | rm -f "$$TMP"') | 119 | rm -f "$$TMP"') |
diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c index 55ff792459ac..a90273e63f4f 100644 --- a/tools/perf/util/ui/browsers/annotate.c +++ b/tools/perf/util/ui/browsers/annotate.c | |||
@@ -146,6 +146,7 @@ static int annotate_browser__run(struct annotate_browser *self, | |||
146 | return -1; | 146 | return -1; |
147 | 147 | ||
148 | newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT); | 148 | newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT); |
149 | newtFormAddHotKey(self->b.form, NEWT_KEY_RIGHT); | ||
149 | 150 | ||
150 | nd = self->curr_hot; | 151 | nd = self->curr_hot; |
151 | if (nd) { | 152 | if (nd) { |
@@ -178,7 +179,7 @@ static int annotate_browser__run(struct annotate_browser *self, | |||
178 | } | 179 | } |
179 | out: | 180 | out: |
180 | ui_browser__hide(&self->b); | 181 | ui_browser__hide(&self->b); |
181 | return 0; | 182 | return es->u.key; |
182 | } | 183 | } |
183 | 184 | ||
184 | int hist_entry__tui_annotate(struct hist_entry *self) | 185 | int hist_entry__tui_annotate(struct hist_entry *self) |