author    Rafael J. Wysocki <rjw@sisk.pl>  2009-07-30 13:38:04 -0400
committer Rafael J. Wysocki <rjw@sisk.pl>  2009-07-30 13:38:04 -0400
commit    2e6713c7662cc5ebc7346b033c404cb2f708fd51
tree      8492ea548fea2d8243e4af4b877906afc4e32783 /drivers
parent    b4093d6235b7e4249616651ee328600ced48a18a
parent    658874f05d040ca96eb5ba9b1c30ce0ff287d762
Merge branch 'master' into for-linus
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/sleep.c | 8
-rw-r--r--  drivers/ata/ahci.c | 4
-rw-r--r--  drivers/ata/ata_piix.c | 3
-rw-r--r--  drivers/ata/libata-core.c | 30
-rw-r--r--  drivers/ata/libata-eh.c | 2
-rw-r--r--  drivers/ata/pata_at91.c | 67
-rw-r--r--  drivers/ata/pata_octeon_cf.c | 3
-rw-r--r--  drivers/ata/pata_pcmcia.c | 1
-rw-r--r--  drivers/ata/sata_mv.c | 2
-rw-r--r--  drivers/ata/sata_sil.c | 2
-rw-r--r--  drivers/base/firmware_class.c | 1
-rw-r--r--  drivers/base/sys.c | 2
-rw-r--r--  drivers/char/n_tty.c | 1
-rw-r--r--  drivers/char/pty.c | 2
-rw-r--r--  drivers/char/sysrq.c | 8
-rw-r--r--  drivers/char/tty_buffer.c | 13
-rw-r--r--  drivers/char/vr41xx_giu.c | 0
-rw-r--r--  drivers/edac/x38_edac.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 3
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 293
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 770
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 78
-rw-r--r--  drivers/gpu/drm/radeon/r300_reg.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 87
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 32
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 59
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 74
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 359
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 70
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 45
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 687
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 188
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 51
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 158
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_share.h | 39
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 209
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 479
-rw-r--r--  drivers/gpu/drm/radeon/rs690r.h | 99
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 798
-rw-r--r--  drivers/gpu/drm/radeon/rv515r.h | 170
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 63
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 69
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 25
-rw-r--r--  drivers/i2c/busses/i2c-omap.c | 42
-rw-r--r--  drivers/i2c/busses/i2c-s3c2410.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-sh_mobile.c | 2
-rw-r--r--  drivers/misc/eeprom/at25.c | 4
-rw-r--r--  drivers/mmc/host/sdhci-of.c | 8
-rw-r--r--  drivers/mmc/host/sdhci.c | 5
-rw-r--r--  drivers/mmc/host/sdhci.h | 1
-rw-r--r--  drivers/rtc/rtc-cmos.c | 23
-rw-r--r--  drivers/serial/atmel_serial.c | 2
-rw-r--r--  drivers/spi/omap2_mcspi.c | 32
-rw-r--r--  drivers/staging/Kconfig | 2
-rw-r--r--  drivers/staging/Makefile | 1
-rw-r--r--  drivers/staging/android/lowmemorykiller.c | 8
-rw-r--r--  drivers/staging/serqt_usb2/serqt_usb2.c | 20
-rw-r--r--  drivers/staging/uc2322/Kconfig | 10
-rw-r--r--  drivers/staging/uc2322/Makefile | 1
-rw-r--r--  drivers/staging/uc2322/TODO | 7
-rw-r--r--  drivers/staging/uc2322/aten2011.c | 2430
-rw-r--r--  drivers/staging/udlfb/udlfb.c | 1
-rw-r--r--  drivers/usb/core/config.c | 48
-rw-r--r--  drivers/usb/host/ehci-orion.c | 2
-rw-r--r--  drivers/usb/host/ohci-omap.c | 1
-rw-r--r--  drivers/usb/host/xhci-dbg.c | 199
-rw-r--r--  drivers/usb/host/xhci-hcd.c | 290
-rw-r--r--  drivers/usb/host/xhci-mem.c | 300
-rw-r--r--  drivers/usb/host/xhci-pci.c | 1
-rw-r--r--  drivers/usb/host/xhci-ring.c | 305
-rw-r--r--  drivers/usb/host/xhci.h | 148
-rw-r--r--  drivers/usb/misc/Kconfig | 2
-rw-r--r--  drivers/usb/musb/musb_core.c | 3
-rw-r--r--  drivers/usb/musb/musb_gadget_ep0.c | 2
-rw-r--r--  drivers/usb/musb/musb_regs.h | 1
-rw-r--r--  drivers/usb/serial/cp210x.c | 3
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio.h | 7
-rw-r--r--  drivers/usb/serial/mos7840.c | 9
-rw-r--r--  drivers/usb/serial/option.c | 133
-rw-r--r--  drivers/usb/storage/transport.c | 2
-rw-r--r--  drivers/video/backlight/jornada720_bl.c | 2
-rw-r--r--  drivers/video/s3c-fb.c | 4
100 files changed, 4768 insertions, 4509 deletions
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 01574a066534..42159a28f433 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -397,6 +397,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
 	},
 	},
 	{
+	.callback = init_set_sci_en_on_resume,
+	.ident = "Hewlett-Packard HP G7000 Notebook PC",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "HP G7000 Notebook PC"),
+		},
+	},
+	{
 	.callback = init_old_suspend_ordering,
 	.ident = "Panasonic CF51-2L",
 	.matches = {
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 336eb1ed73cc..958c1fa41900 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -515,10 +515,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
 	{ PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
 	{ PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
+	{ PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
+	{ PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
 	{ PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
 	{ PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
+	{ PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
 	{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
 	{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
+	{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
 
 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
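
For context: each PCI_VDEVICE() row above is shorthand for a full struct pci_device_id initializer; the PCI core walks the table at probe time and the first matching row supplies the board type through driver_data. A minimal sketch of what one of the added rows expands to (the macro body matches the mainline pci.h of this era; the two-entry table is illustrative):

	/* { PCI_VDEVICE(INTEL, 0x3b22), board_ahci } expands to roughly: */
	static const struct pci_device_id example_tbl[] = {
		/* vendor/device matched exactly; subvendor/subdevice wildcarded;
		 * driver_data carries the board_* index used by ahci_init_one() */
		{ PCI_VENDOR_ID_INTEL, 0x3b22, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		  board_ahci },	/* PCH AHCI */
		{ }		/* all-zero terminator required by the PCI core */
	};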
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index d0a14cf2bd74..56b8a3ff1286 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -596,9 +596,12 @@ static const struct ich_laptop ich_laptop[] = {
 	{ 0x27DF, 0x0005, 0x0280 },	/* ICH7 on Acer 5602WLMi */
 	{ 0x27DF, 0x1025, 0x0102 },	/* ICH7 on Acer 5602aWLMi */
 	{ 0x27DF, 0x1025, 0x0110 },	/* ICH7 on Acer 3682WLMi */
+	{ 0x27DF, 0x1028, 0x02b0 },	/* ICH7 on unknown Dell */
 	{ 0x27DF, 0x1043, 0x1267 },	/* ICH7 on Asus W5F */
 	{ 0x27DF, 0x103C, 0x30A1 },	/* ICH7 on HP Compaq nc2400 */
+	{ 0x27DF, 0x103C, 0x361a },	/* ICH7 on unknown HP */
 	{ 0x27DF, 0x1071, 0xD221 },	/* ICH7 on Hercules EC-900 */
+	{ 0x27DF, 0x152D, 0x0778 },	/* ICH7 on unknown Intel */
 	{ 0x24CA, 0x1025, 0x0061 },	/* ICH4 on ACER Aspire 2023WLMi */
 	{ 0x24CA, 0x1025, 0x003d },	/* ICH4 on ACER TM290 */
 	{ 0x266F, 0x1025, 0x0066 },	/* ICH6 on ACER Aspire 1694WLMi */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 2c6aedaef718..8ac98ff16d7d 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1515,6 +1515,7 @@ static int ata_hpa_resize(struct ata_device *dev)
 
 		return rc;
 	}
+	dev->n_native_sectors = native_sectors;
 
 	/* nothing to do? */
 	if (native_sectors <= sectors || !ata_ignore_hpa) {
@@ -4099,6 +4100,7 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
 			       unsigned int readid_flags)
 {
 	u64 n_sectors = dev->n_sectors;
+	u64 n_native_sectors = dev->n_native_sectors;
 	int rc;
 
 	if (!ata_dev_enabled(dev))
@@ -4128,16 +4130,30 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
 	/* verify n_sectors hasn't changed */
 	if (dev->class == ATA_DEV_ATA && n_sectors &&
 	    dev->n_sectors != n_sectors) {
-		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
+		ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch "
 			       "%llu != %llu\n",
 			       (unsigned long long)n_sectors,
 			       (unsigned long long)dev->n_sectors);
-
-		/* restore original n_sectors */
-		dev->n_sectors = n_sectors;
-
-		rc = -ENODEV;
-		goto fail;
+		/*
+		 * Something could have caused HPA to be unlocked
+		 * involuntarily.  If n_native_sectors hasn't changed
+		 * and the new size matches it, keep the device.
+		 */
+		if (dev->n_native_sectors == n_native_sectors &&
+		    dev->n_sectors > n_sectors &&
+		    dev->n_sectors == n_native_sectors) {
+			ata_dev_printk(dev, KERN_WARNING,
+				       "new n_sectors matches native, probably "
+				       "late HPA unlock, continuing\n");
+			/* keep using the old n_sectors */
+			dev->n_sectors = n_sectors;
+		} else {
+			/* restore original n_[native]_sectors and fail */
+			dev->n_native_sectors = n_native_sectors;
+			dev->n_sectors = n_sectors;
+			rc = -ENODEV;
+			goto fail;
+		}
 	}
 
 	return 0;
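
The revalidation change above distinguishes a genuinely swapped disk from a firmware that silently unlocked the Host Protected Area: if the native capacity is unchanged and the new reported size grew to exactly that native value, the old size is kept. A hedged restatement of that decision with the ata_device fields pulled out as plain parameters (names here are illustrative, not the kernel API):

	#include <errno.h>

	/* Returns 0 if the device may be kept, -ENODEV if revalidation
	 * should fail; mirrors the branch added in ata_dev_revalidate(). */
	static int hpa_revalidate_ok(unsigned long long old_sectors,
				     unsigned long long new_sectors,
				     unsigned long long old_native,
				     unsigned long long new_native)
	{
		if (new_sectors == old_sectors)
			return 0;		/* size unchanged: fine */
		if (new_native == old_native &&
		    new_sectors > old_sectors &&
		    new_sectors == new_native)
			return 0;		/* late HPA unlock: keep old size */
		return -ENODEV;			/* looks like a different device */
	}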
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 1a07c061f644..79711b64054b 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2327,7 +2327,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	struct ata_port *ap = link->ap;
 	struct ata_link *slave = ap->slave_link;
 	struct ata_eh_context *ehc = &link->eh_context;
-	struct ata_eh_context *sehc = &slave->eh_context;
+	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
 	unsigned int *classes = ehc->classes;
 	unsigned int lflags = link->flags;
 	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index 8561a9f195c1..5702affcb325 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -26,9 +26,7 @@
 #include <linux/platform_device.h>
 #include <linux/ata_platform.h>
 
-#include <mach/at91sam9260_matrix.h>
 #include <mach/at91sam9_smc.h>
-#include <mach/at91sam9260.h>
 #include <mach/board.h>
 #include <mach/gpio.h>
 
@@ -44,65 +42,62 @@ struct at91_ide_info {
 	unsigned long mode;
 	unsigned int cs;
 
+	struct clk *mck;
+
 	void __iomem *ide_addr;
 	void __iomem *alt_addr;
 };
 
-const struct ata_timing initial_timing =
+static const struct ata_timing initial_timing =
 	{XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0};
 
-static unsigned int calc_mck_cycles(unsigned int ns, unsigned int mck_hz)
+static unsigned long calc_mck_cycles(unsigned long ns, unsigned long mck_hz)
 {
 	unsigned long mul;
 
 	/*
	 * cycles = x [nsec] * f [Hz] / 10^9 [ns in sec] =
	 *     x * (f / 1_000_000_000) =
	 *     x * ((f * 65536) / 1_000_000_000) / 65536 =
	 *     x * (((f / 10_000) * 65536) / 100_000) / 65536 =
	 */
 
 	mul = (mck_hz / 10000) << 16;
 	mul /= 100000;
 
 	return (ns * mul + 65536) >> 16; /* rounding */
 }
 
 static void set_smc_mode(struct at91_ide_info *info)
 {
 	at91_sys_write(AT91_SMC_MODE(info->cs), info->mode);
 	return;
 }
 
 static void set_smc_timing(struct device *dev,
 		struct at91_ide_info *info, const struct ata_timing *ata)
 {
-	int read_cycle, write_cycle, active, recover;
-	int nrd_setup, nrd_pulse, nrd_recover;
-	int nwe_setup, nwe_pulse;
+	unsigned long read_cycle, write_cycle, active, recover;
+	unsigned long nrd_setup, nrd_pulse, nrd_recover;
+	unsigned long nwe_setup, nwe_pulse;
 
-	int ncs_write_setup, ncs_write_pulse;
-	int ncs_read_setup, ncs_read_pulse;
+	unsigned long ncs_write_setup, ncs_write_pulse;
+	unsigned long ncs_read_setup, ncs_read_pulse;
 
-	unsigned int mck_hz;
-	struct clk *mck;
+	unsigned long mck_hz;
 
 	read_cycle = ata->cyc8b;
 	nrd_setup = ata->setup;
 	nrd_pulse = ata->act8b;
 	nrd_recover = ata->rec8b;
 
-	mck = clk_get(NULL, "mck");
-	BUG_ON(IS_ERR(mck));
-	mck_hz = clk_get_rate(mck);
+	mck_hz = clk_get_rate(info->mck);
 
 	read_cycle = calc_mck_cycles(read_cycle, mck_hz);
 	nrd_setup = calc_mck_cycles(nrd_setup, mck_hz);
 	nrd_pulse = calc_mck_cycles(nrd_pulse, mck_hz);
 	nrd_recover = calc_mck_cycles(nrd_recover, mck_hz);
 
-	clk_put(mck);
-
 	active = nrd_setup + nrd_pulse;
 	recover = read_cycle - active;
 
@@ -121,13 +116,13 @@ static void set_smc_timing(struct device *dev,
 	ncs_write_setup = ncs_read_setup;
 	ncs_write_pulse = ncs_read_pulse;
 
-	dev_dbg(dev, "ATA timings: nrd_setup = %d nrd_pulse = %d nrd_cycle = %d\n",
+	dev_dbg(dev, "ATA timings: nrd_setup = %lu nrd_pulse = %lu nrd_cycle = %lu\n",
 		nrd_setup, nrd_pulse, read_cycle);
-	dev_dbg(dev, "ATA timings: nwe_setup = %d nwe_pulse = %d nwe_cycle = %d\n",
+	dev_dbg(dev, "ATA timings: nwe_setup = %lu nwe_pulse = %lu nwe_cycle = %lu\n",
 		nwe_setup, nwe_pulse, write_cycle);
-	dev_dbg(dev, "ATA timings: ncs_read_setup = %d ncs_read_pulse = %d\n",
+	dev_dbg(dev, "ATA timings: ncs_read_setup = %lu ncs_read_pulse = %lu\n",
 		ncs_read_setup, ncs_read_pulse);
-	dev_dbg(dev, "ATA timings: ncs_write_setup = %d ncs_write_pulse = %d\n",
+	dev_dbg(dev, "ATA timings: ncs_write_setup = %lu ncs_write_pulse = %lu\n",
 		ncs_write_setup, ncs_write_pulse);
 
 	at91_sys_write(AT91_SMC_SETUP(info->cs),
@@ -217,6 +212,7 @@ static int __devinit pata_at91_probe(struct platform_device *pdev)
 	struct resource *mem_res;
 	struct ata_host *host;
 	struct ata_port *ap;
+
 	int irq_flags = 0;
 	int irq = 0;
 	int ret;
@@ -261,6 +257,13 @@ static int __devinit pata_at91_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 
+	info->mck = clk_get(NULL, "mck");
+
+	if (IS_ERR(info->mck)) {
+		dev_err(dev, "failed to get access to mck clock\n");
+		return -ENODEV;
+	}
+
 	info->cs = board->chipselect;
 	info->mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE |
 		     AT91_SMC_EXNWMODE_READY | AT91_SMC_BAT_SELECT |
@@ -304,6 +307,7 @@ err_alt_ioremap:
 	devm_iounmap(dev, info->ide_addr);
 
 err_ide_ioremap:
+	clk_put(info->mck);
 	kfree(info);
 
 	return ret;
@@ -326,6 +330,7 @@ static int __devexit pata_at91_remove(struct platform_device *pdev)
 
 	devm_iounmap(dev, info->ide_addr);
 	devm_iounmap(dev, info->alt_addr);
+	clk_put(info->mck);
 
 	kfree(info);
 	return 0;
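
The calc_mck_cycles() helper above converts nanoseconds to master-clock cycles without a 64-bit division by pre-scaling the clock rate into a 16.16 fixed-point multiplier. A standalone sketch of the same arithmetic with a worked value (the 100 MHz clock and the 290 ns PIO0 timing are illustrative numbers, not taken from a specific board):

	#include <stdio.h>

	/* Same arithmetic as calc_mck_cycles():
	 * cycles = ns * f / 1e9, computed as
	 * ns * (((f / 10000) << 16) / 100000) >> 16, with one extra
	 * cycle of margin added before truncation (the "rounding"). */
	static unsigned long ns_to_cycles(unsigned long ns, unsigned long mck_hz)
	{
		unsigned long mul = (mck_hz / 10000) << 16;	/* 16.16 multiplier */
		mul /= 100000;
		return (ns * mul + 65536) >> 16;
	}

	int main(void)
	{
		/* 290 ns at 100 MHz -> prints 29 (cycles) */
		printf("%lu\n", ns_to_cycles(290, 100000000UL));
		return 0;
	}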
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 8d9343accf3c..abdd19fe990a 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -653,7 +653,8 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
 
 		ap = host->ports[i];
 		ocd = ap->dev->platform_data;
-		if (!ap || (ap->flags & ATA_FLAG_DISABLED))
+
+		if (ap->flags & ATA_FLAG_DISABLED)
 			continue;
 
 		ocd = ap->dev->platform_data;
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index f4d009ed50ac..dc99e26f8e5b 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -411,6 +411,7 @@ static struct pcmcia_device_id pcmcia_devices[] = {
 	PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
 	PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591),
 	PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728),
+	PCMCIA_DEVICE_PROD_ID12("CNF ", "CD-ROM", 0x46d7db81, 0x66536591),
 	PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591),
 	PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4),
 	PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde),
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 23714aefb825..c19417e02208 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -2514,7 +2514,7 @@ static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
 	char *when = "idle";
 
 	ata_ehi_clear_desc(ehi);
-	if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
+	if (ap->flags & ATA_FLAG_DISABLED) {
 		when = "disabled";
 	} else if (edma_was_enabled) {
 		when = "EDMA enabled";
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 030ec079b184..35bd5cc7f285 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -532,7 +532,7 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance)
 		struct ata_port *ap = host->ports[i];
 		u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
 
-		if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
+		if (unlikely(ap->flags & ATA_FLAG_DISABLED))
 			continue;
 
 		/* turn off SATA_IRQ if not supported */
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index f285f441fab9..7376367bcb80 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -180,7 +180,6 @@ static ssize_t firmware_loading_store(struct device *dev,
 			goto err;
 		}
 		/* Pages will be freed by vfree() */
-		fw_priv->pages = NULL;
 		fw_priv->page_array_size = 0;
 		fw_priv->nr_pages = 0;
 		complete(&fw_priv->completion);
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 79a9ae5238ac..0d903909af7e 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -275,9 +275,9 @@ int sysdev_register(struct sys_device *sysdev)
 				drv->add(sysdev);
 		}
 		mutex_unlock(&sysdev_drivers_lock);
+		kobject_uevent(&sysdev->kobj, KOBJ_ADD);
 	}
 
-	kobject_uevent(&sysdev->kobj, KOBJ_ADD);
 	return error;
 }
 
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index ff47907ff1bf..973be2f44195 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -1583,6 +1583,7 @@ static int n_tty_open(struct tty_struct *tty)
 
 static inline int input_available_p(struct tty_struct *tty, int amt)
 {
+	tty_flush_to_ldisc(tty);
 	if (tty->icanon) {
 		if (tty->canon_data)
 			return 1;
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index 3850a68f265a..6e6942c45f5b 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -52,7 +52,6 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
 		return;
 	tty->link->packet = 0;
 	set_bit(TTY_OTHER_CLOSED, &tty->link->flags);
-	tty_flip_buffer_push(tty->link);
 	wake_up_interruptible(&tty->link->read_wait);
 	wake_up_interruptible(&tty->link->write_wait);
 	if (tty->driver->subtype == PTY_TYPE_MASTER) {
@@ -208,7 +207,6 @@ static int pty_open(struct tty_struct *tty, struct file *filp)
 	clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
 	set_bit(TTY_THROTTLED, &tty->flags);
 	retval = 0;
-	tty->low_latency = 1;
 out:
 	return retval;
 }
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 0db35857e4d8..5d7a02f63e1c 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -35,7 +35,6 @@
 #include <linux/spinlock.h>
 #include <linux/vt_kern.h>
 #include <linux/workqueue.h>
-#include <linux/kexec.h>
 #include <linux/hrtimer.h>
 #include <linux/oom.h>
 
@@ -124,9 +123,12 @@ static struct sysrq_key_op sysrq_unraw_op = {
 static void sysrq_handle_crash(int key, struct tty_struct *tty)
 {
 	char *killer = NULL;
+
+	panic_on_oops = 1;	/* force panic */
+	wmb();
 	*killer = 1;
 }
-static struct sysrq_key_op sysrq_crashdump_op = {
+static struct sysrq_key_op sysrq_crash_op = {
 	.handler	= sysrq_handle_crash,
 	.help_msg	= "Crash",
 	.action_msg	= "Trigger a crash",
@@ -401,7 +403,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
 	 */
 	NULL,				/* a */
 	&sysrq_reboot_op,		/* b */
-	&sysrq_crashdump_op,		/* c & ibm_emac driver debug */
+	&sysrq_crash_op,		/* c & ibm_emac driver debug */
 	&sysrq_showlocks_op,		/* d */
 	&sysrq_term_op,			/* e */
 	&sysrq_moom_op,			/* f */
diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c
index 810ee25d66a4..3108991c5c8b 100644
--- a/drivers/char/tty_buffer.c
+++ b/drivers/char/tty_buffer.c
@@ -462,6 +462,19 @@ static void flush_to_ldisc(struct work_struct *work)
 }
 
 /**
+ *	tty_flush_to_ldisc
+ *	@tty: tty to push
+ *
+ *	Push the terminal flip buffers to the line discipline.
+ *
+ *	Must not be called from IRQ context.
+ */
+void tty_flush_to_ldisc(struct tty_struct *tty)
+{
+	flush_to_ldisc(&tty->buf.work.work);
+}
+
+/**
  *	tty_flip_buffer_push	-	terminal
  *	@tty: tty to push
  *
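
The new helper lets a caller synchronously drain data that is sitting in the tty flip buffers but has not yet been pushed by the scheduled work item; the n_tty hunk earlier in this merge is its first user. A simplified sketch of that read/poll-side pattern (the readiness check and function name are illustrative, not the full n_tty logic):

	/* Before deciding whether a reader would block, force queued
	 * flip-buffer data into the line discipline so poll()/read()
	 * don't miss bytes that are buffered but not yet pushed. */
	static int ldisc_input_ready(struct tty_struct *tty)
	{
		tty_flush_to_ldisc(tty);	/* synchronous push; not IRQ-safe */
		return tty->read_cnt > 0;	/* illustrative readiness check */
	}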
diff --git a/drivers/char/vr41xx_giu.c b/drivers/char/vr41xx_giu.c
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/drivers/char/vr41xx_giu.c
+++ /dev/null
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index 2406c2ce2844..d4ec60593176 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -30,7 +30,7 @@
 /* Intel X38 register addresses - device 0 function 0 - DRAM Controller */
 
 #define X38_MCHBAR_LOW	0x48	/* MCH Memory Mapped Register BAR */
-#define X38_MCHBAR_HIGH	0x4b
+#define X38_MCHBAR_HIGH	0x4c
 #define X38_MCHBAR_MASK	0xfffffc000ULL	/* bits 35:14 */
 #define X38_MMR_WINDOW_SIZE	16384
 
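
The fix moves the high half of MCHBAR from 0x4b to 0x4c: MCHBAR is a 64-bit base address read as two aligned 32-bit config dwords, so the high dword sits exactly 4 bytes after the low one, and a read at 0x4b lands off-alignment. A hedged sketch of composing the base (the function is illustrative; pci_read_config_dword is the standard kernel accessor):

	/* Compose the 64-bit MCHBAR from the two config dwords; with the
	 * old 0x4b offset the high read was misaligned by one byte. */
	static u64 x38_read_mchbar(struct pci_dev *pdev)
	{
		u32 lo, hi;

		pci_read_config_dword(pdev, 0x48, &lo);	/* X38_MCHBAR_LOW  */
		pci_read_config_dword(pdev, 0x4c, &hi);	/* X38_MCHBAR_HIGH */
		return (((u64)hi << 32) | lo) & 0xfffffc000ULL;	/* bits 35:14 */
	}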
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 5fae1e074b4b..013d38059943 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -13,7 +13,8 @@ radeon-$(CONFIG_DRM_RADEON_KMS) += radeon_device.o radeon_kms.o \
 	radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \
 	radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
 	radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
-	rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o
+	rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o \
+	radeon_test.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
 
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index c0080cc9bf8d..74d034f77c6b 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -31,6 +31,132 @@
 #include "atom.h"
 #include "atom-bits.h"
 
+static void atombios_overscan_setup(struct drm_crtc *crtc,
+				    struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	SET_CRTC_OVERSCAN_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
+	int a1, a2;
+
+	memset(&args, 0, sizeof(args));
+
+	args.usOverscanRight = 0;
+	args.usOverscanLeft = 0;
+	args.usOverscanBottom = 0;
+	args.usOverscanTop = 0;
+	args.ucCRTC = radeon_crtc->crtc_id;
+
+	switch (radeon_crtc->rmx_type) {
+	case RMX_CENTER:
+		args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
+		args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
+		args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
+		args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+		break;
+	case RMX_ASPECT:
+		a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
+		a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
+
+		if (a1 > a2) {
+			args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
+			args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
+		} else if (a2 > a1) {
+			args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
+			args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
+		}
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+		break;
+	case RMX_FULL:
+	default:
+		args.usOverscanRight = 0;
+		args.usOverscanLeft = 0;
+		args.usOverscanBottom = 0;
+		args.usOverscanTop = 0;
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+		break;
+	}
+}
+
+static void atombios_scaler_setup(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	ENABLE_SCALER_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
+	/* fixme - fill in enc_priv for atom dac */
+	enum radeon_tv_std tv_std = TV_STD_NTSC;
+
+	if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
+		return;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucScaler = radeon_crtc->crtc_id;
+
+	if (radeon_crtc->devices & (ATOM_DEVICE_TV_SUPPORT)) {
+		switch (tv_std) {
+		case TV_STD_NTSC:
+		default:
+			args.ucTVStandard = ATOM_TV_NTSC;
+			break;
+		case TV_STD_PAL:
+			args.ucTVStandard = ATOM_TV_PAL;
+			break;
+		case TV_STD_PAL_M:
+			args.ucTVStandard = ATOM_TV_PALM;
+			break;
+		case TV_STD_PAL_60:
+			args.ucTVStandard = ATOM_TV_PAL60;
+			break;
+		case TV_STD_NTSC_J:
+			args.ucTVStandard = ATOM_TV_NTSCJ;
+			break;
+		case TV_STD_SCART_PAL:
+			args.ucTVStandard = ATOM_TV_PAL; /* ??? */
+			break;
+		case TV_STD_SECAM:
+			args.ucTVStandard = ATOM_TV_SECAM;
+			break;
+		case TV_STD_PAL_CN:
+			args.ucTVStandard = ATOM_TV_PALCN;
+			break;
+		}
+		args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
+	} else if (radeon_crtc->devices & (ATOM_DEVICE_CV_SUPPORT)) {
+		args.ucTVStandard = ATOM_TV_CV;
+		args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
+	} else {
+		switch (radeon_crtc->rmx_type) {
+		case RMX_FULL:
+			args.ucEnable = ATOM_SCALER_EXPANSION;
+			break;
+		case RMX_CENTER:
+			args.ucEnable = ATOM_SCALER_CENTER;
+			break;
+		case RMX_ASPECT:
+			args.ucEnable = ATOM_SCALER_EXPANSION;
+			break;
+		default:
+			if (ASIC_IS_AVIVO(rdev))
+				args.ucEnable = ATOM_SCALER_DISABLE;
+			else
+				args.ucEnable = ATOM_SCALER_CENTER;
+			break;
+		}
+	}
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+	if (radeon_crtc->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)
+	    && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) {
+		atom_rv515_force_tv_scaler(rdev);
+	}
+}
+
 static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -203,6 +329,12 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 	if (ASIC_IS_AVIVO(rdev)) {
 		uint32_t ss_cntl;
 
+		if ((rdev->family == CHIP_RS600) ||
+		    (rdev->family == CHIP_RS690) ||
+		    (rdev->family == CHIP_RS740))
+			pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
+				      RADEON_PLL_PREFER_CLOSEST_LOWER);
+
 		if (ASIC_IS_DCE32(rdev) && mode->clock > 200000)	/* range limits??? */
 			pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
 		else
@@ -321,7 +453,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 	struct drm_gem_object *obj;
 	struct drm_radeon_gem_object *obj_priv;
 	uint64_t fb_location;
-	uint32_t fb_format, fb_pitch_pixels;
+	uint32_t fb_format, fb_pitch_pixels, tiling_flags;
 
 	if (!crtc->fb)
 		return -EINVAL;
@@ -358,7 +490,14 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 		return -EINVAL;
 	}
 
-	/* TODO tiling */
+	radeon_object_get_tiling_flags(obj->driver_private,
+				       &tiling_flags, NULL);
+	if (tiling_flags & RADEON_TILING_MACRO)
+		fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
+
+	if (tiling_flags & RADEON_TILING_MICRO)
+		fb_format |= AVIVO_D1GRPH_TILED;
+
 	if (radeon_crtc->crtc_id == 0)
 		WREG32(AVIVO_D1VGA_CONTROL, 0);
 	else
@@ -509,6 +648,9 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
 		radeon_crtc_set_base(crtc, x, y, old_fb);
 		radeon_legacy_atom_set_surface(crtc);
 	}
+	atombios_overscan_setup(crtc, mode, adjusted_mode);
+	atombios_scaler_setup(crtc);
+	radeon_bandwidth_update(rdev);
 	return 0;
 }
 
@@ -516,6 +658,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
 				     struct drm_display_mode *mode,
 				     struct drm_display_mode *adjusted_mode)
 {
+	if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+		return false;
 	return true;
 }
 
@@ -548,148 +692,3 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
 		AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
 	drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
 }
-
-void radeon_init_disp_bw_avivo(struct drm_device *dev,
-			       struct drm_display_mode *mode1,
-			       uint32_t pixel_bytes1,
-			       struct drm_display_mode *mode2,
-			       uint32_t pixel_bytes2)
-{
-	struct radeon_device *rdev = dev->dev_private;
-	fixed20_12 min_mem_eff;
-	fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff;
-	fixed20_12 sclk_ff, mclk_ff;
-	uint32_t dc_lb_memory_split, temp;
-
-	min_mem_eff.full = rfixed_const_8(0);
-	if (rdev->disp_priority == 2) {
-		uint32_t mc_init_misc_lat_timer = 0;
-		if (rdev->family == CHIP_RV515)
-			mc_init_misc_lat_timer =
-			    RREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER);
-		else if (rdev->family == CHIP_RS690)
-			mc_init_misc_lat_timer =
-			    RREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER);
-
-		mc_init_misc_lat_timer &=
-		    ~(R300_MC_DISP1R_INIT_LAT_MASK <<
-		      R300_MC_DISP1R_INIT_LAT_SHIFT);
-		mc_init_misc_lat_timer &=
-		    ~(R300_MC_DISP0R_INIT_LAT_MASK <<
-		      R300_MC_DISP0R_INIT_LAT_SHIFT);
-
-		if (mode2)
-			mc_init_misc_lat_timer |=
-			    (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
-		if (mode1)
-			mc_init_misc_lat_timer |=
-			    (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
-
-		if (rdev->family == CHIP_RV515)
-			WREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER,
-				  mc_init_misc_lat_timer);
-		else if (rdev->family == CHIP_RS690)
-			WREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER,
				  mc_init_misc_lat_timer);
-	}
-
-	/*
-	 * determine is there is enough bw for current mode
-	 */
-	temp_ff.full = rfixed_const(100);
-	mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
-	mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
-	sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
-	sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
-
-	temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
-	temp_ff.full = rfixed_const(temp);
-	mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
-	mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
-
-	pix_clk.full = 0;
-	pix_clk2.full = 0;
-	peak_disp_bw.full = 0;
-	if (mode1) {
-		temp_ff.full = rfixed_const(1000);
-		pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
-		pix_clk.full = rfixed_div(pix_clk, temp_ff);
-		temp_ff.full = rfixed_const(pixel_bytes1);
-		peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
-	}
-	if (mode2) {
-		temp_ff.full = rfixed_const(1000);
-		pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
-		pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
-		temp_ff.full = rfixed_const(pixel_bytes2);
-		peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
-	}
-
-	if (peak_disp_bw.full >= mem_bw.full) {
-		DRM_ERROR
-		    ("You may not have enough display bandwidth for current mode\n"
-		     "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
-		printk("peak disp bw %d, mem_bw %d\n",
-		       rfixed_trunc(peak_disp_bw), rfixed_trunc(mem_bw));
-	}
-
-	/*
-	 * Line Buffer Setup
-	 * There is a single line buffer shared by both display controllers.
-	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between the display
-	 * controllers. The paritioning can either be done manually or via one of four
-	 * preset allocations specified in bits 1:0:
-	 * 0 - line buffer is divided in half and shared between each display controller
-	 * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
-	 * 2 - D1 gets the whole buffer
-	 * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
-	 * Setting bit 2 of DC_LB_MEMORY_SPLIT controls switches to manual allocation mode.
-	 * In manual allocation mode, D1 always starts at 0, D1 end/2 is specified in bits
-	 * 14:4; D2 allocation follows D1.
-	 */
-
-	/* is auto or manual better ? */
-	dc_lb_memory_split =
-	    RREG32(AVIVO_DC_LB_MEMORY_SPLIT) & ~AVIVO_DC_LB_MEMORY_SPLIT_MASK;
-	dc_lb_memory_split &= ~AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE;
-#if 1
-	/* auto */
-	if (mode1 && mode2) {
-		if (mode1->hdisplay > mode2->hdisplay) {
-			if (mode1->hdisplay > 2560)
-				dc_lb_memory_split |=
-				    AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
-			else
-				dc_lb_memory_split |=
-				    AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
-		} else if (mode2->hdisplay > mode1->hdisplay) {
-			if (mode2->hdisplay > 2560)
-				dc_lb_memory_split |=
-				    AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
-			else
-				dc_lb_memory_split |=
-				    AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
-		} else
-			dc_lb_memory_split |=
-			    AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
-	} else if (mode1) {
-		dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY;
-	} else if (mode2) {
-		dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
-	}
-#else
-	/* manual */
-	dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE;
-	dc_lb_memory_split &=
-	    ~(AVIVO_DC_LB_DISP1_END_ADR_MASK <<
-	      AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
-	if (mode1) {
-		dc_lb_memory_split |=
-		    ((((mode1->hdisplay / 2) + 64) & AVIVO_DC_LB_DISP1_END_ADR_MASK)
-		     << AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
-	} else if (mode2) {
-		dc_lb_memory_split |= (0 << AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
-	}
-#endif
-	WREG32(AVIVO_DC_LB_MEMORY_SPLIT, dc_lb_memory_split);
-}
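
A worked example of the RMX_ASPECT arithmetic in the atombios_overscan_setup() added above (numbers chosen for illustration): scaling a 1280x1024 mode onto a 1920x1080 panel gives a1 = 1024 * 1920 = 1966080 and a2 = 1080 * 1280 = 1382400. Since a1 > a2, the image is pillarboxed: usOverscanLeft = usOverscanRight = (1920 - 1382400 / 1024) / 2 = (1920 - 1350) / 2 = 285 pixels on each side. Note that the a2 > a1 branch, as committed, computes a vertical border but stores it in usOverscanLeft/Right rather than Top/Bottom; the diff is reproduced here as merged.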
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c550932a108f..05a44896dffb 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -110,7 +110,7 @@ int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
 	if (i < 0 || i > rdev->gart.num_gpu_pages) {
 		return -EINVAL;
 	}
-	rdev->gart.table.ram.ptr[i] = cpu_to_le32((uint32_t)addr);
+	rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr));
 	return 0;
 }
 
@@ -173,8 +173,12 @@ void r100_mc_setup(struct radeon_device *rdev)
 		DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
 	}
 	/* Write VRAM size in case we are limiting it */
-	WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
-	tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
+	WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+	/* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM,
	 * if the aperture is 64MB but we have 32MB VRAM
	 * we report only 32MB VRAM but we have to set MC_FB_LOCATION
	 * to 64MB, otherwise the gpu accidentally dies */
+	tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
 	tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
 	tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
 	WREG32(RADEON_MC_FB_LOCATION, tmp);
@@ -215,7 +219,6 @@ int r100_mc_init(struct radeon_device *rdev)
 	r100_pci_gart_disable(rdev);
 
 	/* Setup GPU memory space */
-	rdev->mc.vram_location = 0xFFFFFFFFUL;
 	rdev->mc.gtt_location = 0xFFFFFFFFUL;
 	if (rdev->flags & RADEON_IS_AGP) {
 		r = radeon_agp_init(rdev);
@@ -753,6 +756,102 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
 }
 
 /**
+ * r100_cs_packet_next_vline() - parse userspace VLINE packet
+ * @parser:	parser structure holding parsing context.
+ *
+ * Userspace sends a special sequence for VLINE waits.
+ * PACKET0 - VLINE_START_END + value
+ * PACKET0 - WAIT_UNTIL +_value
+ * RELOC (P3) - crtc_id in reloc.
+ *
+ * This function parses this and relocates the VLINE START END
+ * and WAIT UNTIL packets to the correct crtc.
+ * It also detects a switched off crtc and nulls out the
+ * wait in that case.
+ */
+int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_chunk *ib_chunk;
+	struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	struct radeon_cs_packet p3reloc, waitreloc;
+	int crtc_id;
+	int r;
+	uint32_t header, h_idx, reg;
+
+	ib_chunk = &p->chunks[p->chunk_ib_idx];
+
+	/* parse the wait until */
+	r = r100_cs_packet_parse(p, &waitreloc, p->idx);
+	if (r)
+		return r;
+
+	/* check its a wait until and only 1 count */
+	if (waitreloc.reg != RADEON_WAIT_UNTIL ||
+	    waitreloc.count != 0) {
+		DRM_ERROR("vline wait had illegal wait until segment\n");
+		r = -EINVAL;
+		return r;
+	}
+
+	if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) {
+		DRM_ERROR("vline wait had illegal wait until\n");
+		r = -EINVAL;
+		return r;
+	}
+
+	/* jump over the NOP */
+	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
+	if (r)
+		return r;
+
+	h_idx = p->idx - 2;
+	p->idx += waitreloc.count;
+	p->idx += p3reloc.count;
+
+	header = ib_chunk->kdata[h_idx];
+	crtc_id = ib_chunk->kdata[h_idx + 5];
+	reg = ib_chunk->kdata[h_idx] >> 2;
+	mutex_lock(&p->rdev->ddev->mode_config.mutex);
+	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
+	if (!obj) {
+		DRM_ERROR("cannot find crtc %d\n", crtc_id);
+		r = -EINVAL;
+		goto out;
+	}
+	crtc = obj_to_crtc(obj);
+	radeon_crtc = to_radeon_crtc(crtc);
+	crtc_id = radeon_crtc->crtc_id;
+
+	if (!crtc->enabled) {
+		/* if the CRTC isn't enabled - we need to nop out the wait until */
+		ib_chunk->kdata[h_idx + 2] = PACKET2(0);
+		ib_chunk->kdata[h_idx + 3] = PACKET2(0);
+	} else if (crtc_id == 1) {
+		switch (reg) {
+		case AVIVO_D1MODE_VLINE_START_END:
+			header &= R300_CP_PACKET0_REG_MASK;
+			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
+			break;
+		case RADEON_CRTC_GUI_TRIG_VLINE:
+			header &= R300_CP_PACKET0_REG_MASK;
+			header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
+			break;
+		default:
+			DRM_ERROR("unknown crtc reloc\n");
+			r = -EINVAL;
+			goto out;
+		}
+		ib_chunk->kdata[h_idx] = header;
+		ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
+	}
+out:
+	mutex_unlock(&p->rdev->ddev->mode_config.mutex);
+	return r;
+}
+
+/**
  * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
 * @parser:	parser structure holding parsing context.
 * @data:	pointer to relocation data
@@ -814,6 +913,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 	unsigned idx;
 	bool onereg;
 	int r;
+	u32 tile_flags = 0;
 
 	ib = p->ib->ptr;
 	ib_chunk = &p->chunks[p->chunk_ib_idx];
@@ -825,6 +925,15 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 	}
 	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
 		switch (reg) {
+		case RADEON_CRTC_GUI_TRIG_VLINE:
+			r = r100_cs_packet_parse_vline(p);
+			if (r) {
+				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					  idx, reg);
+				r100_cs_dump_packet(p, pkt);
+				return r;
+			}
+			break;
 		/* FIXME: only allow PACKET3 blit? easier to check for out of
 		 * range access */
 		case RADEON_DST_PITCH_OFFSET:
@@ -838,7 +947,20 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			}
 			tmp = ib_chunk->kdata[idx] & 0x003fffff;
 			tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
-			ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;
+
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= RADEON_DST_TILE_MACRO;
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+				if (reg == RADEON_SRC_PITCH_OFFSET) {
+					DRM_ERROR("Cannot src blit from microtiled surface\n");
+					r100_cs_dump_packet(p, pkt);
+					return -EINVAL;
+				}
+				tile_flags |= RADEON_DST_TILE_MICRO;
+			}
+
+			tmp |= tile_flags;
+			ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp;
 			break;
 		case RADEON_RB3D_DEPTHOFFSET:
 		case RADEON_RB3D_COLOROFFSET:
@@ -869,6 +991,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		case R300_TX_OFFSET_0+52:
 		case R300_TX_OFFSET_0+56:
 		case R300_TX_OFFSET_0+60:
+			/* rn50 has no 3D engine so fail on any 3d setup */
+			if (ASIC_IS_RN50(p->rdev)) {
+				DRM_ERROR("attempt to use RN50 3D engine failed\n");
+				return -EINVAL;
+			}
 			r = r100_cs_packet_next_reloc(p, &reloc);
 			if (r) {
 				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
@@ -878,6 +1005,25 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			}
 			ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
 			break;
+		case R300_RB3D_COLORPITCH0:
+		case RADEON_RB3D_COLORPITCH:
+			r = r100_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					  idx, reg);
+				r100_cs_dump_packet(p, pkt);
+				return r;
+			}
+
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= RADEON_COLOR_TILE_ENABLE;
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
+
+			tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
+			tmp |= tile_flags;
+			ib[idx] = tmp;
+			break;
 		default:
 			/* FIXME: we don't want to allow anyothers packet */
 			break;
@@ -1256,29 +1402,100 @@ static void r100_vram_get_type(struct radeon_device *rdev)
 	}
 }
 
-void r100_vram_info(struct radeon_device *rdev)
+static u32 r100_get_accessible_vram(struct radeon_device *rdev)
 {
-	r100_vram_get_type(rdev);
+	u32 aper_size;
+	u8 byte;
+
+	aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
+
+	/* Set HDP_APER_CNTL only on cards that are known not to be broken,
	 * that is has the 2nd generation multifunction PCI interface
	 */
+	if (rdev->family == CHIP_RV280 ||
+	    rdev->family >= CHIP_RV350) {
+		WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
+			 ~RADEON_HDP_APER_CNTL);
+		DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
+		return aper_size * 2;
+	}
+
+	/* Older cards have all sorts of funny issues to deal with. First
	 * check if it's a multifunction card by reading the PCI config
	 * header type... Limit those to one aperture size
	 */
+	pci_read_config_byte(rdev->pdev, 0xe, &byte);
+	if (byte & 0x80) {
+		DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
+		DRM_INFO("Limiting VRAM to one aperture\n");
+		return aper_size;
+	}
+
+	/* Single function older card. We read HDP_APER_CNTL to see how the BIOS
	 * have set it up. We don't write this as it's broken on some ASICs but
	 * we expect the BIOS to have done the right thing (might be too optimistic...)
	 */
+	if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
+		return aper_size * 2;
+	return aper_size;
+}
+
+void r100_vram_init_sizes(struct radeon_device *rdev)
+{
+	u64 config_aper_size;
+	u32 accessible;
+
+	config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
 
 	if (rdev->flags & RADEON_IS_IGP) {
 		uint32_t tom;
 		/* read NB_TOM to get the amount of ram stolen for the GPU */
 		tom = RREG32(RADEON_NB_TOM);
-		rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
-		WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
+		rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
+		/* for IGPs we need to keep VRAM where it was put by the BIOS */
+		rdev->mc.vram_location = (tom & 0xffff) << 16;
+		WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+		rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
 	} else {
-		rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+		rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
 		/* Some production boards of m6 will report 0
 		 * if it's 8 MB
 		 */
-		if (rdev->mc.vram_size == 0) {
-			rdev->mc.vram_size = 8192 * 1024;
-			WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
+		if (rdev->mc.real_vram_size == 0) {
+			rdev->mc.real_vram_size = 8192 * 1024;
+			WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
 		}
+		/* let driver place VRAM */
+		rdev->mc.vram_location = 0xFFFFFFFFUL;
+		/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
		 * Novell bug 204882 + along with lots of ubuntu ones */
+		if (config_aper_size > rdev->mc.real_vram_size)
+			rdev->mc.mc_vram_size = config_aper_size;
+		else
+			rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
 	}
 
+	/* work out accessible VRAM */
+	accessible = r100_get_accessible_vram(rdev);
+
 	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
 	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+
+	if (accessible > rdev->mc.aper_size)
+		accessible = rdev->mc.aper_size;
+
+	if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+		rdev->mc.mc_vram_size = rdev->mc.aper_size;
+
+	if (rdev->mc.real_vram_size > rdev->mc.aper_size)
+		rdev->mc.real_vram_size = rdev->mc.aper_size;
+}
+
+void r100_vram_info(struct radeon_device *rdev)
+{
+	r100_vram_get_type(rdev);
+
+	r100_vram_init_sizes(rdev);
 }
 
 
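
The sizing code above distinguishes three quantities: real_vram_size (what the chip reports), mc_vram_size (what MC_FB_LOCATION must cover, which may be the larger aperture per the Novell 204882 workaround), and the CPU-accessible window, all of which end up clamped to the PCI aperture. A condensed, hedged restatement of those final clamps with the radeon_device fields as plain parameters (function name illustrative):

	/* Nothing may exceed the PCI BAR 0 aperture. */
	static void clamp_to_aperture(u32 *accessible, u64 *mc_vram_size,
				      u32 *real_vram_size, u32 aper_size)
	{
		if (*accessible > aper_size)
			*accessible = aper_size;	/* CPU-visible window */
		if (*mc_vram_size > aper_size)
			*mc_vram_size = aper_size;	/* MC address range */
		if (*real_vram_size > aper_size)
			*real_vram_size = aper_size;	/* reported VRAM */
	}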
@@ -1533,3 +1750,530 @@ int r100_debugfs_mc_info_init(struct radeon_device *rdev)
 	return 0;
 #endif
 }
+
+int r100_set_surface_reg(struct radeon_device *rdev, int reg,
+			 uint32_t tiling_flags, uint32_t pitch,
+			 uint32_t offset, uint32_t obj_size)
+{
+	int surf_index = reg * 16;
+	int flags = 0;
+
+	/* r100/r200 divide by 16 */
+	if (rdev->family < CHIP_R300)
+		flags = pitch / 16;
+	else
+		flags = pitch / 8;
+
+	if (rdev->family <= CHIP_RS200) {
+		if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
+				 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
+			flags |= RADEON_SURF_TILE_COLOR_BOTH;
+		if (tiling_flags & RADEON_TILING_MACRO)
+			flags |= RADEON_SURF_TILE_COLOR_MACRO;
+	} else if (rdev->family <= CHIP_RV280) {
+		if (tiling_flags & (RADEON_TILING_MACRO))
+			flags |= R200_SURF_TILE_COLOR_MACRO;
+		if (tiling_flags & RADEON_TILING_MICRO)
+			flags |= R200_SURF_TILE_COLOR_MICRO;
+	} else {
+		if (tiling_flags & RADEON_TILING_MACRO)
+			flags |= R300_SURF_TILE_MACRO;
+		if (tiling_flags & RADEON_TILING_MICRO)
+			flags |= R300_SURF_TILE_MICRO;
+	}
+
+	DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
+	WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
+	WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
+	WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
+	return 0;
+}
+
+void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
+{
+	int surf_index = reg * 16;
+	WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
+}
+
+void r100_bandwidth_update(struct radeon_device *rdev)
+{
+	fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
+	fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
+	fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
+	uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
+	fixed20_12 memtcas_ff[8] = {
+		fixed_init(1),
+		fixed_init(2),
+		fixed_init(3),
+		fixed_init(0),
+		fixed_init_half(1),
+		fixed_init_half(2),
+		fixed_init(0),
+	};
+	fixed20_12 memtcas_rs480_ff[8] = {
+		fixed_init(0),
+		fixed_init(1),
+		fixed_init(2),
+		fixed_init(3),
+		fixed_init(0),
+		fixed_init_half(1),
+		fixed_init_half(2),
+		fixed_init_half(3),
+	};
+	fixed20_12 memtcas2_ff[8] = {
+		fixed_init(0),
+		fixed_init(1),
+		fixed_init(2),
+		fixed_init(3),
+		fixed_init(4),
+		fixed_init(5),
+		fixed_init(6),
+		fixed_init(7),
+	};
+	fixed20_12 memtrbs[8] = {
+		fixed_init(1),
+		fixed_init_half(1),
+		fixed_init(2),
+		fixed_init_half(2),
+		fixed_init(3),
+		fixed_init_half(3),
+		fixed_init(4),
+		fixed_init_half(4)
+	};
+	fixed20_12 memtrbs_r4xx[8] = {
+		fixed_init(4),
+		fixed_init(5),
+		fixed_init(6),
+		fixed_init(7),
+		fixed_init(8),
+		fixed_init(9),
+		fixed_init(10),
+		fixed_init(11)
+	};
+	fixed20_12 min_mem_eff;
+	fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
+	fixed20_12 cur_latency_mclk, cur_latency_sclk;
+	fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
+		disp_drain_rate2, read_return_rate;
+	fixed20_12 time_disp1_drop_priority;
+	int c;
+	int cur_size = 16;       /* in octawords */
+	int critical_point = 0, critical_point2;
+/*	uint32_t read_return_rate, time_disp1_drop_priority; */
+	int stop_req, max_stop_req;
+	struct drm_display_mode *mode1 = NULL;
+	struct drm_display_mode *mode2 = NULL;
+	uint32_t pixel_bytes1 = 0;
+	uint32_t pixel_bytes2 = 0;
+
+	if (rdev->mode_info.crtcs[0]->base.enabled) {
+		mode1 = &rdev->mode_info.crtcs[0]->base.mode;
+		pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
+	}
+	if (rdev->mode_info.crtcs[1]->base.enabled) {
+		mode2 = &rdev->mode_info.crtcs[1]->base.mode;
+		pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
+	}
+
+	min_mem_eff.full = rfixed_const_8(0);
+	/* get modes */
+	if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
+		uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
+		mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
+		mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
+		/* check crtc enables */
+		if (mode2)
+			mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
+		if (mode1)
+			mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
+		WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
+	}
+
+	/*
+	 * determine if there is enough bw for current mode
+	 */
+	mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
+	temp_ff.full = rfixed_const(100);
+	mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
+	sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
+	sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
+
+	temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
+	temp_ff.full = rfixed_const(temp);
+	mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
+
+	pix_clk.full = 0;
+	pix_clk2.full = 0;
+	peak_disp_bw.full = 0;
+	if (mode1) {
+		temp_ff.full = rfixed_const(1000);
+		pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
+		pix_clk.full = rfixed_div(pix_clk, temp_ff);
+		temp_ff.full = rfixed_const(pixel_bytes1);
+		peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
+	}
+	if (mode2) {
+		temp_ff.full = rfixed_const(1000);
+		pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
+		pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
+		temp_ff.full = rfixed_const(pixel_bytes2);
+		peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
+	}
+
+	mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
+	if (peak_disp_bw.full >= mem_bw.full) {
+		DRM_ERROR("You may not have enough display bandwidth for current mode\n"
+			  "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
+	}
+
+	/* Get values from the EXT_MEM_CNTL register...converting its contents. */
+	temp = RREG32(RADEON_MEM_TIMING_CNTL);
+	if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
+		mem_trcd = ((temp >> 2) & 0x3) + 1;
+		mem_trp  = ((temp & 0x3)) + 1;
+		mem_tras = ((temp & 0x70) >> 4) + 1;
+	} else if (rdev->family == CHIP_R300 ||
+		   rdev->family == CHIP_R350) { /* r300, r350 */
+		mem_trcd = (temp & 0x7) + 1;
+		mem_trp = ((temp >> 8) & 0x7) + 1;
+		mem_tras = ((temp >> 11) & 0xf) + 4;
+	} else if (rdev->family == CHIP_RV350 ||
+		   rdev->family <= CHIP_RV380) {
+		/* rv3x0 */
+		mem_trcd = (temp & 0x7) + 3;
+		mem_trp = ((temp >> 8) & 0x7) + 3;
+		mem_tras = ((temp >> 11) & 0xf) + 6;
+	} else if (rdev->family == CHIP_R420 ||
+		   rdev->family == CHIP_R423 ||
+		   rdev->family == CHIP_RV410) {
+		/* r4xx */
+		mem_trcd = (temp & 0xf) + 3;
+		if (mem_trcd > 15)
+			mem_trcd = 15;
+		mem_trp = ((temp >> 8) & 0xf) + 3;
+		if (mem_trp > 15)
+			mem_trp = 15;
+		mem_tras = ((temp >> 12) & 0x1f) + 6;
+		if (mem_tras > 31)
+			mem_tras = 31;
+	} else { /* RV200, R200 */
+		mem_trcd = (temp & 0x7) + 1;
+		mem_trp = ((temp >> 8) & 0x7) + 1;
+		mem_tras = ((temp >> 12) & 0xf) + 4;
+	}
+	/* convert to FF */
+	trcd_ff.full = rfixed_const(mem_trcd);
+	trp_ff.full = rfixed_const(mem_trp);
+	tras_ff.full = rfixed_const(mem_tras);
+
+	/* Get values from the MEM_SDRAM_MODE_REG register...converting its */
+	temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
+	data = (temp & (7 << 20)) >> 20;
+	if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
+		if (rdev->family == CHIP_RS480) /* don't think rs400 */
+			tcas_ff = memtcas_rs480_ff[data];
+		else
+			tcas_ff = memtcas_ff[data];
+	} else
+		tcas_ff = memtcas2_ff[data];
+
+	if (rdev->family == CHIP_RS400 ||
+	    rdev->family == CHIP_RS480) {
+		/* extra cas latency stored in bits 23-25 0-4 clocks */
+		data = (temp >> 23) & 0x7;
+		if (data < 5)
+			tcas_ff.full += rfixed_const(data);
+	}
+
+	if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
+		/* on the R300, Tcas is included in Trbs.
+		 */
+		temp = RREG32(RADEON_MEM_CNTL);
+		data = (R300_MEM_NUM_CHANNELS_MASK & temp);
+		if (data == 1) {
+			if (R300_MEM_USE_CD_CH_ONLY & temp) {
+				temp = RREG32(R300_MC_IND_INDEX);
+				temp &= ~R300_MC_IND_ADDR_MASK;
+				temp |= R300_MC_READ_CNTL_CD_mcind;
+				WREG32(R300_MC_IND_INDEX, temp);
+				temp = RREG32(R300_MC_IND_DATA);
+				data = (R300_MEM_RBS_POSITION_C_MASK & temp);
+			} else {
+				temp = RREG32(R300_MC_READ_CNTL_AB);
+				data = (R300_MEM_RBS_POSITION_A_MASK & temp);
+			}
+		} else {
+			temp = RREG32(R300_MC_READ_CNTL_AB);
+			data = (R300_MEM_RBS_POSITION_A_MASK & temp);
+		}
+		if (rdev->family == CHIP_RV410 ||
+		    rdev->family == CHIP_R420 ||
+		    rdev->family == CHIP_R423)
+			trbs_ff = memtrbs_r4xx[data];
+		else
+			trbs_ff = memtrbs[data];
+		tcas_ff.full += trbs_ff.full;
+	}
+
+	sclk_eff_ff.full = sclk_ff.full;
+
+	if (rdev->flags & RADEON_IS_AGP) {
+		fixed20_12 agpmode_ff;
+		agpmode_ff.full = rfixed_const(radeon_agpmode);
+		temp_ff.full = rfixed_const_666(16);
+		sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
+	}
+	/* TODO PCIE lanes may affect this - agpmode == 16?? */
+
+	if (ASIC_IS_R300(rdev)) {
+		sclk_delay_ff.full = rfixed_const(250);
+	} else {
+		if ((rdev->family == CHIP_RV100) ||
+		    rdev->flags & RADEON_IS_IGP) {
+			if (rdev->mc.vram_is_ddr)
+				sclk_delay_ff.full = rfixed_const(41);
+			else
+				sclk_delay_ff.full = rfixed_const(33);
+		} else {
+			if (rdev->mc.vram_width == 128)
+				sclk_delay_ff.full = rfixed_const(57);
+			else
+				sclk_delay_ff.full = rfixed_const(41);
+		}
+	}
+
+	mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
+
+	if (rdev->mc.vram_is_ddr) {
+		if (rdev->mc.vram_width == 32) {
+			k1.full = rfixed_const(40);
+			c = 3;
+		} else {
+			k1.full = rfixed_const(20);
+			c = 1;
+		}
+	} else {
+		k1.full = rfixed_const(40);
+		c = 3;
+	}
+
+	temp_ff.full = rfixed_const(2);
+	mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
+	temp_ff.full = rfixed_const(c);
+	mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
+	temp_ff.full = rfixed_const(4);
+	mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
+	mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
+	mc_latency_mclk.full += k1.full;
+
+	mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
+	mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
+
+	/*
+	  HW cursor time assuming worst case of full size colour cursor.
+	*/
+	temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
+	temp_ff.full += trcd_ff.full;
+	if (temp_ff.full < tras_ff.full)
+		temp_ff.full = tras_ff.full;
+	cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
+
+	temp_ff.full = rfixed_const(cur_size);
+	cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
+	/*
+	  Find the total latency for the display data.
+	*/
+	disp_latency_overhead.full = rfixed_const(80);
+	disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
+	mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
+	mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
+
+	if (mc_latency_mclk.full > mc_latency_sclk.full)
+		disp_latency.full = mc_latency_mclk.full;
+	else
+		disp_latency.full = mc_latency_sclk.full;
+
+	/* setup Max GRPH_STOP_REQ default value */
+	if (ASIC_IS_RV100(rdev))
+		max_stop_req = 0x5c;
+	else
+		max_stop_req = 0x7c;
+
+	if (mode1) {
+		/* CRTC1
+		   Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
+		   GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
+		*/
+		stop_req = mode1->hdisplay * pixel_bytes1 / 16;
+
+		if (stop_req > max_stop_req)
+			stop_req = max_stop_req;
+
+		/*
+		  Find the drain rate of the display buffer.
+		*/
+		temp_ff.full = rfixed_const((16/pixel_bytes1));
+		disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
+
+		/*
+		  Find the critical point of the display buffer.
+		*/
+		crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
+		crit_point_ff.full += rfixed_const_half(0);
+
+		critical_point = rfixed_trunc(crit_point_ff);
+
+		if (rdev->disp_priority == 2) {
+			critical_point = 0;
+		}
+
+		/*
+		  The critical point should never be above max_stop_req-4.  Setting
+		  GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
+		*/
+		if (max_stop_req - critical_point < 4)
+			critical_point = 0;
+
+		if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
+			/* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/
+			critical_point = 0x10;
+		}
+
+		temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
+		temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
+		temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
+		temp &= ~(RADEON_GRPH_START_REQ_MASK);
+		if ((rdev->family == CHIP_R350) &&
+		    (stop_req > 0x15)) {
+			stop_req -= 0x10;
+		}
+		temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
+		temp |= RADEON_GRPH_BUFFER_SIZE;
+		temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
+			  RADEON_GRPH_CRITICAL_AT_SOF |
+			  RADEON_GRPH_STOP_CNTL);
+		/*
+		  Write the result into the register.
+		*/
+		WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
+						 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
+
+#if 0
+		if ((rdev->family == CHIP_RS400) ||
+		    (rdev->family == CHIP_RS480)) {
+			/* attempt to program RS400 disp regs correctly ??? */
+			temp = RREG32(RS400_DISP1_REG_CNTL);
+			temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
+				  RS400_DISP1_STOP_REQ_LEVEL_MASK);
+			WREG32(RS400_DISP1_REQ_CNTL1, (temp |
+						       (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
+						       (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
+			temp = RREG32(RS400_DMIF_MEM_CNTL1);
+			temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
+				  RS400_DISP1_CRITICAL_POINT_STOP_MASK);
+			WREG32(RS400_DMIF_MEM_CNTL1, (temp |
+						      (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
+						      (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
+		}
+#endif
+
+		DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n",
+			  /* (unsigned int)info->SavedReg->grph_buffer_cntl, */
+			  (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
+	}
+
+	if (mode2) {
+		u32 grph2_cntl;
+		stop_req = mode2->hdisplay * pixel_bytes2 / 16;
+
+		if (stop_req > max_stop_req)
+			stop_req = max_stop_req;
+
+		/*
+		  Find the drain rate of the display buffer.
+		*/
+		temp_ff.full = rfixed_const((16/pixel_bytes2));
+		disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
+
+		grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
+		grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
+		grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
+		grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
+		if ((rdev->family == CHIP_R350) &&
+		    (stop_req > 0x15)) {
+			stop_req -= 0x10;
+		}
+		grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
+		grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
+		grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
+				RADEON_GRPH_CRITICAL_AT_SOF |
+				RADEON_GRPH_STOP_CNTL);
+
+		if ((rdev->family == CHIP_RS100) ||
+		    (rdev->family == CHIP_RS200))
+			critical_point2 = 0;
+		else {
+			temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
+			temp_ff.full = rfixed_const(temp);
+			temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
+			if (sclk_ff.full < temp_ff.full)
+				temp_ff.full = sclk_ff.full;
+
+			read_return_rate.full = temp_ff.full;
+
+			if (mode1) {
+				temp_ff.full = read_return_rate.full - disp_drain_rate.full;
+				time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
+			} else {
+				time_disp1_drop_priority.full = 0;
+			}
+			crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
+			crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
+			crit_point_ff.full += rfixed_const_half(0);
+
+			critical_point2 = rfixed_trunc(crit_point_ff);
+
+			if (rdev->disp_priority == 2) {
+				critical_point2 = 0;
+			}
+
+			if (max_stop_req - critical_point2 < 4)
+				critical_point2 = 0;
+
+		}
+
+		if (critical_point2 == 0 && rdev->family == CHIP_R300) {
+			/* some R300 cards have problem with this set to 0 */
+			critical_point2 = 0x10;
+		}
+
+		WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
+						  (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
+
+		if ((rdev->family == CHIP_RS400) ||
+		    (rdev->family == CHIP_RS480)) {
+#if 0
+			/* attempt to program RS400 disp2 regs correctly ??? */
+			temp = RREG32(RS400_DISP2_REQ_CNTL1);
+			temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
+				  RS400_DISP2_STOP_REQ_LEVEL_MASK);
+			WREG32(RS400_DISP2_REQ_CNTL1, (temp |
+						       (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
+						       (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
+			temp = RREG32(RS400_DISP2_REQ_CNTL2);
+			temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
+				  RS400_DISP2_CRITICAL_POINT_STOP_MASK);
+			WREG32(RS400_DISP2_REQ_CNTL2, (temp |
+						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
+						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
+#endif
+			WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
+			WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
+			WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
+			WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
+		}
+
+		DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n",
+			  (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
+	}
+}
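
All of the latency and drain-rate math in r100_bandwidth_update() is done in 20.12 fixed point. The helpers it leans on (rfixed_const, rfixed_mul, rfixed_div, rfixed_trunc) are defined elsewhere, not in this diff; the sketch below is one plausible user-space reading of them under that 20.12 assumption, not the kernel's exact implementation:

	#include <stdint.h>
	#include <stdio.h>

	typedef struct { uint32_t full; } fixed20_12;	/* 20 integer bits, 12 fractional */

	static fixed20_12 rfixed_const(uint32_t v)   { return (fixed20_12){ v << 12 }; }
	static uint32_t   rfixed_trunc(fixed20_12 v) { return v.full >> 12; }

	static fixed20_12 rfixed_mul(fixed20_12 a, fixed20_12 b)
	{
		uint64_t t = (uint64_t)a.full * b.full;		/* scale 2^24 */
		return (fixed20_12){ (uint32_t)(t >> 12) };	/* back to 2^12 */
	}

	static fixed20_12 rfixed_div(fixed20_12 a, fixed20_12 b)
	{
		uint64_t t = (uint64_t)a.full << 12;		/* pre-scale numerator */
		return (fixed20_12){ (uint32_t)(t / b.full) };
	}

	int main(void)
	{
		/* default_mclk comes in 10 kHz units, so mclk_ff = default_mclk / 100 MHz */
		fixed20_12 mclk = rfixed_div(rfixed_const(20000), rfixed_const(100));
		printf("mclk = %u MHz\n", rfixed_trunc(mclk));	/* prints 200 */
		return 0;
	}
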
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index e2ed5bc08170..9c8d41534a5d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -30,6 +30,8 @@
 #include "drm.h"
 #include "radeon_reg.h"
 #include "radeon.h"
+#include "radeon_drm.h"
+#include "radeon_share.h"
 
 /* r300,r350,rv350,rv370,rv380 depends on : */
 void r100_hdp_reset(struct radeon_device *rdev);
@@ -44,6 +46,7 @@ int r100_gui_wait_for_idle(struct radeon_device *rdev);
 int r100_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx);
+int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
 int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
			      struct radeon_cs_reloc **cs_reloc);
 int r100_cs_parse_packet0(struct radeon_cs_parser *p,
@@ -150,8 +153,13 @@ int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
 	if (i < 0 || i > rdev->gart.num_gpu_pages) {
 		return -EINVAL;
 	}
-	addr = (((u32)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 4) | 0xC;
-	writel(cpu_to_le32(addr), ((void __iomem *)ptr) + (i * 4));
+	addr = (lower_32_bits(addr) >> 8) |
+	       ((upper_32_bits(addr) & 0xff) << 24) |
+	       0xc;
+	/* on x86 we want this to be CPU endian; on powerpc without HW
+	 * swappers it'll get swapped on the way into VRAM - so no need
+	 * for cpu_to_le32 on VRAM tables */
+	writel(addr, ((void __iomem *)ptr) + (i * 4));
 	return 0;
 }
 
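
The rewritten entry packing also corrects where the extra address bits land: in the top byte of the 32-bit entry rather than at bit 4. A standalone sketch of the packing (the bit meanings are my reading of the diff, not a register spec):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t pcie_gart_entry(uint64_t addr)
	{
		uint32_t lo = (uint32_t)addr;		/* lower_32_bits() */
		uint32_t hi = (uint32_t)(addr >> 32);	/* upper_32_bits() */

		/* addr[39:8] packed into entry[31:0]; 0xC flags the entry */
		return (lo >> 8) | ((hi & 0xff) << 24) | 0xC;
	}

	int main(void)
	{
		printf("0x%08x\n", pcie_gart_entry(0x1234567000ULL)); /* 0x1234567c */
		return 0;
	}
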
@@ -579,10 +587,8 @@ void r300_vram_info(struct radeon_device *rdev)
 	} else {
 		rdev->mc.vram_width = 64;
 	}
-	rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
 
-	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
-	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+	r100_vram_init_sizes(rdev);
 }
 
 
@@ -970,7 +976,7 @@ static inline void r300_cs_track_clear(struct r300_cs_track *track)
 
 static const unsigned r300_reg_safe_bm[159] = {
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF,
+	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
@@ -1019,7 +1025,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
	struct radeon_cs_reloc *reloc;
	struct r300_cs_track *track;
	volatile uint32_t *ib;
-	uint32_t tmp;
+	uint32_t tmp, tile_flags = 0;
	unsigned i;
	int r;
 
@@ -1027,6 +1033,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	track = (struct r300_cs_track*)p->track;
	switch(reg) {
+	case AVIVO_D1MODE_VLINE_START_END:
+	case RADEON_CRTC_GUI_TRIG_VLINE:
+		r = r100_cs_packet_parse_vline(p);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		break;
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1038,7 +1054,19 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
		}
		tmp = ib_chunk->kdata[idx] & 0x003fffff;
		tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
-		ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;
+
+		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+			tile_flags |= RADEON_DST_TILE_MACRO;
+		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+			if (reg == RADEON_SRC_PITCH_OFFSET) {
+				DRM_ERROR("Cannot src blit from microtiled surface\n");
+				r100_cs_dump_packet(p, pkt);
+				return -EINVAL;
+			}
+			tile_flags |= RADEON_DST_TILE_MICRO;
+		}
+		tmp |= tile_flags;
+		ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp;
		break;
	case R300_RB3D_COLOROFFSET0:
	case R300_RB3D_COLOROFFSET1:
@@ -1127,6 +1155,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
		/* RB3D_COLORPITCH1 */
		/* RB3D_COLORPITCH2 */
		/* RB3D_COLORPITCH3 */
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+
+		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+			tile_flags |= R300_COLOR_TILE_ENABLE;
+		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+			tile_flags |= R300_COLOR_MICROTILE_ENABLE;
+
+		tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
+		tmp |= tile_flags;
+		ib[idx] = tmp;
+
		i = (reg - 0x4E38) >> 2;
		track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE;
		switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) {
@@ -1182,6 +1227,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
		break;
	case 0x4F24:
		/* ZB_DEPTHPITCH */
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+
+		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+			tile_flags |= R300_DEPTHMACROTILE_ENABLE;
+		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+			tile_flags |= R300_DEPTHMICROTILE_TILED;
+
+		tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
+		tmp |= tile_flags;
+		ib[idx] = tmp;
+
		track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
		break;
	case 0x4104:
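
All three command-stream cases follow the same pattern: fetch the relocation, translate the object's tiling flags into register bits, and fold those bits into the packet word in place of whatever userspace supplied. A condensed sketch of the DST/SRC_PITCH_OFFSET variant (the masks follow the diff; the tile-bit positions are my assumption from the mask change):

	#include <stdint.h>

	#define DST_TILE_MACRO	(1u << 30)	/* hypothetical positions for the */
	#define DST_TILE_MICRO	(1u << 31)	/* bits dropped by 0xffc00000 -> 0x3fc00000 */

	static uint32_t patch_pitch_offset(uint32_t kdata, uint64_t gpu_offset,
					   int macro, int micro)
	{
		uint32_t tmp = (kdata & 0x003fffff) + (uint32_t)(gpu_offset >> 10);

		if (macro)
			tmp |= DST_TILE_MACRO;
		if (micro)
			tmp |= DST_TILE_MICRO;
		/* keep the pitch bits, drop userspace's tile bits */
		return (kdata & 0x3fc00000) | tmp;
	}
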
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index 70f48609515e..4b7afef35a65 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -27,7 +27,9 @@
 #ifndef _R300_REG_H_
 #define _R300_REG_H_
 
-
+#define R300_SURF_TILE_MACRO	(1<<16)
+#define R300_SURF_TILE_MICRO	(2<<16)
+#define R300_SURF_TILE_BOTH	(3<<16)
 
 
 #define R300_MC_INIT_MISC_LAT_TIMER	0x180
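
These new bits plug into r100_set_surface_reg() earlier in this series: the SURFACE*_INFO word is a scaled pitch with per-family tile bits ORed on top. A toy r300-path version of that composition (pitch granularity per the "/ 8" branch in the diff):

	#include <stdint.h>

	#define R300_SURF_TILE_MACRO	(1 << 16)
	#define R300_SURF_TILE_MICRO	(2 << 16)

	static uint32_t r300_surf_info(uint32_t pitch, int macro, int micro)
	{
		uint32_t flags = pitch / 8;	/* r100/r200 would use pitch / 16 */

		if (macro)
			flags |= R300_SURF_TILE_MACRO;
		if (micro)
			flags |= R300_SURF_TILE_MICRO;
		return flags;
	}
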
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 9070a1c2ce23..036691b38cb7 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -445,6 +445,7 @@
 #define AVIVO_D1MODE_DATA_FORMAT		0x6528
 # define AVIVO_D1MODE_INTERLEAVE_EN		(1 << 0)
 #define AVIVO_D1MODE_DESKTOP_HEIGHT		0x652C
+#define AVIVO_D1MODE_VLINE_START_END		0x6538
 #define AVIVO_D1MODE_VIEWPORT_START		0x6580
 #define AVIVO_D1MODE_VIEWPORT_SIZE		0x6584
 #define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT	0x6588
@@ -496,6 +497,7 @@
 #define AVIVO_D2CUR_SIZE		0x6c10
 #define AVIVO_D2CUR_POSITION		0x6c14
 
+#define AVIVO_D2MODE_VLINE_START_END	0x6d38
 #define AVIVO_D2MODE_VIEWPORT_START	0x6d80
 #define AVIVO_D2MODE_VIEWPORT_SIZE	0x6d84
 #define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT	0x6d88
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 570a244bd88b..09fb0b6ec7dd 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -28,6 +28,7 @@
 #include "drmP.h"
 #include "radeon_reg.h"
 #include "radeon.h"
+#include "radeon_share.h"
 
 /* r520,rv530,rv560,rv570,r580 depends on : */
 void r100_hdp_reset(struct radeon_device *rdev);
@@ -94,8 +95,8 @@ int r520_mc_init(struct radeon_device *rdev)
		       "programming pipes. Bad things might happen.\n");
	}
	/* Write VRAM size in case we are limiting it */
-	WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
-	tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
+	WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+	tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
	tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16);
	tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32_MC(R520_MC_FB_LOCATION, tmp);
@@ -226,9 +227,20 @@ static void r520_vram_get_type(struct radeon_device *rdev)
 
 void r520_vram_info(struct radeon_device *rdev)
 {
+	fixed20_12 a;
+
	r520_vram_get_type(rdev);
-	rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
 
-	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
-	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+	r100_vram_init_sizes(rdev);
+	/* FIXME: we should enforce default clock in case GPU is not in
+	 * default setup
+	 */
+	a.full = rfixed_const(100);
+	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
+	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+}
+
+void r520_bandwidth_update(struct radeon_device *rdev)
+{
+	rv515_bandwidth_avivo_update(rdev);
 }
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c45559fc97fd..538cd907df69 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -67,7 +67,7 @@ int r600_mc_init(struct radeon_device *rdev)
		       "programming pipes. Bad things might happen.\n");
	}
 
-	tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
+	tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
	tmp = REG_SET(R600_MC_FB_TOP, tmp >> 24);
	tmp |= REG_SET(R600_MC_FB_BASE, rdev->mc.vram_location >> 24);
	WREG32(R600_MC_VM_FB_LOCATION, tmp);
@@ -140,7 +140,8 @@ void r600_vram_get_type(struct radeon_device *rdev)
 void r600_vram_info(struct radeon_device *rdev)
 {
	r600_vram_get_type(rdev);
-	rdev->mc.vram_size = RREG32(R600_CONFIG_MEMSIZE);
+	rdev->mc.real_vram_size = RREG32(R600_CONFIG_MEMSIZE);
+	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
 
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index d61f2fc61df5..b1d945b8ed6c 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -64,6 +64,7 @@ extern int radeon_agpmode;
 extern int radeon_vram_limit;
 extern int radeon_gart_size;
 extern int radeon_benchmarking;
+extern int radeon_testing;
 extern int radeon_connector_table;
 
 /*
@@ -113,6 +114,7 @@ enum radeon_family {
	CHIP_RV770,
	CHIP_RV730,
	CHIP_RV710,
+	CHIP_RS880,
	CHIP_LAST,
 };
 
@@ -201,6 +203,14 @@ int radeon_fence_wait_last(struct radeon_device *rdev);
 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
 void radeon_fence_unref(struct radeon_fence **fence);
 
+/*
+ * Tiling registers
+ */
+struct radeon_surface_reg {
+	struct radeon_object *robj;
+};
+
+#define RADEON_GEM_MAX_SURFACES 8
 
 /*
  * Radeon buffer.
@@ -213,6 +223,7 @@ struct radeon_object_list {
	uint64_t		gpu_offset;
	unsigned		rdomain;
	unsigned		wdomain;
+	uint32_t		tiling_flags;
 };
 
 int radeon_object_init(struct radeon_device *rdev);
@@ -242,8 +253,15 @@ void radeon_object_list_clean(struct list_head *head);
 int radeon_object_fbdev_mmap(struct radeon_object *robj,
			     struct vm_area_struct *vma);
 unsigned long radeon_object_size(struct radeon_object *robj);
-
-
+void radeon_object_clear_surface_reg(struct radeon_object *robj);
+int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
+			       bool force_drop);
+void radeon_object_set_tiling_flags(struct radeon_object *robj,
+				    uint32_t tiling_flags, uint32_t pitch);
+void radeon_object_get_tiling_flags(struct radeon_object *robj, uint32_t *tiling_flags, uint32_t *pitch);
+void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+			   struct ttm_mem_reg *mem);
+void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 /*
  * GEM objects.
  */
@@ -315,8 +333,11 @@ struct radeon_mc {
	unsigned		gtt_location;
	unsigned		gtt_size;
	unsigned		vram_location;
-	unsigned		vram_size;
+	/* for some chips with <= 32MB we need to lie
+	 * about vram size near mc fb location */
+	unsigned		mc_vram_size;
	unsigned		vram_width;
+	unsigned		real_vram_size;
	int			vram_mtrr;
	bool			vram_is_ddr;
 };
@@ -474,6 +495,39 @@ struct radeon_wb {
	uint64_t		gpu_addr;
 };
 
+/**
+ * struct radeon_pm - power management data
+ * @max_bandwidth: maximum bandwidth the gpu has (MByte/s)
+ * @igp_sideport_mclk: sideport memory clock Mhz (rs690,rs740,rs780,rs880)
+ * @igp_system_mclk: system clock Mhz (rs690,rs740,rs780,rs880)
+ * @igp_ht_link_clk: ht link clock Mhz (rs690,rs740,rs780,rs880)
+ * @igp_ht_link_width: ht link width in bits (rs690,rs740,rs780,rs880)
+ * @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP)
+ * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
+ * @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP)
+ * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP)
+ * @sclk: GPU clock Mhz (core bandwidth depends on this clock)
+ * @needed_bandwidth: current bandwidth needs
+ *
+ * It keeps track of various data needed to take power management decisions.
+ * Bandwidth need is used to determine the minimum clock of the GPU and memory.
+ * The equation between gpu/memory clock and available bandwidth is hw
+ * dependent (type of memory, bus size, efficiency, ...)
+ */
+struct radeon_pm {
+	fixed20_12		max_bandwidth;
+	fixed20_12		igp_sideport_mclk;
+	fixed20_12		igp_system_mclk;
+	fixed20_12		igp_ht_link_clk;
+	fixed20_12		igp_ht_link_width;
+	fixed20_12		k8_bandwidth;
+	fixed20_12		sideport_bandwidth;
+	fixed20_12		ht_bandwidth;
+	fixed20_12		core_bandwidth;
+	fixed20_12		sclk;
+	fixed20_12		needed_bandwidth;
+};
+
 
 /*
  * Benchmarking
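
The struct gives the power code a common currency: every clock and bus contributes a bandwidth figure, and needed_bandwidth is what the active modes demand. In plain integers, the comparison it enables looks roughly like this (the numbers are illustrative, not from the driver):

	#include <stdio.h>

	int main(void)
	{
		unsigned pixclk_khz = 162000;	/* e.g. 1600x1200@60 */
		unsigned bpp = 4;		/* 32-bit framebuffer */
		unsigned mclk_khz = 200000;
		unsigned bus_bytes = 32;	/* 128-bit DDR: 16 bytes x 2 */

		unsigned long long need = (unsigned long long)pixclk_khz * bpp;
		unsigned long long have = (unsigned long long)mclk_khz * bus_bytes;

		printf("need %llu KB/s, have %llu KB/s\n", need, have);
		return 0;
	}
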
@@ -482,6 +536,12 @@ void radeon_benchmark(struct radeon_device *rdev);
 
 
 /*
+ * Testing
+ */
+void radeon_test_moves(struct radeon_device *rdev);
+
+
+/*
  * Debugfs
  */
 int radeon_debugfs_add_files(struct radeon_device *rdev,
@@ -535,6 +595,11 @@ struct radeon_asic {
	void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
	void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
	void (*set_clock_gating)(struct radeon_device *rdev, int enable);
+	int (*set_surface_reg)(struct radeon_device *rdev, int reg,
+			       uint32_t tiling_flags, uint32_t pitch,
+			       uint32_t offset, uint32_t obj_size);
+	int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
+	void (*bandwidth_update)(struct radeon_device *rdev);
 };
 
 union radeon_asic_config {
@@ -566,6 +631,10 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp);
+int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp);
 
 
 /*
@@ -594,8 +663,8 @@ struct radeon_device {
	struct radeon_object *fbdev_robj;
	struct radeon_framebuffer *fbdev_rfb;
	/* Register mmio */
-	unsigned long			rmmio_base;
-	unsigned long			rmmio_size;
+	resource_size_t			rmmio_base;
+	resource_size_t			rmmio_size;
	void				*rmmio;
	radeon_rreg_t			mm_rreg;
	radeon_wreg_t			mm_wreg;
@@ -619,11 +688,14 @@ struct radeon_device {
	struct radeon_irq		irq;
	struct radeon_asic		*asic;
	struct radeon_gem		gem;
+	struct radeon_pm		pm;
	struct mutex			cs_mutex;
	struct radeon_wb		wb;
	bool				gpu_lockup;
	bool				shutdown;
	bool				suspend;
+	bool				need_dma32;
+	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
 };
 
 int radeon_device_init(struct radeon_device *rdev,
@@ -670,6 +742,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
 /*
  * ASICs helpers.
  */
+#define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \
+			    (rdev->pdev->device == 0x5969))
 #define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
			     (rdev->family == CHIP_RV200) || \
			     (rdev->family == CHIP_RS100) || \
@@ -796,5 +870,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
 #define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
 #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
 #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
+#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
+#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
+#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
 
 #endif
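
The three new macros complete the per-ASIC dispatch table: callers never name a chip family, they go through rdev->asic. The pattern in miniature (illustrative names, not the driver's):

	#include <stdio.h>

	struct dev;
	struct asic_ops {
		void (*bandwidth_update)(struct dev *d);
	};
	struct dev { const struct asic_ops *asic; };

	#define dev_bandwidth_update(d) ((d)->asic->bandwidth_update((d)))

	static void r100_bw(struct dev *d) { puts("legacy bandwidth path"); }

	int main(void)
	{
		static const struct asic_ops r100_ops = { .bandwidth_update = r100_bw };
		struct dev d = { .asic = &r100_ops };
		dev_bandwidth_update(&d);	/* dispatches to r100_bw() */
		return 0;
	}
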
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index e2e567395df8..9a75876e0c3b 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -71,6 +71,11 @@ int r100_copy_blit(struct radeon_device *rdev,
		   uint64_t dst_offset,
		   unsigned num_pages,
		   struct radeon_fence *fence);
+int r100_set_surface_reg(struct radeon_device *rdev, int reg,
+			 uint32_t tiling_flags, uint32_t pitch,
+			 uint32_t offset, uint32_t obj_size);
+int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
+void r100_bandwidth_update(struct radeon_device *rdev);
 
 static struct radeon_asic r100_asic = {
	.init = &r100_init,
@@ -100,6 +105,9 @@ static struct radeon_asic r100_asic = {
	.set_memory_clock = NULL,
	.set_pcie_lanes = NULL,
	.set_clock_gating = &radeon_legacy_set_clock_gating,
+	.set_surface_reg = r100_set_surface_reg,
+	.clear_surface_reg = r100_clear_surface_reg,
+	.bandwidth_update = &r100_bandwidth_update,
 };
 
 
@@ -128,6 +136,7 @@ int r300_copy_dma(struct radeon_device *rdev,
		  uint64_t dst_offset,
		  unsigned num_pages,
		  struct radeon_fence *fence);
+
 static struct radeon_asic r300_asic = {
	.init = &r300_init,
	.errata = &r300_errata,
@@ -156,6 +165,9 @@ static struct radeon_asic r300_asic = {
	.set_memory_clock = NULL,
	.set_pcie_lanes = &rv370_set_pcie_lanes,
	.set_clock_gating = &radeon_legacy_set_clock_gating,
+	.set_surface_reg = r100_set_surface_reg,
+	.clear_surface_reg = r100_clear_surface_reg,
+	.bandwidth_update = &r100_bandwidth_update,
 };
 
 /*
@@ -193,6 +205,9 @@ static struct radeon_asic r420_asic = {
	.set_memory_clock = &radeon_atom_set_memory_clock,
	.set_pcie_lanes = &rv370_set_pcie_lanes,
	.set_clock_gating = &radeon_atom_set_clock_gating,
+	.set_surface_reg = r100_set_surface_reg,
+	.clear_surface_reg = r100_clear_surface_reg,
+	.bandwidth_update = &r100_bandwidth_update,
 };
 
 
@@ -237,6 +252,9 @@ static struct radeon_asic rs400_asic = {
	.set_memory_clock = NULL,
	.set_pcie_lanes = NULL,
	.set_clock_gating = &radeon_legacy_set_clock_gating,
+	.set_surface_reg = r100_set_surface_reg,
+	.clear_surface_reg = r100_clear_surface_reg,
+	.bandwidth_update = &r100_bandwidth_update,
 };
 
 
@@ -254,6 +272,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev);
 int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+void rs600_bandwidth_update(struct radeon_device *rdev);
 static struct radeon_asic rs600_asic = {
	.init = &r300_init,
	.errata = &rs600_errata,
@@ -282,6 +301,7 @@ static struct radeon_asic rs600_asic = {
	.set_memory_clock = &radeon_atom_set_memory_clock,
	.set_pcie_lanes = NULL,
	.set_clock_gating = &radeon_atom_set_clock_gating,
+	.bandwidth_update = &rs600_bandwidth_update,
 };
 
 
@@ -294,6 +314,7 @@ int rs690_mc_init(struct radeon_device *rdev);
 void rs690_mc_fini(struct radeon_device *rdev);
 uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+void rs690_bandwidth_update(struct radeon_device *rdev);
 static struct radeon_asic rs690_asic = {
	.init = &r300_init,
	.errata = &rs690_errata,
@@ -322,6 +343,9 @@ static struct radeon_asic rs690_asic = {
	.set_memory_clock = &radeon_atom_set_memory_clock,
	.set_pcie_lanes = NULL,
	.set_clock_gating = &radeon_atom_set_clock_gating,
+	.set_surface_reg = r100_set_surface_reg,
+	.clear_surface_reg = r100_clear_surface_reg,
+	.bandwidth_update = &rs690_bandwidth_update,
 };
 
 
@@ -339,6 +363,7 @@ void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rv515_ring_start(struct radeon_device *rdev);
 uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
 void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+void rv515_bandwidth_update(struct radeon_device *rdev);
 static struct radeon_asic rv515_asic = {
	.init = &rv515_init,
	.errata = &rv515_errata,
@@ -367,6 +392,9 @@ static struct radeon_asic rv515_asic = {
	.set_memory_clock = &radeon_atom_set_memory_clock,
	.set_pcie_lanes = &rv370_set_pcie_lanes,
	.set_clock_gating = &radeon_atom_set_clock_gating,
+	.set_surface_reg = r100_set_surface_reg,
+	.clear_surface_reg = r100_clear_surface_reg,
+	.bandwidth_update = &rv515_bandwidth_update,
 };
 
 
@@ -377,6 +405,7 @@ void r520_errata(struct radeon_device *rdev);
 void r520_vram_info(struct radeon_device *rdev);
 int r520_mc_init(struct radeon_device *rdev);
 void r520_mc_fini(struct radeon_device *rdev);
+void r520_bandwidth_update(struct radeon_device *rdev);
 static struct radeon_asic r520_asic = {
	.init = &rv515_init,
	.errata = &r520_errata,
@@ -405,6 +434,9 @@ static struct radeon_asic r520_asic = {
	.set_memory_clock = &radeon_atom_set_memory_clock,
	.set_pcie_lanes = &rv370_set_pcie_lanes,
	.set_clock_gating = &radeon_atom_set_clock_gating,
+	.set_surface_reg = r100_set_surface_reg,
+	.clear_surface_reg = r100_clear_surface_reg,
+	.bandwidth_update = &r520_bandwidth_update,
 };
 
 /*
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 1f5a1a490984..fcfe5c02d744 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -103,7 +103,8 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
 static bool radeon_atom_apply_quirks(struct drm_device *dev,
				     uint32_t supported_device,
				     int *connector_type,
-				     struct radeon_i2c_bus_rec *i2c_bus)
+				     struct radeon_i2c_bus_rec *i2c_bus,
+				     uint8_t *line_mux)
 {
 
	/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
@@ -127,8 +128,10 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
	if ((dev->pdev->device == 0x5653) &&
	    (dev->pdev->subsystem_vendor == 0x1462) &&
	    (dev->pdev->subsystem_device == 0x0291)) {
-		if (*connector_type == DRM_MODE_CONNECTOR_LVDS)
+		if (*connector_type == DRM_MODE_CONNECTOR_LVDS) {
			i2c_bus->valid = false;
+			*line_mux = 53;
+		}
	}
 
	/* Funky macbooks */
@@ -526,7 +529,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 
		if (!radeon_atom_apply_quirks
		    (dev, (1 << i), &bios_connectors[i].connector_type,
-		     &bios_connectors[i].ddc_bus))
+		     &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux))
			continue;
 
		bios_connectors[i].valid = true;
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index c44403a2ca76..2e938f7496fb 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -63,7 +63,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
		if (r) {
			goto out_cleanup;
		}
-		r = radeon_copy_dma(rdev, saddr, daddr, size >> 14, fence);
+		r = radeon_copy_dma(rdev, saddr, daddr, size / 4096, fence);
		if (r) {
			goto out_cleanup;
		}
@@ -88,7 +88,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
		if (r) {
			goto out_cleanup;
		}
-		r = radeon_copy_blit(rdev, saddr, daddr, size >> 14, fence);
+		r = radeon_copy_blit(rdev, saddr, daddr, size / 4096, fence);
		if (r) {
			goto out_cleanup;
		}
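
The copy helpers take a count of GPU pages, and size >> 14 divides by 16384 rather than the 4 KiB page size, so the benchmark previously moved only a quarter of each buffer. A quick check of the before/after arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned size = 1024 * 1024;	/* 1 MiB buffer */
		printf("old: %u pages, new: %u pages\n", size >> 14, size / 4096);
		return 0;	/* old: 64, new: 256 */
	}
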
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index b843f9bdfb14..a169067efc4e 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -127,17 +127,23 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
				       sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
+		p->chunks[i].length_dw = user_chunk.length_dw;
+		p->chunks[i].kdata = NULL;
		p->chunks[i].chunk_id = user_chunk.chunk_id;
+
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib_idx = i;
+			/* zero length IB isn't useful */
+			if (p->chunks[i].length_dw == 0)
+				return -EINVAL;
		}
+
		p->chunks[i].length_dw = user_chunk.length_dw;
		cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
 
-		p->chunks[i].kdata = NULL;
		size = p->chunks[i].length_dw * sizeof(uint32_t);
		p->chunks[i].kdata = kzalloc(size, GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
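
The reorder matters for the early-exit path: length_dw has to be copied out of the user chunk before the zero-length IB test can see it, and kdata has to be NULLed before any return so the parser's cleanup can free chunks unconditionally. A stripped-down sketch of the fixed ordering (simplified, hypothetical types):

	struct chunk { unsigned length_dw; void *kdata; unsigned chunk_id; };

	static int chunk_init(struct chunk *c, unsigned user_len, unsigned id,
			      int is_ib)
	{
		c->length_dw = user_len;	/* copy first... */
		c->kdata = 0;			/* ...and make cleanup safe */
		c->chunk_id = id;
		if (is_ib && c->length_dw == 0)
			return -22;		/* -EINVAL: zero length IB isn't useful */
		return 0;
	}
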
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 5232441f119b..b13c79e38bc0 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -111,9 +111,11 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
 
	if (ASIC_IS_AVIVO(rdev))
		WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
-	else
+	else {
+		radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
		/* offset is from DISP(2)_BASE_ADDRESS */
-		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, gpu_addr);
+		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
+	}
 }
 
 int radeon_crtc_cursor_set(struct drm_crtc *crtc,
@@ -245,6 +247,9 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
		       (RADEON_CUR_LOCK
			| ((xorigin ? 0 : x) << 16)
			| (yorigin ? 0 : y)));
+		/* offset is from DISP(2)_BASE_ADDRESS */
+		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
+								      (yorigin * 256)));
	}
	radeon_lock_cursor(crtc, false);
 
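
On pre-AVIVO parts CUR_OFFSET is relative to DISP(2)_BASE_ADDRESS, not a GPU address, which is what both hunks fix: the set path stores the cursor's offset from the display base, and the move path re-adds yorigin * 256 when the cursor is clipped at the top edge (256 bytes per cursor line is my inference from the constant, assuming a 64-pixel-wide ARGB cursor):

	#include <stdint.h>

	static uint32_t legacy_cur_offset(uint64_t gpu_addr, uint64_t disp_base,
					  uint32_t yorigin)
	{
		uint32_t off = (uint32_t)(gpu_addr - disp_base);

		return off + yorigin * 256;	/* skip the clipped-off top lines */
	}
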
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index f97563db4e59..a162ade74b7f 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -48,6 +48,8 @@ static void radeon_surface_init(struct radeon_device *rdev)
			       i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
			       0);
		}
+		/* enable surfaces */
+		WREG32(RADEON_SURFACE_CNTL, 0);
	}
 }
 
@@ -119,7 +121,7 @@ int radeon_mc_setup(struct radeon_device *rdev)
	if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
		/* vram location was already setup try to put gtt after
		 * if it fits */
-		tmp = rdev->mc.vram_location + rdev->mc.vram_size;
+		tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
		tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
		if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
			rdev->mc.gtt_location = tmp;
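
The placement math is the standard power-of-two round-up, now fed with mc_vram_size so the GTT lands after the (possibly inflated) MC view of VRAM:

	#include <stdio.h>

	static unsigned align_up(unsigned v, unsigned pow2)
	{
		return (v + pow2 - 1) & ~(pow2 - 1);
	}

	int main(void)
	{
		/* end of VRAM at 160 MB, 128 MB GTT -> GTT starts at 256 MB */
		printf("0x%08x\n", align_up(0x0a000000, 0x08000000));
		return 0;
	}
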
@@ -134,13 +136,13 @@ int radeon_mc_setup(struct radeon_device *rdev)
134 } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) { 136 } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
135 /* gtt location was already setup try to put vram before 137 /* gtt location was already setup try to put vram before
136 * if it fits */ 138 * if it fits */
137 if (rdev->mc.vram_size < rdev->mc.gtt_location) { 139 if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) {
138 rdev->mc.vram_location = 0; 140 rdev->mc.vram_location = 0;
139 } else { 141 } else {
140 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size; 142 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
141 tmp += (rdev->mc.vram_size - 1); 143 tmp += (rdev->mc.mc_vram_size - 1);
142 tmp &= ~(rdev->mc.vram_size - 1); 144 tmp &= ~(rdev->mc.mc_vram_size - 1);
143 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.vram_size) { 145 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) {
144 rdev->mc.vram_location = tmp; 146 rdev->mc.vram_location = tmp;
145 } else { 147 } else {
146 printk(KERN_ERR "[drm] vram too big to fit " 148 printk(KERN_ERR "[drm] vram too big to fit "
@@ -150,12 +152,14 @@ int radeon_mc_setup(struct radeon_device *rdev)
150 } 152 }
151 } else { 153 } else {
152 rdev->mc.vram_location = 0; 154 rdev->mc.vram_location = 0;
153 rdev->mc.gtt_location = rdev->mc.vram_size; 155 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
154 } 156 }
155 DRM_INFO("radeon: VRAM %uM\n", rdev->mc.vram_size >> 20); 157 DRM_INFO("radeon: VRAM %uM\n", rdev->mc.real_vram_size >> 20);
156 DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n", 158 DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
157 rdev->mc.vram_location, 159 rdev->mc.vram_location,
158 rdev->mc.vram_location + rdev->mc.vram_size - 1); 160 rdev->mc.vram_location + rdev->mc.mc_vram_size - 1);
161 if (rdev->mc.real_vram_size != rdev->mc.mc_vram_size)
162 DRM_INFO("radeon: VRAM less than aperture workaround enabled\n");
159 DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20); 163 DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20);
160 DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n", 164 DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
161 rdev->mc.gtt_location, 165 rdev->mc.gtt_location,
@@ -450,6 +454,7 @@ int radeon_device_init(struct radeon_device *rdev,
450 uint32_t flags) 454 uint32_t flags)
451{ 455{
452 int r, ret; 456 int r, ret;
457 int dma_bits;
453 458
454 DRM_INFO("radeon: Initializing kernel modesetting.\n"); 459 DRM_INFO("radeon: Initializing kernel modesetting.\n");
455 rdev->shutdown = false; 460 rdev->shutdown = false;
@@ -492,8 +497,20 @@ int radeon_device_init(struct radeon_device *rdev,
 		return r;
 	}
 
-	/* Report DMA addressing limitation */
-	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
+	/* set DMA mask + need_dma32 flags.
+	 * PCIE - can handle 40-bits.
+	 * IGP - can handle 40-bits (in theory)
+	 * AGP - generally dma32 is safest
+	 * PCI - only dma32
+	 */
+	rdev->need_dma32 = false;
+	if (rdev->flags & RADEON_IS_AGP)
+		rdev->need_dma32 = true;
+	if (rdev->flags & RADEON_IS_PCI)
+		rdev->need_dma32 = true;
+
+	dma_bits = rdev->need_dma32 ? 32 : 40;
+	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
 	if (r) {
 		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
 	}
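
Editorial note: a compilable sketch of the mask-width decision introduced above. The flag bits below are stand-ins for the real RADEON_IS_* flags in radeon.h, not the actual values:

    /* Bus type -> DMA addressing width, mirroring the hunk above. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define IS_AGP (1u << 0) /* hypothetical flag bits */
    #define IS_PCI (1u << 1)

    static int dma_mask_bits(uint32_t flags)
    {
        bool need_dma32 = false;
        if (flags & IS_AGP) /* AGP: 32-bit addressing is safest */
            need_dma32 = true;
        if (flags & IS_PCI) /* plain PCI: only 32-bit */
            need_dma32 = true;
        return need_dma32 ? 32 : 40; /* PCIE/IGP can address 40 bits */
    }

    int main(void)
    {
        printf("AGP  -> %d bits\n", dma_mask_bits(IS_AGP));
        printf("PCIE -> %d bits\n", dma_mask_bits(0));
        return 0;
    }
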
@@ -546,27 +563,22 @@ int radeon_device_init(struct radeon_device *rdev,
 			radeon_combios_asic_init(rdev->ddev);
 		}
 	}
+	/* Initialize clocks */
+	r = radeon_clocks_init(rdev);
+	if (r) {
+		return r;
+	}
 	/* Get vram informations */
 	radeon_vram_info(rdev);
-	/* Device is severly broken if aper size > vram size.
-	 * for RN50/M6/M7 - Novell bug 204882 ?
-	 */
-	if (rdev->mc.vram_size < rdev->mc.aper_size) {
-		rdev->mc.aper_size = rdev->mc.vram_size;
-	}
+
 	/* Add an MTRR for the VRAM */
 	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
 				      MTRR_TYPE_WRCOMB, 1);
 	DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n",
-		 rdev->mc.vram_size >> 20,
+		 rdev->mc.real_vram_size >> 20,
 		 (unsigned)rdev->mc.aper_size >> 20);
 	DRM_INFO("RAM width %dbits %cDR\n",
 		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
-	/* Initialize clocks */
-	r = radeon_clocks_init(rdev);
-	if (r) {
-		return r;
-	}
 	/* Initialize memory controller (also test AGP) */
 	r = radeon_mc_init(rdev);
 	if (r) {
@@ -626,6 +638,9 @@ int radeon_device_init(struct radeon_device *rdev,
 	if (!ret) {
 		DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
 	}
+	if (radeon_testing) {
+		radeon_test_moves(rdev);
+	}
 	if (radeon_benchmarking) {
 		radeon_benchmark(rdev);
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 3efcf1a526be..a8fa1bb84cf7 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -187,6 +187,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
 
 	drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
 	radeon_crtc->crtc_id = index;
+	rdev->mode_info.crtcs[index] = radeon_crtc;
 
 	radeon_crtc->mode_set.crtc = &radeon_crtc->base;
 	radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
@@ -491,7 +492,11 @@ void radeon_compute_pll(struct radeon_pll *pll,
 		tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
 		current_freq = radeon_div(tmp, ref_div * post_div);
 
-		error = abs(current_freq - freq);
+		if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
+			error = freq - current_freq;
+			error = error < 0 ? 0xffffffff : error;
+		} else
+			error = abs(current_freq - freq);
 		vco_diff = abs(vco - best_vco);
 
 		if ((best_vco == 0 && error < best_error) ||
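
Editorial note: the new branch makes any divider setting that overshoots the target frequency lose the comparison by assigning it a worst-case error. A self-contained sketch of that metric (function and parameter names are illustrative, not the driver's):

    /* "Prefer closest lower" PLL error: overshooting candidates are
     * penalized with the maximum error so they are never selected. */
    #include <stdint.h>
    #include <stdlib.h>
    #include <stdio.h>

    static uint32_t pll_error(uint32_t target, uint32_t current, int prefer_lower)
    {
        if (prefer_lower) {
            int64_t diff = (int64_t)target - current;
            return diff < 0 ? 0xffffffffu : (uint32_t)diff;
        }
        return (uint32_t)labs((long)current - (long)target);
    }

    int main(void)
    {
        printf("%u\n", pll_error(100000, 99900, 1));  /* small error, may win */
        printf("%u\n", pll_error(100000, 100100, 1)); /* overshoot, rejected */
        return 0;
    }
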
@@ -657,36 +662,51 @@ void radeon_modeset_fini(struct radeon_device *rdev)
 	}
 }
 
-void radeon_init_disp_bandwidth(struct drm_device *dev)
+bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
+				    struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode)
 {
-	struct radeon_device *rdev = dev->dev_private;
-	struct drm_display_mode *modes[2];
-	int pixel_bytes[2];
-	struct drm_crtc *crtc;
-
-	pixel_bytes[0] = pixel_bytes[1] = 0;
-	modes[0] = modes[1] = NULL;
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_encoder *encoder;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_encoder *radeon_encoder;
+	bool first = true;
 
-		if (crtc->enabled && crtc->fb) {
-			modes[radeon_crtc->crtc_id] = &crtc->mode;
-			pixel_bytes[radeon_crtc->crtc_id] = crtc->fb->bits_per_pixel / 8;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		radeon_encoder = to_radeon_encoder(encoder);
+		if (encoder->crtc != crtc)
+			continue;
+		if (first) {
+			radeon_crtc->rmx_type = radeon_encoder->rmx_type;
+			radeon_crtc->devices = radeon_encoder->devices;
+			memcpy(&radeon_crtc->native_mode,
+			       &radeon_encoder->native_mode,
+			       sizeof(struct radeon_native_mode));
+			first = false;
+		} else {
+			if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) {
+				/* WARNING: Right now this can't happen but
+				 * in the future we need to check that scaling
+				 * is consistent across different encoders
+				 * (i.e. all encoders can work with the same
+				 * scaling).
+				 */
+				DRM_ERROR("Scaling not consistent across encoders.\n");
+				return false;
+			}
 		}
 	}
-
-	if (ASIC_IS_AVIVO(rdev)) {
-		radeon_init_disp_bw_avivo(dev,
-					  modes[0],
-					  pixel_bytes[0],
-					  modes[1],
-					  pixel_bytes[1]);
+	if (radeon_crtc->rmx_type != RMX_OFF) {
+		fixed20_12 a, b;
+		a.full = rfixed_const(crtc->mode.vdisplay);
+		b.full = rfixed_const(radeon_crtc->native_mode.panel_xres);
+		radeon_crtc->vsc.full = rfixed_div(a, b);
+		a.full = rfixed_const(crtc->mode.hdisplay);
+		b.full = rfixed_const(radeon_crtc->native_mode.panel_yres);
+		radeon_crtc->hsc.full = rfixed_div(a, b);
 	} else {
-		radeon_init_disp_bw_legacy(dev,
-					   modes[0],
-					   pixel_bytes[0],
-					   modes[1],
-					   pixel_bytes[1]);
+		radeon_crtc->vsc.full = rfixed_const(1);
+		radeon_crtc->hsc.full = rfixed_const(1);
 	}
+	return true;
 }
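
Editorial note: the vsc/hsc ratios above are computed with the driver's 20.12 fixed-point helpers from radeon_fixed.h. A self-contained sketch of equivalent helpers, using stand-in implementations rather than the real ones:

    /* 20.12 fixed point: value occupies the top 20 bits, fraction the
     * low 12, matching the fixed20_12 convention used by the driver. */
    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint32_t full; } fixed20_12;

    static fixed20_12 fx_const(uint32_t v) { return (fixed20_12){ v << 12 }; }

    static fixed20_12 fx_div(fixed20_12 a, fixed20_12 b)
    {
        uint64_t t = (uint64_t)a.full << 12; /* pre-shift to keep precision */
        return (fixed20_12){ (uint32_t)(t / b.full) };
    }

    int main(void)
    {
        /* e.g. a 1024-line mode scaled against a 1280-wide native panel */
        fixed20_12 scale = fx_div(fx_const(1024), fx_const(1280));
        printf("scale = %u/4096 (~%.3f)\n", scale.full, scale.full / 4096.0);
        return 0;
    }
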
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 84ba69f48784..3cfcee17dc56 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -89,6 +89,7 @@ int radeon_agpmode = 0;
 int radeon_vram_limit = 0;
 int radeon_gart_size = 512; /* default gart size */
 int radeon_benchmarking = 0;
+int radeon_testing = 0;
 int radeon_connector_table = 0;
 #endif
 
@@ -117,6 +118,9 @@ module_param_named(gartsize, radeon_gart_size, int, 0600);
 MODULE_PARM_DESC(benchmark, "Run benchmark");
 module_param_named(benchmark, radeon_benchmarking, int, 0444);
 
+MODULE_PARM_DESC(test, "Run tests");
+module_param_named(test, radeon_testing, int, 0444);
+
 MODULE_PARM_DESC(connector_table, "Force connector table");
 module_param_named(connector_table, radeon_connector_table, int, 0444);
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index c8ef0d14ffab..0a92706eac19 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -154,7 +154,6 @@ void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
 
 	if (mode->hdisplay < native_mode->panel_xres ||
 	    mode->vdisplay < native_mode->panel_yres) {
-		radeon_encoder->flags |= RADEON_USE_RMX;
 		if (ASIC_IS_AVIVO(rdev)) {
 			adjusted_mode->hdisplay = native_mode->panel_xres;
 			adjusted_mode->vdisplay = native_mode->panel_yres;
@@ -197,15 +196,13 @@ void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
 	}
 }
 
-
 static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
 				   struct drm_display_mode *mode,
 				   struct drm_display_mode *adjusted_mode)
 {
-
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 
-	radeon_encoder->flags &= ~RADEON_USE_RMX;
-
 	drm_mode_set_crtcinfo(adjusted_mode, 0);
 
 	if (radeon_encoder->rmx_type != RMX_OFF)
@@ -808,234 +805,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
 
 }
 
811static void atom_rv515_force_tv_scaler(struct radeon_device *rdev)
812{
813
814 WREG32(0x659C, 0x0);
815 WREG32(0x6594, 0x705);
816 WREG32(0x65A4, 0x10001);
817 WREG32(0x65D8, 0x0);
818 WREG32(0x65B0, 0x0);
819 WREG32(0x65C0, 0x0);
820 WREG32(0x65D4, 0x0);
821 WREG32(0x6578, 0x0);
822 WREG32(0x657C, 0x841880A8);
823 WREG32(0x6578, 0x1);
824 WREG32(0x657C, 0x84208680);
825 WREG32(0x6578, 0x2);
826 WREG32(0x657C, 0xBFF880B0);
827 WREG32(0x6578, 0x100);
828 WREG32(0x657C, 0x83D88088);
829 WREG32(0x6578, 0x101);
830 WREG32(0x657C, 0x84608680);
831 WREG32(0x6578, 0x102);
832 WREG32(0x657C, 0xBFF080D0);
833 WREG32(0x6578, 0x200);
834 WREG32(0x657C, 0x83988068);
835 WREG32(0x6578, 0x201);
836 WREG32(0x657C, 0x84A08680);
837 WREG32(0x6578, 0x202);
838 WREG32(0x657C, 0xBFF080F8);
839 WREG32(0x6578, 0x300);
840 WREG32(0x657C, 0x83588058);
841 WREG32(0x6578, 0x301);
842 WREG32(0x657C, 0x84E08660);
843 WREG32(0x6578, 0x302);
844 WREG32(0x657C, 0xBFF88120);
845 WREG32(0x6578, 0x400);
846 WREG32(0x657C, 0x83188040);
847 WREG32(0x6578, 0x401);
848 WREG32(0x657C, 0x85008660);
849 WREG32(0x6578, 0x402);
850 WREG32(0x657C, 0xBFF88150);
851 WREG32(0x6578, 0x500);
852 WREG32(0x657C, 0x82D88030);
853 WREG32(0x6578, 0x501);
854 WREG32(0x657C, 0x85408640);
855 WREG32(0x6578, 0x502);
856 WREG32(0x657C, 0xBFF88180);
857 WREG32(0x6578, 0x600);
858 WREG32(0x657C, 0x82A08018);
859 WREG32(0x6578, 0x601);
860 WREG32(0x657C, 0x85808620);
861 WREG32(0x6578, 0x602);
862 WREG32(0x657C, 0xBFF081B8);
863 WREG32(0x6578, 0x700);
864 WREG32(0x657C, 0x82608010);
865 WREG32(0x6578, 0x701);
866 WREG32(0x657C, 0x85A08600);
867 WREG32(0x6578, 0x702);
868 WREG32(0x657C, 0x800081F0);
869 WREG32(0x6578, 0x800);
870 WREG32(0x657C, 0x8228BFF8);
871 WREG32(0x6578, 0x801);
872 WREG32(0x657C, 0x85E085E0);
873 WREG32(0x6578, 0x802);
874 WREG32(0x657C, 0xBFF88228);
875 WREG32(0x6578, 0x10000);
876 WREG32(0x657C, 0x82A8BF00);
877 WREG32(0x6578, 0x10001);
878 WREG32(0x657C, 0x82A08CC0);
879 WREG32(0x6578, 0x10002);
880 WREG32(0x657C, 0x8008BEF8);
881 WREG32(0x6578, 0x10100);
882 WREG32(0x657C, 0x81F0BF28);
883 WREG32(0x6578, 0x10101);
884 WREG32(0x657C, 0x83608CA0);
885 WREG32(0x6578, 0x10102);
886 WREG32(0x657C, 0x8018BED0);
887 WREG32(0x6578, 0x10200);
888 WREG32(0x657C, 0x8148BF38);
889 WREG32(0x6578, 0x10201);
890 WREG32(0x657C, 0x84408C80);
891 WREG32(0x6578, 0x10202);
892 WREG32(0x657C, 0x8008BEB8);
893 WREG32(0x6578, 0x10300);
894 WREG32(0x657C, 0x80B0BF78);
895 WREG32(0x6578, 0x10301);
896 WREG32(0x657C, 0x85008C20);
897 WREG32(0x6578, 0x10302);
898 WREG32(0x657C, 0x8020BEA0);
899 WREG32(0x6578, 0x10400);
900 WREG32(0x657C, 0x8028BF90);
901 WREG32(0x6578, 0x10401);
902 WREG32(0x657C, 0x85E08BC0);
903 WREG32(0x6578, 0x10402);
904 WREG32(0x657C, 0x8018BE90);
905 WREG32(0x6578, 0x10500);
906 WREG32(0x657C, 0xBFB8BFB0);
907 WREG32(0x6578, 0x10501);
908 WREG32(0x657C, 0x86C08B40);
909 WREG32(0x6578, 0x10502);
910 WREG32(0x657C, 0x8010BE90);
911 WREG32(0x6578, 0x10600);
912 WREG32(0x657C, 0xBF58BFC8);
913 WREG32(0x6578, 0x10601);
914 WREG32(0x657C, 0x87A08AA0);
915 WREG32(0x6578, 0x10602);
916 WREG32(0x657C, 0x8010BE98);
917 WREG32(0x6578, 0x10700);
918 WREG32(0x657C, 0xBF10BFF0);
919 WREG32(0x6578, 0x10701);
920 WREG32(0x657C, 0x886089E0);
921 WREG32(0x6578, 0x10702);
922 WREG32(0x657C, 0x8018BEB0);
923 WREG32(0x6578, 0x10800);
924 WREG32(0x657C, 0xBED8BFE8);
925 WREG32(0x6578, 0x10801);
926 WREG32(0x657C, 0x89408940);
927 WREG32(0x6578, 0x10802);
928 WREG32(0x657C, 0xBFE8BED8);
929 WREG32(0x6578, 0x20000);
930 WREG32(0x657C, 0x80008000);
931 WREG32(0x6578, 0x20001);
932 WREG32(0x657C, 0x90008000);
933 WREG32(0x6578, 0x20002);
934 WREG32(0x657C, 0x80008000);
935 WREG32(0x6578, 0x20003);
936 WREG32(0x657C, 0x80008000);
937 WREG32(0x6578, 0x20100);
938 WREG32(0x657C, 0x80108000);
939 WREG32(0x6578, 0x20101);
940 WREG32(0x657C, 0x8FE0BF70);
941 WREG32(0x6578, 0x20102);
942 WREG32(0x657C, 0xBFE880C0);
943 WREG32(0x6578, 0x20103);
944 WREG32(0x657C, 0x80008000);
945 WREG32(0x6578, 0x20200);
946 WREG32(0x657C, 0x8018BFF8);
947 WREG32(0x6578, 0x20201);
948 WREG32(0x657C, 0x8F80BF08);
949 WREG32(0x6578, 0x20202);
950 WREG32(0x657C, 0xBFD081A0);
951 WREG32(0x6578, 0x20203);
952 WREG32(0x657C, 0xBFF88000);
953 WREG32(0x6578, 0x20300);
954 WREG32(0x657C, 0x80188000);
955 WREG32(0x6578, 0x20301);
956 WREG32(0x657C, 0x8EE0BEC0);
957 WREG32(0x6578, 0x20302);
958 WREG32(0x657C, 0xBFB082A0);
959 WREG32(0x6578, 0x20303);
960 WREG32(0x657C, 0x80008000);
961 WREG32(0x6578, 0x20400);
962 WREG32(0x657C, 0x80188000);
963 WREG32(0x6578, 0x20401);
964 WREG32(0x657C, 0x8E00BEA0);
965 WREG32(0x6578, 0x20402);
966 WREG32(0x657C, 0xBF8883C0);
967 WREG32(0x6578, 0x20403);
968 WREG32(0x657C, 0x80008000);
969 WREG32(0x6578, 0x20500);
970 WREG32(0x657C, 0x80188000);
971 WREG32(0x6578, 0x20501);
972 WREG32(0x657C, 0x8D00BE90);
973 WREG32(0x6578, 0x20502);
974 WREG32(0x657C, 0xBF588500);
975 WREG32(0x6578, 0x20503);
976 WREG32(0x657C, 0x80008008);
977 WREG32(0x6578, 0x20600);
978 WREG32(0x657C, 0x80188000);
979 WREG32(0x6578, 0x20601);
980 WREG32(0x657C, 0x8BC0BE98);
981 WREG32(0x6578, 0x20602);
982 WREG32(0x657C, 0xBF308660);
983 WREG32(0x6578, 0x20603);
984 WREG32(0x657C, 0x80008008);
985 WREG32(0x6578, 0x20700);
986 WREG32(0x657C, 0x80108000);
987 WREG32(0x6578, 0x20701);
988 WREG32(0x657C, 0x8A80BEB0);
989 WREG32(0x6578, 0x20702);
990 WREG32(0x657C, 0xBF0087C0);
991 WREG32(0x6578, 0x20703);
992 WREG32(0x657C, 0x80008008);
993 WREG32(0x6578, 0x20800);
994 WREG32(0x657C, 0x80108000);
995 WREG32(0x6578, 0x20801);
996 WREG32(0x657C, 0x8920BED0);
997 WREG32(0x6578, 0x20802);
998 WREG32(0x657C, 0xBED08920);
999 WREG32(0x6578, 0x20803);
1000 WREG32(0x657C, 0x80008010);
1001 WREG32(0x6578, 0x30000);
1002 WREG32(0x657C, 0x90008000);
1003 WREG32(0x6578, 0x30001);
1004 WREG32(0x657C, 0x80008000);
1005 WREG32(0x6578, 0x30100);
1006 WREG32(0x657C, 0x8FE0BF90);
1007 WREG32(0x6578, 0x30101);
1008 WREG32(0x657C, 0xBFF880A0);
1009 WREG32(0x6578, 0x30200);
1010 WREG32(0x657C, 0x8F60BF40);
1011 WREG32(0x6578, 0x30201);
1012 WREG32(0x657C, 0xBFE88180);
1013 WREG32(0x6578, 0x30300);
1014 WREG32(0x657C, 0x8EC0BF00);
1015 WREG32(0x6578, 0x30301);
1016 WREG32(0x657C, 0xBFC88280);
1017 WREG32(0x6578, 0x30400);
1018 WREG32(0x657C, 0x8DE0BEE0);
1019 WREG32(0x6578, 0x30401);
1020 WREG32(0x657C, 0xBFA083A0);
1021 WREG32(0x6578, 0x30500);
1022 WREG32(0x657C, 0x8CE0BED0);
1023 WREG32(0x6578, 0x30501);
1024 WREG32(0x657C, 0xBF7884E0);
1025 WREG32(0x6578, 0x30600);
1026 WREG32(0x657C, 0x8BA0BED8);
1027 WREG32(0x6578, 0x30601);
1028 WREG32(0x657C, 0xBF508640);
1029 WREG32(0x6578, 0x30700);
1030 WREG32(0x657C, 0x8A60BEE8);
1031 WREG32(0x6578, 0x30701);
1032 WREG32(0x657C, 0xBF2087A0);
1033 WREG32(0x6578, 0x30800);
1034 WREG32(0x657C, 0x8900BF00);
1035 WREG32(0x6578, 0x30801);
1036 WREG32(0x657C, 0xBF008900);
1037}
-
 static void
 atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
 {
@@ -1074,129 +843,6 @@ atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
 }
 
 static void
1077atombios_overscan_setup(struct drm_encoder *encoder,
1078 struct drm_display_mode *mode,
1079 struct drm_display_mode *adjusted_mode)
1080{
1081 struct drm_device *dev = encoder->dev;
1082 struct radeon_device *rdev = dev->dev_private;
1083 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1084 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1085 SET_CRTC_OVERSCAN_PS_ALLOCATION args;
1086 int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
1087
1088 memset(&args, 0, sizeof(args));
1089
1090 args.usOverscanRight = 0;
1091 args.usOverscanLeft = 0;
1092 args.usOverscanBottom = 0;
1093 args.usOverscanTop = 0;
1094 args.ucCRTC = radeon_crtc->crtc_id;
1095
1096 if (radeon_encoder->flags & RADEON_USE_RMX) {
1097 if (radeon_encoder->rmx_type == RMX_FULL) {
1098 args.usOverscanRight = 0;
1099 args.usOverscanLeft = 0;
1100 args.usOverscanBottom = 0;
1101 args.usOverscanTop = 0;
1102 } else if (radeon_encoder->rmx_type == RMX_CENTER) {
1103 args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
1104 args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
1105 args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
1106 args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
1107 } else if (radeon_encoder->rmx_type == RMX_ASPECT) {
1108 int a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
1109 int a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
1110
1111 if (a1 > a2) {
1112 args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
1113 args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
1114 } else if (a2 > a1) {
1115 args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
1116 args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
1117 }
1118 }
1119 }
1120
1121 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1122
1123}
1124
1125static void
1126atombios_scaler_setup(struct drm_encoder *encoder)
1127{
1128 struct drm_device *dev = encoder->dev;
1129 struct radeon_device *rdev = dev->dev_private;
1130 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1131 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1132 ENABLE_SCALER_PS_ALLOCATION args;
1133 int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
1134 /* fixme - fill in enc_priv for atom dac */
1135 enum radeon_tv_std tv_std = TV_STD_NTSC;
1136
1137 if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
1138 return;
1139
1140 memset(&args, 0, sizeof(args));
1141
1142 args.ucScaler = radeon_crtc->crtc_id;
1143
1144 if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
1145 switch (tv_std) {
1146 case TV_STD_NTSC:
1147 default:
1148 args.ucTVStandard = ATOM_TV_NTSC;
1149 break;
1150 case TV_STD_PAL:
1151 args.ucTVStandard = ATOM_TV_PAL;
1152 break;
1153 case TV_STD_PAL_M:
1154 args.ucTVStandard = ATOM_TV_PALM;
1155 break;
1156 case TV_STD_PAL_60:
1157 args.ucTVStandard = ATOM_TV_PAL60;
1158 break;
1159 case TV_STD_NTSC_J:
1160 args.ucTVStandard = ATOM_TV_NTSCJ;
1161 break;
1162 case TV_STD_SCART_PAL:
1163 args.ucTVStandard = ATOM_TV_PAL; /* ??? */
1164 break;
1165 case TV_STD_SECAM:
1166 args.ucTVStandard = ATOM_TV_SECAM;
1167 break;
1168 case TV_STD_PAL_CN:
1169 args.ucTVStandard = ATOM_TV_PALCN;
1170 break;
1171 }
1172 args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
1173 } else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) {
1174 args.ucTVStandard = ATOM_TV_CV;
1175 args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
1176 } else if (radeon_encoder->flags & RADEON_USE_RMX) {
1177 if (radeon_encoder->rmx_type == RMX_FULL)
1178 args.ucEnable = ATOM_SCALER_EXPANSION;
1179 else if (radeon_encoder->rmx_type == RMX_CENTER)
1180 args.ucEnable = ATOM_SCALER_CENTER;
1181 else if (radeon_encoder->rmx_type == RMX_ASPECT)
1182 args.ucEnable = ATOM_SCALER_EXPANSION;
1183 } else {
1184 if (ASIC_IS_AVIVO(rdev))
1185 args.ucEnable = ATOM_SCALER_DISABLE;
1186 else
1187 args.ucEnable = ATOM_SCALER_CENTER;
1188 }
1189
1190 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1191
1192 if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)
1193 && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) {
1194 atom_rv515_force_tv_scaler(rdev);
1195 }
1196
1197}
1198
-static void
 radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
@@ -1448,8 +1094,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
 	radeon_encoder->pixel_clock = adjusted_mode->clock;
 
 	radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
-	atombios_overscan_setup(encoder, mode, adjusted_mode);
-	atombios_scaler_setup(encoder);
 	atombios_set_encoder_crtc_source(encoder);
 
 	if (ASIC_IS_AVIVO(rdev)) {
@@ -1667,6 +1311,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
 
 	radeon_encoder->encoder_id = encoder_id;
 	radeon_encoder->devices = supported_device;
+	radeon_encoder->rmx_type = RMX_OFF;
 
 	switch (radeon_encoder->encoder_id) {
 	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 9e8f191eb64a..3206c0ad7b6c 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -101,9 +101,10 @@ static int radeonfb_setcolreg(unsigned regno,
 		break;
 	case 24:
 	case 32:
-		fb->pseudo_palette[regno] = ((red & 0xff00) << 8) |
-			(green & 0xff00) |
-			((blue & 0xff00) >> 8);
+		fb->pseudo_palette[regno] =
+			(((red >> 8) & 0xff) << info->var.red.offset) |
+			(((green >> 8) & 0xff) << info->var.green.offset) |
+			(((blue >> 8) & 0xff) << info->var.blue.offset);
 		break;
 	}
 }
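
Editorial note: instead of hard-coded shifts, the new code places each 8-bit channel at whatever offset the framebuffer visual declares, which is what makes the big-endian layouts added below work. A standalone sketch of that packing (offsets here are example values):

    /* Generalized truecolor pixel packing: reduce 16-bit fbdev channel
     * values to 8 bits and shift them to per-visual offsets. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pack_pixel(uint16_t r, uint16_t g, uint16_t b,
                               int roff, int goff, int boff)
    {
        return (uint32_t)((r >> 8) & 0xff) << roff |
               (uint32_t)((g >> 8) & 0xff) << goff |
               (uint32_t)((b >> 8) & 0xff) << boff;
    }

    int main(void)
    {
        /* little-endian XRGB8888: red at 16, green at 8, blue at 0 */
        printf("0x%08X\n", pack_pixel(0xffff, 0x8080, 0x0000, 16, 8, 0));
        return 0;
    }
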
@@ -154,6 +155,7 @@ static int radeonfb_check_var(struct fb_var_screeninfo *var,
 		var->transp.length = 0;
 		var->transp.offset = 0;
 		break;
+#ifdef __LITTLE_ENDIAN
 	case 15:
 		var->red.offset = 10;
 		var->green.offset = 5;
@@ -194,6 +196,28 @@ static int radeonfb_check_var(struct fb_var_screeninfo *var,
 		var->transp.length = 8;
 		var->transp.offset = 24;
 		break;
+#else
+	case 24:
+		var->red.offset = 8;
+		var->green.offset = 16;
+		var->blue.offset = 24;
+		var->red.length = 8;
+		var->green.length = 8;
+		var->blue.length = 8;
+		var->transp.length = 0;
+		var->transp.offset = 0;
+		break;
+	case 32:
+		var->red.offset = 8;
+		var->green.offset = 16;
+		var->blue.offset = 24;
+		var->red.length = 8;
+		var->green.length = 8;
+		var->blue.length = 8;
+		var->transp.length = 8;
+		var->transp.offset = 0;
+		break;
+#endif
 	default:
 		return -EINVAL;
 	}
@@ -447,10 +471,10 @@ static struct notifier_block paniced = {
 	.notifier_call = radeonfb_panic,
 };
 
-static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp)
+static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
 {
 	int aligned = width;
-	int align_large = (ASIC_IS_AVIVO(rdev));
+	int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
 	int pitch_mask = 0;
 
 	switch (bpp / 8) {
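
Editorial note: the function rounds the width up with a per-bpp pitch mask; tiled surfaces now take the larger (AVIVO-style) alignment. A sketch of the rounding itself, with a made-up mask value standing in for the ones chosen in the switch that follows in the driver:

    /* Pitch rounding: add the mask, then clear it, yielding the next
     * multiple of (mask + 1). */
    #include <stdio.h>

    static int align_pitch(int width, int pitch_mask)
    {
        int aligned = width;
        aligned += pitch_mask;  /* round up ... */
        aligned &= ~pitch_mask; /* ... to the next multiple */
        return aligned;
    }

    int main(void)
    {
        /* e.g. 1366 pixels with a hypothetical 63-pixel tiled mask */
        printf("%d -> %d\n", 1366, align_pitch(1366, 63));
        return 0;
    }
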
@@ -488,12 +512,13 @@ int radeonfb_create(struct radeon_device *rdev,
 	u64 fb_gpuaddr;
 	void *fbptr = NULL;
 	unsigned long tmp;
+	bool fb_tiled = false; /* useful for testing */
 
 	mode_cmd.width = surface_width;
 	mode_cmd.height = surface_height;
 	mode_cmd.bpp = 32;
 	/* need to align pitch with crtc limits */
-	mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp) * ((mode_cmd.bpp + 1) / 8);
+	mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8);
 	mode_cmd.depth = 24;
 
 	size = mode_cmd.pitch * mode_cmd.height;
@@ -511,6 +536,8 @@ int radeonfb_create(struct radeon_device *rdev,
 	}
 	robj = gobj->driver_private;
 
+	if (fb_tiled)
+		radeon_object_set_tiling_flags(robj, RADEON_TILING_MACRO|RADEON_TILING_SURFACE, mode_cmd.pitch);
 	mutex_lock(&rdev->ddev->struct_mutex);
 	fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
 	if (fb == NULL) {
@@ -539,6 +566,9 @@ int radeonfb_create(struct radeon_device *rdev,
 	}
 	rfbdev = info->par;
 
+	if (fb_tiled)
+		radeon_object_check_tiling(robj, 0, 0);
+
 	ret = radeon_object_kmap(robj, &fbptr);
 	if (ret) {
 		goto out_unref;
@@ -572,6 +602,11 @@ int radeonfb_create(struct radeon_device *rdev,
 	info->var.width = -1;
 	info->var.xres = fb_width;
 	info->var.yres = fb_height;
+
+	/* setup aperture base/size for vesafb takeover */
+	info->aperture_base = rdev->ddev->mode_config.fb_base;
+	info->aperture_size = rdev->mc.real_vram_size;
+
 	info->fix.mmio_start = 0;
 	info->fix.mmio_len = 0;
 	info->pixmap.size = 64*1024;
@@ -600,6 +635,7 @@ int radeonfb_create(struct radeon_device *rdev,
 		info->var.transp.offset = 0;
 		info->var.transp.length = 0;
 		break;
+#ifdef __LITTLE_ENDIAN
 	case 15:
 		info->var.red.offset = 10;
 		info->var.green.offset = 5;
@@ -639,7 +675,29 @@ int radeonfb_create(struct radeon_device *rdev,
 		info->var.transp.offset = 24;
 		info->var.transp.length = 8;
 		break;
+#else
+	case 24:
+		info->var.red.offset = 8;
+		info->var.green.offset = 16;
+		info->var.blue.offset = 24;
+		info->var.red.length = 8;
+		info->var.green.length = 8;
+		info->var.blue.length = 8;
+		info->var.transp.offset = 0;
+		info->var.transp.length = 0;
+		break;
+	case 32:
+		info->var.red.offset = 8;
+		info->var.green.offset = 16;
+		info->var.blue.offset = 24;
+		info->var.red.length = 8;
+		info->var.green.length = 8;
+		info->var.blue.length = 8;
+		info->var.transp.offset = 0;
+		info->var.transp.length = 8;
+		break;
 	default:
+#endif
 		break;
 	}
645 703
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 96afbf5ae2ad..b4e48dd2e859 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -195,7 +195,7 @@ retry:
 		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
 				radeon_fence_signaled(fence), timeout);
 		if (unlikely(r == -ERESTARTSYS)) {
-			return -ERESTART;
+			return -EBUSY;
 		}
 	} else {
 		r = wait_event_timeout(rdev->fence_drv.queue,
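
Editorial note: -ERESTART is a kernel-internal restart code with no meaning as a userspace errno, so the hunk surfaces -EBUSY instead when the wait is interrupted by a signal. A toy illustration of the caller-visible difference (a stub, not the driver path):

    /* Interrupted fence wait: return an errno the caller understands. */
    #include <errno.h>
    #include <stdio.h>

    static int fence_wait_stub(int interrupted)
    {
        if (interrupted)
            return -EBUSY; /* was -ERESTART, which leaks kernel internals */
        return 0;
    }

    int main(void)
    {
        if (fence_wait_stub(1) == -EBUSY)
            printf("caller can retry or report busy\n");
        return 0;
    }
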
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index d343a15316ec..2977539880fb 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -177,7 +177,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 			return -ENOMEM;
 		}
 		rdev->gart.pages[p] = pagelist[i];
-		page_base = (uint32_t)rdev->gart.pages_addr[p];
+		page_base = rdev->gart.pages_addr[p];
 		for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
 			radeon_gart_set_page(rdev, t, page_base);
 			page_base += 4096;
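
Editorial note: each CPU page is split into 4KB GPU pages, one GART entry per chunk; dropping the uint32_t cast matters once DMA addresses can exceed 4GB (see the 40-bit DMA mask change earlier in this commit). A standalone sketch with fake addresses and an oversized page to show the split:

    /* GART fill loop: one table entry per 4KB chunk of a CPU page. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 16384 /* pretend 16KB CPU pages to show the split */

    static void gart_set_page(unsigned idx, uint64_t addr)
    {
        printf("entry %u -> 0x%llx\n", idx, (unsigned long long)addr);
    }

    int main(void)
    {
        uint64_t page_base = 0x100000000ull; /* >4GB: a uint32_t cast
                                                would truncate this */
        unsigned t = 0;
        for (unsigned j = 0; j < PAGE_SIZE / 4096; j++, t++) {
            gart_set_page(t, page_base);
            page_base += 4096;
        }
        return 0;
    }
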
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index eb516034235d..cded5180c752 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -157,9 +157,9 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_info *args = data;
 
-	args->vram_size = rdev->mc.vram_size;
+	args->vram_size = rdev->mc.real_vram_size;
 	/* FIXME: report somethings that makes sense */
-	args->vram_visible = rdev->mc.vram_size - (4 * 1024 * 1024);
+	args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024);
 	args->gart_size = rdev->mc.gtt_size;
 	return 0;
 }
@@ -285,3 +285,44 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	mutex_unlock(&dev->struct_mutex);
 	return r;
 }
+
+int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp)
+{
+	struct drm_radeon_gem_set_tiling *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_object *robj;
+	int r = 0;
+
+	DRM_DEBUG("%d \n", args->handle);
+	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (gobj == NULL)
+		return -EINVAL;
+	robj = gobj->driver_private;
+	radeon_object_set_tiling_flags(robj, args->tiling_flags, args->pitch);
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(gobj);
+	mutex_unlock(&dev->struct_mutex);
+	return r;
+}
+
+int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp)
+{
+	struct drm_radeon_gem_get_tiling *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_object *robj;
+	int r = 0;
+
+	DRM_DEBUG("\n");
+	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (gobj == NULL)
+		return -EINVAL;
+	robj = gobj->driver_private;
+	radeon_object_get_tiling_flags(robj, &args->tiling_flags,
+				       &args->pitch);
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(gobj);
+	mutex_unlock(&dev->struct_mutex);
+	return r;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 4612a7c146d1..937a2f1cdb46 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -291,5 +291,7 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = {
 	DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH),
 };
 int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 8086ecf7f03d..7d06dc98a42a 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -29,6 +29,171 @@
29#include "radeon_fixed.h" 29#include "radeon_fixed.h"
30#include "radeon.h" 30#include "radeon.h"
31 31
32static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
33 struct drm_display_mode *mode,
34 struct drm_display_mode *adjusted_mode)
35{
36 struct drm_device *dev = crtc->dev;
37 struct radeon_device *rdev = dev->dev_private;
38 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
39 int xres = mode->hdisplay;
40 int yres = mode->vdisplay;
41 bool hscale = true, vscale = true;
42 int hsync_wid;
43 int vsync_wid;
44 int hsync_start;
45 int blank_width;
46 u32 scale, inc, crtc_more_cntl;
47 u32 fp_horz_stretch, fp_vert_stretch, fp_horz_vert_active;
48 u32 fp_h_sync_strt_wid, fp_crtc_h_total_disp;
49 u32 fp_v_sync_strt_wid, fp_crtc_v_total_disp;
50 struct radeon_native_mode *native_mode = &radeon_crtc->native_mode;
51
52 fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) &
53 (RADEON_VERT_STRETCH_RESERVED |
54 RADEON_VERT_AUTO_RATIO_INC);
55 fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) &
56 (RADEON_HORZ_FP_LOOP_STRETCH |
57 RADEON_HORZ_AUTO_RATIO_INC);
58
59 crtc_more_cntl = 0;
60 if ((rdev->family == CHIP_RS100) ||
61 (rdev->family == CHIP_RS200)) {
62 /* This is to workaround the asic bug for RMX, some versions
63 of BIOS dosen't have this register initialized correctly. */
64 crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN;
65 }
66
67
68 fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
69 | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
70
71 hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
72 if (!hsync_wid)
73 hsync_wid = 1;
74 hsync_start = mode->crtc_hsync_start - 8;
75
76 fp_h_sync_strt_wid = ((hsync_start & 0x1fff)
77 | ((hsync_wid & 0x3f) << 16)
78 | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
79 ? RADEON_CRTC_H_SYNC_POL
80 : 0));
81
82 fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
83 | ((mode->crtc_vdisplay - 1) << 16));
84
85 vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
86 if (!vsync_wid)
87 vsync_wid = 1;
88
89 fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
90 | ((vsync_wid & 0x1f) << 16)
91 | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
92 ? RADEON_CRTC_V_SYNC_POL
93 : 0));
94
95 fp_horz_vert_active = 0;
96
97 if (native_mode->panel_xres == 0 ||
98 native_mode->panel_yres == 0) {
99 hscale = false;
100 vscale = false;
101 } else {
102 if (xres > native_mode->panel_xres)
103 xres = native_mode->panel_xres;
104 if (yres > native_mode->panel_yres)
105 yres = native_mode->panel_yres;
106
107 if (xres == native_mode->panel_xres)
108 hscale = false;
109 if (yres == native_mode->panel_yres)
110 vscale = false;
111 }
112
113 switch (radeon_crtc->rmx_type) {
114 case RMX_FULL:
115 case RMX_ASPECT:
116 if (!hscale)
117 fp_horz_stretch |= ((xres/8-1) << 16);
118 else {
119 inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0;
120 scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX)
121 / native_mode->panel_xres + 1;
122 fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) |
123 RADEON_HORZ_STRETCH_BLEND |
124 RADEON_HORZ_STRETCH_ENABLE |
125 ((native_mode->panel_xres/8-1) << 16));
126 }
127
128 if (!vscale)
129 fp_vert_stretch |= ((yres-1) << 12);
130 else {
131 inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0;
132 scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX)
133 / native_mode->panel_yres + 1;
134 fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) |
135 RADEON_VERT_STRETCH_ENABLE |
136 RADEON_VERT_STRETCH_BLEND |
137 ((native_mode->panel_yres-1) << 12));
138 }
139 break;
140 case RMX_CENTER:
141 fp_horz_stretch |= ((xres/8-1) << 16);
142 fp_vert_stretch |= ((yres-1) << 12);
143
144 crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN |
145 RADEON_CRTC_AUTO_VERT_CENTER_EN);
146
147 blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8;
148 if (blank_width > 110)
149 blank_width = 110;
150
151 fp_crtc_h_total_disp = (((blank_width) & 0x3ff)
152 | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
153
154 hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
155 if (!hsync_wid)
156 hsync_wid = 1;
157
158 fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff)
159 | ((hsync_wid & 0x3f) << 16)
160 | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
161 ? RADEON_CRTC_H_SYNC_POL
162 : 0));
163
164 fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff)
165 | ((mode->crtc_vdisplay - 1) << 16));
166
167 vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
168 if (!vsync_wid)
169 vsync_wid = 1;
170
171 fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff)
172 | ((vsync_wid & 0x1f) << 16)
173 | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
174 ? RADEON_CRTC_V_SYNC_POL
175 : 0)));
176
177 fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) |
178 (((native_mode->panel_xres / 8) & 0x1ff) << 16));
179 break;
180 case RMX_OFF:
181 default:
182 fp_horz_stretch |= ((xres/8-1) << 16);
183 fp_vert_stretch |= ((yres-1) << 12);
184 break;
185 }
186
187 WREG32(RADEON_FP_HORZ_STRETCH, fp_horz_stretch);
188 WREG32(RADEON_FP_VERT_STRETCH, fp_vert_stretch);
189 WREG32(RADEON_CRTC_MORE_CNTL, crtc_more_cntl);
190 WREG32(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active);
191 WREG32(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid);
192 WREG32(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid);
193 WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp);
194 WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp);
195}
196
32void radeon_restore_common_regs(struct drm_device *dev) 197void radeon_restore_common_regs(struct drm_device *dev)
33{ 198{
34 /* don't need this yet */ 199 /* don't need this yet */
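
Editorial note: the stretch ratio in the RMX_FULL/RMX_ASPECT cases above is an integer approximation of xres/panel_xres against the hardware's fixed-point full-scale constant. A sketch of just that computation, with a made-up RATIO_MAX standing in for RADEON_HORZ_STRETCH_RATIO_MAX:

    /* Horizontal panel-stretch ratio, as computed in the function above. */
    #include <stdio.h>

    #define RATIO_MAX 4096 /* hypothetical full-scale constant */

    static unsigned stretch_ratio(unsigned xres, unsigned panel_xres, unsigned inc)
    {
        return ((xres + inc) * RATIO_MAX) / panel_xres + 1;
    }

    int main(void)
    {
        /* scaling an 800-wide mode onto a 1024-wide panel */
        printf("ratio = %u\n", stretch_ratio(800, 1024, 0));
        return 0;
    }
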
@@ -235,6 +400,7 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 	uint64_t base;
 	uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
 	uint32_t crtc_pitch, pitch_pixels;
+	uint32_t tiling_flags;
 
 	DRM_DEBUG("\n");
 
@@ -244,7 +410,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 	if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) {
 		return -EINVAL;
 	}
-	crtc_offset = (u32)base;
+	/* if scanout was in GTT this really wouldn't work */
+	/* crtc offset is from display base addr not FB location */
+	radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location;
+
+	base -= radeon_crtc->legacy_display_base_addr;
+
 	crtc_offset_cntl = 0;
 
 	pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
@@ -253,8 +424,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 		       (crtc->fb->bits_per_pixel * 8));
 	crtc_pitch |= crtc_pitch << 16;
 
-	/* TODO tiling */
-	if (0) {
+	radeon_object_get_tiling_flags(obj->driver_private,
+				       &tiling_flags, NULL);
+	if (tiling_flags & RADEON_TILING_MICRO)
+		DRM_ERROR("trying to scanout microtiled buffer\n");
+
+	if (tiling_flags & RADEON_TILING_MACRO) {
 		if (ASIC_IS_R300(rdev))
 			crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN |
 					     R300_CRTC_MICRO_TILE_BUFFER_DIS |
@@ -270,15 +445,13 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 		crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN;
 	}
 
-
-	/* TODO more tiling */
-	if (0) {
+	if (tiling_flags & RADEON_TILING_MACRO) {
 		if (ASIC_IS_R300(rdev)) {
 			crtc_tile_x0_y0 = x | (y << 16);
 			base &= ~0x7ff;
 		} else {
 			int byteshift = crtc->fb->bits_per_pixel >> 4;
-			int tile_addr = (((y >> 3) * crtc->fb->width + x) >> (8 - byteshift)) << 11;
+			int tile_addr = (((y >> 3) * pitch_pixels + x) >> (8 - byteshift)) << 11;
 			base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8);
 			crtc_offset_cntl |= (y % 16);
 		}
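
Editorial note: the hunk also fixes the pre-R300 macrotile offset math to walk rows by the framebuffer pitch in pixels rather than its visible width. A standalone sketch of that address calculation (values are illustrative only):

    /* Pre-R300 macrotile scanout offset, as in the corrected code above. */
    #include <stdio.h>

    static unsigned tile_offset(unsigned x, unsigned y,
                                unsigned pitch_pixels, unsigned bpp)
    {
        unsigned byteshift = bpp >> 4; /* 16bpp -> 1, 32bpp -> 2 */
        unsigned tile_addr =
            (((y >> 3) * pitch_pixels + x) >> (8 - byteshift)) << 11;
        return tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8);
    }

    int main(void)
    {
        printf("offset = 0x%x\n", tile_offset(100, 50, 1280, 32));
        return 0;
    }
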
@@ -303,11 +476,9 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 
 	base &= ~7;
 
-	/* update sarea TODO */
-
 	crtc_offset = (u32)base;
 
-	WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, rdev->mc.vram_location);
+	WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, radeon_crtc->legacy_display_base_addr);
 
 	if (ASIC_IS_R300(rdev)) {
 		if (radeon_crtc->crtc_id)
@@ -751,6 +922,8 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
 				   struct drm_display_mode *mode,
 				   struct drm_display_mode *adjusted_mode)
 {
+	if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+		return false;
 	return true;
 }
 
756 929
@@ -759,16 +932,25 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
 			       struct drm_display_mode *adjusted_mode,
 			       int x, int y, struct drm_framebuffer *old_fb)
 {
-
-	DRM_DEBUG("\n");
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
 
 	/* TODO TV */
-
 	radeon_crtc_set_base(crtc, x, y, old_fb);
 	radeon_set_crtc_timing(crtc, adjusted_mode);
 	radeon_set_pll(crtc, adjusted_mode);
-	radeon_init_disp_bandwidth(crtc->dev);
-
+	radeon_bandwidth_update(rdev);
+	if (radeon_crtc->crtc_id == 0) {
+		radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode);
+	} else {
+		if (radeon_crtc->rmx_type != RMX_OFF) {
+			/* FIXME: only the first crtc has rmx; what should
+			 * we do?
+			 */
+			DRM_ERROR("Mode needs scaling but only the first crtc can do that.\n");
+		}
+	}
 	return 0;
 }
 
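
Editorial note: a sketch of the resulting legacy mode-set call order, with stub functions standing in for the driver entry points (this is a reading aid, not driver code):

    /* Order of operations after this hunk: base, timing, PLL, then the
     * per-ASIC bandwidth hook; RMX is programmed only on CRTC 0. */
    #include <stdbool.h>
    #include <stdio.h>

    static void set_base(void)         { puts("scanout base"); }
    static void set_timing(void)       { puts("crtc timing"); }
    static void set_pll(void)          { puts("pixel clock"); }
    static void bandwidth_update(void) { puts("asic bandwidth hook"); }
    static void rmx_mode_set(void)     { puts("panel scaler (crtc 0 only)"); }

    static int crtc_mode_set(int crtc_id, bool needs_rmx)
    {
        set_base();
        set_timing();
        set_pll();
        bandwidth_update();
        if (crtc_id == 0)
            rmx_mode_set();
        else if (needs_rmx)
            fprintf(stderr, "mode needs scaling; only crtc 0 can\n");
        return 0;
    }

    int main(void) { return crtc_mode_set(0, false); }
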
@@ -799,478 +981,3 @@ void radeon_legacy_init_crtc(struct drm_device *dev,
 	radeon_crtc->crtc_offset = RADEON_CRTC2_H_TOTAL_DISP - RADEON_CRTC_H_TOTAL_DISP;
 	drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs);
 }
802
803void radeon_init_disp_bw_legacy(struct drm_device *dev,
804 struct drm_display_mode *mode1,
805 uint32_t pixel_bytes1,
806 struct drm_display_mode *mode2,
807 uint32_t pixel_bytes2)
808{
809 struct radeon_device *rdev = dev->dev_private;
810 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
811 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
812 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
813 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
814 fixed20_12 memtcas_ff[8] = {
815 fixed_init(1),
816 fixed_init(2),
817 fixed_init(3),
818 fixed_init(0),
819 fixed_init_half(1),
820 fixed_init_half(2),
821 fixed_init(0),
822 };
823 fixed20_12 memtcas_rs480_ff[8] = {
824 fixed_init(0),
825 fixed_init(1),
826 fixed_init(2),
827 fixed_init(3),
828 fixed_init(0),
829 fixed_init_half(1),
830 fixed_init_half(2),
831 fixed_init_half(3),
832 };
833 fixed20_12 memtcas2_ff[8] = {
834 fixed_init(0),
835 fixed_init(1),
836 fixed_init(2),
837 fixed_init(3),
838 fixed_init(4),
839 fixed_init(5),
840 fixed_init(6),
841 fixed_init(7),
842 };
843 fixed20_12 memtrbs[8] = {
844 fixed_init(1),
845 fixed_init_half(1),
846 fixed_init(2),
847 fixed_init_half(2),
848 fixed_init(3),
849 fixed_init_half(3),
850 fixed_init(4),
851 fixed_init_half(4)
852 };
853 fixed20_12 memtrbs_r4xx[8] = {
854 fixed_init(4),
855 fixed_init(5),
856 fixed_init(6),
857 fixed_init(7),
858 fixed_init(8),
859 fixed_init(9),
860 fixed_init(10),
861 fixed_init(11)
862 };
863 fixed20_12 min_mem_eff;
864 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
865 fixed20_12 cur_latency_mclk, cur_latency_sclk;
866 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
867 disp_drain_rate2, read_return_rate;
868 fixed20_12 time_disp1_drop_priority;
869 int c;
870 int cur_size = 16; /* in octawords */
871 int critical_point = 0, critical_point2;
872/* uint32_t read_return_rate, time_disp1_drop_priority; */
873 int stop_req, max_stop_req;
874
875 min_mem_eff.full = rfixed_const_8(0);
876 /* get modes */
877 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
878 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
879 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
880 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
881 /* check crtc enables */
882 if (mode2)
883 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
884 if (mode1)
885 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
886 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
887 }
888
889 /*
890 * determine is there is enough bw for current mode
891 */
892 mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
893 temp_ff.full = rfixed_const(100);
894 mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
895 sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
896 sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
897
898 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
899 temp_ff.full = rfixed_const(temp);
900 mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
901
902 pix_clk.full = 0;
903 pix_clk2.full = 0;
904 peak_disp_bw.full = 0;
905 if (mode1) {
906 temp_ff.full = rfixed_const(1000);
907 pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
908 pix_clk.full = rfixed_div(pix_clk, temp_ff);
909 temp_ff.full = rfixed_const(pixel_bytes1);
910 peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
911 }
912 if (mode2) {
913 temp_ff.full = rfixed_const(1000);
914 pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
915 pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
916 temp_ff.full = rfixed_const(pixel_bytes2);
917 peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
918 }
919
920 mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
921 if (peak_disp_bw.full >= mem_bw.full) {
922 DRM_ERROR("You may not have enough display bandwidth for current mode\n"
923 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
924 }
925
926 /* Get values from the EXT_MEM_CNTL register...converting its contents. */
927 temp = RREG32(RADEON_MEM_TIMING_CNTL);
928 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
929 mem_trcd = ((temp >> 2) & 0x3) + 1;
930 mem_trp = ((temp & 0x3)) + 1;
931 mem_tras = ((temp & 0x70) >> 4) + 1;
932 } else if (rdev->family == CHIP_R300 ||
933 rdev->family == CHIP_R350) { /* r300, r350 */
934 mem_trcd = (temp & 0x7) + 1;
935 mem_trp = ((temp >> 8) & 0x7) + 1;
936 mem_tras = ((temp >> 11) & 0xf) + 4;
937 } else if (rdev->family == CHIP_RV350 ||
938 rdev->family <= CHIP_RV380) {
939 /* rv3x0 */
940 mem_trcd = (temp & 0x7) + 3;
941 mem_trp = ((temp >> 8) & 0x7) + 3;
942 mem_tras = ((temp >> 11) & 0xf) + 6;
943 } else if (rdev->family == CHIP_R420 ||
944 rdev->family == CHIP_R423 ||
945 rdev->family == CHIP_RV410) {
946 /* r4xx */
947 mem_trcd = (temp & 0xf) + 3;
948 if (mem_trcd > 15)
949 mem_trcd = 15;
950 mem_trp = ((temp >> 8) & 0xf) + 3;
951 if (mem_trp > 15)
952 mem_trp = 15;
953 mem_tras = ((temp >> 12) & 0x1f) + 6;
954 if (mem_tras > 31)
955 mem_tras = 31;
956 } else { /* RV200, R200 */
957 mem_trcd = (temp & 0x7) + 1;
958 mem_trp = ((temp >> 8) & 0x7) + 1;
959 mem_tras = ((temp >> 12) & 0xf) + 4;
960 }
961 /* convert to FF */
962 trcd_ff.full = rfixed_const(mem_trcd);
963 trp_ff.full = rfixed_const(mem_trp);
964 tras_ff.full = rfixed_const(mem_tras);
965
966 /* Get values from the MEM_SDRAM_MODE_REG register...converting its */
967 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
968 data = (temp & (7 << 20)) >> 20;
969 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
970 if (rdev->family == CHIP_RS480) /* don't think rs400 */
971 tcas_ff = memtcas_rs480_ff[data];
972 else
973 tcas_ff = memtcas_ff[data];
974 } else
975 tcas_ff = memtcas2_ff[data];
976
977 if (rdev->family == CHIP_RS400 ||
978 rdev->family == CHIP_RS480) {
979 /* extra cas latency stored in bits 23-25 0-4 clocks */
980 data = (temp >> 23) & 0x7;
981 if (data < 5)
982 tcas_ff.full += rfixed_const(data);
983 }
984
985 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
986 /* on the R300, Tcas is included in Trbs.
987 */
988 temp = RREG32(RADEON_MEM_CNTL);
989 data = (R300_MEM_NUM_CHANNELS_MASK & temp);
990 if (data == 1) {
991 if (R300_MEM_USE_CD_CH_ONLY & temp) {
992 temp = RREG32(R300_MC_IND_INDEX);
993 temp &= ~R300_MC_IND_ADDR_MASK;
994 temp |= R300_MC_READ_CNTL_CD_mcind;
995 WREG32(R300_MC_IND_INDEX, temp);
996 temp = RREG32(R300_MC_IND_DATA);
997 data = (R300_MEM_RBS_POSITION_C_MASK & temp);
998 } else {
999 temp = RREG32(R300_MC_READ_CNTL_AB);
1000 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
1001 }
1002 } else {
1003 temp = RREG32(R300_MC_READ_CNTL_AB);
1004 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
1005 }
1006 if (rdev->family == CHIP_RV410 ||
1007 rdev->family == CHIP_R420 ||
1008 rdev->family == CHIP_R423)
1009 trbs_ff = memtrbs_r4xx[data];
1010 else
1011 trbs_ff = memtrbs[data];
1012 tcas_ff.full += trbs_ff.full;
1013 }
1014
1015 sclk_eff_ff.full = sclk_ff.full;
1016
1017 if (rdev->flags & RADEON_IS_AGP) {
1018 fixed20_12 agpmode_ff;
1019 agpmode_ff.full = rfixed_const(radeon_agpmode);
1020 temp_ff.full = rfixed_const_666(16);
1021 sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
1022 }
1023 /* TODO PCIE lanes may affect this - agpmode == 16?? */
1024
1025 if (ASIC_IS_R300(rdev)) {
1026 sclk_delay_ff.full = rfixed_const(250);
1027 } else {
1028 if ((rdev->family == CHIP_RV100) ||
1029 rdev->flags & RADEON_IS_IGP) {
1030 if (rdev->mc.vram_is_ddr)
1031 sclk_delay_ff.full = rfixed_const(41);
1032 else
1033 sclk_delay_ff.full = rfixed_const(33);
1034 } else {
1035 if (rdev->mc.vram_width == 128)
1036 sclk_delay_ff.full = rfixed_const(57);
1037 else
1038 sclk_delay_ff.full = rfixed_const(41);
1039 }
1040 }
1041
1042 mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
1043
1044 if (rdev->mc.vram_is_ddr) {
1045 if (rdev->mc.vram_width == 32) {
1046 k1.full = rfixed_const(40);
1047 c = 3;
1048 } else {
1049 k1.full = rfixed_const(20);
1050 c = 1;
1051 }
1052 } else {
1053 k1.full = rfixed_const(40);
1054 c = 3;
1055 }
1056
1057 temp_ff.full = rfixed_const(2);
1058 mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
1059 temp_ff.full = rfixed_const(c);
1060 mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
1061 temp_ff.full = rfixed_const(4);
1062 mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
1063 mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
1064 mc_latency_mclk.full += k1.full;
1065
1066 mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
1067 mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
1068
1069 /*
1070 HW cursor time assuming worst case of full size colour cursor.
1071 */
1072 temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
1073 temp_ff.full += trcd_ff.full;
1074 if (temp_ff.full < tras_ff.full)
1075 temp_ff.full = tras_ff.full;
1076 cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
1077
1078 temp_ff.full = rfixed_const(cur_size);
1079 cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
1080 /*
1081 Find the total latency for the display data.
1082 */
1083 disp_latency_overhead.full = rfixed_const(80);
1084 disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
1085 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
1086 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
1087
1088 if (mc_latency_mclk.full > mc_latency_sclk.full)
1089 disp_latency.full = mc_latency_mclk.full;
1090 else
1091 disp_latency.full = mc_latency_sclk.full;
1092
1093 /* setup Max GRPH_STOP_REQ default value */
1094 if (ASIC_IS_RV100(rdev))
1095 max_stop_req = 0x5c;
1096 else
1097 max_stop_req = 0x7c;
1098
1099 if (mode1) {
1100 /* CRTC1
1101 Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
1102 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
1103 */
1104 stop_req = mode1->hdisplay * pixel_bytes1 / 16;
1105
1106 if (stop_req > max_stop_req)
1107 stop_req = max_stop_req;
1108
1109 /*
1110 Find the drain rate of the display buffer.
1111 */
1112 temp_ff.full = rfixed_const((16/pixel_bytes1));
1113 disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
1114
1115 /*
1116 Find the critical point of the display buffer.
1117 */
1118 crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
1119 crit_point_ff.full += rfixed_const_half(0);
1120
1121 critical_point = rfixed_trunc(crit_point_ff);
1122
1123 if (rdev->disp_priority == 2) {
1124 critical_point = 0;
1125 }
1126
1127 /*
1128 The critical point should never be above max_stop_req-4. Setting
1129 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
1130 */
1131 if (max_stop_req - critical_point < 4)
1132 critical_point = 0;
1133
1134 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
1135 /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/
1136 critical_point = 0x10;
1137 }
1138
1139 temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
1140 temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
1141 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
1142 temp &= ~(RADEON_GRPH_START_REQ_MASK);
1143 if ((rdev->family == CHIP_R350) &&
1144 (stop_req > 0x15)) {
1145 stop_req -= 0x10;
1146 }
1147 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
1148 temp |= RADEON_GRPH_BUFFER_SIZE;
1149 temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
1150 RADEON_GRPH_CRITICAL_AT_SOF |
1151 RADEON_GRPH_STOP_CNTL);
1152 /*
1153 Write the result into the register.
1154 */
1155 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
1156 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
1157
1158#if 0
1159 if ((rdev->family == CHIP_RS400) ||
1160 (rdev->family == CHIP_RS480)) {
1161 /* attempt to program RS400 disp regs correctly ??? */
1162 temp = RREG32(RS400_DISP1_REG_CNTL);
1163 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
1164 RS400_DISP1_STOP_REQ_LEVEL_MASK);
1165 WREG32(RS400_DISP1_REQ_CNTL1, (temp |
1166 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
1167 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
1168 temp = RREG32(RS400_DMIF_MEM_CNTL1);
1169 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
1170 RS400_DISP1_CRITICAL_POINT_STOP_MASK);
1171 WREG32(RS400_DMIF_MEM_CNTL1, (temp |
1172 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
1173 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
1174 }
1175#endif
1176
1178		DRM_DEBUG("GRPH_BUFFER_CNTL to %x\n",
1178 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */
1179 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
1180 }
1181
1182 if (mode2) {
1183 u32 grph2_cntl;
1184 stop_req = mode2->hdisplay * pixel_bytes2 / 16;
1185
1186 if (stop_req > max_stop_req)
1187 stop_req = max_stop_req;
1188
1189 /*
1190 Find the drain rate of the display buffer.
1191 */
1192 temp_ff.full = rfixed_const((16/pixel_bytes2));
1193 disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
1194
1195 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
1196 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
1197 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
1198 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
1199 if ((rdev->family == CHIP_R350) &&
1200 (stop_req > 0x15)) {
1201 stop_req -= 0x10;
1202 }
1203 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
1204 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
1205 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
1206 RADEON_GRPH_CRITICAL_AT_SOF |
1207 RADEON_GRPH_STOP_CNTL);
1208
1209 if ((rdev->family == CHIP_RS100) ||
1210 (rdev->family == CHIP_RS200))
1211 critical_point2 = 0;
1212 else {
1213 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
1214 temp_ff.full = rfixed_const(temp);
1215 temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
1216 if (sclk_ff.full < temp_ff.full)
1217 temp_ff.full = sclk_ff.full;
1218
1219 read_return_rate.full = temp_ff.full;
1220
1221 if (mode1) {
1222 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
1223 time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
1224 } else {
1225 time_disp1_drop_priority.full = 0;
1226 }
1227 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
1228 crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
1229 crit_point_ff.full += rfixed_const_half(0);
1230
1231 critical_point2 = rfixed_trunc(crit_point_ff);
1232
1233 if (rdev->disp_priority == 2) {
1234 critical_point2 = 0;
1235 }
1236
1237 if (max_stop_req - critical_point2 < 4)
1238 critical_point2 = 0;
1239
1240 }
1241
1242 if (critical_point2 == 0 && rdev->family == CHIP_R300) {
1244			/* some R300 cards have a problem with this set to 0 */
1244 critical_point2 = 0x10;
1245 }
1246
1247 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
1248 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
1249
1250 if ((rdev->family == CHIP_RS400) ||
1251 (rdev->family == CHIP_RS480)) {
1252#if 0
1253 /* attempt to program RS400 disp2 regs correctly ??? */
1254 temp = RREG32(RS400_DISP2_REQ_CNTL1);
1255 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
1256 RS400_DISP2_STOP_REQ_LEVEL_MASK);
1257 WREG32(RS400_DISP2_REQ_CNTL1, (temp |
1258 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
1259 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
1260 temp = RREG32(RS400_DISP2_REQ_CNTL2);
1261 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
1262 RS400_DISP2_CRITICAL_POINT_STOP_MASK);
1263 WREG32(RS400_DISP2_REQ_CNTL2, (temp |
1264 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
1265 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
1266#endif
1267 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
1268 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
1269 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
1270 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
1271 }
1272
1273 DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n",
1274 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
1275 }
1276}
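
For readers tracing the arithmetic in the function above: once the fixed20_12 plumbing is stripped away, the CRTC1 stop-request and critical-point logic reduces to a few lines. Below is a standalone sketch in plain C, with doubles standing in for the fixed-point type; the clock, latency and mode numbers are invented for illustration, not taken from any real board.

#include <stdio.h>

int main(void)
{
        double pix_clk = 135.0;        /* pixel clock in MHz, hypothetical */
        int hdisplay = 1280, pixel_bytes = 4;
        double disp_latency = 0.53;    /* total display latency, as derived above */
        int max_stop_req = 0x7c;       /* 0x5c on RV100-class parts */

        int stop_req = hdisplay * pixel_bytes / 16;
        if (stop_req > max_stop_req)
                stop_req = max_stop_req;

        /* drain rate of the display buffer: pix_clk / (16 / pixel_bytes) */
        double disp_drain_rate = pix_clk / (16.0 / pixel_bytes);
        int critical_point = (int)(disp_drain_rate * disp_latency + 0.5);

        /* never let it come within 4 of max_stop_req; 0 forces high priority */
        if (max_stop_req - critical_point < 4)
                critical_point = 0;

        printf("stop_req=0x%x critical_point=0x%x\n", stop_req, critical_point);
        return 0;
}
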
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 2c2f42de1d4c..34d0f58eb944 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -30,170 +30,6 @@
30#include "atom.h" 30#include "atom.h"
31 31
32 32
33static void radeon_legacy_rmx_mode_set(struct drm_encoder *encoder,
34 struct drm_display_mode *mode,
35 struct drm_display_mode *adjusted_mode)
36{
37 struct drm_device *dev = encoder->dev;
38 struct radeon_device *rdev = dev->dev_private;
39 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
40 int xres = mode->hdisplay;
41 int yres = mode->vdisplay;
42 bool hscale = true, vscale = true;
43 int hsync_wid;
44 int vsync_wid;
45 int hsync_start;
46 uint32_t scale, inc;
47 uint32_t fp_horz_stretch, fp_vert_stretch, crtc_more_cntl, fp_horz_vert_active;
48 uint32_t fp_h_sync_strt_wid, fp_v_sync_strt_wid, fp_crtc_h_total_disp, fp_crtc_v_total_disp;
49 struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;
50
51 DRM_DEBUG("\n");
52
53 fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) &
54 (RADEON_VERT_STRETCH_RESERVED |
55 RADEON_VERT_AUTO_RATIO_INC);
56 fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) &
57 (RADEON_HORZ_FP_LOOP_STRETCH |
58 RADEON_HORZ_AUTO_RATIO_INC);
59
60 crtc_more_cntl = 0;
61 if ((rdev->family == CHIP_RS100) ||
62 (rdev->family == CHIP_RS200)) {
 63		/* This is to work around an ASIC bug with RMX; some versions
 64		   of the BIOS don't have this register initialized correctly. */
65 crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN;
66 }
67
68
69 fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
70 | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
71
72 hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
73 if (!hsync_wid)
74 hsync_wid = 1;
75 hsync_start = mode->crtc_hsync_start - 8;
76
77 fp_h_sync_strt_wid = ((hsync_start & 0x1fff)
78 | ((hsync_wid & 0x3f) << 16)
79 | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
80 ? RADEON_CRTC_H_SYNC_POL
81 : 0));
82
83 fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
84 | ((mode->crtc_vdisplay - 1) << 16));
85
86 vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
87 if (!vsync_wid)
88 vsync_wid = 1;
89
90 fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
91 | ((vsync_wid & 0x1f) << 16)
92 | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
93 ? RADEON_CRTC_V_SYNC_POL
94 : 0));
95
96 fp_horz_vert_active = 0;
97
98 if (native_mode->panel_xres == 0 ||
99 native_mode->panel_yres == 0) {
100 hscale = false;
101 vscale = false;
102 } else {
103 if (xres > native_mode->panel_xres)
104 xres = native_mode->panel_xres;
105 if (yres > native_mode->panel_yres)
106 yres = native_mode->panel_yres;
107
108 if (xres == native_mode->panel_xres)
109 hscale = false;
110 if (yres == native_mode->panel_yres)
111 vscale = false;
112 }
113
114 if (radeon_encoder->flags & RADEON_USE_RMX) {
115 if (radeon_encoder->rmx_type != RMX_CENTER) {
116 if (!hscale)
117 fp_horz_stretch |= ((xres/8-1) << 16);
118 else {
119 inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0;
120 scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX)
121 / native_mode->panel_xres + 1;
122 fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) |
123 RADEON_HORZ_STRETCH_BLEND |
124 RADEON_HORZ_STRETCH_ENABLE |
125 ((native_mode->panel_xres/8-1) << 16));
126 }
127
128 if (!vscale)
129 fp_vert_stretch |= ((yres-1) << 12);
130 else {
131 inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0;
132 scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX)
133 / native_mode->panel_yres + 1;
134 fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) |
135 RADEON_VERT_STRETCH_ENABLE |
136 RADEON_VERT_STRETCH_BLEND |
137 ((native_mode->panel_yres-1) << 12));
138 }
139 } else if (radeon_encoder->rmx_type == RMX_CENTER) {
140 int blank_width;
141
142 fp_horz_stretch |= ((xres/8-1) << 16);
143 fp_vert_stretch |= ((yres-1) << 12);
144
145 crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN |
146 RADEON_CRTC_AUTO_VERT_CENTER_EN);
147
148 blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8;
149 if (blank_width > 110)
150 blank_width = 110;
151
152 fp_crtc_h_total_disp = (((blank_width) & 0x3ff)
153 | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
154
155 hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
156 if (!hsync_wid)
157 hsync_wid = 1;
158
159 fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff)
160 | ((hsync_wid & 0x3f) << 16)
161 | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
162 ? RADEON_CRTC_H_SYNC_POL
163 : 0));
164
165 fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff)
166 | ((mode->crtc_vdisplay - 1) << 16));
167
168 vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
169 if (!vsync_wid)
170 vsync_wid = 1;
171
172 fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff)
173 | ((vsync_wid & 0x1f) << 16)
174 | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
175 ? RADEON_CRTC_V_SYNC_POL
176 : 0)));
177
178 fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) |
179 (((native_mode->panel_xres / 8) & 0x1ff) << 16));
180 }
181 } else {
182 fp_horz_stretch |= ((xres/8-1) << 16);
183 fp_vert_stretch |= ((yres-1) << 12);
184 }
185
186 WREG32(RADEON_FP_HORZ_STRETCH, fp_horz_stretch);
187 WREG32(RADEON_FP_VERT_STRETCH, fp_vert_stretch);
188 WREG32(RADEON_CRTC_MORE_CNTL, crtc_more_cntl);
189 WREG32(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active);
190 WREG32(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid);
191 WREG32(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid);
192 WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp);
193 WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp);
194
195}
196
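
The stretch-ratio computation in the function removed above is easy to misread, so here is the same math as a minimal sketch: a 1024-wide mode scaled onto an assumed 1280-wide panel, with RADEON_HORZ_STRETCH_RATIO_MAX taken to be 4096 (an assumption; check radeon_reg.h for the real value).

#include <stdio.h>

int main(void)
{
        unsigned xres = 1024, panel_xres = 1280;
        unsigned ratio_max = 4096;  /* RADEON_HORZ_STRETCH_RATIO_MAX, assumed */
        unsigned inc = 0;           /* the AUTO_RATIO_INC bit, 0 or 1 */

        /* scale = ((xres + inc) * RATIO_MAX) / panel_xres + 1, as above */
        unsigned scale = (xres + inc) * ratio_max / panel_xres + 1;
        printf("scale = %u of %u\n", scale, ratio_max);  /* 3277, i.e. ~80% */
        return 0;
}
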
197static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) 33static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
198{ 34{
199 struct drm_device *dev = encoder->dev; 35 struct drm_device *dev = encoder->dev;
@@ -287,9 +123,6 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
287 123
288 DRM_DEBUG("\n"); 124 DRM_DEBUG("\n");
289 125
290 if (radeon_crtc->crtc_id == 0)
291 radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
292
293 lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); 126 lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
294 lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; 127 lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;
295 128
@@ -318,7 +151,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
318 151
319 if (radeon_crtc->crtc_id == 0) { 152 if (radeon_crtc->crtc_id == 0) {
320 if (ASIC_IS_R300(rdev)) { 153 if (ASIC_IS_R300(rdev)) {
321 if (radeon_encoder->flags & RADEON_USE_RMX) 154 if (radeon_encoder->rmx_type != RMX_OFF)
322 lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX; 155 lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX;
323 } else 156 } else
324 lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2; 157 lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2;
@@ -350,8 +183,6 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
350 183
351 drm_mode_set_crtcinfo(adjusted_mode, 0); 184 drm_mode_set_crtcinfo(adjusted_mode, 0);
352 185
353 radeon_encoder->flags &= ~RADEON_USE_RMX;
354
355 if (radeon_encoder->rmx_type != RMX_OFF) 186 if (radeon_encoder->rmx_type != RMX_OFF)
356 radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); 187 radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
357 188
@@ -455,9 +286,6 @@ static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder,
455 286
456 DRM_DEBUG("\n"); 287 DRM_DEBUG("\n");
457 288
458 if (radeon_crtc->crtc_id == 0)
459 radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
460
461 if (radeon_crtc->crtc_id == 0) { 289 if (radeon_crtc->crtc_id == 0) {
462 if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) { 290 if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) {
463 disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) & 291 disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) &
@@ -653,9 +481,6 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
653 481
654 DRM_DEBUG("\n"); 482 DRM_DEBUG("\n");
655 483
656 if (radeon_crtc->crtc_id == 0)
657 radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
658
659 tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL); 484 tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL);
660 tmp &= 0xfffff; 485 tmp &= 0xfffff;
661 if (rdev->family == CHIP_RV280) { 486 if (rdev->family == CHIP_RV280) {
@@ -711,7 +536,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
711 if (radeon_crtc->crtc_id == 0) { 536 if (radeon_crtc->crtc_id == 0) {
712 if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { 537 if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
713 fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; 538 fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
714 if (radeon_encoder->flags & RADEON_USE_RMX) 539 if (radeon_encoder->rmx_type != RMX_OFF)
715 fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX; 540 fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX;
716 else 541 else
717 fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1; 542 fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1;
@@ -820,9 +645,6 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
820 645
821 DRM_DEBUG("\n"); 646 DRM_DEBUG("\n");
822 647
823 if (radeon_crtc->crtc_id == 0)
824 radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
825
826 if (rdev->is_atom_bios) { 648 if (rdev->is_atom_bios) {
827 radeon_encoder->pixel_clock = adjusted_mode->clock; 649 radeon_encoder->pixel_clock = adjusted_mode->clock;
828 atombios_external_tmds_setup(encoder, ATOM_ENABLE); 650 atombios_external_tmds_setup(encoder, ATOM_ENABLE);
@@ -856,7 +678,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
856 if (radeon_crtc->crtc_id == 0) { 678 if (radeon_crtc->crtc_id == 0) {
857 if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) { 679 if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) {
858 fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK; 680 fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK;
859 if (radeon_encoder->flags & RADEON_USE_RMX) 681 if (radeon_encoder->rmx_type != RMX_OFF)
860 fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX; 682 fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX;
861 else 683 else
862 fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1; 684 fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1;
@@ -1014,9 +836,6 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
1014 836
1015 DRM_DEBUG("\n"); 837 DRM_DEBUG("\n");
1016 838
1017 if (radeon_crtc->crtc_id == 0)
1018 radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
1019
1020 if (rdev->family != CHIP_R200) { 839 if (rdev->family != CHIP_R200) {
1021 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); 840 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
1022 if (rdev->family == CHIP_R420 || 841 if (rdev->family == CHIP_R420 ||
@@ -1243,6 +1062,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
1243 1062
1244 radeon_encoder->encoder_id = encoder_id; 1063 radeon_encoder->encoder_id = encoder_id;
1245 radeon_encoder->devices = supported_device; 1064 radeon_encoder->devices = supported_device;
1065 radeon_encoder->rmx_type = RMX_OFF;
1246 1066
1247 switch (radeon_encoder->encoder_id) { 1067 switch (radeon_encoder->encoder_id) {
1248 case ENCODER_OBJECT_ID_INTERNAL_LVDS: 1068 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 9173b687462b..3b09a1f2d8f9 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -36,6 +36,9 @@
36#include <linux/i2c.h> 36#include <linux/i2c.h>
37#include <linux/i2c-id.h> 37#include <linux/i2c-id.h>
38#include <linux/i2c-algo-bit.h> 38#include <linux/i2c-algo-bit.h>
39#include "radeon_fixed.h"
40
41struct radeon_device;
39 42
40#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base) 43#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
41#define to_radeon_connector(x) container_of(x, struct radeon_connector, base) 44#define to_radeon_connector(x) container_of(x, struct radeon_connector, base)
@@ -124,6 +127,7 @@ struct radeon_tmds_pll {
124#define RADEON_PLL_PREFER_LOW_POST_DIV (1 << 8) 127#define RADEON_PLL_PREFER_LOW_POST_DIV (1 << 8)
125#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) 128#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
126#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) 129#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10)
130#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
127 131
128struct radeon_pll { 132struct radeon_pll {
129 uint16_t reference_freq; 133 uint16_t reference_freq;
@@ -170,6 +174,18 @@ struct radeon_mode_info {
170 struct atom_context *atom_context; 174 struct atom_context *atom_context;
171 enum radeon_connector_table connector_table; 175 enum radeon_connector_table connector_table;
172 bool mode_config_initialized; 176 bool mode_config_initialized;
177 struct radeon_crtc *crtcs[2];
178};
179
180struct radeon_native_mode {
181 /* preferred mode */
182 uint32_t panel_xres, panel_yres;
183 uint32_t hoverplus, hsync_width;
184 uint32_t hblank;
185 uint32_t voverplus, vsync_width;
186 uint32_t vblank;
187 uint32_t dotclock;
188 uint32_t flags;
173}; 189};
174 190
175struct radeon_crtc { 191struct radeon_crtc {
@@ -185,19 +201,13 @@ struct radeon_crtc {
185 uint64_t cursor_addr; 201 uint64_t cursor_addr;
186 int cursor_width; 202 int cursor_width;
187 int cursor_height; 203 int cursor_height;
188}; 204 uint32_t legacy_display_base_addr;
189 205 uint32_t legacy_cursor_offset;
190#define RADEON_USE_RMX 1 206 enum radeon_rmx_type rmx_type;
191 207 uint32_t devices;
192struct radeon_native_mode { 208 fixed20_12 vsc;
193 /* preferred mode */ 209 fixed20_12 hsc;
194 uint32_t panel_xres, panel_yres; 210 struct radeon_native_mode native_mode;
195 uint32_t hoverplus, hsync_width;
196 uint32_t hblank;
197 uint32_t voverplus, vsync_width;
198 uint32_t vblank;
199 uint32_t dotclock;
200 uint32_t flags;
201}; 211};
202 212
203struct radeon_encoder_primary_dac { 213struct radeon_encoder_primary_dac {
@@ -383,16 +393,9 @@ void radeon_enc_destroy(struct drm_encoder *encoder);
383void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); 393void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
384void radeon_combios_asic_init(struct drm_device *dev); 394void radeon_combios_asic_init(struct drm_device *dev);
385extern int radeon_static_clocks_init(struct drm_device *dev); 395extern int radeon_static_clocks_init(struct drm_device *dev);
386void radeon_init_disp_bw_legacy(struct drm_device *dev, 396bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
387 struct drm_display_mode *mode1, 397 struct drm_display_mode *mode,
388 uint32_t pixel_bytes1, 398 struct drm_display_mode *adjusted_mode);
389 struct drm_display_mode *mode2, 399void atom_rv515_force_tv_scaler(struct radeon_device *rdev);
390 uint32_t pixel_bytes2);
391void radeon_init_disp_bw_avivo(struct drm_device *dev,
392 struct drm_display_mode *mode1,
393 uint32_t pixel_bytes1,
394 struct drm_display_mode *mode2,
395 uint32_t pixel_bytes2);
396void radeon_init_disp_bandwidth(struct drm_device *dev);
397 400
398#endif 401#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index bac0d06c52ac..dd9ac2fed6d6 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -44,6 +44,9 @@ struct radeon_object {
44 uint64_t gpu_addr; 44 uint64_t gpu_addr;
45 void *kptr; 45 void *kptr;
46 bool is_iomem; 46 bool is_iomem;
47 uint32_t tiling_flags;
48 uint32_t pitch;
49 int surface_reg;
47}; 50};
48 51
49int radeon_ttm_init(struct radeon_device *rdev); 52int radeon_ttm_init(struct radeon_device *rdev);
@@ -70,6 +73,7 @@ static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
70 73
71 robj = container_of(tobj, struct radeon_object, tobj); 74 robj = container_of(tobj, struct radeon_object, tobj);
72 list_del_init(&robj->list); 75 list_del_init(&robj->list);
76 radeon_object_clear_surface_reg(robj);
73 kfree(robj); 77 kfree(robj);
74} 78}
75 79
@@ -99,16 +103,16 @@ static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
99{ 103{
100 uint32_t flags = 0; 104 uint32_t flags = 0;
101 if (domain & RADEON_GEM_DOMAIN_VRAM) { 105 if (domain & RADEON_GEM_DOMAIN_VRAM) {
102 flags |= TTM_PL_FLAG_VRAM; 106 flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
103 } 107 }
104 if (domain & RADEON_GEM_DOMAIN_GTT) { 108 if (domain & RADEON_GEM_DOMAIN_GTT) {
105 flags |= TTM_PL_FLAG_TT; 109 flags |= TTM_PL_FLAG_TT | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
106 } 110 }
107 if (domain & RADEON_GEM_DOMAIN_CPU) { 111 if (domain & RADEON_GEM_DOMAIN_CPU) {
108 flags |= TTM_PL_FLAG_SYSTEM; 112 flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
109 } 113 }
110 if (!flags) { 114 if (!flags) {
111 flags |= TTM_PL_FLAG_SYSTEM; 115 flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
112 } 116 }
113 return flags; 117 return flags;
114} 118}
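
The hunk above changes what the placement flags carry: VRAM and GTT placements now explicitly request write-combined/uncached mappings, while CPU placements accept any caching. A self-contained model of the new mapping, with stand-in bit values rather than the real TTM_PL_* and RADEON_GEM_DOMAIN_* constants:

#include <stdint.h>
#include <stdio.h>

#define PL_VRAM         (1u << 0)  /* stand-ins for TTM_PL_FLAG_* */
#define PL_TT           (1u << 1)
#define PL_SYSTEM       (1u << 2)
#define PL_WC           (1u << 3)
#define PL_UNCACHED     (1u << 4)
#define PL_MASK_CACHING (1u << 5)

#define DOM_VRAM (1u << 0)         /* stand-ins for RADEON_GEM_DOMAIN_* */
#define DOM_GTT  (1u << 1)
#define DOM_CPU  (1u << 2)

static uint32_t flags_from_domain(uint32_t domain)
{
        uint32_t flags = 0;
        if (domain & DOM_VRAM)
                flags |= PL_VRAM | PL_WC | PL_UNCACHED;
        if (domain & DOM_GTT)
                flags |= PL_TT | PL_WC | PL_UNCACHED;
        if (domain & DOM_CPU)
                flags |= PL_SYSTEM | PL_MASK_CACHING;
        /* default to cacheable system memory, as the code above does */
        return flags ? flags : (PL_SYSTEM | PL_MASK_CACHING);
}

int main(void)
{
        printf("VRAM|GTT -> 0x%x\n", flags_from_domain(DOM_VRAM | DOM_GTT));
        return 0;
}
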
@@ -141,6 +145,7 @@ int radeon_object_create(struct radeon_device *rdev,
141 } 145 }
142 robj->rdev = rdev; 146 robj->rdev = rdev;
143 robj->gobj = gobj; 147 robj->gobj = gobj;
148 robj->surface_reg = -1;
144 INIT_LIST_HEAD(&robj->list); 149 INIT_LIST_HEAD(&robj->list);
145 150
146 flags = radeon_object_flags_from_domain(domain); 151 flags = radeon_object_flags_from_domain(domain);
@@ -304,7 +309,7 @@ int radeon_object_wait(struct radeon_object *robj)
304 } 309 }
305 spin_lock(&robj->tobj.lock); 310 spin_lock(&robj->tobj.lock);
306 if (robj->tobj.sync_obj) { 311 if (robj->tobj.sync_obj) {
307 r = ttm_bo_wait(&robj->tobj, true, false, false); 312 r = ttm_bo_wait(&robj->tobj, true, true, false);
308 } 313 }
309 spin_unlock(&robj->tobj.lock); 314 spin_unlock(&robj->tobj.lock);
310 radeon_object_unreserve(robj); 315 radeon_object_unreserve(robj);
@@ -403,7 +408,6 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
403 struct radeon_object *robj; 408 struct radeon_object *robj;
404 struct radeon_fence *old_fence = NULL; 409 struct radeon_fence *old_fence = NULL;
405 struct list_head *i; 410 struct list_head *i;
406 uint32_t flags;
407 int r; 411 int r;
408 412
409 r = radeon_object_list_reserve(head); 413 r = radeon_object_list_reserve(head);
@@ -414,27 +418,25 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
414 list_for_each(i, head) { 418 list_for_each(i, head) {
415 lobj = list_entry(i, struct radeon_object_list, list); 419 lobj = list_entry(i, struct radeon_object_list, list);
416 robj = lobj->robj; 420 robj = lobj->robj;
417 if (lobj->wdomain) {
418 flags = radeon_object_flags_from_domain(lobj->wdomain);
419 flags |= TTM_PL_FLAG_TT;
420 } else {
421 flags = radeon_object_flags_from_domain(lobj->rdomain);
422 flags |= TTM_PL_FLAG_TT;
423 flags |= TTM_PL_FLAG_VRAM;
424 }
425 if (!robj->pin_count) { 421 if (!robj->pin_count) {
426 robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING; 422 if (lobj->wdomain) {
423 robj->tobj.proposed_placement =
424 radeon_object_flags_from_domain(lobj->wdomain);
425 } else {
426 robj->tobj.proposed_placement =
427 radeon_object_flags_from_domain(lobj->rdomain);
428 }
427 r = ttm_buffer_object_validate(&robj->tobj, 429 r = ttm_buffer_object_validate(&robj->tobj,
428 robj->tobj.proposed_placement, 430 robj->tobj.proposed_placement,
429 true, false); 431 true, false);
430 if (unlikely(r)) { 432 if (unlikely(r)) {
431 radeon_object_list_unreserve(head);
432 DRM_ERROR("radeon: failed to validate.\n"); 433 DRM_ERROR("radeon: failed to validate.\n");
433 return r; 434 return r;
434 } 435 }
435 radeon_object_gpu_addr(robj); 436 radeon_object_gpu_addr(robj);
436 } 437 }
437 lobj->gpu_offset = robj->gpu_addr; 438 lobj->gpu_offset = robj->gpu_addr;
439 lobj->tiling_flags = robj->tiling_flags;
438 if (fence) { 440 if (fence) {
439 old_fence = (struct radeon_fence *)robj->tobj.sync_obj; 441 old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
440 robj->tobj.sync_obj = radeon_fence_ref(fence); 442 robj->tobj.sync_obj = radeon_fence_ref(fence);
@@ -479,3 +481,127 @@ unsigned long radeon_object_size(struct radeon_object *robj)
479{ 481{
480 return robj->tobj.num_pages << PAGE_SHIFT; 482 return robj->tobj.num_pages << PAGE_SHIFT;
481} 483}
484
485int radeon_object_get_surface_reg(struct radeon_object *robj)
486{
487 struct radeon_device *rdev = robj->rdev;
488 struct radeon_surface_reg *reg;
489 struct radeon_object *old_object;
490 int steal;
491 int i;
492
493 if (!robj->tiling_flags)
494 return 0;
495
496 if (robj->surface_reg >= 0) {
497 reg = &rdev->surface_regs[robj->surface_reg];
498 i = robj->surface_reg;
499 goto out;
500 }
501
502 steal = -1;
503 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
504
505 reg = &rdev->surface_regs[i];
506 if (!reg->robj)
507 break;
508
509 old_object = reg->robj;
510 if (old_object->pin_count == 0)
511 steal = i;
512 }
513
514 /* if we are all out */
515 if (i == RADEON_GEM_MAX_SURFACES) {
516 if (steal == -1)
517 return -ENOMEM;
518 /* find someone with a surface reg and nuke their BO */
519 reg = &rdev->surface_regs[steal];
520 old_object = reg->robj;
521 /* blow away the mapping */
522 DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
523 ttm_bo_unmap_virtual(&old_object->tobj);
524 old_object->surface_reg = -1;
525 i = steal;
526 }
527
528 robj->surface_reg = i;
529 reg->robj = robj;
530
531out:
532 radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch,
533 robj->tobj.mem.mm_node->start << PAGE_SHIFT,
534 robj->tobj.num_pages << PAGE_SHIFT);
535 return 0;
536}
537
538void radeon_object_clear_surface_reg(struct radeon_object *robj)
539{
540 struct radeon_device *rdev = robj->rdev;
541 struct radeon_surface_reg *reg;
542
543 if (robj->surface_reg == -1)
544 return;
545
546 reg = &rdev->surface_regs[robj->surface_reg];
547 radeon_clear_surface_reg(rdev, robj->surface_reg);
548
549 reg->robj = NULL;
550 robj->surface_reg = -1;
551}
552
553void radeon_object_set_tiling_flags(struct radeon_object *robj,
554 uint32_t tiling_flags, uint32_t pitch)
555{
556 robj->tiling_flags = tiling_flags;
557 robj->pitch = pitch;
558}
559
560void radeon_object_get_tiling_flags(struct radeon_object *robj,
561 uint32_t *tiling_flags,
562 uint32_t *pitch)
563{
564 if (tiling_flags)
565 *tiling_flags = robj->tiling_flags;
566 if (pitch)
567 *pitch = robj->pitch;
568}
569
570int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
571 bool force_drop)
572{
573 if (!(robj->tiling_flags & RADEON_TILING_SURFACE))
574 return 0;
575
576 if (force_drop) {
577 radeon_object_clear_surface_reg(robj);
578 return 0;
579 }
580
581 if (robj->tobj.mem.mem_type != TTM_PL_VRAM) {
582 if (!has_moved)
583 return 0;
584
585 if (robj->surface_reg >= 0)
586 radeon_object_clear_surface_reg(robj);
587 return 0;
588 }
589
590 if ((robj->surface_reg >= 0) && !has_moved)
591 return 0;
592
593 return radeon_object_get_surface_reg(robj);
594}
595
596void radeon_bo_move_notify(struct ttm_buffer_object *bo,
597 struct ttm_mem_reg *mem)
598{
599 struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
600 radeon_object_check_tiling(robj, 0, 1);
601}
602
603void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
604{
605 struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
606 radeon_object_check_tiling(robj, 0, 0);
607}
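
The surface-register code added above is a small steal-on-exhaustion allocator: take a free slot if one exists, otherwise evict an unpinned holder, otherwise fail. A compact model of that policy, with the slot count and structures invented for the sketch:

#include <stdio.h>

#define MAX_SLOTS 8                     /* RADEON_GEM_MAX_SURFACES stand-in */

struct bo { int pin_count; int slot; };
static struct bo *slots[MAX_SLOTS];

static int get_slot(struct bo *bo)
{
        int i, steal = -1;

        if (bo->slot >= 0)
                return bo->slot;        /* already holds a slot */
        for (i = 0; i < MAX_SLOTS; i++) {
                if (!slots[i])
                        break;          /* free slot found */
                if (slots[i]->pin_count == 0)
                        steal = i;      /* remember a possible victim */
        }
        if (i == MAX_SLOTS) {
                if (steal == -1)
                        return -1;      /* everything pinned: -ENOMEM */
                slots[steal]->slot = -1;        /* evict the victim's mapping */
                i = steal;
        }
        slots[i] = bo;
        bo->slot = i;
        return i;
}

int main(void)
{
        struct bo a = { .pin_count = 0, .slot = -1 };
        printf("allocated slot %d\n", get_slot(&a));
        return 0;
}
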
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index a853261d1881..60d159308b88 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -126,32 +126,19 @@ static void radeon_ib_align(struct radeon_device *rdev, struct radeon_ib *ib)
126 } 126 }
127} 127}
128 128
129static void radeon_ib_cpu_flush(struct radeon_device *rdev,
130 struct radeon_ib *ib)
131{
132 unsigned long tmp;
133 unsigned i;
134
135	/* To force a CPU cache flush; ugly, but it seems reliable */
136 for (i = 0; i < ib->length_dw; i += (rdev->cp.align_mask + 1)) {
137 tmp = readl(&ib->ptr[i]);
138 }
139}
140
141int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) 129int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
142{ 130{
143 int r = 0; 131 int r = 0;
144 132
145 mutex_lock(&rdev->ib_pool.mutex); 133 mutex_lock(&rdev->ib_pool.mutex);
146 radeon_ib_align(rdev, ib); 134 radeon_ib_align(rdev, ib);
147 radeon_ib_cpu_flush(rdev, ib);
148 if (!ib->length_dw || !rdev->cp.ready) { 135 if (!ib->length_dw || !rdev->cp.ready) {
149		/* TODO: Nothing in the IB to report. */ 136		/* TODO: Nothing in the IB to report. */
150 mutex_unlock(&rdev->ib_pool.mutex); 137 mutex_unlock(&rdev->ib_pool.mutex);
151 DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); 138 DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
152 return -EINVAL; 139 return -EINVAL;
153 } 140 }
154 /* 64 dwords should be enought for fence too */ 141 /* 64 dwords should be enough for fence too */
155 r = radeon_ring_lock(rdev, 64); 142 r = radeon_ring_lock(rdev, 64);
156 if (r) { 143 if (r) {
157		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r); 144		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_share.h b/drivers/gpu/drm/radeon/radeon_share.h
new file mode 100644
index 000000000000..63a773578f17
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_share.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RADEON_SHARE_H__
29#define __RADEON_SHARE_H__
30
31void r100_vram_init_sizes(struct radeon_device *rdev);
32
33void rs690_line_buffer_adjust(struct radeon_device *rdev,
34 struct drm_display_mode *mode1,
35 struct drm_display_mode *mode2);
36
37void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
38
39#endif
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
new file mode 100644
index 000000000000..03c33cf4e14c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -0,0 +1,209 @@
1/*
2 * Copyright 2009 VMware, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Michel Dänzer
23 */
24#include <drm/drmP.h>
25#include <drm/radeon_drm.h>
26#include "radeon_reg.h"
27#include "radeon.h"
28
29
30/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
31void radeon_test_moves(struct radeon_device *rdev)
32{
33 struct radeon_object *vram_obj = NULL;
34 struct radeon_object **gtt_obj = NULL;
35 struct radeon_fence *fence = NULL;
36 uint64_t gtt_addr, vram_addr;
37 unsigned i, n, size;
38 int r;
39
40 size = 1024 * 1024;
41
42 /* Number of tests =
43 * (Total GTT - IB pool - writeback page - ring buffer) / test size
44 */
45 n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - 4096 -
46 rdev->cp.ring_size) / size;
47
48 gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
49 if (!gtt_obj) {
50 DRM_ERROR("Failed to allocate %d pointers\n", n);
51 r = 1;
52 goto out_cleanup;
53 }
54
55 r = radeon_object_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
56 false, &vram_obj);
57 if (r) {
58 DRM_ERROR("Failed to create VRAM object\n");
59 goto out_cleanup;
60 }
61
62 r = radeon_object_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
63 if (r) {
64 DRM_ERROR("Failed to pin VRAM object\n");
65 goto out_cleanup;
66 }
67
68 for (i = 0; i < n; i++) {
69 void *gtt_map, *vram_map;
70 void **gtt_start, **gtt_end;
71 void **vram_start, **vram_end;
72
73 r = radeon_object_create(rdev, NULL, size, true,
74 RADEON_GEM_DOMAIN_GTT, false, gtt_obj + i);
75 if (r) {
76 DRM_ERROR("Failed to create GTT object %d\n", i);
77 goto out_cleanup;
78 }
79
80 r = radeon_object_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
81 if (r) {
82 DRM_ERROR("Failed to pin GTT object %d\n", i);
83 goto out_cleanup;
84 }
85
86 r = radeon_object_kmap(gtt_obj[i], &gtt_map);
87 if (r) {
88 DRM_ERROR("Failed to map GTT object %d\n", i);
89 goto out_cleanup;
90 }
91
92 for (gtt_start = gtt_map, gtt_end = gtt_map + size;
93 gtt_start < gtt_end;
94 gtt_start++)
95 *gtt_start = gtt_start;
96
97 radeon_object_kunmap(gtt_obj[i]);
98
99 r = radeon_fence_create(rdev, &fence);
100 if (r) {
101 DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i);
102 goto out_cleanup;
103 }
104
105 r = radeon_copy(rdev, gtt_addr, vram_addr, size / 4096, fence);
106 if (r) {
107 DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
108 goto out_cleanup;
109 }
110
111 r = radeon_fence_wait(fence, false);
112 if (r) {
113 DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
114 goto out_cleanup;
115 }
116
117 radeon_fence_unref(&fence);
118
119 r = radeon_object_kmap(vram_obj, &vram_map);
120 if (r) {
121 DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
122 goto out_cleanup;
123 }
124
125 for (gtt_start = gtt_map, gtt_end = gtt_map + size,
126 vram_start = vram_map, vram_end = vram_map + size;
127 vram_start < vram_end;
128 gtt_start++, vram_start++) {
129 if (*vram_start != gtt_start) {
130 DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
131 "expected 0x%p (GTT map 0x%p-0x%p)\n",
132 i, *vram_start, gtt_start, gtt_map,
133 gtt_end);
134 radeon_object_kunmap(vram_obj);
135 goto out_cleanup;
136 }
137 *vram_start = vram_start;
138 }
139
140 radeon_object_kunmap(vram_obj);
141
142 r = radeon_fence_create(rdev, &fence);
143 if (r) {
144 DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i);
145 goto out_cleanup;
146 }
147
148 r = radeon_copy(rdev, vram_addr, gtt_addr, size / 4096, fence);
149 if (r) {
150 DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
151 goto out_cleanup;
152 }
153
154 r = radeon_fence_wait(fence, false);
155 if (r) {
156 DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
157 goto out_cleanup;
158 }
159
160 radeon_fence_unref(&fence);
161
162 r = radeon_object_kmap(gtt_obj[i], &gtt_map);
163 if (r) {
164 DRM_ERROR("Failed to map GTT object after copy %d\n", i);
165 goto out_cleanup;
166 }
167
168 for (gtt_start = gtt_map, gtt_end = gtt_map + size,
169 vram_start = vram_map, vram_end = vram_map + size;
170 gtt_start < gtt_end;
171 gtt_start++, vram_start++) {
172 if (*gtt_start != vram_start) {
173 DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
174 "expected 0x%p (VRAM map 0x%p-0x%p)\n",
175 i, *gtt_start, vram_start, vram_map,
176 vram_end);
177 radeon_object_kunmap(gtt_obj[i]);
178 goto out_cleanup;
179 }
180 }
181
182 radeon_object_kunmap(gtt_obj[i]);
183
184 DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
185 gtt_addr - rdev->mc.gtt_location);
186 }
187
188out_cleanup:
189 if (vram_obj) {
190 radeon_object_unpin(vram_obj);
191 radeon_object_unref(&vram_obj);
192 }
193 if (gtt_obj) {
194 for (i = 0; i < n; i++) {
195 if (gtt_obj[i]) {
196 radeon_object_unpin(gtt_obj[i]);
197 radeon_object_unref(&gtt_obj[i]);
198 }
199 }
200 kfree(gtt_obj);
201 }
202 if (fence) {
203 radeon_fence_unref(&fence);
204 }
205 if (r) {
206 printk(KERN_WARNING "Error while testing BO move.\n");
207 }
208}
209
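
As a sanity check of the "Number of tests" comment near the top of radeon_test_moves(), here is the same formula evaluated with assumed sizes (a 64 MiB GTT, an IB pool of 16 slots of 64 KiB, and a 1 MiB ring; only the 4096-byte writeback page is fixed by the code above):

#include <stdio.h>

int main(void)
{
        unsigned long gtt_size = 64ul << 20;  /* 64 MiB GTT, assumed */
        unsigned long ib_pool = 16 * 64 * 1024;
        unsigned long wb_page = 4096;
        unsigned long ring = 1ul << 20;       /* 1 MiB ring, assumed */
        unsigned long size = 1024 * 1024;     /* per-test BO size */

        unsigned long n = (gtt_size - ib_pool - wb_page - ring) / size;
        printf("%lu test objects\n", n);      /* 61 with these numbers */
        return 0;
}
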
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 1227a97f5169..15c3531377ed 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -355,23 +355,26 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
355 if (!rdev->cp.ready) { 355 if (!rdev->cp.ready) {
356 /* use memcpy */ 356 /* use memcpy */
357		DRM_ERROR("CP is not ready, using memcpy.\n"); 357		DRM_ERROR("CP is not ready, using memcpy.\n");
358 return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); 358 goto memcpy;
359 } 359 }
360 360
361 if (old_mem->mem_type == TTM_PL_VRAM && 361 if (old_mem->mem_type == TTM_PL_VRAM &&
362 new_mem->mem_type == TTM_PL_SYSTEM) { 362 new_mem->mem_type == TTM_PL_SYSTEM) {
363 return radeon_move_vram_ram(bo, evict, interruptible, 363 r = radeon_move_vram_ram(bo, evict, interruptible,
364 no_wait, new_mem); 364 no_wait, new_mem);
365 } else if (old_mem->mem_type == TTM_PL_SYSTEM && 365 } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
366 new_mem->mem_type == TTM_PL_VRAM) { 366 new_mem->mem_type == TTM_PL_VRAM) {
367 return radeon_move_ram_vram(bo, evict, interruptible, 367 r = radeon_move_ram_vram(bo, evict, interruptible,
368 no_wait, new_mem); 368 no_wait, new_mem);
369 } else { 369 } else {
370 r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem); 370 r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
371 if (unlikely(r)) {
372 return r;
373 }
374 } 371 }
372
373 if (r) {
374memcpy:
375 r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
376 }
377
375 return r; 378 return r;
376} 379}
377 380
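
The control-flow change in this hunk is worth spelling out: instead of returning early when a GPU blit fails, every failure path now funnels into a single memcpy fallback. A minimal skeleton of that shape, with stub functions standing in for the real move helpers:

#include <stdio.h>

static int gpu_copy(void)    { return -1; }  /* pretend the blit fails */
static int memcpy_move(void) { return 0; }

static int bo_move(int cp_ready)
{
        int r;

        if (!cp_ready)
                goto fallback;   /* CP down: no GPU copy possible */
        r = gpu_copy();
        if (r == 0)
                return 0;
fallback:
        return memcpy_move();    /* one CPU-copy fallback for all cases */
}

int main(void)
{
        printf("move -> %d\n", bo_move(1));
        return 0;
}
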
@@ -429,6 +432,8 @@ static struct ttm_bo_driver radeon_bo_driver = {
429 .sync_obj_flush = &radeon_sync_obj_flush, 432 .sync_obj_flush = &radeon_sync_obj_flush,
430 .sync_obj_unref = &radeon_sync_obj_unref, 433 .sync_obj_unref = &radeon_sync_obj_unref,
431 .sync_obj_ref = &radeon_sync_obj_ref, 434 .sync_obj_ref = &radeon_sync_obj_ref,
435 .move_notify = &radeon_bo_move_notify,
436 .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
432}; 437};
433 438
434int radeon_ttm_init(struct radeon_device *rdev) 439int radeon_ttm_init(struct radeon_device *rdev)
@@ -442,13 +447,14 @@ int radeon_ttm_init(struct radeon_device *rdev)
442	/* No other users of the address space, so set it to 0 */ 447	/* No other users of the address space, so set it to 0 */
443 r = ttm_bo_device_init(&rdev->mman.bdev, 448 r = ttm_bo_device_init(&rdev->mman.bdev,
444 rdev->mman.mem_global_ref.object, 449 rdev->mman.mem_global_ref.object,
445 &radeon_bo_driver, DRM_FILE_PAGE_OFFSET); 450 &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
451 rdev->need_dma32);
446 if (r) { 452 if (r) {
447 DRM_ERROR("failed initializing buffer object driver(%d).\n", r); 453 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
448 return r; 454 return r;
449 } 455 }
450 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0, 456 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0,
451 ((rdev->mc.aper_size) >> PAGE_SHIFT)); 457 ((rdev->mc.real_vram_size) >> PAGE_SHIFT));
452 if (r) { 458 if (r) {
453 DRM_ERROR("Failed initializing VRAM heap.\n"); 459 DRM_ERROR("Failed initializing VRAM heap.\n");
454 return r; 460 return r;
@@ -465,7 +471,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
465 return r; 471 return r;
466 } 472 }
467 DRM_INFO("radeon: %uM of VRAM memory ready\n", 473 DRM_INFO("radeon: %uM of VRAM memory ready\n",
468 rdev->mc.vram_size / (1024 * 1024)); 474 rdev->mc.real_vram_size / (1024 * 1024));
469 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0, 475 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
470 ((rdev->mc.gtt_size) >> PAGE_SHIFT)); 476 ((rdev->mc.gtt_size) >> PAGE_SHIFT));
471 if (r) { 477 if (r) {
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index cc074b5a8f74..b29affd9c5d8 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -29,6 +29,7 @@
29#include <drm/drmP.h> 29#include <drm/drmP.h>
30#include "radeon_reg.h" 30#include "radeon_reg.h"
31#include "radeon.h" 31#include "radeon.h"
32#include "radeon_share.h"
32 33
33/* rs400,rs480 depends on : */ 34/* rs400,rs480 depends on : */
34void r100_hdp_reset(struct radeon_device *rdev); 35void r100_hdp_reset(struct radeon_device *rdev);
@@ -164,7 +165,9 @@ int rs400_gart_enable(struct radeon_device *rdev)
164 WREG32(RADEON_BUS_CNTL, tmp); 165 WREG32(RADEON_BUS_CNTL, tmp);
165 } 166 }
166 /* Table should be in 32bits address space so ignore bits above. */ 167 /* Table should be in 32bits address space so ignore bits above. */
167 tmp = rdev->gart.table_addr & 0xfffff000; 168 tmp = (u32)rdev->gart.table_addr & 0xfffff000;
169 tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4;
170
168 WREG32_MC(RS480_GART_BASE, tmp); 171 WREG32_MC(RS480_GART_BASE, tmp);
169 /* TODO: more tweaking here */ 172 /* TODO: more tweaking here */
170 WREG32_MC(RS480_GART_FEATURE_ID, 173 WREG32_MC(RS480_GART_FEATURE_ID,
@@ -201,10 +204,17 @@ void rs400_gart_disable(struct radeon_device *rdev)
201 204
202int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) 205int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
203{ 206{
207 uint32_t entry;
208
204 if (i < 0 || i > rdev->gart.num_gpu_pages) { 209 if (i < 0 || i > rdev->gart.num_gpu_pages) {
205 return -EINVAL; 210 return -EINVAL;
206 } 211 }
207 rdev->gart.table.ram.ptr[i] = cpu_to_le32(((uint32_t)addr) | 0xC); 212
213 entry = (lower_32_bits(addr) & PAGE_MASK) |
214 ((upper_32_bits(addr) & 0xff) << 4) |
215 0xc;
216 entry = cpu_to_le32(entry);
217 rdev->gart.table.ram.ptr[i] = entry;
208 return 0; 218 return 0;
209} 219}
210 220
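
The new GART entry encoding above packs a 40-bit bus address into a 32-bit entry: the page-aligned low bits go in directly, address bits 39:32 land in entry bits 11:4, and the low 0xc appears to be the valid/permission bits. A worked example with an arbitrary address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t addr = 0x1234567000ull;  /* 40-bit example address */

        uint32_t lo = (uint32_t)addr & 0xfffff000u;      /* PAGE_MASK */
        uint32_t hi = (uint32_t)((addr >> 32) & 0xff);   /* bits 39:32 */
        uint32_t entry = lo | (hi << 4) | 0xc;
        printf("entry = 0x%08x\n", entry);               /* 0x3456712c */
        return 0;
}
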
@@ -223,10 +233,9 @@ int rs400_mc_init(struct radeon_device *rdev)
223 233
224 rs400_gpu_init(rdev); 234 rs400_gpu_init(rdev);
225 rs400_gart_disable(rdev); 235 rs400_gart_disable(rdev);
226 rdev->mc.gtt_location = rdev->mc.vram_size; 236 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
227 rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); 237 rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
228 rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); 238 rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
229 rdev->mc.vram_location = 0xFFFFFFFFUL;
230 r = radeon_mc_setup(rdev); 239 r = radeon_mc_setup(rdev);
231 if (r) { 240 if (r) {
232 return r; 241 return r;
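
The three gtt_location lines above are the usual round-up-to-alignment idiom: place the GTT at the first gtt_size-aligned address at or past the top of VRAM (gtt_size being a power of two). A worked instance with invented sizes:

#include <stdio.h>

int main(void)
{
        unsigned long mc_vram_size = 200ul << 20;  /* 200 MiB, assumed */
        unsigned long gtt_size = 256ul << 20;      /* power of two */

        unsigned long gtt_location = mc_vram_size;
        gtt_location += gtt_size - 1;
        gtt_location &= ~(gtt_size - 1);
        printf("GTT at 0x%lx\n", gtt_location);    /* 0x10000000 = 256 MiB */
        return 0;
}
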
@@ -238,7 +247,7 @@ int rs400_mc_init(struct radeon_device *rdev)
238 "programming pipes. Bad things might happen.\n"); 247 "programming pipes. Bad things might happen.\n");
239 } 248 }
240 249
241 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; 250 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
242 tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); 251 tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
243 tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); 252 tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
244 WREG32(RADEON_MC_FB_LOCATION, tmp); 253 WREG32(RADEON_MC_FB_LOCATION, tmp);
@@ -284,21 +293,12 @@ void rs400_gpu_init(struct radeon_device *rdev)
284 */ 293 */
285void rs400_vram_info(struct radeon_device *rdev) 294void rs400_vram_info(struct radeon_device *rdev)
286{ 295{
287 uint32_t tom;
288
289 rs400_gart_adjust_size(rdev); 296 rs400_gart_adjust_size(rdev);
290 /* DDR for all card after R300 & IGP */ 297 /* DDR for all card after R300 & IGP */
291 rdev->mc.vram_is_ddr = true; 298 rdev->mc.vram_is_ddr = true;
292 rdev->mc.vram_width = 128; 299 rdev->mc.vram_width = 128;
293 300
294 /* read NB_TOM to get the amount of ram stolen for the GPU */ 301 r100_vram_init_sizes(rdev);
295 tom = RREG32(RADEON_NB_TOM);
296 rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
297 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
298
299 /* Could aper size report 0 ? */
300 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
301 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
302} 302}
303 303
304 304
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index ab0c967553e6..bbea6dee4a94 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -223,7 +223,7 @@ int rs600_mc_init(struct radeon_device *rdev)
223 printk(KERN_WARNING "Failed to wait MC idle while " 223 printk(KERN_WARNING "Failed to wait MC idle while "
224 "programming pipes. Bad things might happen.\n"); 224 "programming pipes. Bad things might happen.\n");
225 } 225 }
226 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; 226 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
227 tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16); 227 tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16);
228 tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16); 228 tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16);
229 WREG32_MC(RS600_MC_FB_LOCATION, tmp); 229 WREG32_MC(RS600_MC_FB_LOCATION, tmp);
@@ -301,6 +301,11 @@ void rs600_vram_info(struct radeon_device *rdev)
301 rdev->mc.vram_width = 128; 301 rdev->mc.vram_width = 128;
302} 302}
303 303
304void rs600_bandwidth_update(struct radeon_device *rdev)
305{
306 /* FIXME: implement, should this be like rs690 ? */
307}
308
304 309
305/* 310/*
306 * Indirect registers accessor 311 * Indirect registers accessor
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 79ba85042b5f..839595b00728 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -28,6 +28,9 @@
28#include "drmP.h" 28#include "drmP.h"
29#include "radeon_reg.h" 29#include "radeon_reg.h"
30#include "radeon.h" 30#include "radeon.h"
31#include "rs690r.h"
32#include "atom.h"
33#include "atom-bits.h"
31 34
32/* rs690,rs740 depends on : */ 35/* rs690,rs740 depends on : */
33void r100_hdp_reset(struct radeon_device *rdev); 36void r100_hdp_reset(struct radeon_device *rdev);
@@ -64,7 +67,7 @@ int rs690_mc_init(struct radeon_device *rdev)
64 rs400_gart_disable(rdev); 67 rs400_gart_disable(rdev);
65 68
66 /* Setup GPU memory space */ 69 /* Setup GPU memory space */
67 rdev->mc.gtt_location = rdev->mc.vram_size; 70 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
68 rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); 71 rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
69 rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); 72 rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
70 rdev->mc.vram_location = 0xFFFFFFFFUL; 73 rdev->mc.vram_location = 0xFFFFFFFFUL;
@@ -79,7 +82,7 @@ int rs690_mc_init(struct radeon_device *rdev)
79 printk(KERN_WARNING "Failed to wait MC idle while " 82 printk(KERN_WARNING "Failed to wait MC idle while "
80 "programming pipes. Bad things might happen.\n"); 83 "programming pipes. Bad things might happen.\n");
81 } 84 }
82 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; 85 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
83 tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16); 86 tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16);
84 tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16); 87 tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16);
85 WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp); 88 WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp);
@@ -138,9 +141,82 @@ void rs690_gpu_init(struct radeon_device *rdev)
138/* 141/*
139 * VRAM info. 142 * VRAM info.
140 */ 143 */
144void rs690_pm_info(struct radeon_device *rdev)
145{
146 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
147 struct _ATOM_INTEGRATED_SYSTEM_INFO *info;
148 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2;
149 void *ptr;
150 uint16_t data_offset;
151 uint8_t frev, crev;
152 fixed20_12 tmp;
153
154 atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
155 &frev, &crev, &data_offset);
156 ptr = rdev->mode_info.atom_context->bios + data_offset;
157 info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr;
158 info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr;
159	/* Get various system information from the BIOS */
160 switch (crev) {
161 case 1:
162 tmp.full = rfixed_const(100);
163 rdev->pm.igp_sideport_mclk.full = rfixed_const(info->ulBootUpMemoryClock);
164 rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
165 rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock));
166 rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock));
167 rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth);
168 break;
169 case 2:
170 tmp.full = rfixed_const(100);
171 rdev->pm.igp_sideport_mclk.full = rfixed_const(info_v2->ulBootUpSidePortClock);
172 rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
173 rdev->pm.igp_system_mclk.full = rfixed_const(info_v2->ulBootUpUMAClock);
174 rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
175 rdev->pm.igp_ht_link_clk.full = rfixed_const(info_v2->ulHTLinkFreq);
176 rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
177 rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth));
178 break;
179 default:
180 tmp.full = rfixed_const(100);
181		/* We assume the slowest possible clock, i.e. worst case */
182 /* DDR 333Mhz */
183 rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
184 /* FIXME: system clock ? */
185 rdev->pm.igp_system_mclk.full = rfixed_const(100);
186 rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
187 rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
188 rdev->pm.igp_ht_link_width.full = rfixed_const(8);
189 DRM_ERROR("No integrated system info for your GPU, using safe default\n");
190 break;
191 }
192	/* Compute various bandwidths */
193 /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
194 tmp.full = rfixed_const(4);
195 rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp);
196 /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
197 * = ht_clk * ht_width / 5
198 */
199 tmp.full = rfixed_const(5);
200 rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk,
201 rdev->pm.igp_ht_link_width);
202 rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp);
203	if (rdev->pm.ht_bandwidth.full < rdev->pm.max_bandwidth.full) {
204		/* HT link is a limiting factor */
205		rdev->pm.max_bandwidth.full = rdev->pm.ht_bandwidth.full;
206 }
207 /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
208 * = (sideport_clk * 14) / 10
209 */
210 tmp.full = rfixed_const(14);
211 rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
212 tmp.full = rfixed_const(10);
213 rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp);
214}
215
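
The bandwidth comments in rs690_pm_info() fold their derating factors into single constants, which is easy to verify by evaluating them with made-up clocks (all in MHz; the absolute units only matter relative to each other):

#include <stdio.h>

int main(void)
{
        double system_mclk = 400.0;    /* UMA memory clock, assumed */
        double ht_clk = 1000.0, ht_width = 16.0;
        double sideport_mclk = 333.0;

        double k8_bw = system_mclk * 4.0;            /* mclk/2 * 2 * 8 * 0.5 */
        double ht_bw = ht_clk * ht_width / 5.0;      /* clk * 2 * w/8 * 0.8 */
        double sp_bw = sideport_mclk * 14.0 / 10.0;  /* clk/2 * 2 * 2 * 0.7 */

        printf("k8=%.0f ht=%.0f sideport=%.1f\n", k8_bw, ht_bw, sp_bw);
        return 0;
}
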
141void rs690_vram_info(struct radeon_device *rdev) 216void rs690_vram_info(struct radeon_device *rdev)
142{ 217{
143 uint32_t tmp; 218 uint32_t tmp;
219 fixed20_12 a;
144 220
145 rs400_gart_adjust_size(rdev); 221 rs400_gart_adjust_size(rdev);
146 /* DDR for all card after R300 & IGP */ 222 /* DDR for all card after R300 & IGP */
@@ -152,12 +228,409 @@ void rs690_vram_info(struct radeon_device *rdev)
152 } else { 228 } else {
153 rdev->mc.vram_width = 64; 229 rdev->mc.vram_width = 64;
154 } 230 }
155 rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 231 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
232 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
156 233
157 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 234 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
158 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 235 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
236 rs690_pm_info(rdev);
237	/* FIXME: we should enforce the default clocks in case the GPU is
238	 * not in its default setup
239 */
240 a.full = rfixed_const(100);
241 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
242 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
243 a.full = rfixed_const(16);
244 /* core_bandwidth = sclk(Mhz) * 16 */
245 rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
246}
247
248void rs690_line_buffer_adjust(struct radeon_device *rdev,
249 struct drm_display_mode *mode1,
250 struct drm_display_mode *mode2)
251{
252 u32 tmp;
253
254 /*
255 * Line Buffer Setup
256 * There is a single line buffer shared by both display controllers.
257 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
258	 * the display controllers. The partitioning can either be done
259 * manually or via one of four preset allocations specified in bits 1:0:
260 * 0 - line buffer is divided in half and shared between crtc
261 * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
262 * 2 - D1 gets the whole buffer
263 * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
264	 * Setting bit 2 of DC_LB_MEMORY_SPLIT switches to manual
265 * allocation mode. In manual allocation mode, D1 always starts at 0,
266 * D1 end/2 is specified in bits 14:4; D2 allocation follows D1.
267 */
268 tmp = RREG32(DC_LB_MEMORY_SPLIT) & ~DC_LB_MEMORY_SPLIT_MASK;
269 tmp &= ~DC_LB_MEMORY_SPLIT_SHIFT_MODE;
270 /* auto */
271 if (mode1 && mode2) {
272 if (mode1->hdisplay > mode2->hdisplay) {
273 if (mode1->hdisplay > 2560)
274 tmp |= DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
275 else
276 tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
277 } else if (mode2->hdisplay > mode1->hdisplay) {
278 if (mode2->hdisplay > 2560)
279 tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
280 else
281 tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
282 } else
283 tmp |= AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
284 } else if (mode1) {
285 tmp |= DC_LB_MEMORY_SPLIT_D1_ONLY;
286 } else if (mode2) {
287 tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
288 }
289 WREG32(DC_LB_MEMORY_SPLIT, tmp);
159} 290}
160 291
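
The automatic split selection in rs690_line_buffer_adjust() is effectively a four-way decision table. The same policy as a standalone helper, with named results instead of the DC_LB_MEMORY_SPLIT register bits:

#include <stdio.h>

enum lb_split { D1_HALF_D2_HALF, D1_3Q_D2_1Q, D1_ONLY, D1_1Q_D2_3Q };

static enum lb_split pick_split(int hdisp1, int hdisp2)
{
        if (hdisp1 && hdisp2) {
                if (hdisp1 > hdisp2)
                        return hdisp1 > 2560 ? D1_3Q_D2_1Q : D1_HALF_D2_HALF;
                if (hdisp2 > hdisp1)
                        return hdisp2 > 2560 ? D1_1Q_D2_3Q : D1_HALF_D2_HALF;
                return D1_HALF_D2_HALF;  /* equal widths share evenly */
        }
        if (hdisp1)
                return D1_ONLY;          /* single head gets the whole buffer */
        return D1_1Q_D2_3Q;              /* D2-only (or no head; don't care) */
}

int main(void)
{
        printf("1920+1280 -> %d\n", pick_split(1920, 1280));  /* 0: half/half */
        return 0;
}
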
292struct rs690_watermark {
293 u32 lb_request_fifo_depth;
294 fixed20_12 num_line_pair;
295 fixed20_12 estimated_width;
296 fixed20_12 worst_case_latency;
297 fixed20_12 consumption_rate;
298 fixed20_12 active_time;
299 fixed20_12 dbpp;
300 fixed20_12 priority_mark_max;
301 fixed20_12 priority_mark;
302 fixed20_12 sclk;
303};
304
305void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
306 struct radeon_crtc *crtc,
307 struct rs690_watermark *wm)
308{
309 struct drm_display_mode *mode = &crtc->base.mode;
310 fixed20_12 a, b, c;
311 fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
312 fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
313	/* FIXME: detect IGP with sideport memory, I don't think there is any
314 * such product available
315 */
316 bool sideport = false;
317
318 if (!crtc->base.enabled) {
319		/* FIXME: wouldn't it be better to set the priority mark to maximum */
320 wm->lb_request_fifo_depth = 4;
321 return;
322 }
323
324 if (crtc->vsc.full > rfixed_const(2))
325 wm->num_line_pair.full = rfixed_const(2);
326 else
327 wm->num_line_pair.full = rfixed_const(1);
328
329 b.full = rfixed_const(mode->crtc_hdisplay);
330 c.full = rfixed_const(256);
331 a.full = rfixed_mul(wm->num_line_pair, b);
332 request_fifo_depth.full = rfixed_div(a, c);
333 if (a.full < rfixed_const(4)) {
334 wm->lb_request_fifo_depth = 4;
335 } else {
336 wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
337 }
338
339 /* Determine consumption rate
340 * pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
341 * vtaps = number of vertical taps,
342 * vsc = vertical scaling ratio, defined as source/destination
343 * hsc = horizontal scaling ratio, defined as source/destination
344 */
345 a.full = rfixed_const(mode->clock);
346 b.full = rfixed_const(1000);
347 a.full = rfixed_div(a, b);
348 pclk.full = rfixed_div(b, a);
349 if (crtc->rmx_type != RMX_OFF) {
350 b.full = rfixed_const(2);
351 if (crtc->vsc.full > b.full)
352 b.full = crtc->vsc.full;
353 b.full = rfixed_mul(b, crtc->hsc);
354 c.full = rfixed_const(2);
355 b.full = rfixed_div(b, c);
356 consumption_time.full = rfixed_div(pclk, b);
357 } else {
358 consumption_time.full = pclk.full;
359 }
360 a.full = rfixed_const(1);
361 wm->consumption_rate.full = rfixed_div(a, consumption_time);
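	/* Worked example (hypothetical 1920x1080@60 mode, no scaling):
	 * mode->clock = 148500 kHz, so pclk = 1000 / 148.5 ~= 6.73 ns per
	 * pixel and consumption_rate ~= 0.149 pixels/ns.  With RMX active
	 * the rate is scaled up by max(vsc, 2) * hsc / 2, as computed above. */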
362
363
364 /* Determine line time
365 * LineTime = total time for one line of display
366 * htotal = total number of horizontal pixels
367 * pclk = pixel clock period(ns)
368 */
369 a.full = rfixed_const(crtc->base.mode.crtc_htotal);
370 line_time.full = rfixed_mul(a, pclk);
371
372 /* Determine active time
373 * ActiveTime = time of active region of display within one line,
374 * hactive = total number of horizontal active pixels
375 * htotal = total number of horizontal pixels
376 */
377 a.full = rfixed_const(crtc->base.mode.crtc_htotal);
378 b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
379 wm->active_time.full = rfixed_mul(line_time, b);
380 wm->active_time.full = rfixed_div(wm->active_time, a);
381
382 /* Maximum bandwidth is the minimum bandwidth of all components */
383 rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
384 if (sideport) {
385 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
386 rdev->pm.sideport_bandwidth.full)
387 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
388 read_delay_latency.full = rfixed_const(370 * 800 * 1000);
389 read_delay_latency.full = rfixed_div(read_delay_latency,
390 rdev->pm.igp_sideport_mclk);
391 } else {
392 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
393 rdev->pm.k8_bandwidth.full)
394 rdev->pm.max_bandwidth = rdev->pm.k8_bandwidth;
395 if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
396 rdev->pm.ht_bandwidth.full)
397 rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth;
398 read_delay_latency.full = rfixed_const(5000);
399 }
400
401 /* sclk = system clock period (ns) = 1000 / max_bandwidth / 16 */
402 a.full = rfixed_const(16);
403 rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a);
404 a.full = rfixed_const(1000);
405 rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk);
406 /* Determine chunk time
407 * ChunkTime = the time it takes the DCP to send one chunk of data
408 * to the LB which consists of pipeline delay and inter chunk gap
409 * sclk = system clock(ns)
410 */
411 a.full = rfixed_const(256 * 13);
412 chunk_time.full = rfixed_mul(rdev->pm.sclk, a);
413 a.full = rfixed_const(10);
414 chunk_time.full = rfixed_div(chunk_time, a);
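	/* Example with hypothetical numbers: max_bandwidth = 16 gives
	 * sclk = 1000 / (16 * 16) ~= 3.9 ns per system clock, so one chunk
	 * costs 256 * 13 / 10 * 3.9 ~= 1300 ns to reach the line buffer. */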
415
416 /* Determine the worst case latency
417 * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
418 * WorstCaseLatency = worst case time from urgent to when the MC starts
419 * to return data
420 * READ_DELAY_IDLE_MAX = constant of 1us
421 * ChunkTime = time it takes the DCP to send one chunk of data to the LB
422 * which consists of pipeline delay and inter chunk gap
423 */
424 if (rfixed_trunc(wm->num_line_pair) > 1) {
425 a.full = rfixed_const(3);
426 wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
427 wm->worst_case_latency.full += read_delay_latency.full;
428 } else {
429 a.full = rfixed_const(2);
430 wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
431 wm->worst_case_latency.full += read_delay_latency.full;
432 }
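	/* Continuing the hypothetical numbers above: with two line pairs and
	 * no sideport, worst case = 3 * 1300 + 5000 ~= 8900 ns before the
	 * MC starts returning data. */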
433
434 /* Determine the tolerable latency
435 * TolerableLatency = Any given request has only 1 line time
436 * for the data to be returned
437 * LBRequestFifoDepth = Number of chunk requests the LB can
438 * put into the request FIFO for a display
439 * LineTime = total time for one line of display
440 * ChunkTime = the time it takes the DCP to send one chunk
441 * of data to the LB which consists of
442 * pipeline delay and inter chunk gap
443 */
444 if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
445 tolerable_latency.full = line_time.full;
446 } else {
447 tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
448 tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
449 tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
450 tolerable_latency.full = line_time.full - tolerable_latency.full;
451 }
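	/* I.e. every chunk the mode needs beyond (lb_request_fifo_depth - 2)
	 * FIFO slots costs one chunk_time out of the one-line-time budget. */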
452 /* We assume worst case 32bits (4 bytes) */
453 wm->dbpp.full = rfixed_const(4 * 8);
454
455 /* Determine the maximum priority mark
456 * width = viewport width in pixels
457 */
458 a.full = rfixed_const(16);
459 wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
460 wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
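	/* Priority marks are in 16-pixel units: a 1920-wide viewport gives
	 * priority_mark_max = 1920 / 16 = 120. */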
461
462 /* Determine estimated width */
463 estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
464 estimated_width.full = rfixed_div(estimated_width, consumption_time);
465 if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
466 wm->priority_mark.full = rfixed_const(10);
467 } else {
468 a.full = rfixed_const(16);
469 wm->priority_mark.full = rfixed_div(estimated_width, a);
470 wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
471 }
472}
473
474void rs690_bandwidth_update(struct radeon_device *rdev)
475{
476 struct drm_display_mode *mode0 = NULL;
477 struct drm_display_mode *mode1 = NULL;
478 struct rs690_watermark wm0;
479 struct rs690_watermark wm1;
480 u32 tmp;
481 fixed20_12 priority_mark02, priority_mark12, fill_rate;
482 fixed20_12 a, b;
483
484 if (rdev->mode_info.crtcs[0]->base.enabled)
485 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
486 if (rdev->mode_info.crtcs[1]->base.enabled)
487 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
488 /*
489 * Raise the display0/1 priority in the memory controller
490 * when the user specifies HIGH for the displaypriority
491 * option.
492 */
493 if (rdev->disp_priority == 2) {
494 tmp = RREG32_MC(MC_INIT_MISC_LAT_TIMER);
495 tmp &= ~MC_DISP1R_INIT_LAT_MASK;
496 tmp &= ~MC_DISP0R_INIT_LAT_MASK;
497 if (mode1)
498 tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
499 if (mode0)
500 tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT);
501 WREG32_MC(MC_INIT_MISC_LAT_TIMER, tmp);
502 }
503 rs690_line_buffer_adjust(rdev, mode0, mode1);
504
505 if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
506 WREG32(DCP_CONTROL, 0);
507 if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
508 WREG32(DCP_CONTROL, 2);
509
510 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
511 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
512
513 tmp = (wm0.lb_request_fifo_depth - 1);
514 tmp |= (wm1.lb_request_fifo_depth - 1) << 16;
515 WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
516
517 if (mode0 && mode1) {
518 if (rfixed_trunc(wm0.dbpp) > 64)
519 a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
520 else
521 a.full = wm0.num_line_pair.full;
522 if (rfixed_trunc(wm1.dbpp) > 64)
523 b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
524 else
525 b.full = wm1.num_line_pair.full;
526 a.full += b.full;
527 fill_rate.full = rfixed_div(wm0.sclk, a);
528 if (wm0.consumption_rate.full > fill_rate.full) {
529 b.full = wm0.consumption_rate.full - fill_rate.full;
530 b.full = rfixed_mul(b, wm0.active_time);
531 a.full = rfixed_mul(wm0.worst_case_latency,
532 wm0.consumption_rate);
533 a.full = a.full + b.full;
534 b.full = rfixed_const(16 * 1000);
535 priority_mark02.full = rfixed_div(a, b);
536 } else {
537 a.full = rfixed_mul(wm0.worst_case_latency,
538 wm0.consumption_rate);
539 b.full = rfixed_const(16 * 1000);
540 priority_mark02.full = rfixed_div(a, b);
541 }
542 if (wm1.consumption_rate.full > fill_rate.full) {
543 b.full = wm1.consumption_rate.full - fill_rate.full;
544 b.full = rfixed_mul(b, wm1.active_time);
545 a.full = rfixed_mul(wm1.worst_case_latency,
546 wm1.consumption_rate);
547 a.full = a.full + b.full;
548 b.full = rfixed_const(16 * 1000);
549 priority_mark12.full = rfixed_div(a, b);
550 } else {
551 a.full = rfixed_mul(wm1.worst_case_latency,
552 wm1.consumption_rate);
553 b.full = rfixed_const(16 * 1000);
554 priority_mark12.full = rfixed_div(a, b);
555 }
556 if (wm0.priority_mark.full > priority_mark02.full)
557 priority_mark02.full = wm0.priority_mark.full;
558 if (rfixed_trunc(priority_mark02) < 0)
559 priority_mark02.full = 0;
560 if (wm0.priority_mark_max.full > priority_mark02.full)
561 priority_mark02.full = wm0.priority_mark_max.full;
562 if (wm1.priority_mark.full > priority_mark12.full)
563 priority_mark12.full = wm1.priority_mark.full;
564 if (rfixed_trunc(priority_mark12) < 0)
565 priority_mark12.full = 0;
566 if (wm1.priority_mark_max.full > priority_mark12.full)
567 priority_mark12.full = wm1.priority_mark_max.full;
568 WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
569 WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
570 WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
571 WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
572 } else if (mode0) {
573 if (rfixed_trunc(wm0.dbpp) > 64)
574 a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
575 else
576 a.full = wm0.num_line_pair.full;
577 fill_rate.full = rfixed_div(wm0.sclk, a);
578 if (wm0.consumption_rate.full > fill_rate.full) {
579 b.full = wm0.consumption_rate.full - fill_rate.full;
580 b.full = rfixed_mul(b, wm0.active_time);
581 a.full = rfixed_mul(wm0.worst_case_latency,
582 wm0.consumption_rate);
583 a.full = a.full + b.full;
584 b.full = rfixed_const(16 * 1000);
585 priority_mark02.full = rfixed_div(a, b);
586 } else {
587 a.full = rfixed_mul(wm0.worst_case_latency,
588 wm0.consumption_rate);
589 b.full = rfixed_const(16 * 1000);
590 priority_mark02.full = rfixed_div(a, b);
591 }
592 if (wm0.priority_mark.full > priority_mark02.full)
593 priority_mark02.full = wm0.priority_mark.full;
594 if (rfixed_trunc(priority_mark02) < 0)
595 priority_mark02.full = 0;
596 if (wm0.priority_mark_max.full > priority_mark02.full)
597 priority_mark02.full = wm0.priority_mark_max.full;
598 WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
599 WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
600 WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
601 WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
602 } else {
603 if (rfixed_trunc(wm1.dbpp) > 64)
604 a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
605 else
606 a.full = wm1.num_line_pair.full;
607 fill_rate.full = rfixed_div(wm1.sclk, a);
608 if (wm1.consumption_rate.full > fill_rate.full) {
609 b.full = wm1.consumption_rate.full - fill_rate.full;
610 b.full = rfixed_mul(b, wm1.active_time);
611 a.full = rfixed_mul(wm1.worst_case_latency,
612 wm1.consumption_rate);
613 a.full = a.full + b.full;
614 b.full = rfixed_const(16 * 1000);
615 priority_mark12.full = rfixed_div(a, b);
616 } else {
617 a.full = rfixed_mul(wm1.worst_case_latency,
618 wm1.consumption_rate);
619 b.full = rfixed_const(16 * 1000);
620 priority_mark12.full = rfixed_div(a, b);
621 }
622 if (wm1.priority_mark.full > priority_mark12.full)
623 priority_mark12.full = wm1.priority_mark.full;
624 if (rfixed_trunc(priority_mark12) < 0)
625 priority_mark12.full = 0;
626 if (wm1.priority_mark_max.full > priority_mark12.full)
627 priority_mark12.full = wm1.priority_mark_max.full;
628 WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
629 WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
630 WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
631 WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
632 }
633}
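Both arms of each branch above compute the same per-display watermark; a condensed sketch of that formula (helper name and factoring are mine; reading the 16 * 1000 divisor as 16-pixel mark granularity times a unit conversion is an inference, not something the source states):

/* Condensed form of the per-display priority mark math above (sketch). */
static unsigned int priority_mark(fixed20_12 worst_case_latency,
				  fixed20_12 consumption_rate,
				  fixed20_12 fill_rate,
				  fixed20_12 active_time)
{
	fixed20_12 a, b;

	a.full = rfixed_mul(worst_case_latency, consumption_rate);
	if (consumption_rate.full > fill_rate.full) {
		/* the display drains faster than the FIFO refills: add the
		 * shortfall accumulated over the active part of one line */
		b.full = consumption_rate.full - fill_rate.full;
		b.full = rfixed_mul(b, active_time);
		a.full += b.full;
	}
	b.full = rfixed_const(16 * 1000);
	a.full = rfixed_div(a, b);
	return rfixed_trunc(a);
}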
161 634
162/* 635/*
163 * Indirect registers accessor 636 * Indirect registers accessor
diff --git a/drivers/gpu/drm/radeon/rs690r.h b/drivers/gpu/drm/radeon/rs690r.h
new file mode 100644
index 000000000000..c0d9faa2175b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs690r.h
@@ -0,0 +1,99 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef RS690R_H
29#define RS690R_H
30
31/* RS690/RS740 registers */
32#define MC_INDEX 0x0078
33# define MC_INDEX_MASK 0x1FF
34# define MC_INDEX_WR_EN (1 << 9)
35# define MC_INDEX_WR_ACK 0x7F
36#define MC_DATA 0x007C
37#define HDP_FB_LOCATION 0x0134
38#define DC_LB_MEMORY_SPLIT 0x6520
39#define DC_LB_MEMORY_SPLIT_MASK 0x00000003
40#define DC_LB_MEMORY_SPLIT_SHIFT 0
41#define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0
42#define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1
43#define DC_LB_MEMORY_SPLIT_D1_ONLY 2
44#define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3
45#define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2)
46#define DC_LB_DISP1_END_ADR_SHIFT 4
47#define DC_LB_DISP1_END_ADR_MASK 0x00007FF0
48#define D1MODE_PRIORITY_A_CNT 0x6548
49#define MODE_PRIORITY_MARK_MASK 0x00007FFF
50#define MODE_PRIORITY_OFF (1 << 16)
51#define MODE_PRIORITY_ALWAYS_ON (1 << 20)
52#define MODE_PRIORITY_FORCE_MASK (1 << 24)
53#define D1MODE_PRIORITY_B_CNT 0x654C
54#define LB_MAX_REQ_OUTSTANDING 0x6D58
55#define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F
56#define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0
57#define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000
58#define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16
59#define DCP_CONTROL 0x6C9C
60#define D2MODE_PRIORITY_A_CNT 0x6D48
61#define D2MODE_PRIORITY_B_CNT 0x6D4C
62
63/* MC indirect registers */
64#define MC_STATUS_IDLE (1 << 0)
65#define MC_MISC_CNTL 0x18
66#define DISABLE_GTW (1 << 1)
67#define GART_INDEX_REG_EN (1 << 12)
68#define BLOCK_GFX_D3_EN (1 << 14)
69#define GART_FEATURE_ID 0x2B
70#define HANG_EN (1 << 11)
71#define TLB_ENABLE (1 << 18)
72#define P2P_ENABLE (1 << 19)
73#define GTW_LAC_EN (1 << 25)
74#define LEVEL2_GART (0 << 30)
75#define LEVEL1_GART (1 << 30)
76#define PDC_EN (1 << 31)
77#define GART_BASE 0x2C
78#define GART_CACHE_CNTRL 0x2E
79# define GART_CACHE_INVALIDATE (1 << 0)
80#define MC_STATUS 0x90
81#define MCCFG_FB_LOCATION 0x100
82#define MC_FB_START_MASK 0x0000FFFF
83#define MC_FB_START_SHIFT 0
84#define MC_FB_TOP_MASK 0xFFFF0000
85#define MC_FB_TOP_SHIFT 16
86#define MCCFG_AGP_LOCATION 0x101
87#define MC_AGP_START_MASK 0x0000FFFF
88#define MC_AGP_START_SHIFT 0
89#define MC_AGP_TOP_MASK 0xFFFF0000
90#define MC_AGP_TOP_SHIFT 16
91#define MCCFG_AGP_BASE 0x102
92#define MCCFG_AGP_BASE_2 0x103
93#define MC_INIT_MISC_LAT_TIMER 0x104
94#define MC_DISP0R_INIT_LAT_SHIFT 8
95#define MC_DISP0R_INIT_LAT_MASK 0x00000F00
96#define MC_DISP1R_INIT_LAT_SHIFT 12
97#define MC_DISP1R_INIT_LAT_MASK 0x0000F000
98
99#endif
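The MC_INDEX/MC_DATA pair above implements an index/data window into the indirect MC register space. A sketch of the access pattern those bits imply (assumed to match the rs690 MC accessors elsewhere in this series; verify against the tree before relying on it):

/* Select the register with MC_INDEX, then move data through MC_DATA. */
static u32 mc_rreg_sketch(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(MC_INDEX, reg & MC_INDEX_MASK);		/* select, read mode */
	r = RREG32(MC_DATA);				/* read through window */
	WREG32(MC_INDEX, MC_INDEX_MASK);		/* deselect */
	return r;
}

static void mc_wreg_sketch(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(MC_INDEX, MC_INDEX_WR_EN | (reg & MC_INDEX_MASK));
	WREG32(MC_DATA, v);				/* write through window */
	WREG32(MC_INDEX, MC_INDEX_WR_ACK);		/* drop write enable */
}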
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index ffea37b1b3e2..551e608702e4 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -27,8 +27,9 @@
27 */ 27 */
28#include <linux/seq_file.h> 28#include <linux/seq_file.h>
29#include "drmP.h" 29#include "drmP.h"
30#include "radeon_reg.h" 30#include "rv515r.h"
31#include "radeon.h" 31#include "radeon.h"
32#include "radeon_share.h"
32 33
33/* rv515 depends on : */ 34/* rv515 depends on : */
34void r100_hdp_reset(struct radeon_device *rdev); 35void r100_hdp_reset(struct radeon_device *rdev);
@@ -99,26 +100,26 @@ int rv515_mc_init(struct radeon_device *rdev)
99 "programming pipes. Bad things might happen.\n"); 100 "programming pipes. Bad things might happen.\n");
100 } 101 }
101 /* Write VRAM size in case we are limiting it */ 102 /* Write VRAM size in case we are limiting it */
102 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); 103 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
103 tmp = REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16); 104 tmp = REG_SET(MC_FB_START, rdev->mc.vram_location >> 16);
104 WREG32(0x134, tmp); 105 WREG32(0x134, tmp);
105 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; 106 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
106 tmp = REG_SET(RV515_MC_FB_TOP, tmp >> 16); 107 tmp = REG_SET(MC_FB_TOP, tmp >> 16);
107 tmp |= REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16); 108 tmp |= REG_SET(MC_FB_START, rdev->mc.vram_location >> 16);
108 WREG32_MC(RV515_MC_FB_LOCATION, tmp); 109 WREG32_MC(MC_FB_LOCATION, tmp);
109 WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16); 110 WREG32(HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
110 WREG32(0x310, rdev->mc.vram_location); 111 WREG32(0x310, rdev->mc.vram_location);
111 if (rdev->flags & RADEON_IS_AGP) { 112 if (rdev->flags & RADEON_IS_AGP) {
112 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; 113 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
113 tmp = REG_SET(RV515_MC_AGP_TOP, tmp >> 16); 114 tmp = REG_SET(MC_AGP_TOP, tmp >> 16);
114 tmp |= REG_SET(RV515_MC_AGP_START, rdev->mc.gtt_location >> 16); 115 tmp |= REG_SET(MC_AGP_START, rdev->mc.gtt_location >> 16);
115 WREG32_MC(RV515_MC_AGP_LOCATION, tmp); 116 WREG32_MC(MC_AGP_LOCATION, tmp);
116 WREG32_MC(RV515_MC_AGP_BASE, rdev->mc.agp_base); 117 WREG32_MC(MC_AGP_BASE, rdev->mc.agp_base);
117 WREG32_MC(RV515_MC_AGP_BASE_2, 0); 118 WREG32_MC(MC_AGP_BASE_2, 0);
118 } else { 119 } else {
119 WREG32_MC(RV515_MC_AGP_LOCATION, 0x0FFFFFFF); 120 WREG32_MC(MC_AGP_LOCATION, 0x0FFFFFFF);
120 WREG32_MC(RV515_MC_AGP_BASE, 0); 121 WREG32_MC(MC_AGP_BASE, 0);
121 WREG32_MC(RV515_MC_AGP_BASE_2, 0); 122 WREG32_MC(MC_AGP_BASE_2, 0);
122 } 123 }
123 return 0; 124 return 0;
124} 125}
@@ -136,95 +137,67 @@ void rv515_mc_fini(struct radeon_device *rdev)
136 */ 137 */
137void rv515_ring_start(struct radeon_device *rdev) 138void rv515_ring_start(struct radeon_device *rdev)
138{ 139{
139 unsigned gb_tile_config;
140 int r; 140 int r;
141 141
142 /* Sub pixel 1/12 so we can have 4K rendering according to doc */
143 gb_tile_config = R300_ENABLE_TILING | R300_TILE_SIZE_16;
144 switch (rdev->num_gb_pipes) {
145 case 2:
146 gb_tile_config |= R300_PIPE_COUNT_R300;
147 break;
148 case 3:
149 gb_tile_config |= R300_PIPE_COUNT_R420_3P;
150 break;
151 case 4:
152 gb_tile_config |= R300_PIPE_COUNT_R420;
153 break;
154 case 1:
155 default:
156 gb_tile_config |= R300_PIPE_COUNT_RV350;
157 break;
158 }
159
160 r = radeon_ring_lock(rdev, 64); 142 r = radeon_ring_lock(rdev, 64);
161 if (r) { 143 if (r) {
162 return; 144 return;
163 } 145 }
164 radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0)); 146 radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0));
165 radeon_ring_write(rdev,
166 RADEON_ISYNC_ANY2D_IDLE3D |
167 RADEON_ISYNC_ANY3D_IDLE2D |
168 RADEON_ISYNC_WAIT_IDLEGUI |
169 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
170 radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
171 radeon_ring_write(rdev, gb_tile_config);
172 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
173 radeon_ring_write(rdev, 147 radeon_ring_write(rdev,
174 RADEON_WAIT_2D_IDLECLEAN | 148 ISYNC_ANY2D_IDLE3D |
175 RADEON_WAIT_3D_IDLECLEAN); 149 ISYNC_ANY3D_IDLE2D |
150 ISYNC_WAIT_IDLEGUI |
151 ISYNC_CPSCRATCH_IDLEGUI);
152 radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
153 radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
176 radeon_ring_write(rdev, PACKET0(0x170C, 0)); 154 radeon_ring_write(rdev, PACKET0(0x170C, 0));
177 radeon_ring_write(rdev, 1 << 31); 155 radeon_ring_write(rdev, 1 << 31);
178 radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0)); 156 radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
179 radeon_ring_write(rdev, 0); 157 radeon_ring_write(rdev, 0);
180 radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0)); 158 radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
181 radeon_ring_write(rdev, 0); 159 radeon_ring_write(rdev, 0);
182 radeon_ring_write(rdev, PACKET0(0x42C8, 0)); 160 radeon_ring_write(rdev, PACKET0(0x42C8, 0));
183 radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1); 161 radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
184 radeon_ring_write(rdev, PACKET0(R500_VAP_INDEX_OFFSET, 0)); 162 radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
185 radeon_ring_write(rdev, 0); 163 radeon_ring_write(rdev, 0);
186 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); 164 radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
187 radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); 165 radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
188 radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); 166 radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
189 radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE); 167 radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
190 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); 168 radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
191 radeon_ring_write(rdev, 169 radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
192 RADEON_WAIT_2D_IDLECLEAN | 170 radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0));
193 RADEON_WAIT_3D_IDLECLEAN);
194 radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
195 radeon_ring_write(rdev, 0); 171 radeon_ring_write(rdev, 0);
196 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); 172 radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
197 radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); 173 radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
198 radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); 174 radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
199 radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE); 175 radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
200 radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0)); 176 radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0));
201 radeon_ring_write(rdev,
202 ((6 << R300_MS_X0_SHIFT) |
203 (6 << R300_MS_Y0_SHIFT) |
204 (6 << R300_MS_X1_SHIFT) |
205 (6 << R300_MS_Y1_SHIFT) |
206 (6 << R300_MS_X2_SHIFT) |
207 (6 << R300_MS_Y2_SHIFT) |
208 (6 << R300_MSBD0_Y_SHIFT) |
209 (6 << R300_MSBD0_X_SHIFT)));
210 radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
211 radeon_ring_write(rdev, 177 radeon_ring_write(rdev,
212 ((6 << R300_MS_X3_SHIFT) | 178 ((6 << MS_X0_SHIFT) |
213 (6 << R300_MS_Y3_SHIFT) | 179 (6 << MS_Y0_SHIFT) |
214 (6 << R300_MS_X4_SHIFT) | 180 (6 << MS_X1_SHIFT) |
215 (6 << R300_MS_Y4_SHIFT) | 181 (6 << MS_Y1_SHIFT) |
216 (6 << R300_MS_X5_SHIFT) | 182 (6 << MS_X2_SHIFT) |
217 (6 << R300_MS_Y5_SHIFT) | 183 (6 << MS_Y2_SHIFT) |
218 (6 << R300_MSBD1_SHIFT))); 184 (6 << MSBD0_Y_SHIFT) |
219 radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0)); 185 (6 << MSBD0_X_SHIFT)));
220 radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL); 186 radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0));
221 radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
222 radeon_ring_write(rdev, 187 radeon_ring_write(rdev,
223 R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE); 188 ((6 << MS_X3_SHIFT) |
224 radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0)); 189 (6 << MS_Y3_SHIFT) |
225 radeon_ring_write(rdev, 190 (6 << MS_X4_SHIFT) |
226 R300_GEOMETRY_ROUND_NEAREST | 191 (6 << MS_Y4_SHIFT) |
227 R300_COLOR_ROUND_NEAREST); 192 (6 << MS_X5_SHIFT) |
193 (6 << MS_Y5_SHIFT) |
194 (6 << MSBD1_SHIFT)));
195 radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0));
196 radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
197 radeon_ring_write(rdev, PACKET0(GA_POLY_MODE, 0));
198 radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
199 radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0));
200 radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
228 radeon_ring_write(rdev, PACKET0(0x20C8, 0)); 201 radeon_ring_write(rdev, PACKET0(0x20C8, 0));
229 radeon_ring_write(rdev, 0); 202 radeon_ring_write(rdev, 0);
230 radeon_ring_unlock_commit(rdev); 203 radeon_ring_unlock_commit(rdev);
@@ -242,8 +215,8 @@ int rv515_mc_wait_for_idle(struct radeon_device *rdev)
242 215
243 for (i = 0; i < rdev->usec_timeout; i++) { 216 for (i = 0; i < rdev->usec_timeout; i++) {
244 /* read MC_STATUS */ 217 /* read MC_STATUS */
245 tmp = RREG32_MC(RV515_MC_STATUS); 218 tmp = RREG32_MC(MC_STATUS);
246 if (tmp & RV515_MC_STATUS_IDLE) { 219 if (tmp & MC_STATUS_IDLE) {
247 return 0; 220 return 0;
248 } 221 }
249 DRM_UDELAY(1); 222 DRM_UDELAY(1);
@@ -291,33 +264,33 @@ int rv515_ga_reset(struct radeon_device *rdev)
291 reinit_cp = rdev->cp.ready; 264 reinit_cp = rdev->cp.ready;
292 rdev->cp.ready = false; 265 rdev->cp.ready = false;
293 for (i = 0; i < rdev->usec_timeout; i++) { 266 for (i = 0; i < rdev->usec_timeout; i++) {
294 WREG32(RADEON_CP_CSQ_MODE, 0); 267 WREG32(CP_CSQ_MODE, 0);
295 WREG32(RADEON_CP_CSQ_CNTL, 0); 268 WREG32(CP_CSQ_CNTL, 0);
296 WREG32(RADEON_RBBM_SOFT_RESET, 0x32005); 269 WREG32(RBBM_SOFT_RESET, 0x32005);
297 (void)RREG32(RADEON_RBBM_SOFT_RESET); 270 (void)RREG32(RBBM_SOFT_RESET);
298 udelay(200); 271 udelay(200);
299 WREG32(RADEON_RBBM_SOFT_RESET, 0); 272 WREG32(RBBM_SOFT_RESET, 0);
300 /* Wait to prevent race in RBBM_STATUS */ 273 /* Wait to prevent race in RBBM_STATUS */
301 mdelay(1); 274 mdelay(1);
302 tmp = RREG32(RADEON_RBBM_STATUS); 275 tmp = RREG32(RBBM_STATUS);
303 if (tmp & ((1 << 20) | (1 << 26))) { 276 if (tmp & ((1 << 20) | (1 << 26))) {
304 DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp); 277 DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp);
305 /* GA still busy soft reset it */ 278 /* GA still busy soft reset it */
306 WREG32(0x429C, 0x200); 279 WREG32(0x429C, 0x200);
307 WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0); 280 WREG32(VAP_PVS_STATE_FLUSH_REG, 0);
308 WREG32(0x43E0, 0); 281 WREG32(0x43E0, 0);
309 WREG32(0x43E4, 0); 282 WREG32(0x43E4, 0);
310 WREG32(0x24AC, 0); 283 WREG32(0x24AC, 0);
311 } 284 }
312 /* Wait to prevent race in RBBM_STATUS */ 285 /* Wait to prevent race in RBBM_STATUS */
313 mdelay(1); 286 mdelay(1);
314 tmp = RREG32(RADEON_RBBM_STATUS); 287 tmp = RREG32(RBBM_STATUS);
315 if (!(tmp & ((1 << 20) | (1 << 26)))) { 288 if (!(tmp & ((1 << 20) | (1 << 26)))) {
316 break; 289 break;
317 } 290 }
318 } 291 }
319 for (i = 0; i < rdev->usec_timeout; i++) { 292 for (i = 0; i < rdev->usec_timeout; i++) {
320 tmp = RREG32(RADEON_RBBM_STATUS); 293 tmp = RREG32(RBBM_STATUS);
321 if (!(tmp & ((1 << 20) | (1 << 26)))) { 294 if (!(tmp & ((1 << 20) | (1 << 26)))) {
322 DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n", 295 DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
323 tmp); 296 tmp);
@@ -331,7 +304,7 @@ int rv515_ga_reset(struct radeon_device *rdev)
331 } 304 }
332 DRM_UDELAY(1); 305 DRM_UDELAY(1);
333 } 306 }
334 tmp = RREG32(RADEON_RBBM_STATUS); 307 tmp = RREG32(RBBM_STATUS);
335 DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp); 308 DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
336 return -1; 309 return -1;
337} 310}
@@ -341,7 +314,7 @@ int rv515_gpu_reset(struct radeon_device *rdev)
341 uint32_t status; 314 uint32_t status;
342 315
343 /* reset order likely matters */ 316 /* reset order likely matters */
344 status = RREG32(RADEON_RBBM_STATUS); 317 status = RREG32(RBBM_STATUS);
345 /* reset HDP */ 318 /* reset HDP */
346 r100_hdp_reset(rdev); 319 r100_hdp_reset(rdev);
347 /* reset rb2d */ 320 /* reset rb2d */
@@ -353,12 +326,12 @@ int rv515_gpu_reset(struct radeon_device *rdev)
353 rv515_ga_reset(rdev); 326 rv515_ga_reset(rdev);
354 } 327 }
355 /* reset CP */ 328 /* reset CP */
356 status = RREG32(RADEON_RBBM_STATUS); 329 status = RREG32(RBBM_STATUS);
357 if (status & (1 << 16)) { 330 if (status & (1 << 16)) {
358 r100_cp_reset(rdev); 331 r100_cp_reset(rdev);
359 } 332 }
360 /* Check if GPU is idle */ 333 /* Check if GPU is idle */
361 status = RREG32(RADEON_RBBM_STATUS); 334 status = RREG32(RBBM_STATUS);
362 if (status & (1 << 31)) { 335 if (status & (1 << 31)) {
363 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); 336 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
364 return -1; 337 return -1;
@@ -377,8 +350,7 @@ static void rv515_vram_get_type(struct radeon_device *rdev)
377 350
378 rdev->mc.vram_width = 128; 351 rdev->mc.vram_width = 128;
379 rdev->mc.vram_is_ddr = true; 352 rdev->mc.vram_is_ddr = true;
380 tmp = RREG32_MC(RV515_MC_CNTL); 353 tmp = RREG32_MC(RV515_MC_CNTL) & MEM_NUM_CHANNELS_MASK;
381 tmp &= RV515_MEM_NUM_CHANNELS_MASK;
382 switch (tmp) { 354 switch (tmp) {
383 case 0: 355 case 0:
384 rdev->mc.vram_width = 64; 356 rdev->mc.vram_width = 64;
@@ -394,11 +366,16 @@ static void rv515_vram_get_type(struct radeon_device *rdev)
394 366
395void rv515_vram_info(struct radeon_device *rdev) 367void rv515_vram_info(struct radeon_device *rdev)
396{ 368{
369 fixed20_12 a;
370
397 rv515_vram_get_type(rdev); 371 rv515_vram_get_type(rdev);
398 rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
399 372
400 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 373 /* FIXME: we should enforce default clock in case GPU is not in
401 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 374 * default setup
375 */
376 a.full = rfixed_const(100);
377 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
378 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
402} 379}
403 380
404 381
@@ -409,35 +386,35 @@ uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
409{ 386{
410 uint32_t r; 387 uint32_t r;
411 388
412 WREG32(R520_MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); 389 WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
413 r = RREG32(R520_MC_IND_DATA); 390 r = RREG32(MC_IND_DATA);
414 WREG32(R520_MC_IND_INDEX, 0); 391 WREG32(MC_IND_INDEX, 0);
415 return r; 392 return r;
416} 393}
417 394
418void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 395void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
419{ 396{
420 WREG32(R520_MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); 397 WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
421 WREG32(R520_MC_IND_DATA, (v)); 398 WREG32(MC_IND_DATA, (v));
422 WREG32(R520_MC_IND_INDEX, 0); 399 WREG32(MC_IND_INDEX, 0);
423} 400}
424 401
425uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg) 402uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
426{ 403{
427 uint32_t r; 404 uint32_t r;
428 405
429 WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff)); 406 WREG32(PCIE_INDEX, ((reg) & 0x7ff));
430 (void)RREG32(RADEON_PCIE_INDEX); 407 (void)RREG32(PCIE_INDEX);
431 r = RREG32(RADEON_PCIE_DATA); 408 r = RREG32(PCIE_DATA);
432 return r; 409 return r;
433} 410}
434 411
435void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 412void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
436{ 413{
437 WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff)); 414 WREG32(PCIE_INDEX, ((reg) & 0x7ff));
438 (void)RREG32(RADEON_PCIE_INDEX); 415 (void)RREG32(PCIE_INDEX);
439 WREG32(RADEON_PCIE_DATA, (v)); 416 WREG32(PCIE_DATA, (v));
440 (void)RREG32(RADEON_PCIE_DATA); 417 (void)RREG32(PCIE_DATA);
441} 418}
442 419
443 420
@@ -452,13 +429,13 @@ static int rv515_debugfs_pipes_info(struct seq_file *m, void *data)
452 struct radeon_device *rdev = dev->dev_private; 429 struct radeon_device *rdev = dev->dev_private;
453 uint32_t tmp; 430 uint32_t tmp;
454 431
455 tmp = RREG32(R400_GB_PIPE_SELECT); 432 tmp = RREG32(GB_PIPE_SELECT);
456 seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp); 433 seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
457 tmp = RREG32(R500_SU_REG_DEST); 434 tmp = RREG32(SU_REG_DEST);
458 seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp); 435 seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp);
459 tmp = RREG32(R300_GB_TILE_CONFIG); 436 tmp = RREG32(GB_TILE_CONFIG);
460 seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp); 437 seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
461 tmp = RREG32(R300_DST_PIPE_CONFIG); 438 tmp = RREG32(DST_PIPE_CONFIG);
462 seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp); 439 seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
463 return 0; 440 return 0;
464} 441}
@@ -509,9 +486,9 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
509/* 486/*
510 * Asic initialization 487 * Asic initialization
511 */ 488 */
512static const unsigned r500_reg_safe_bm[159] = { 489static const unsigned r500_reg_safe_bm[219] = {
490 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
513 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 491 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
514 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF,
515 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 492 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
516 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 493 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
517 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 494 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
@@ -549,14 +526,575 @@ static const unsigned r500_reg_safe_bm[159] = {
549 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 526 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
550 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF, 527 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF,
551 0x00000000, 0x00000000, 0x00000000, 0x00000000, 528 0x00000000, 0x00000000, 0x00000000, 0x00000000,
552 0x0003FC01, 0x3FFFFCF8, 0xFE800B19, 529 0x0003FC01, 0x3FFFFCF8, 0xFE800B19, 0xFFFFFFFF,
530 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
531 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
532 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
533 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
534 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
535 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
536 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
537 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
538 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
539 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
540 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
541 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
542 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
543 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
544 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
553}; 545};
554 546
555
556
557int rv515_init(struct radeon_device *rdev) 547int rv515_init(struct radeon_device *rdev)
558{ 548{
559 rdev->config.r300.reg_safe_bm = r500_reg_safe_bm; 549 rdev->config.r300.reg_safe_bm = r500_reg_safe_bm;
560 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm); 550 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm);
561 return 0; 551 return 0;
562} 552}
553
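Each bit of r500_reg_safe_bm covers one register dword, so growing the table from 159 to 219 words extends coverage from byte offset 0x4F80 to 0x6D80 -- far enough to take in the AVIVO display registers used below (DC_LB_MEMORY_SPLIT at 0x6520 through D2MODE_PRIORITY_B_CNT at 0x6D4C). A hypothetical sketch of the lookup (the real test lives in the CS parser; the LSB-first bit order is an assumption):

/* Hypothetical helper: bit i covers the register at byte offset i * 4. */
static int r500_reg_is_safe(const unsigned *bm, unsigned words, unsigned reg)
{
	unsigned i = reg >> 2;		/* dword index of the register */

	if (i >= words * 32)
		return 0;		/* beyond the table: reject */
	return (bm[i / 32] >> (i % 32)) & 1;
}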
554void atom_rv515_force_tv_scaler(struct radeon_device *rdev)
555{
556
557 WREG32(0x659C, 0x0);
558 WREG32(0x6594, 0x705);
559 WREG32(0x65A4, 0x10001);
560 WREG32(0x65D8, 0x0);
561 WREG32(0x65B0, 0x0);
562 WREG32(0x65C0, 0x0);
563 WREG32(0x65D4, 0x0);
564 WREG32(0x6578, 0x0);
565 WREG32(0x657C, 0x841880A8);
566 WREG32(0x6578, 0x1);
567 WREG32(0x657C, 0x84208680);
568 WREG32(0x6578, 0x2);
569 WREG32(0x657C, 0xBFF880B0);
570 WREG32(0x6578, 0x100);
571 WREG32(0x657C, 0x83D88088);
572 WREG32(0x6578, 0x101);
573 WREG32(0x657C, 0x84608680);
574 WREG32(0x6578, 0x102);
575 WREG32(0x657C, 0xBFF080D0);
576 WREG32(0x6578, 0x200);
577 WREG32(0x657C, 0x83988068);
578 WREG32(0x6578, 0x201);
579 WREG32(0x657C, 0x84A08680);
580 WREG32(0x6578, 0x202);
581 WREG32(0x657C, 0xBFF080F8);
582 WREG32(0x6578, 0x300);
583 WREG32(0x657C, 0x83588058);
584 WREG32(0x6578, 0x301);
585 WREG32(0x657C, 0x84E08660);
586 WREG32(0x6578, 0x302);
587 WREG32(0x657C, 0xBFF88120);
588 WREG32(0x6578, 0x400);
589 WREG32(0x657C, 0x83188040);
590 WREG32(0x6578, 0x401);
591 WREG32(0x657C, 0x85008660);
592 WREG32(0x6578, 0x402);
593 WREG32(0x657C, 0xBFF88150);
594 WREG32(0x6578, 0x500);
595 WREG32(0x657C, 0x82D88030);
596 WREG32(0x6578, 0x501);
597 WREG32(0x657C, 0x85408640);
598 WREG32(0x6578, 0x502);
599 WREG32(0x657C, 0xBFF88180);
600 WREG32(0x6578, 0x600);
601 WREG32(0x657C, 0x82A08018);
602 WREG32(0x6578, 0x601);
603 WREG32(0x657C, 0x85808620);
604 WREG32(0x6578, 0x602);
605 WREG32(0x657C, 0xBFF081B8);
606 WREG32(0x6578, 0x700);
607 WREG32(0x657C, 0x82608010);
608 WREG32(0x6578, 0x701);
609 WREG32(0x657C, 0x85A08600);
610 WREG32(0x6578, 0x702);
611 WREG32(0x657C, 0x800081F0);
612 WREG32(0x6578, 0x800);
613 WREG32(0x657C, 0x8228BFF8);
614 WREG32(0x6578, 0x801);
615 WREG32(0x657C, 0x85E085E0);
616 WREG32(0x6578, 0x802);
617 WREG32(0x657C, 0xBFF88228);
618 WREG32(0x6578, 0x10000);
619 WREG32(0x657C, 0x82A8BF00);
620 WREG32(0x6578, 0x10001);
621 WREG32(0x657C, 0x82A08CC0);
622 WREG32(0x6578, 0x10002);
623 WREG32(0x657C, 0x8008BEF8);
624 WREG32(0x6578, 0x10100);
625 WREG32(0x657C, 0x81F0BF28);
626 WREG32(0x6578, 0x10101);
627 WREG32(0x657C, 0x83608CA0);
628 WREG32(0x6578, 0x10102);
629 WREG32(0x657C, 0x8018BED0);
630 WREG32(0x6578, 0x10200);
631 WREG32(0x657C, 0x8148BF38);
632 WREG32(0x6578, 0x10201);
633 WREG32(0x657C, 0x84408C80);
634 WREG32(0x6578, 0x10202);
635 WREG32(0x657C, 0x8008BEB8);
636 WREG32(0x6578, 0x10300);
637 WREG32(0x657C, 0x80B0BF78);
638 WREG32(0x6578, 0x10301);
639 WREG32(0x657C, 0x85008C20);
640 WREG32(0x6578, 0x10302);
641 WREG32(0x657C, 0x8020BEA0);
642 WREG32(0x6578, 0x10400);
643 WREG32(0x657C, 0x8028BF90);
644 WREG32(0x6578, 0x10401);
645 WREG32(0x657C, 0x85E08BC0);
646 WREG32(0x6578, 0x10402);
647 WREG32(0x657C, 0x8018BE90);
648 WREG32(0x6578, 0x10500);
649 WREG32(0x657C, 0xBFB8BFB0);
650 WREG32(0x6578, 0x10501);
651 WREG32(0x657C, 0x86C08B40);
652 WREG32(0x6578, 0x10502);
653 WREG32(0x657C, 0x8010BE90);
654 WREG32(0x6578, 0x10600);
655 WREG32(0x657C, 0xBF58BFC8);
656 WREG32(0x6578, 0x10601);
657 WREG32(0x657C, 0x87A08AA0);
658 WREG32(0x6578, 0x10602);
659 WREG32(0x657C, 0x8010BE98);
660 WREG32(0x6578, 0x10700);
661 WREG32(0x657C, 0xBF10BFF0);
662 WREG32(0x6578, 0x10701);
663 WREG32(0x657C, 0x886089E0);
664 WREG32(0x6578, 0x10702);
665 WREG32(0x657C, 0x8018BEB0);
666 WREG32(0x6578, 0x10800);
667 WREG32(0x657C, 0xBED8BFE8);
668 WREG32(0x6578, 0x10801);
669 WREG32(0x657C, 0x89408940);
670 WREG32(0x6578, 0x10802);
671 WREG32(0x657C, 0xBFE8BED8);
672 WREG32(0x6578, 0x20000);
673 WREG32(0x657C, 0x80008000);
674 WREG32(0x6578, 0x20001);
675 WREG32(0x657C, 0x90008000);
676 WREG32(0x6578, 0x20002);
677 WREG32(0x657C, 0x80008000);
678 WREG32(0x6578, 0x20003);
679 WREG32(0x657C, 0x80008000);
680 WREG32(0x6578, 0x20100);
681 WREG32(0x657C, 0x80108000);
682 WREG32(0x6578, 0x20101);
683 WREG32(0x657C, 0x8FE0BF70);
684 WREG32(0x6578, 0x20102);
685 WREG32(0x657C, 0xBFE880C0);
686 WREG32(0x6578, 0x20103);
687 WREG32(0x657C, 0x80008000);
688 WREG32(0x6578, 0x20200);
689 WREG32(0x657C, 0x8018BFF8);
690 WREG32(0x6578, 0x20201);
691 WREG32(0x657C, 0x8F80BF08);
692 WREG32(0x6578, 0x20202);
693 WREG32(0x657C, 0xBFD081A0);
694 WREG32(0x6578, 0x20203);
695 WREG32(0x657C, 0xBFF88000);
696 WREG32(0x6578, 0x20300);
697 WREG32(0x657C, 0x80188000);
698 WREG32(0x6578, 0x20301);
699 WREG32(0x657C, 0x8EE0BEC0);
700 WREG32(0x6578, 0x20302);
701 WREG32(0x657C, 0xBFB082A0);
702 WREG32(0x6578, 0x20303);
703 WREG32(0x657C, 0x80008000);
704 WREG32(0x6578, 0x20400);
705 WREG32(0x657C, 0x80188000);
706 WREG32(0x6578, 0x20401);
707 WREG32(0x657C, 0x8E00BEA0);
708 WREG32(0x6578, 0x20402);
709 WREG32(0x657C, 0xBF8883C0);
710 WREG32(0x6578, 0x20403);
711 WREG32(0x657C, 0x80008000);
712 WREG32(0x6578, 0x20500);
713 WREG32(0x657C, 0x80188000);
714 WREG32(0x6578, 0x20501);
715 WREG32(0x657C, 0x8D00BE90);
716 WREG32(0x6578, 0x20502);
717 WREG32(0x657C, 0xBF588500);
718 WREG32(0x6578, 0x20503);
719 WREG32(0x657C, 0x80008008);
720 WREG32(0x6578, 0x20600);
721 WREG32(0x657C, 0x80188000);
722 WREG32(0x6578, 0x20601);
723 WREG32(0x657C, 0x8BC0BE98);
724 WREG32(0x6578, 0x20602);
725 WREG32(0x657C, 0xBF308660);
726 WREG32(0x6578, 0x20603);
727 WREG32(0x657C, 0x80008008);
728 WREG32(0x6578, 0x20700);
729 WREG32(0x657C, 0x80108000);
730 WREG32(0x6578, 0x20701);
731 WREG32(0x657C, 0x8A80BEB0);
732 WREG32(0x6578, 0x20702);
733 WREG32(0x657C, 0xBF0087C0);
734 WREG32(0x6578, 0x20703);
735 WREG32(0x657C, 0x80008008);
736 WREG32(0x6578, 0x20800);
737 WREG32(0x657C, 0x80108000);
738 WREG32(0x6578, 0x20801);
739 WREG32(0x657C, 0x8920BED0);
740 WREG32(0x6578, 0x20802);
741 WREG32(0x657C, 0xBED08920);
742 WREG32(0x6578, 0x20803);
743 WREG32(0x657C, 0x80008010);
744 WREG32(0x6578, 0x30000);
745 WREG32(0x657C, 0x90008000);
746 WREG32(0x6578, 0x30001);
747 WREG32(0x657C, 0x80008000);
748 WREG32(0x6578, 0x30100);
749 WREG32(0x657C, 0x8FE0BF90);
750 WREG32(0x6578, 0x30101);
751 WREG32(0x657C, 0xBFF880A0);
752 WREG32(0x6578, 0x30200);
753 WREG32(0x657C, 0x8F60BF40);
754 WREG32(0x6578, 0x30201);
755 WREG32(0x657C, 0xBFE88180);
756 WREG32(0x6578, 0x30300);
757 WREG32(0x657C, 0x8EC0BF00);
758 WREG32(0x6578, 0x30301);
759 WREG32(0x657C, 0xBFC88280);
760 WREG32(0x6578, 0x30400);
761 WREG32(0x657C, 0x8DE0BEE0);
762 WREG32(0x6578, 0x30401);
763 WREG32(0x657C, 0xBFA083A0);
764 WREG32(0x6578, 0x30500);
765 WREG32(0x657C, 0x8CE0BED0);
766 WREG32(0x6578, 0x30501);
767 WREG32(0x657C, 0xBF7884E0);
768 WREG32(0x6578, 0x30600);
769 WREG32(0x657C, 0x8BA0BED8);
770 WREG32(0x6578, 0x30601);
771 WREG32(0x657C, 0xBF508640);
772 WREG32(0x6578, 0x30700);
773 WREG32(0x657C, 0x8A60BEE8);
774 WREG32(0x6578, 0x30701);
775 WREG32(0x657C, 0xBF2087A0);
776 WREG32(0x6578, 0x30800);
777 WREG32(0x657C, 0x8900BF00);
778 WREG32(0x6578, 0x30801);
779 WREG32(0x657C, 0xBF008900);
780}
781
782struct rv515_watermark {
783 u32 lb_request_fifo_depth;
784 fixed20_12 num_line_pair;
785 fixed20_12 estimated_width;
786 fixed20_12 worst_case_latency;
787 fixed20_12 consumption_rate;
788 fixed20_12 active_time;
789 fixed20_12 dbpp;
790 fixed20_12 priority_mark_max;
791 fixed20_12 priority_mark;
792 fixed20_12 sclk;
793};
794
795void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
796 struct radeon_crtc *crtc,
797 struct rv515_watermark *wm)
798{
799 struct drm_display_mode *mode = &crtc->base.mode;
800 fixed20_12 a, b, c;
801 fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
802 fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
803
804 if (!crtc->base.enabled) {
805 /* FIXME: wouldn't it be better to set the priority mark to maximum? */
806 wm->lb_request_fifo_depth = 4;
807 return;
808 }
809
810 if (crtc->vsc.full > rfixed_const(2))
811 wm->num_line_pair.full = rfixed_const(2);
812 else
813 wm->num_line_pair.full = rfixed_const(1);
814
815 b.full = rfixed_const(mode->crtc_hdisplay);
816 c.full = rfixed_const(256);
817 a.full = rfixed_mul(wm->num_line_pair, b);
818 request_fifo_depth.full = rfixed_div(a, c);
819 if (a.full < rfixed_const(4)) {
820 wm->lb_request_fifo_depth = 4;
821 } else {
822 wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
823 }
824
825 /* Determine consumption rate
826 * pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
827 * vtaps = number of vertical taps,
828 * vsc = vertical scaling ratio, defined as source/destination
829 * hsc = horizontal scaling ratio, defined as source/destination
830 */
831 a.full = rfixed_const(mode->clock);
832 b.full = rfixed_const(1000);
833 a.full = rfixed_div(a, b);
834 pclk.full = rfixed_div(b, a);
835 if (crtc->rmx_type != RMX_OFF) {
836 b.full = rfixed_const(2);
837 if (crtc->vsc.full > b.full)
838 b.full = crtc->vsc.full;
839 b.full = rfixed_mul(b, crtc->hsc);
840 c.full = rfixed_const(2);
841 b.full = rfixed_div(b, c);
842 consumption_time.full = rfixed_div(pclk, b);
843 } else {
844 consumption_time.full = pclk.full;
845 }
846 a.full = rfixed_const(1);
847 wm->consumption_rate.full = rfixed_div(a, consumption_time);
848
849
850 /* Determine line time
851 * LineTime = total time for one line of display
852 * htotal = total number of horizontal pixels
853 * pclk = pixel clock period(ns)
854 */
855 a.full = rfixed_const(crtc->base.mode.crtc_htotal);
856 line_time.full = rfixed_mul(a, pclk);
857
858 /* Determine active time
859 * ActiveTime = time of active region of display within one line,
860 * hactive = total number of horizontal active pixels
861 * htotal = total number of horizontal pixels
862 */
863 a.full = rfixed_const(crtc->base.mode.crtc_htotal);
864 b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
865 wm->active_time.full = rfixed_mul(line_time, b);
866 wm->active_time.full = rfixed_div(wm->active_time, a);
867
868 /* Determine chunk time
869 * ChunkTime = the time it takes the DCP to send one chunk of data
870 * to the LB which consists of pipeline delay and inter chunk gap
871 * sclk = system clock (MHz)
872 */
873 a.full = rfixed_const(600 * 1000);
874 chunk_time.full = rfixed_div(a, rdev->pm.sclk);
875 read_delay_latency.full = rfixed_const(1000);
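	/* E.g. with a hypothetical sclk of 500 (MHz), one chunk takes
	 * 600000 / 500 = 1200 ns here, plus the fixed 1000 ns read delay. */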
876
877 /* Determine the worst case latency
878 * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
879 * WorstCaseLatency = worst case time from urgent to when the MC starts
880 * to return data
881 * READ_DELAY_IDLE_MAX = constant of 1us
882 * ChunkTime = time it takes the DCP to send one chunk of data to the LB
883 * which consists of pipeline delay and inter chunk gap
884 */
885 if (rfixed_trunc(wm->num_line_pair) > 1) {
886 a.full = rfixed_const(3);
887 wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
888 wm->worst_case_latency.full += read_delay_latency.full;
889 } else {
890 wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full;
891 }
892
893 /* Determine the tolerable latency
894 * TolerableLatency = Any given request has only 1 line time
895 * for the data to be returned
896 * LBRequestFifoDepth = Number of chunk requests the LB can
897 * put into the request FIFO for a display
898 * LineTime = total time for one line of display
899 * ChunkTime = the time it takes the DCP to send one chunk
900 * of data to the LB which consists of
901 * pipeline delay and inter chunk gap
902 */
903 if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
904 tolerable_latency.full = line_time.full;
905 } else {
906 tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
907 tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
908 tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
909 tolerable_latency.full = line_time.full - tolerable_latency.full;
910 }
911 /* We assume worst case 32bits (4 bytes) */
912 wm->dbpp.full = rfixed_const(2 * 16);
913
914 /* Determine the maximum priority mark
915 * width = viewport width in pixels
916 */
917 a.full = rfixed_const(16);
918 wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
919 wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
920
921 /* Determine estimated width */
922 estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
923 estimated_width.full = rfixed_div(estimated_width, consumption_time);
924 if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
925 wm->priority_mark.full = rfixed_const(10);
926 } else {
927 a.full = rfixed_const(16);
928 wm->priority_mark.full = rfixed_div(estimated_width, a);
929 wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
930 }
931}
932
933void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
934{
935 struct drm_display_mode *mode0 = NULL;
936 struct drm_display_mode *mode1 = NULL;
937 struct rv515_watermark wm0;
938 struct rv515_watermark wm1;
939 u32 tmp;
940 fixed20_12 priority_mark02, priority_mark12, fill_rate;
941 fixed20_12 a, b;
942
943 if (rdev->mode_info.crtcs[0]->base.enabled)
944 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
945 if (rdev->mode_info.crtcs[1]->base.enabled)
946 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
947 rs690_line_buffer_adjust(rdev, mode0, mode1);
948
949 rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
950 rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
951
952 tmp = wm0.lb_request_fifo_depth;
953 tmp |= wm1.lb_request_fifo_depth << 16;
954 WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
955
956 if (mode0 && mode1) {
957 if (rfixed_trunc(wm0.dbpp) > 64)
958 a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
959 else
960 a.full = wm0.num_line_pair.full;
961 if (rfixed_trunc(wm1.dbpp) > 64)
962 b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
963 else
964 b.full = wm1.num_line_pair.full;
965 a.full += b.full;
966 fill_rate.full = rfixed_div(wm0.sclk, a);
967 if (wm0.consumption_rate.full > fill_rate.full) {
968 b.full = wm0.consumption_rate.full - fill_rate.full;
969 b.full = rfixed_mul(b, wm0.active_time);
970 a.full = rfixed_const(16);
971 b.full = rfixed_div(b, a);
972 a.full = rfixed_mul(wm0.worst_case_latency,
973 wm0.consumption_rate);
974 priority_mark02.full = a.full + b.full;
975 } else {
976 a.full = rfixed_mul(wm0.worst_case_latency,
977 wm0.consumption_rate);
978 b.full = rfixed_const(16 * 1000);
979 priority_mark02.full = rfixed_div(a, b);
980 }
981 if (wm1.consumption_rate.full > fill_rate.full) {
982 b.full = wm1.consumption_rate.full - fill_rate.full;
983 b.full = rfixed_mul(b, wm1.active_time);
984 a.full = rfixed_const(16);
985 b.full = rfixed_div(b, a);
986 a.full = rfixed_mul(wm1.worst_case_latency,
987 wm1.consumption_rate);
988 priority_mark12.full = a.full + b.full;
989 } else {
990 a.full = rfixed_mul(wm1.worst_case_latency,
991 wm1.consumption_rate);
992 b.full = rfixed_const(16 * 1000);
993 priority_mark12.full = rfixed_div(a, b);
994 }
995 if (wm0.priority_mark.full > priority_mark02.full)
996 priority_mark02.full = wm0.priority_mark.full;
997 if (rfixed_trunc(priority_mark02) < 0)
998 priority_mark02.full = 0;
999 if (wm0.priority_mark_max.full > priority_mark02.full)
1000 priority_mark02.full = wm0.priority_mark_max.full;
1001 if (wm1.priority_mark.full > priority_mark12.full)
1002 priority_mark12.full = wm1.priority_mark.full;
1003 if (rfixed_trunc(priority_mark12) < 0)
1004 priority_mark12.full = 0;
1005 if (wm1.priority_mark_max.full > priority_mark12.full)
1006 priority_mark12.full = wm1.priority_mark_max.full;
1007 WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
1008 WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
1009 WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
1010 WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
1011 } else if (mode0) {
1012 if (rfixed_trunc(wm0.dbpp) > 64)
1013 a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
1014 else
1015 a.full = wm0.num_line_pair.full;
1016 fill_rate.full = rfixed_div(wm0.sclk, a);
1017 if (wm0.consumption_rate.full > fill_rate.full) {
1018 b.full = wm0.consumption_rate.full - fill_rate.full;
1019 b.full = rfixed_mul(b, wm0.active_time);
1020 a.full = rfixed_const(16);
1021 b.full = rfixed_div(b, a);
1022 a.full = rfixed_mul(wm0.worst_case_latency,
1023 wm0.consumption_rate);
1024 priority_mark02.full = a.full + b.full;
1025 } else {
1026 a.full = rfixed_mul(wm0.worst_case_latency,
1027 wm0.consumption_rate);
1028 b.full = rfixed_const(16);
1029 priority_mark02.full = rfixed_div(a, b);
1030 }
1031 if (wm0.priority_mark.full > priority_mark02.full)
1032 priority_mark02.full = wm0.priority_mark.full;
1033 if (rfixed_trunc(priority_mark02) < 0)
1034 priority_mark02.full = 0;
1035 if (wm0.priority_mark_max.full > priority_mark02.full)
1036 priority_mark02.full = wm0.priority_mark_max.full;
1037 WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
1038 WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
1039 WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
1040 WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
1041 } else {
1042 if (rfixed_trunc(wm1.dbpp) > 64)
1043 a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
1044 else
1045 a.full = wm1.num_line_pair.full;
1046 fill_rate.full = rfixed_div(wm1.sclk, a);
1047 if (wm1.consumption_rate.full > fill_rate.full) {
1048 b.full = wm1.consumption_rate.full - fill_rate.full;
1049 b.full = rfixed_mul(b, wm1.active_time);
1050 a.full = rfixed_const(16);
1051 b.full = rfixed_div(b, a);
1052 a.full = rfixed_mul(wm1.worst_case_latency,
1053 wm1.consumption_rate);
1054 priority_mark12.full = a.full + b.full;
1055 } else {
1056 a.full = rfixed_mul(wm1.worst_case_latency,
1057 wm1.consumption_rate);
1058 b.full = rfixed_const(16 * 1000);
1059 priority_mark12.full = rfixed_div(a, b);
1060 }
1061 if (wm1.priority_mark.full > priority_mark12.full)
1062 priority_mark12.full = wm1.priority_mark.full;
1063 if (rfixed_trunc(priority_mark12) < 0)
1064 priority_mark12.full = 0;
1065 if (wm1.priority_mark_max.full > priority_mark12.full)
1066 priority_mark12.full = wm1.priority_mark_max.full;
1067 WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
1068 WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
1069 WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
1070 WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
1071 }
1072}
1073
1074void rv515_bandwidth_update(struct radeon_device *rdev)
1075{
1076 uint32_t tmp;
1077 struct drm_display_mode *mode0 = NULL;
1078 struct drm_display_mode *mode1 = NULL;
1079
1080 if (rdev->mode_info.crtcs[0]->base.enabled)
1081 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
1082 if (rdev->mode_info.crtcs[1]->base.enabled)
1083 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
1084 /*
1085 * Raise the display0/1 priority in the memory controller
1086 * when the user specifies HIGH for the displaypriority
1087 * option.
1088 */
1089 if (rdev->disp_priority == 2) {
1090 tmp = RREG32_MC(MC_MISC_LAT_TIMER);
1091 tmp &= ~MC_DISP1R_INIT_LAT_MASK;
1092 tmp &= ~MC_DISP0R_INIT_LAT_MASK;
1093 if (mode1)
1094 tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
1095 if (mode0)
1096 tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT);
1097 WREG32_MC(MC_MISC_LAT_TIMER, tmp);
1098 }
1099 rv515_bandwidth_avivo_update(rdev);
1100}
diff --git a/drivers/gpu/drm/radeon/rv515r.h b/drivers/gpu/drm/radeon/rv515r.h
new file mode 100644
index 000000000000..f3cf84039906
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv515r.h
@@ -0,0 +1,170 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef RV515R_H
29#define RV515R_H
30
31/* RV515 registers */
32#define PCIE_INDEX 0x0030
33#define PCIE_DATA 0x0034
34#define MC_IND_INDEX 0x0070
35#define MC_IND_WR_EN (1 << 24)
36#define MC_IND_DATA 0x0074
37#define RBBM_SOFT_RESET 0x00F0
38#define CONFIG_MEMSIZE 0x00F8
39#define HDP_FB_LOCATION 0x0134
40#define CP_CSQ_CNTL 0x0740
41#define CP_CSQ_MODE 0x0744
42#define CP_CSQ_ADDR 0x07F0
43#define CP_CSQ_DATA 0x07F4
44#define CP_CSQ_STAT 0x07F8
45#define CP_CSQ2_STAT 0x07FC
46#define RBBM_STATUS 0x0E40
47#define DST_PIPE_CONFIG 0x170C
48#define WAIT_UNTIL 0x1720
49#define WAIT_2D_IDLE (1 << 14)
50#define WAIT_3D_IDLE (1 << 15)
51#define WAIT_2D_IDLECLEAN (1 << 16)
52#define WAIT_3D_IDLECLEAN (1 << 17)
53#define ISYNC_CNTL 0x1724
54#define ISYNC_ANY2D_IDLE3D (1 << 0)
55#define ISYNC_ANY3D_IDLE2D (1 << 1)
56#define ISYNC_TRIG2D_IDLE3D (1 << 2)
57#define ISYNC_TRIG3D_IDLE2D (1 << 3)
58#define ISYNC_WAIT_IDLEGUI (1 << 4)
59#define ISYNC_CPSCRATCH_IDLEGUI (1 << 5)
60#define VAP_INDEX_OFFSET 0x208C
61#define VAP_PVS_STATE_FLUSH_REG 0x2284
62#define GB_ENABLE 0x4008
63#define GB_MSPOS0 0x4010
64#define MS_X0_SHIFT 0
65#define MS_Y0_SHIFT 4
66#define MS_X1_SHIFT 8
67#define MS_Y1_SHIFT 12
68#define MS_X2_SHIFT 16
69#define MS_Y2_SHIFT 20
70#define MSBD0_Y_SHIFT 24
71#define MSBD0_X_SHIFT 28
72#define GB_MSPOS1 0x4014
73#define MS_X3_SHIFT 0
74#define MS_Y3_SHIFT 4
75#define MS_X4_SHIFT 8
76#define MS_Y4_SHIFT 12
77#define MS_X5_SHIFT 16
78#define MS_Y5_SHIFT 20
79#define MSBD1_SHIFT 24
80#define GB_TILE_CONFIG 0x4018
81#define ENABLE_TILING (1 << 0)
82#define PIPE_COUNT_MASK 0x0000000E
83#define PIPE_COUNT_SHIFT 1
84#define TILE_SIZE_8 (0 << 4)
85#define TILE_SIZE_16 (1 << 4)
86#define TILE_SIZE_32 (2 << 4)
87#define SUBPIXEL_1_12 (0 << 16)
88#define SUBPIXEL_1_16 (1 << 16)
89#define GB_SELECT 0x401C
90#define GB_AA_CONFIG 0x4020
91#define GB_PIPE_SELECT 0x402C
92#define GA_ENHANCE 0x4274
93#define GA_DEADLOCK_CNTL (1 << 0)
94#define GA_FASTSYNC_CNTL (1 << 1)
95#define GA_POLY_MODE 0x4288
96#define FRONT_PTYPE_POINT (0 << 4)
97#define FRONT_PTYPE_LINE (1 << 4)
98#define FRONT_PTYPE_TRIANGE (2 << 4)
99#define BACK_PTYPE_POINT (0 << 7)
100#define BACK_PTYPE_LINE (1 << 7)
101#define BACK_PTYPE_TRIANGE (2 << 7)
102#define GA_ROUND_MODE 0x428C
103#define GEOMETRY_ROUND_TRUNC (0 << 0)
104#define GEOMETRY_ROUND_NEAREST (1 << 0)
105#define COLOR_ROUND_TRUNC (0 << 2)
106#define COLOR_ROUND_NEAREST (1 << 2)
107#define SU_REG_DEST 0x42C8
108#define RB3D_DSTCACHE_CTLSTAT 0x4E4C
109#define RB3D_DC_FLUSH (2 << 0)
110#define RB3D_DC_FREE (2 << 2)
111#define RB3D_DC_FINISH (1 << 4)
112#define ZB_ZCACHE_CTLSTAT 0x4F18
113#define ZC_FLUSH (1 << 0)
114#define ZC_FREE (1 << 1)
115#define DC_LB_MEMORY_SPLIT 0x6520
116#define DC_LB_MEMORY_SPLIT_MASK 0x00000003
117#define DC_LB_MEMORY_SPLIT_SHIFT 0
118#define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0
119#define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1
120#define DC_LB_MEMORY_SPLIT_D1_ONLY 2
121#define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3
122#define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2)
123#define DC_LB_DISP1_END_ADR_SHIFT 4
124#define DC_LB_DISP1_END_ADR_MASK 0x00007FF0
125#define D1MODE_PRIORITY_A_CNT 0x6548
126#define MODE_PRIORITY_MARK_MASK 0x00007FFF
127#define MODE_PRIORITY_OFF (1 << 16)
128#define MODE_PRIORITY_ALWAYS_ON (1 << 20)
129#define MODE_PRIORITY_FORCE_MASK (1 << 24)
130#define D1MODE_PRIORITY_B_CNT 0x654C
131#define LB_MAX_REQ_OUTSTANDING 0x6D58
132#define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F
133#define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0
134#define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000
135#define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16
136#define D2MODE_PRIORITY_A_CNT 0x6D48
137#define D2MODE_PRIORITY_B_CNT 0x6D4C
138
139/* ix[MC] registers */
140#define MC_FB_LOCATION 0x01
141#define MC_FB_START_MASK 0x0000FFFF
142#define MC_FB_START_SHIFT 0
143#define MC_FB_TOP_MASK 0xFFFF0000
144#define MC_FB_TOP_SHIFT 16
145#define MC_AGP_LOCATION 0x02
146#define MC_AGP_START_MASK 0x0000FFFF
147#define MC_AGP_START_SHIFT 0
148#define MC_AGP_TOP_MASK 0xFFFF0000
149#define MC_AGP_TOP_SHIFT 16
150#define MC_AGP_BASE 0x03
151#define MC_AGP_BASE_2 0x04
152#define MC_CNTL 0x5
153#define MEM_NUM_CHANNELS_MASK 0x00000003
154#define MC_STATUS 0x08
155#define MC_STATUS_IDLE (1 << 4)
156#define MC_MISC_LAT_TIMER 0x09
157#define MC_CPR_INIT_LAT_MASK 0x0000000F
158#define MC_VF_INIT_LAT_MASK 0x000000F0
159#define MC_DISP0R_INIT_LAT_MASK 0x00000F00
160#define MC_DISP0R_INIT_LAT_SHIFT 8
161#define MC_DISP1R_INIT_LAT_MASK 0x0000F000
162#define MC_DISP1R_INIT_LAT_SHIFT 12
163#define MC_FIXED_INIT_LAT_MASK 0x000F0000
164#define MC_E2R_INIT_LAT_MASK 0x00F00000
165#define SAME_PAGE_PRIO_MASK 0x0F000000
166#define MC_GLOBW_INIT_LAT_MASK 0xF0000000
167
168
169#endif
170
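
[editor's note] The MC_FB_LOCATION and MC_AGP_LOCATION definitions above pack a 16-bit start and a 16-bit top into one 32-bit register. A small sketch of how such fields would be packed and unpacked with the masks and shifts this header defines; the helper names are hypothetical and this is plain userspace C, not driver code:

#include <stdint.h>
#include <assert.h>

#define MC_FB_START_MASK  0x0000FFFF
#define MC_FB_START_SHIFT 0
#define MC_FB_TOP_MASK    0xFFFF0000
#define MC_FB_TOP_SHIFT   16

/* Pack start/top fields into the register layout described above. */
static uint32_t fb_location_pack(uint32_t start, uint32_t top)
{
	return ((start << MC_FB_START_SHIFT) & MC_FB_START_MASK) |
	       ((top << MC_FB_TOP_SHIFT) & MC_FB_TOP_MASK);
}

/* Extract the top field back out of the packed register value. */
static uint32_t fb_location_top(uint32_t reg)
{
	return (reg & MC_FB_TOP_MASK) >> MC_FB_TOP_SHIFT;
}

int main(void)
{
	uint32_t reg = fb_location_pack(0x0000, 0x7FFF);
	assert(fb_location_top(reg) == 0x7FFF);
	return 0;
}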
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index da50cc51ede3..21d8ffd57308 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -67,7 +67,7 @@ int rv770_mc_init(struct radeon_device *rdev)
67 "programming pipes. Bad things might happen.\n"); 67 "programming pipes. Bad things might happen.\n");
68 } 68 }
69 69
70 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; 70 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
71 tmp = REG_SET(R700_MC_FB_TOP, tmp >> 24); 71 tmp = REG_SET(R700_MC_FB_TOP, tmp >> 24);
72 tmp |= REG_SET(R700_MC_FB_BASE, rdev->mc.vram_location >> 24); 72 tmp |= REG_SET(R700_MC_FB_BASE, rdev->mc.vram_location >> 24);
73 WREG32(R700_MC_VM_FB_LOCATION, tmp); 73 WREG32(R700_MC_VM_FB_LOCATION, tmp);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c1c407f7cca3..6538d4236989 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -43,7 +43,6 @@
43#define TTM_BO_HASH_ORDER 13 43#define TTM_BO_HASH_ORDER 13
44 44
45static int ttm_bo_setup_vm(struct ttm_buffer_object *bo); 45static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
46static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
47static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); 46static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
48 47
49static inline uint32_t ttm_bo_type_flags(unsigned type) 48static inline uint32_t ttm_bo_type_flags(unsigned type)
@@ -224,6 +223,9 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
224 TTM_ASSERT_LOCKED(&bo->mutex); 223 TTM_ASSERT_LOCKED(&bo->mutex);
225 bo->ttm = NULL; 224 bo->ttm = NULL;
226 225
226 if (bdev->need_dma32)
227 page_flags |= TTM_PAGE_FLAG_DMA32;
228
227 switch (bo->type) { 229 switch (bo->type) {
228 case ttm_bo_type_device: 230 case ttm_bo_type_device:
229 if (zero_alloc) 231 if (zero_alloc)
@@ -304,6 +306,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
304 306
305 } 307 }
306 308
309 if (bdev->driver->move_notify)
310 bdev->driver->move_notify(bo, mem);
311
307 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && 312 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
308 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) 313 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
309 ret = ttm_bo_move_ttm(bo, evict, no_wait, mem); 314 ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
@@ -655,31 +660,52 @@ retry_pre_get:
655 return 0; 660 return 0;
656} 661}
657 662
663static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
664 uint32_t cur_placement,
665 uint32_t proposed_placement)
666{
667 uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
668 uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
669
670 /**
671 * Keep current caching if possible.
672 */
673
674 if ((cur_placement & caching) != 0)
675 result |= (cur_placement & caching);
676 else if ((man->default_caching & caching) != 0)
677 result |= man->default_caching;
678 else if ((TTM_PL_FLAG_CACHED & caching) != 0)
679 result |= TTM_PL_FLAG_CACHED;
680 else if ((TTM_PL_FLAG_WC & caching) != 0)
681 result |= TTM_PL_FLAG_WC;
682 else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
683 result |= TTM_PL_FLAG_UNCACHED;
684
685 return result;
686}
687
688
658static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, 689static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
659 bool disallow_fixed, 690 bool disallow_fixed,
660 uint32_t mem_type, 691 uint32_t mem_type,
661 uint32_t mask, uint32_t *res_mask) 692 uint32_t proposed_placement,
693 uint32_t *masked_placement)
662{ 694{
663 uint32_t cur_flags = ttm_bo_type_flags(mem_type); 695 uint32_t cur_flags = ttm_bo_type_flags(mem_type);
664 696
665 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed) 697 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
666 return false; 698 return false;
667 699
668 if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0) 700 if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
669 return false; 701 return false;
670 702
671 if ((mask & man->available_caching) == 0) 703 if ((proposed_placement & man->available_caching) == 0)
672 return false; 704 return false;
673 if (mask & man->default_caching)
674 cur_flags |= man->default_caching;
675 else if (mask & TTM_PL_FLAG_CACHED)
676 cur_flags |= TTM_PL_FLAG_CACHED;
677 else if (mask & TTM_PL_FLAG_WC)
678 cur_flags |= TTM_PL_FLAG_WC;
679 else
680 cur_flags |= TTM_PL_FLAG_UNCACHED;
681 705
682 *res_mask = cur_flags; 706 cur_flags |= (proposed_placement & man->available_caching);
707
708 *masked_placement = cur_flags;
683 return true; 709 return true;
684} 710}
685 711
@@ -723,6 +749,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
723 if (!type_ok) 749 if (!type_ok)
724 continue; 750 continue;
725 751
752 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
753 cur_flags);
754
726 if (mem_type == TTM_PL_SYSTEM) 755 if (mem_type == TTM_PL_SYSTEM)
727 break; 756 break;
728 757
@@ -779,6 +808,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
779 proposed_placement, &cur_flags)) 808 proposed_placement, &cur_flags))
780 continue; 809 continue;
781 810
811 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
812 cur_flags);
813
782 ret = ttm_bo_mem_force_space(bdev, mem, mem_type, 814 ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
783 interruptible, no_wait); 815 interruptible, no_wait);
784 816
@@ -1305,7 +1337,8 @@ EXPORT_SYMBOL(ttm_bo_device_release);
1305 1337
1306int ttm_bo_device_init(struct ttm_bo_device *bdev, 1338int ttm_bo_device_init(struct ttm_bo_device *bdev,
1307 struct ttm_mem_global *mem_glob, 1339 struct ttm_mem_global *mem_glob,
1308 struct ttm_bo_driver *driver, uint64_t file_page_offset) 1340 struct ttm_bo_driver *driver, uint64_t file_page_offset,
1341 bool need_dma32)
1309{ 1342{
1310 int ret = -EINVAL; 1343 int ret = -EINVAL;
1311 1344
@@ -1342,6 +1375,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
1342 INIT_LIST_HEAD(&bdev->ddestroy); 1375 INIT_LIST_HEAD(&bdev->ddestroy);
1343 INIT_LIST_HEAD(&bdev->swap_lru); 1376 INIT_LIST_HEAD(&bdev->swap_lru);
1344 bdev->dev_mapping = NULL; 1377 bdev->dev_mapping = NULL;
1378 bdev->need_dma32 = need_dma32;
1345 ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout); 1379 ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
1346 ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink); 1380 ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
1347 if (unlikely(ret != 0)) { 1381 if (unlikely(ret != 0)) {
@@ -1419,6 +1453,7 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1419 1453
1420 unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); 1454 unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1421} 1455}
1456EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1422 1457
1423static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) 1458static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1424{ 1459{
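
[editor's note] The ttm_bo_select_caching() hunk above implements a strict preference order: keep the buffer's current caching if the proposed placement allows it, then fall back to the memory manager's default, then cached, write-combined, and finally uncached. A compact sketch of that decision chain; the PL_* values below are illustrative stand-ins, not the kernel's TTM_PL_FLAG_* constants:

#include <stdint.h>

/* Illustrative flag values; the real TTM_PL_FLAG_* constants differ. */
#define PL_CACHED       (1u << 0)
#define PL_WC           (1u << 1)
#define PL_UNCACHED     (1u << 2)
#define PL_MASK_CACHING (PL_CACHED | PL_WC | PL_UNCACHED)

static uint32_t select_caching(uint32_t default_caching,
			       uint32_t cur, uint32_t proposed)
{
	uint32_t caching = proposed & PL_MASK_CACHING;
	uint32_t result = proposed & ~PL_MASK_CACHING;

	if (cur & caching)			/* keep current caching */
		result |= cur & caching;
	else if (default_caching & caching)	/* manager's default */
		result |= default_caching;
	else if (PL_CACHED & caching)
		result |= PL_CACHED;
	else if (PL_WC & caching)
		result |= PL_WC;
	else if (PL_UNCACHED & caching)
		result |= PL_UNCACHED;
	return result;
}

int main(void)
{
	/* Placement allows cached or WC; buffer is currently WC and the
	 * manager default is cached: the current caching wins. */
	uint32_t r = select_caching(PL_CACHED, PL_WC, PL_CACHED | PL_WC);
	return (r & PL_WC) ? 0 : 1;
}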
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index bdec583901eb..ce2e6f38ea01 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -136,7 +136,8 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
136} 136}
137 137
138static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, 138static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
139 unsigned long page) 139 unsigned long page,
140 pgprot_t prot)
140{ 141{
141 struct page *d = ttm_tt_get_page(ttm, page); 142 struct page *d = ttm_tt_get_page(ttm, page);
142 void *dst; 143 void *dst;
@@ -145,17 +146,35 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
145 return -ENOMEM; 146 return -ENOMEM;
146 147
147 src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); 148 src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
148 dst = kmap(d); 149
150#ifdef CONFIG_X86
151 dst = kmap_atomic_prot(d, KM_USER0, prot);
152#else
153 if (prot != PAGE_KERNEL)
154 dst = vmap(&d, 1, 0, prot);
155 else
156 dst = kmap(d);
157#endif
149 if (!dst) 158 if (!dst)
150 return -ENOMEM; 159 return -ENOMEM;
151 160
152 memcpy_fromio(dst, src, PAGE_SIZE); 161 memcpy_fromio(dst, src, PAGE_SIZE);
153 kunmap(d); 162
163#ifdef CONFIG_X86
164 kunmap_atomic(dst, KM_USER0);
165#else
166 if (prot != PAGE_KERNEL)
167 vunmap(dst);
168 else
169 kunmap(d);
170#endif
171
154 return 0; 172 return 0;
155} 173}
156 174
157static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, 175static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
158 unsigned long page) 176 unsigned long page,
177 pgprot_t prot)
159{ 178{
160 struct page *s = ttm_tt_get_page(ttm, page); 179 struct page *s = ttm_tt_get_page(ttm, page);
161 void *src; 180 void *src;
@@ -164,12 +183,28 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
164 return -ENOMEM; 183 return -ENOMEM;
165 184
166 dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); 185 dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
167 src = kmap(s); 186#ifdef CONFIG_X86
187 src = kmap_atomic_prot(s, KM_USER0, prot);
188#else
189 if (prot != PAGE_KERNEL)
190 src = vmap(&s, 1, 0, prot);
191 else
192 src = kmap(s);
193#endif
168 if (!src) 194 if (!src)
169 return -ENOMEM; 195 return -ENOMEM;
170 196
171 memcpy_toio(dst, src, PAGE_SIZE); 197 memcpy_toio(dst, src, PAGE_SIZE);
172 kunmap(s); 198
199#ifdef CONFIG_X86
200 kunmap_atomic(src, KM_USER0);
201#else
202 if (prot != PAGE_KERNEL)
203 vunmap(src);
204 else
205 kunmap(s);
206#endif
207
173 return 0; 208 return 0;
174} 209}
175 210
@@ -214,11 +249,17 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
214 249
215 for (i = 0; i < new_mem->num_pages; ++i) { 250 for (i = 0; i < new_mem->num_pages; ++i) {
216 page = i * dir + add; 251 page = i * dir + add;
217 if (old_iomap == NULL) 252 if (old_iomap == NULL) {
218 ret = ttm_copy_ttm_io_page(ttm, new_iomap, page); 253 pgprot_t prot = ttm_io_prot(old_mem->placement,
219 else if (new_iomap == NULL) 254 PAGE_KERNEL);
220 ret = ttm_copy_io_ttm_page(ttm, old_iomap, page); 255 ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
221 else 256 prot);
257 } else if (new_iomap == NULL) {
258 pgprot_t prot = ttm_io_prot(new_mem->placement,
259 PAGE_KERNEL);
260 ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
261 prot);
262 } else
222 ret = ttm_copy_io_page(new_iomap, old_iomap, page); 263 ret = ttm_copy_io_page(new_iomap, old_iomap, page);
223 if (ret) 264 if (ret)
224 goto out1; 265 goto out1;
@@ -509,8 +550,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
509 if (evict) { 550 if (evict) {
510 ret = ttm_bo_wait(bo, false, false, false); 551 ret = ttm_bo_wait(bo, false, false, false);
511 spin_unlock(&bo->lock); 552 spin_unlock(&bo->lock);
512 driver->sync_obj_unref(&bo->sync_obj); 553 if (tmp_obj)
513 554 driver->sync_obj_unref(&tmp_obj);
514 if (ret) 555 if (ret)
515 return ret; 556 return ret;
516 557
@@ -532,6 +573,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
532 573
533 set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); 574 set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
534 spin_unlock(&bo->lock); 575 spin_unlock(&bo->lock);
576 if (tmp_obj)
577 driver->sync_obj_unref(&tmp_obj);
535 578
536 ret = ttm_buffer_object_transfer(bo, &ghost_obj); 579 ret = ttm_buffer_object_transfer(bo, &ghost_obj);
537 if (ret) 580 if (ret)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index fe949a12fe40..33de7637c0c6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -101,6 +101,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
101 return VM_FAULT_NOPAGE; 101 return VM_FAULT_NOPAGE;
102 } 102 }
103 103
104 if (bdev->driver->fault_reserve_notify)
105 bdev->driver->fault_reserve_notify(bo);
106
104 /* 107 /*
105 * Wait for buffer data in transit, due to a pipelined 108 * Wait for buffer data in transit, due to a pipelined
106 * move. 109 * move.
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 75dc8bd24592..b8b6c4a5f983 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -86,10 +86,16 @@ void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
86 unsigned long i; 86 unsigned long i;
87 87
88 for (i = 0; i < num_pages; ++i) { 88 for (i = 0; i < num_pages; ++i) {
89 if (pages[i]) { 89 struct page *page = pages[i];
90 unsigned long start = (unsigned long)page_address(pages[i]); 90 void *page_virtual;
91 flush_dcache_range(start, start + PAGE_SIZE); 91
92 } 92 if (unlikely(page == NULL))
93 continue;
94
95 page_virtual = kmap_atomic(page, KM_USER0);
96 flush_dcache_range((unsigned long) page_virtual,
97 (unsigned long) page_virtual + PAGE_SIZE);
98 kunmap_atomic(page_virtual, KM_USER0);
93 } 99 }
94#else 100#else
95 if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0) 101 if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
@@ -131,10 +137,17 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
131 137
132static struct page *ttm_tt_alloc_page(unsigned page_flags) 138static struct page *ttm_tt_alloc_page(unsigned page_flags)
133{ 139{
140 gfp_t gfp_flags = GFP_USER;
141
134 if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) 142 if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
135 return alloc_page(GFP_HIGHUSER | __GFP_ZERO); 143 gfp_flags |= __GFP_ZERO;
144
145 if (page_flags & TTM_PAGE_FLAG_DMA32)
146 gfp_flags |= __GFP_DMA32;
147 else
148 gfp_flags |= __GFP_HIGHMEM;
136 149
137 return alloc_page(GFP_HIGHUSER); 150 return alloc_page(gfp_flags);
138} 151}
139 152
140static void ttm_tt_free_user_pages(struct ttm_tt *ttm) 153static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index fdd83277c8a8..d258b02aef44 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -672,9 +672,10 @@ omap_i2c_isr(int this_irq, void *dev_id)
672 break; 672 break;
673 } 673 }
674 674
675 err = 0;
676complete:
675 omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat); 677 omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat);
676 678
677 err = 0;
678 if (stat & OMAP_I2C_STAT_NACK) { 679 if (stat & OMAP_I2C_STAT_NACK) {
679 err |= OMAP_I2C_STAT_NACK; 680 err |= OMAP_I2C_STAT_NACK;
680 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 681 omap_i2c_write_reg(dev, OMAP_I2C_CON_REG,
@@ -685,16 +686,19 @@ omap_i2c_isr(int this_irq, void *dev_id)
685 err |= OMAP_I2C_STAT_AL; 686 err |= OMAP_I2C_STAT_AL;
686 } 687 }
687 if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | 688 if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
688 OMAP_I2C_STAT_AL)) 689 OMAP_I2C_STAT_AL)) {
689 omap_i2c_complete_cmd(dev, err); 690 omap_i2c_complete_cmd(dev, err);
691 return IRQ_HANDLED;
692 }
690 if (stat & (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR)) { 693 if (stat & (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR)) {
691 u8 num_bytes = 1; 694 u8 num_bytes = 1;
692 if (dev->fifo_size) { 695 if (dev->fifo_size) {
693 if (stat & OMAP_I2C_STAT_RRDY) 696 if (stat & OMAP_I2C_STAT_RRDY)
694 num_bytes = dev->fifo_size; 697 num_bytes = dev->fifo_size;
695 else 698 else /* read RXSTAT on RDR interrupt */
696 num_bytes = omap_i2c_read_reg(dev, 699 num_bytes = (omap_i2c_read_reg(dev,
697 OMAP_I2C_BUFSTAT_REG); 700 OMAP_I2C_BUFSTAT_REG)
701 >> 8) & 0x3F;
698 } 702 }
699 while (num_bytes) { 703 while (num_bytes) {
700 num_bytes--; 704 num_bytes--;
@@ -731,9 +735,10 @@ omap_i2c_isr(int this_irq, void *dev_id)
731 if (dev->fifo_size) { 735 if (dev->fifo_size) {
732 if (stat & OMAP_I2C_STAT_XRDY) 736 if (stat & OMAP_I2C_STAT_XRDY)
733 num_bytes = dev->fifo_size; 737 num_bytes = dev->fifo_size;
734 else 738 else /* read TXSTAT on XDR interrupt */
735 num_bytes = omap_i2c_read_reg(dev, 739 num_bytes = omap_i2c_read_reg(dev,
736 OMAP_I2C_BUFSTAT_REG); 740 OMAP_I2C_BUFSTAT_REG)
741 & 0x3F;
737 } 742 }
738 while (num_bytes) { 743 while (num_bytes) {
739 num_bytes--; 744 num_bytes--;
@@ -760,6 +765,27 @@ omap_i2c_isr(int this_irq, void *dev_id)
760 "data to send\n"); 765 "data to send\n");
761 break; 766 break;
762 } 767 }
768
769 /*
770 * OMAP3430 Errata 1.153: When an XRDY/XDR
771 * is hit, wait for XUDF before writing data
772 * to DATA_REG. Otherwise some data bytes can
773 * be lost while transferring them from the
774 * memory to the I2C interface.
775 */
776
777 if (cpu_is_omap34xx()) {
778 while (!(stat & OMAP_I2C_STAT_XUDF)) {
779 if (stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) {
780 omap_i2c_ack_stat(dev, stat & (OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
781 err |= OMAP_I2C_STAT_XUDF;
782 goto complete;
783 }
784 cpu_relax();
785 stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
786 }
787 }
788
763 omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w); 789 omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w);
764 } 790 }
765 omap_i2c_ack_stat(dev, 791 omap_i2c_ack_stat(dev,
@@ -879,7 +905,7 @@ omap_i2c_probe(struct platform_device *pdev)
879 i2c_set_adapdata(adap, dev); 905 i2c_set_adapdata(adap, dev);
880 adap->owner = THIS_MODULE; 906 adap->owner = THIS_MODULE;
881 adap->class = I2C_CLASS_HWMON; 907 adap->class = I2C_CLASS_HWMON;
882 strncpy(adap->name, "OMAP I2C adapter", sizeof(adap->name)); 908 strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
883 adap->algo = &omap_i2c_algo; 909 adap->algo = &omap_i2c_algo;
884 adap->dev.parent = &pdev->dev; 910 adap->dev.parent = &pdev->dev;
885 911
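
[editor's note] The BUFSTAT fix above stops treating the whole register value as a byte count: per the masking the patch applies, RXSTAT sits in bits 13:8 and TXSTAT in bits 5:0, so each must be masked (and, for RX, shifted) out of the register. A sketch of just that field extraction, with hypothetical helper names:

#include <stdint.h>
#include <assert.h>

/* Hypothetical helpers mirroring the patch's masking:
 * BUFSTAT.TXSTAT = bits 5:0, BUFSTAT.RXSTAT = bits 13:8. */
static unsigned bufstat_txstat(uint16_t bufstat)
{
	return bufstat & 0x3F;
}

static unsigned bufstat_rxstat(uint16_t bufstat)
{
	return (bufstat >> 8) & 0x3F;
}

int main(void)
{
	uint16_t bufstat = 0x0A05;	/* RXSTAT = 10, TXSTAT = 5 */

	assert(bufstat_rxstat(bufstat) == 10);
	assert(bufstat_txstat(bufstat) == 5);
	return 0;
}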
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 3be0d6a4d630..96aafb91b69a 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -763,11 +763,6 @@ static int s3c24xx_i2c_init(struct s3c24xx_i2c *i2c)
763 dev_info(i2c->dev, "bus frequency set to %d KHz\n", freq); 763 dev_info(i2c->dev, "bus frequency set to %d KHz\n", freq);
764 dev_dbg(i2c->dev, "S3C2410_IICCON=0x%02lx\n", iicon); 764 dev_dbg(i2c->dev, "S3C2410_IICCON=0x%02lx\n", iicon);
765 765
766 /* check for s3c2440 i2c controller */
767
768 if (s3c24xx_i2c_is2440(i2c))
769 writel(0x0, i2c->regs + S3C2440_IICLC);
770
771 return 0; 766 return 0;
772} 767}
773 768
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 4f3d99cd1692..820487d0d5c7 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -637,7 +637,7 @@ static void __exit sh_mobile_i2c_adap_exit(void)
637 platform_driver_unregister(&sh_mobile_i2c_driver); 637 platform_driver_unregister(&sh_mobile_i2c_driver);
638} 638}
639 639
640module_init(sh_mobile_i2c_adap_init); 640subsys_initcall(sh_mobile_i2c_adap_init);
641module_exit(sh_mobile_i2c_adap_exit); 641module_exit(sh_mobile_i2c_adap_exit);
642 642
643MODULE_DESCRIPTION("SuperH Mobile I2C Bus Controller driver"); 643MODULE_DESCRIPTION("SuperH Mobile I2C Bus Controller driver");
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index b34cb5f79eea..2e535a0ccd5e 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -173,6 +173,7 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
173 unsigned segment; 173 unsigned segment;
174 unsigned offset = (unsigned) off; 174 unsigned offset = (unsigned) off;
175 u8 *cp = bounce + 1; 175 u8 *cp = bounce + 1;
176 int sr;
176 177
177 *cp = AT25_WREN; 178 *cp = AT25_WREN;
178 status = spi_write(at25->spi, cp, 1); 179 status = spi_write(at25->spi, cp, 1);
@@ -214,7 +215,6 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
214 timeout = jiffies + msecs_to_jiffies(EE_TIMEOUT); 215 timeout = jiffies + msecs_to_jiffies(EE_TIMEOUT);
215 retries = 0; 216 retries = 0;
216 do { 217 do {
217 int sr;
218 218
219 sr = spi_w8r8(at25->spi, AT25_RDSR); 219 sr = spi_w8r8(at25->spi, AT25_RDSR);
220 if (sr < 0 || (sr & AT25_SR_nRDY)) { 220 if (sr < 0 || (sr & AT25_SR_nRDY)) {
@@ -228,7 +228,7 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
228 break; 228 break;
229 } while (retries++ < 3 || time_before_eq(jiffies, timeout)); 229 } while (retries++ < 3 || time_before_eq(jiffies, timeout));
230 230
231 if (time_after(jiffies, timeout)) { 231 if ((sr < 0) || (sr & AT25_SR_nRDY)) {
232 dev_err(&at25->spi->dev, 232 dev_err(&at25->spi->dev,
233 "write %d bytes offset %d, " 233 "write %d bytes offset %d, "
234 "timeout after %u msecs\n", 234 "timeout after %u msecs\n",
diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of.c
index d79fa55c3b89..908844327db0 100644
--- a/drivers/mmc/host/sdhci-of.c
+++ b/drivers/mmc/host/sdhci-of.c
@@ -158,6 +158,13 @@ static unsigned int esdhc_get_max_clock(struct sdhci_host *host)
158 return of_host->clock; 158 return of_host->clock;
159} 159}
160 160
161static unsigned int esdhc_get_min_clock(struct sdhci_host *host)
162{
163 struct sdhci_of_host *of_host = sdhci_priv(host);
164
165 return of_host->clock / 256 / 16;
166}
167
161static unsigned int esdhc_get_timeout_clock(struct sdhci_host *host) 168static unsigned int esdhc_get_timeout_clock(struct sdhci_host *host)
162{ 169{
163 struct sdhci_of_host *of_host = sdhci_priv(host); 170 struct sdhci_of_host *of_host = sdhci_priv(host);
@@ -184,6 +191,7 @@ static struct sdhci_of_data sdhci_esdhc = {
184 .set_clock = esdhc_set_clock, 191 .set_clock = esdhc_set_clock,
185 .enable_dma = esdhc_enable_dma, 192 .enable_dma = esdhc_enable_dma,
186 .get_max_clock = esdhc_get_max_clock, 193 .get_max_clock = esdhc_get_max_clock,
194 .get_min_clock = esdhc_get_min_clock,
187 .get_timeout_clock = esdhc_get_timeout_clock, 195 .get_timeout_clock = esdhc_get_timeout_clock,
188 }, 196 },
189}; 197};
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6779b4ecab18..62041c7e9246 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1766,7 +1766,10 @@ int sdhci_add_host(struct sdhci_host *host)
1766 * Set host parameters. 1766 * Set host parameters.
1767 */ 1767 */
1768 mmc->ops = &sdhci_ops; 1768 mmc->ops = &sdhci_ops;
1769 mmc->f_min = host->max_clk / 256; 1769 if (host->ops->get_min_clock)
1770 mmc->f_min = host->ops->get_min_clock(host);
1771 else
1772 mmc->f_min = host->max_clk / 256;
1770 mmc->f_max = host->max_clk; 1773 mmc->f_max = host->max_clk;
1771 mmc->caps = MMC_CAP_SDIO_IRQ; 1774 mmc->caps = MMC_CAP_SDIO_IRQ;
1772 1775
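
[editor's note] esdhc_get_min_clock() divides the base clock by 256 and then by a further 16, presumably matching the eSDHC's full divider chain, while the generic fallback only divides by 256. For a hypothetical 133 MHz base clock (the figure is illustrative, not from the patch), that is the difference between advertising f_min of roughly 519.5 kHz and roughly 32.5 kHz. As a one-line check:

#include <stdio.h>

int main(void)
{
	unsigned int clock = 133000000;	/* hypothetical base clock */

	printf("generic f_min = %u Hz\n", clock / 256);		/* 519531 */
	printf("esdhc   f_min = %u Hz\n", clock / 256 / 16);	/* 32470 */
	return 0;
}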
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 831ddf7dcb49..c77e9ff30223 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -302,6 +302,7 @@ struct sdhci_ops {
302 302
303 int (*enable_dma)(struct sdhci_host *host); 303 int (*enable_dma)(struct sdhci_host *host);
304 unsigned int (*get_max_clock)(struct sdhci_host *host); 304 unsigned int (*get_max_clock)(struct sdhci_host *host);
305 unsigned int (*get_min_clock)(struct sdhci_host *host);
305 unsigned int (*get_timeout_clock)(struct sdhci_host *host); 306 unsigned int (*get_timeout_clock)(struct sdhci_host *host);
306}; 307};
307 308
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 23e10b6263d6..f7a4701bf863 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -1174,23 +1174,34 @@ static struct platform_driver cmos_platform_driver = {
1174 } 1174 }
1175}; 1175};
1176 1176
1177#ifdef CONFIG_PNP
1178static bool pnp_driver_registered;
1179#endif
1180static bool platform_driver_registered;
1181
1177static int __init cmos_init(void) 1182static int __init cmos_init(void)
1178{ 1183{
1179 int retval = 0; 1184 int retval = 0;
1180 1185
1181#ifdef CONFIG_PNP 1186#ifdef CONFIG_PNP
1182 pnp_register_driver(&cmos_pnp_driver); 1187 retval = pnp_register_driver(&cmos_pnp_driver);
1188 if (retval == 0)
1189 pnp_driver_registered = true;
1183#endif 1190#endif
1184 1191
1185 if (!cmos_rtc.dev) 1192 if (!cmos_rtc.dev) {
1186 retval = platform_driver_probe(&cmos_platform_driver, 1193 retval = platform_driver_probe(&cmos_platform_driver,
1187 cmos_platform_probe); 1194 cmos_platform_probe);
1195 if (retval == 0)
1196 platform_driver_registered = true;
1197 }
1188 1198
1189 if (retval == 0) 1199 if (retval == 0)
1190 return 0; 1200 return 0;
1191 1201
1192#ifdef CONFIG_PNP 1202#ifdef CONFIG_PNP
1193 pnp_unregister_driver(&cmos_pnp_driver); 1203 if (pnp_driver_registered)
1204 pnp_unregister_driver(&cmos_pnp_driver);
1194#endif 1205#endif
1195 return retval; 1206 return retval;
1196} 1207}
@@ -1199,9 +1210,11 @@ module_init(cmos_init);
1199static void __exit cmos_exit(void) 1210static void __exit cmos_exit(void)
1200{ 1211{
1201#ifdef CONFIG_PNP 1212#ifdef CONFIG_PNP
1202 pnp_unregister_driver(&cmos_pnp_driver); 1213 if (pnp_driver_registered)
1214 pnp_unregister_driver(&cmos_pnp_driver);
1203#endif 1215#endif
1204 platform_driver_unregister(&cmos_platform_driver); 1216 if (platform_driver_registered)
1217 platform_driver_unregister(&cmos_platform_driver);
1205} 1218}
1206module_exit(cmos_exit); 1219module_exit(cmos_exit);
1207 1220
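
[editor's note] The rtc-cmos fix records which driver registrations actually succeeded so the exit and error paths only unregister what was registered; unconditionally unregistering a driver that never registered is unsafe. A stripped-down sketch of the pattern with stubbed register calls; the function names here are stand-ins, not the driver's:

#include <stdbool.h>
#include <stdio.h>

static bool pnp_registered, platform_registered;

static int register_pnp(void)      { return 0; }	/* stub: succeeds */
static int register_platform(void) { return -1; }	/* stub: fails */

static int init(void)
{
	int ret = register_pnp();
	if (ret == 0)
		pnp_registered = true;

	ret = register_platform();
	if (ret == 0)
		platform_registered = true;

	if (ret == 0)
		return 0;

	/* Error path: roll back only what actually registered. */
	if (pnp_registered)
		puts("unregistering pnp driver");
	return ret;
}

int main(void)
{
	return init() ? 1 : 0;
}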
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 338b15c0a548..607d43a31048 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -1551,6 +1551,7 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev)
1551 if (ret) 1551 if (ret)
1552 goto err_add_port; 1552 goto err_add_port;
1553 1553
1554#ifdef CONFIG_SERIAL_ATMEL_CONSOLE
1554 if (atmel_is_console_port(&port->uart) 1555 if (atmel_is_console_port(&port->uart)
1555 && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) { 1556 && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
1556 /* 1557 /*
@@ -1559,6 +1560,7 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev)
1559 */ 1560 */
1560 clk_disable(port->clk); 1561 clk_disable(port->clk);
1561 } 1562 }
1563#endif
1562 1564
1563 device_init_wakeup(&pdev->dev, 1); 1565 device_init_wakeup(&pdev->dev, 1);
1564 platform_set_drvdata(pdev, port); 1566 platform_set_drvdata(pdev, port);
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index eee4b6e0af2c..9b80ad36dbba 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -59,6 +59,8 @@
59 59
60/* per-register bitmasks: */ 60/* per-register bitmasks: */
61 61
62#define OMAP2_MCSPI_SYSCONFIG_SMARTIDLE (2 << 3)
63#define OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP (1 << 2)
62#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE (1 << 0) 64#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE (1 << 0)
63#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET (1 << 1) 65#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET (1 << 1)
64 66
@@ -90,6 +92,7 @@
90 92
91#define OMAP2_MCSPI_CHCTRL_EN (1 << 0) 93#define OMAP2_MCSPI_CHCTRL_EN (1 << 0)
92 94
95#define OMAP2_MCSPI_WAKEUPENABLE_WKEN (1 << 0)
93 96
94/* We have 2 DMA channels per CS, one for RX and one for TX */ 97/* We have 2 DMA channels per CS, one for RX and one for TX */
95struct omap2_mcspi_dma { 98struct omap2_mcspi_dma {
@@ -269,7 +272,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
269 272
270 if (rx != NULL) { 273 if (rx != NULL) {
271 omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel, 274 omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
272 data_type, element_count, 1, 275 data_type, element_count - 1, 1,
273 OMAP_DMA_SYNC_ELEMENT, 276 OMAP_DMA_SYNC_ELEMENT,
274 mcspi_dma->dma_rx_sync_dev, 1); 277 mcspi_dma->dma_rx_sync_dev, 1);
275 278
@@ -300,6 +303,25 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
300 if (rx != NULL) { 303 if (rx != NULL) {
301 wait_for_completion(&mcspi_dma->dma_rx_completion); 304 wait_for_completion(&mcspi_dma->dma_rx_completion);
302 dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE); 305 dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE);
306 omap2_mcspi_set_enable(spi, 0);
307 if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
308 & OMAP2_MCSPI_CHSTAT_RXS)) {
309 u32 w;
310
311 w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
312 if (word_len <= 8)
313 ((u8 *)xfer->rx_buf)[element_count - 1] = w;
314 else if (word_len <= 16)
315 ((u16 *)xfer->rx_buf)[element_count - 1] = w;
316 else /* word_len <= 32 */
317 ((u32 *)xfer->rx_buf)[element_count - 1] = w;
318 } else {
319 dev_err(&spi->dev, "DMA RX last word empty");
320 count -= (word_len <= 8) ? 1 :
321 (word_len <= 16) ? 2 :
322 /* word_len <= 32 */ 4;
323 }
324 omap2_mcspi_set_enable(spi, 1);
303 } 325 }
304 return count; 326 return count;
305} 327}
@@ -873,8 +895,12 @@ static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi)
873 } while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE)); 895 } while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE));
874 896
875 mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, 897 mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG,
876 /* (3 << 8) | (2 << 3) | */ 898 OMAP2_MCSPI_SYSCONFIG_AUTOIDLE |
877 OMAP2_MCSPI_SYSCONFIG_AUTOIDLE); 899 OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP |
900 OMAP2_MCSPI_SYSCONFIG_SMARTIDLE);
901
902 mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
903 OMAP2_MCSPI_WAKEUPENABLE_WKEN);
878 904
879 omap2_mcspi_set_master_mode(master); 905 omap2_mcspi_set_master_mode(master);
880 906
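
[editor's note] The RX-DMA workaround above programs the DMA for element_count - 1 elements and then drains the final word from MCSPI_RX0 by hand, storing it at the transfer's word width. A sketch of just that width dispatch for the last element; the buffer and helper names are illustrative:

#include <stdint.h>
#include <assert.h>

/* Store one received word at index idx, honouring the transfer word
 * length, as the patch does for the final DMA element. */
static void store_rx_word(void *rx_buf, unsigned idx,
			  uint32_t w, unsigned word_len)
{
	if (word_len <= 8)
		((uint8_t *)rx_buf)[idx] = w;
	else if (word_len <= 16)
		((uint16_t *)rx_buf)[idx] = w;
	else	/* word_len <= 32 */
		((uint32_t *)rx_buf)[idx] = w;
}

int main(void)
{
	uint16_t buf[4] = { 0 };

	store_rx_word(buf, 3, 0xBEEF, 16);
	assert(buf[3] == 0xBEEF);
	return 0;
}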
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 348bf61a8fec..975ecddbce30 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -103,8 +103,6 @@ source "drivers/staging/pohmelfs/Kconfig"
103 103
104source "drivers/staging/stlc45xx/Kconfig" 104source "drivers/staging/stlc45xx/Kconfig"
105 105
106source "drivers/staging/uc2322/Kconfig"
107
108source "drivers/staging/b3dfg/Kconfig" 106source "drivers/staging/b3dfg/Kconfig"
109 107
110source "drivers/staging/phison/Kconfig" 108source "drivers/staging/phison/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 8d61d7b4debf..2241ae1b21ee 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -34,7 +34,6 @@ obj-$(CONFIG_ANDROID) += android/
34obj-$(CONFIG_DST) += dst/ 34obj-$(CONFIG_DST) += dst/
35obj-$(CONFIG_POHMELFS) += pohmelfs/ 35obj-$(CONFIG_POHMELFS) += pohmelfs/
36obj-$(CONFIG_STLC45XX) += stlc45xx/ 36obj-$(CONFIG_STLC45XX) += stlc45xx/
37obj-$(CONFIG_USB_SERIAL_ATEN2011) += uc2322/
38obj-$(CONFIG_B3DFG) += b3dfg/ 37obj-$(CONFIG_B3DFG) += b3dfg/
39obj-$(CONFIG_IDE_PHISON) += phison/ 38obj-$(CONFIG_IDE_PHISON) += phison/
40obj-$(CONFIG_PLAN9AUTH) += p9auth/ 39obj-$(CONFIG_PLAN9AUTH) += p9auth/
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index fe72240f5a9e..f934393f3959 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -96,19 +96,21 @@ static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask)
96 96
97 read_lock(&tasklist_lock); 97 read_lock(&tasklist_lock);
98 for_each_process(p) { 98 for_each_process(p) {
99 struct mm_struct *mm;
99 int oom_adj; 100 int oom_adj;
100 101
101 task_lock(p); 102 task_lock(p);
102 if (!p->mm) { 103 mm = p->mm;
104 if (!mm) {
103 task_unlock(p); 105 task_unlock(p);
104 continue; 106 continue;
105 } 107 }
106 oom_adj = p->oomkilladj; 108 oom_adj = mm->oom_adj;
107 if (oom_adj < min_adj) { 109 if (oom_adj < min_adj) {
108 task_unlock(p); 110 task_unlock(p);
109 continue; 111 continue;
110 } 112 }
111 tasksize = get_mm_rss(p->mm); 113 tasksize = get_mm_rss(mm);
112 task_unlock(p); 114 task_unlock(p);
113 if (tasksize <= 0) 115 if (tasksize <= 0)
114 continue; 116 continue;
diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
index a9bd4106beb7..0fdf8c6dc648 100644
--- a/drivers/staging/serqt_usb2/serqt_usb2.c
+++ b/drivers/staging/serqt_usb2/serqt_usb2.c
@@ -360,18 +360,18 @@ static void qt_read_bulk_callback(struct urb *urb)
360 if (port_paranoia_check(port, __func__) != 0) { 360 if (port_paranoia_check(port, __func__) != 0) {
361 dbg("%s - port_paranoia_check, exiting\n", __func__); 361 dbg("%s - port_paranoia_check, exiting\n", __func__);
362 qt_port->ReadBulkStopped = 1; 362 qt_port->ReadBulkStopped = 1;
363 return; 363 goto exit;
364 } 364 }
365 365
366 if (!serial) { 366 if (!serial) {
367 dbg("%s - bad serial pointer, exiting\n", __func__); 367 dbg("%s - bad serial pointer, exiting\n", __func__);
368 return; 368 goto exit;
369 } 369 }
370 if (qt_port->closePending == 1) { 370 if (qt_port->closePending == 1) {
371 /* We're closing, stop reading */ 371 /* We're closing, stop reading */
372 dbg("%s - (qt_port->closepending == 1\n", __func__); 372 dbg("%s - (qt_port->closepending == 1\n", __func__);
373 qt_port->ReadBulkStopped = 1; 373 qt_port->ReadBulkStopped = 1;
374 return; 374 goto exit;
375 } 375 }
376 376
377 /* 377 /*
@@ -381,7 +381,7 @@ static void qt_read_bulk_callback(struct urb *urb)
381 */ 381 */
382 if (qt_port->RxHolding == 1) { 382 if (qt_port->RxHolding == 1) {
383 qt_port->ReadBulkStopped = 1; 383 qt_port->ReadBulkStopped = 1;
384 return; 384 goto exit;
385 } 385 }
386 386
387 if (urb->status) { 387 if (urb->status) {
@@ -389,7 +389,7 @@ static void qt_read_bulk_callback(struct urb *urb)
389 389
390 dbg("%s - nonzero read bulk status received: %d\n", 390 dbg("%s - nonzero read bulk status received: %d\n",
391 __func__, urb->status); 391 __func__, urb->status);
392 return; 392 goto exit;
393 } 393 }
394 394
395 if (tty && RxCount) { 395 if (tty && RxCount) {
@@ -463,6 +463,8 @@ static void qt_read_bulk_callback(struct urb *urb)
463 } 463 }
464 464
465 schedule_work(&port->work); 465 schedule_work(&port->work);
466exit:
467 tty_kref_put(tty);
466} 468}
467 469
468/* 470/*
@@ -736,6 +738,11 @@ static int qt_startup(struct usb_serial *serial)
736 if (!qt_port) { 738 if (!qt_port) {
737 dbg("%s: kmalloc for quatech_port (%d) failed!.", 739 dbg("%s: kmalloc for quatech_port (%d) failed!.",
738 __func__, i); 740 __func__, i);
741 for (--i; i >= 0; i--) {
742 port = serial->port[i];
743 kfree(usb_get_serial_port_data(port));
744 usb_set_serial_port_data(port, NULL);
745 }
739 return -ENOMEM; 746 return -ENOMEM;
740 } 747 }
741 spin_lock_init(&qt_port->lock); 748 spin_lock_init(&qt_port->lock);
@@ -1041,7 +1048,7 @@ static void qt_block_until_empty(struct tty_struct *tty,
1041 } 1048 }
1042} 1049}
1043 1050
1044static void qt_close( struct usb_serial_port *port) 1051static void qt_close(struct usb_serial_port *port)
1045{ 1052{
1046 struct usb_serial *serial = port->serial; 1053 struct usb_serial *serial = port->serial;
1047 struct quatech_port *qt_port; 1054 struct quatech_port *qt_port;
@@ -1068,6 +1075,7 @@ static void qt_close( struct usb_serial_port *port)
1068 /* wait up to for transmitter to empty */ 1075 /* wait up to for transmitter to empty */
1069 if (serial->dev) 1076 if (serial->dev)
1070 qt_block_until_empty(tty, qt_port); 1077 qt_block_until_empty(tty, qt_port);
1078 tty_kref_put(tty);
1071 1079
1072 /* Close uart channel */ 1080 /* Close uart channel */
1073 status = qt_close_channel(serial, index); 1081 status = qt_close_channel(serial, index);
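
[editor's note] qt_startup()'s new error path walks back over the ports already set up and frees their private data before returning -ENOMEM; without the unwind, the earlier allocations would leak and stale pointers would stay attached to the ports. The general allocate-N-or-roll-back shape, as a userspace sketch with calloc/free standing in for the kernel allocators:

#include <stdlib.h>

#define NPORTS 4

static void *port_data[NPORTS];

static int startup(void)
{
	int i;

	for (i = 0; i < NPORTS; i++) {
		port_data[i] = calloc(1, 64);
		if (!port_data[i]) {
			/* Unwind everything allocated so far. */
			for (--i; i >= 0; i--) {
				free(port_data[i]);
				port_data[i] = NULL;
			}
			return -1;	/* -ENOMEM analogue */
		}
	}
	return 0;
}

int main(void)
{
	return startup() ? 1 : 0;
}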
diff --git a/drivers/staging/uc2322/Kconfig b/drivers/staging/uc2322/Kconfig
deleted file mode 100644
index 2e0c6e79df2b..000000000000
--- a/drivers/staging/uc2322/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
1config USB_SERIAL_ATEN2011
2 tristate "ATEN 2011 USB to serial device support"
3 depends on USB_SERIAL
4 default N
5 ---help---
6 Say Y here if you want to use an ATEN 2011 dual port USB to serial
7 adapter.
8
9 To compile this driver as a module, choose M here: the module will be
10 called aten2011.
diff --git a/drivers/staging/uc2322/Makefile b/drivers/staging/uc2322/Makefile
deleted file mode 100644
index 49c18d6e579f..000000000000
--- a/drivers/staging/uc2322/Makefile
+++ /dev/null
@@ -1 +0,0 @@
1obj-$(CONFIG_USB_SERIAL_ATEN2011) += aten2011.o
diff --git a/drivers/staging/uc2322/TODO b/drivers/staging/uc2322/TODO
deleted file mode 100644
index c189a64c4185..000000000000
--- a/drivers/staging/uc2322/TODO
+++ /dev/null
@@ -1,7 +0,0 @@
1TODO:
2 - checkpatch.pl cleanups
3 - remove dead and useless code (auditing the tty ioctls to
4 verify that they really are correct and needed.)
5
6Please send any patches to Greg Kroah-Hartman <greg@kroah.com> and
7Russell Lang <gsview@ghostgum.com.au>.
diff --git a/drivers/staging/uc2322/aten2011.c b/drivers/staging/uc2322/aten2011.c
deleted file mode 100644
index 39d0926d1a90..000000000000
--- a/drivers/staging/uc2322/aten2011.c
+++ /dev/null
@@ -1,2430 +0,0 @@
1/*
2 * Aten 2011 USB serial driver for 4 port devices
3 *
4 * Copyright (C) 2000 Inside Out Networks
5 * Copyright (C) 2001-2002, 2009 Greg Kroah-Hartman <greg@kroah.com>
6 * Copyright (C) 2009 Novell Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 */
14
15#include <linux/kernel.h>
16#include <linux/errno.h>
17#include <linux/init.h>
18#include <linux/slab.h>
19#include <linux/tty.h>
20#include <linux/tty_driver.h>
21#include <linux/tty_flip.h>
22#include <linux/module.h>
23#include <linux/serial.h>
24#include <linux/uaccess.h>
25#include <linux/usb.h>
26#include <linux/usb/serial.h>
27
28
29#define ZLP_REG1 0x3A /* Zero_Flag_Reg1 58 */
30#define ZLP_REG2 0x3B /* Zero_Flag_Reg2 59 */
31#define ZLP_REG3 0x3C /* Zero_Flag_Reg3 60 */
32#define ZLP_REG4 0x3D /* Zero_Flag_Reg4 61 */
33#define ZLP_REG5 0x3E /* Zero_Flag_Reg5 62 */
34
35/* Interrupt Routine Defines */
36#define SERIAL_IIR_RLS 0x06
37#define SERIAL_IIR_RDA 0x04
38#define SERIAL_IIR_CTI 0x0c
39#define SERIAL_IIR_THR 0x02
40#define SERIAL_IIR_MS 0x00
41
42/* Emulation of the bit mask on the LINE STATUS REGISTER. */
43#define SERIAL_LSR_DR 0x0001
44#define SERIAL_LSR_OE 0x0002
45#define SERIAL_LSR_PE 0x0004
46#define SERIAL_LSR_FE 0x0008
47#define SERIAL_LSR_BI 0x0010
48#define SERIAL_LSR_THRE 0x0020
49#define SERIAL_LSR_TEMT 0x0040
50#define SERIAL_LSR_FIFOERR 0x0080
51
52/* MSR bit defines(place holders) */
53#define ATEN_MSR_DELTA_CTS 0x10
54#define ATEN_MSR_DELTA_DSR 0x20
55#define ATEN_MSR_DELTA_RI 0x40
56#define ATEN_MSR_DELTA_CD 0x80
57
58/* Serial Port register Address */
59#define RECEIVE_BUFFER_REGISTER ((__u16)(0x00))
60#define TRANSMIT_HOLDING_REGISTER ((__u16)(0x00))
61#define INTERRUPT_ENABLE_REGISTER ((__u16)(0x01))
62#define INTERRUPT_IDENT_REGISTER ((__u16)(0x02))
63#define FIFO_CONTROL_REGISTER ((__u16)(0x02))
64#define LINE_CONTROL_REGISTER ((__u16)(0x03))
65#define MODEM_CONTROL_REGISTER ((__u16)(0x04))
66#define LINE_STATUS_REGISTER ((__u16)(0x05))
67#define MODEM_STATUS_REGISTER ((__u16)(0x06))
68#define SCRATCH_PAD_REGISTER ((__u16)(0x07))
69#define DIVISOR_LATCH_LSB ((__u16)(0x00))
70#define DIVISOR_LATCH_MSB ((__u16)(0x01))
71
72#define SP1_REGISTER ((__u16)(0x00))
73#define CONTROL1_REGISTER ((__u16)(0x01))
74#define CLK_MULTI_REGISTER ((__u16)(0x02))
75#define CLK_START_VALUE_REGISTER ((__u16)(0x03))
76#define DCR1_REGISTER ((__u16)(0x04))
77#define GPIO_REGISTER ((__u16)(0x07))
78
79#define SERIAL_LCR_DLAB ((__u16)(0x0080))
80
81/*
82 * URB POOL related defines
83 */
84#define NUM_URBS 16 /* URB Count */
85#define URB_TRANSFER_BUFFER_SIZE 32 /* URB Size */
86
87#define USB_VENDOR_ID_ATENINTL 0x0557
88#define ATENINTL_DEVICE_ID_2011 0x2011
89#define ATENINTL_DEVICE_ID_7820 0x7820
90
91static struct usb_device_id id_table[] = {
92 { USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_2011) },
93 { USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_7820) },
94 { } /* terminating entry */
95};
96MODULE_DEVICE_TABLE(usb, id_table);
97
98/* This structure holds all of the local port information */
99struct ATENINTL_port {
100 int port_num; /*Actual port number in the device(1,2,etc)*/
101 __u8 bulk_out_endpoint; /* the bulk out endpoint handle */
102 unsigned char *bulk_out_buffer; /* buffer used for the bulk out endpoint */
103 struct urb *write_urb; /* write URB for this port */
104 __u8 bulk_in_endpoint; /* the bulk in endpoint handle */
105 unsigned char *bulk_in_buffer; /* the buffer we use for the bulk in endpoint */
106 struct urb *read_urb; /* read URB for this port */
107 __u8 shadowLCR; /* last LCR value received */
108 __u8 shadowMCR; /* last MCR value received */
109 char open;
110 char chaseResponsePending;
111 wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */
112 wait_queue_head_t wait_command; /* for handling sleeping while waiting for command to finish */
113 struct async_icount icount;
114 struct usb_serial_port *port; /* loop back to the owner of this object */
115 /*Offsets*/
116 __u8 SpRegOffset;
117 __u8 ControlRegOffset;
118 __u8 DcrRegOffset;
119 /* for processing control URBS in interrupt context */
120 struct urb *control_urb;
121 char *ctrl_buf;
122 int MsrLsr;
123
124 struct urb *write_urb_pool[NUM_URBS];
125 /* we pass a pointer to this as the argument sent to cypress_set_termios old_termios */
126 struct ktermios tmp_termios; /* stores the old termios settings */
127 spinlock_t lock; /* private lock */
128};
129
130/* This structure holds all of the individual serial device information */
131struct ATENINTL_serial {
132 __u8 interrupt_in_endpoint; /* the interrupt endpoint handle */
133 unsigned char *interrupt_in_buffer; /* the buffer we use for the interrupt endpoint */
134 struct urb *interrupt_read_urb; /* our interrupt urb */
135 __u8 bulk_in_endpoint; /* the bulk in endpoint handle */
136 unsigned char *bulk_in_buffer; /* the buffer we use for the bulk in endpoint */
137 struct urb *read_urb; /* our bulk read urb */
138 __u8 bulk_out_endpoint; /* the bulk out endpoint handle */
139 struct usb_serial *serial; /* loop back to the owner of this object */
140 int ATEN2011_spectrum_2or4ports; /* this says the number of ports in the device */
141 /* Number of opened ports on an individual USB-serial adapter. */
142 unsigned int NoOfOpenPorts;
143 /* a flag for Status endpoint polling */
144 unsigned char status_polling_started;
145};
146
147static void ATEN2011_set_termios(struct tty_struct *tty,
148 struct usb_serial_port *port,
149 struct ktermios *old_termios);
150static void ATEN2011_change_port_settings(struct tty_struct *tty,
151 struct ATENINTL_port *ATEN2011_port,
152 struct ktermios *old_termios);
153
154/*************************************
155 * Bit definitions for each register *
156 *************************************/
157#define LCR_BITS_5 0x00 /* 5 bits/char */
158#define LCR_BITS_6 0x01 /* 6 bits/char */
159#define LCR_BITS_7 0x02 /* 7 bits/char */
160#define LCR_BITS_8 0x03 /* 8 bits/char */
161#define LCR_BITS_MASK 0x03 /* Mask for bits/char field */
162
163#define LCR_STOP_1 0x00 /* 1 stop bit */
164#define LCR_STOP_1_5 0x04 /* 1.5 stop bits (if 5 bits/char) */
165#define LCR_STOP_2 0x04 /* 2 stop bits (if 6-8 bits/char) */
166#define LCR_STOP_MASK 0x04 /* Mask for stop bits field */
167
168#define LCR_PAR_NONE 0x00 /* No parity */
169#define LCR_PAR_ODD 0x08 /* Odd parity */
170#define LCR_PAR_EVEN 0x18 /* Even parity */
171#define LCR_PAR_MARK 0x28 /* Force parity bit to 1 */
172#define LCR_PAR_SPACE 0x38 /* Force parity bit to 0 */
173#define LCR_PAR_MASK 0x38 /* Mask for parity field */
174
175#define LCR_SET_BREAK 0x40 /* Set Break condition */
176#define LCR_DL_ENABLE 0x80 /* Enable access to divisor latch */
177
178#define MCR_DTR 0x01 /* Assert DTR */
179#define MCR_RTS 0x02 /* Assert RTS */
180#define MCR_OUT1 0x04 /* Loopback only: Sets state of RI */
181#define MCR_MASTER_IE 0x08 /* Enable interrupt outputs */
182#define MCR_LOOPBACK 0x10 /* Set internal (digital) loopback mode */
183#define MCR_XON_ANY 0x20 /* Enable any char to exit XOFF mode */
184
185#define ATEN2011_MSR_CTS 0x10 /* Current state of CTS */
186#define ATEN2011_MSR_DSR 0x20 /* Current state of DSR */
187#define ATEN2011_MSR_RI 0x40 /* Current state of RI */
188#define ATEN2011_MSR_CD 0x80 /* Current state of CD */
189
190
191static int debug;
192
193/*
194 * Version Information
195 */
196#define DRIVER_VERSION "2.0"
197#define DRIVER_DESC "ATENINTL 2011 USB Serial Adapter"
198
199/*
200 * Defines used for sending commands to port
201 */
202
203#define ATEN_WDR_TIMEOUT (50) /* default urb timeout */
204
205/* Requests */
206#define ATEN_RD_RTYPE 0xC0
207#define ATEN_WR_RTYPE 0x40
208#define ATEN_RDREQ 0x0D
209#define ATEN_WRREQ 0x0E
210#define ATEN_CTRL_TIMEOUT 500
211#define VENDOR_READ_LENGTH (0x01)
212
213/* set to 1 for RS485 mode and 0 for RS232 mode */
214/* FIXME make this somehow dynamic and not build time specific */
215static int RS485mode;
216
217static int set_reg_sync(struct usb_serial_port *port, __u16 reg, __u16 val)
218{
219 struct usb_device *dev = port->serial->dev;
220 val = val & 0x00ff;
221
222 dbg("%s: is %x, value %x", __func__, reg, val);
223
224 return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ATEN_WRREQ,
225 ATEN_WR_RTYPE, val, reg, NULL, 0,
226 ATEN_WDR_TIMEOUT);
227}
228
229static int get_reg_sync(struct usb_serial_port *port, __u16 reg, __u16 *val)
230{
231 struct usb_device *dev = port->serial->dev;
232 int ret;
233
234 ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), ATEN_RDREQ,
235 ATEN_RD_RTYPE, 0, reg, val, VENDOR_READ_LENGTH,
236 ATEN_WDR_TIMEOUT);
237 dbg("%s: offset is %x, return val %x", __func__, reg, *val);
238 *val = (*val) & 0x00ff;
239 return ret;
240}
241
242static int set_uart_reg(struct usb_serial_port *port, __u16 reg, __u16 val)
243{
244 struct usb_device *dev = port->serial->dev;
245 struct ATENINTL_serial *a_serial;
246 __u16 minor;
247
248 a_serial = usb_get_serial_data(port->serial);
249 minor = port->serial->minor;
250 if (minor == SERIAL_TTY_NO_MINOR)
251 minor = 0;
252 val = val & 0x00ff;
253
254 /*
255 * For the UART control registers,
256 * the application number need to be Or'ed
257 */
258 if (a_serial->ATEN2011_spectrum_2or4ports == 4)
259 val |= (((__u16)port->number - minor) + 1) << 8;
260 else {
261 if (((__u16) port->number - minor) == 0)
262 val |= (((__u16)port->number - minor) + 1) << 8;
263 else
264 val |= (((__u16)port->number - minor) + 2) << 8;
265 }
266 dbg("%s: application number is %x", __func__, val);
267
268 return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ATEN_WRREQ,
269 ATEN_WR_RTYPE, val, reg, NULL, 0,
270 ATEN_WDR_TIMEOUT);
271}
272
273static int get_uart_reg(struct usb_serial_port *port, __u16 reg, __u16 *val)
274{
275 struct usb_device *dev = port->serial->dev;
276 int ret = 0;
277 __u16 wval;
278 struct ATENINTL_serial *a_serial;
279 __u16 minor = port->serial->minor;
280
281 a_serial = usb_get_serial_data(port->serial);
282 if (minor == SERIAL_TTY_NO_MINOR)
283 minor = 0;
284
285 /* wval is same as application number */
286 if (a_serial->ATEN2011_spectrum_2or4ports == 4)
287 wval = (((__u16)port->number - minor) + 1) << 8;
288 else {
289 if (((__u16) port->number - minor) == 0)
290 wval = (((__u16) port->number - minor) + 1) << 8;
291 else
292 wval = (((__u16) port->number - minor) + 2) << 8;
293 }
294 dbg("%s: application number is %x", __func__, wval);
295 ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), ATEN_RDREQ,
296 ATEN_RD_RTYPE, wval, reg, val, VENDOR_READ_LENGTH,
297 ATEN_WDR_TIMEOUT);
298 *val = (*val) & 0x00ff;
299 return ret;
300}
301
302static int handle_newMsr(struct ATENINTL_port *port, __u8 newMsr)
303{
304 struct ATENINTL_port *ATEN2011_port;
305 struct async_icount *icount;
306 ATEN2011_port = port;
307 icount = &ATEN2011_port->icount;
308 if (newMsr &
309 (ATEN_MSR_DELTA_CTS | ATEN_MSR_DELTA_DSR | ATEN_MSR_DELTA_RI |
310 ATEN_MSR_DELTA_CD)) {
311 icount = &ATEN2011_port->icount;
312
313 /* update input line counters */
314 if (newMsr & ATEN_MSR_DELTA_CTS)
315 icount->cts++;
316 if (newMsr & ATEN_MSR_DELTA_DSR)
317 icount->dsr++;
318 if (newMsr & ATEN_MSR_DELTA_CD)
319 icount->dcd++;
320 if (newMsr & ATEN_MSR_DELTA_RI)
321 icount->rng++;
322 }
323
324 return 0;
325}
326
327static int handle_newLsr(struct ATENINTL_port *port, __u8 newLsr)
328{
329 struct async_icount *icount;
330
331 dbg("%s - %02x", __func__, newLsr);
332
333 if (newLsr & SERIAL_LSR_BI) {
334 /*
335 * Parity and Framing errors only count if they occur exclusive
336 * of a break being received.
337 */
338 newLsr &= (__u8) (SERIAL_LSR_OE | SERIAL_LSR_BI);
339 }
340
341 /* update input line counters */
342 icount = &port->icount;
343 if (newLsr & SERIAL_LSR_BI)
344 icount->brk++;
345 if (newLsr & SERIAL_LSR_OE)
346 icount->overrun++;
347 if (newLsr & SERIAL_LSR_PE)
348 icount->parity++;
349 if (newLsr & SERIAL_LSR_FE)
350 icount->frame++;
351
352 return 0;
353}
354
355static void ATEN2011_control_callback(struct urb *urb)
356{
357 unsigned char *data;
358 struct ATENINTL_port *ATEN2011_port;
359 __u8 regval = 0x0;
360
361 switch (urb->status) {
362 case 0:
363 /* success */
364 break;
365 case -ECONNRESET:
366 case -ENOENT:
367 case -ESHUTDOWN:
368 /* this urb is terminated, clean up */
369 dbg("%s - urb shutting down with status: %d", __func__,
370 urb->status);
371 return;
372 default:
373 dbg("%s - nonzero urb status received: %d", __func__,
374 urb->status);
375 goto exit;
376 }
377
378 ATEN2011_port = (struct ATENINTL_port *)urb->context;
379
380 dbg("%s urb buffer size is %d", __func__, urb->actual_length);
381 dbg("%s ATEN2011_port->MsrLsr is %d port %d", __func__,
382 ATEN2011_port->MsrLsr, ATEN2011_port->port_num);
383 data = urb->transfer_buffer;
384 regval = (__u8) data[0];
385 dbg("%s data is %x", __func__, regval);
386 if (ATEN2011_port->MsrLsr == 0)
387 handle_newMsr(ATEN2011_port, regval);
388 else if (ATEN2011_port->MsrLsr == 1)
389 handle_newLsr(ATEN2011_port, regval);
390
391exit:
392 return;
393}
394
395static int ATEN2011_get_reg(struct ATENINTL_port *ATEN, __u16 Wval, __u16 reg,
396 __u16 *val)
397{
398 struct usb_device *dev = ATEN->port->serial->dev;
399 struct usb_ctrlrequest *dr = NULL;
400 unsigned char *buffer = NULL;
401 int ret = 0;
402 buffer = (__u8 *) ATEN->ctrl_buf;
403
404 dr = (void *)(buffer + 2);
405 dr->bRequestType = ATEN_RD_RTYPE;
406 dr->bRequest = ATEN_RDREQ;
407 dr->wValue = cpu_to_le16(Wval);
408 dr->wIndex = cpu_to_le16(reg);
409 dr->wLength = cpu_to_le16(2);
410
411 usb_fill_control_urb(ATEN->control_urb, dev, usb_rcvctrlpipe(dev, 0),
412 (unsigned char *)dr, buffer, 2,
413 ATEN2011_control_callback, ATEN);
414 ATEN->control_urb->transfer_buffer_length = 2;
415 ret = usb_submit_urb(ATEN->control_urb, GFP_ATOMIC);
416 return ret;
417}
418
419static void ATEN2011_interrupt_callback(struct urb *urb)
420{
421 int result;
422 int length;
423 struct ATENINTL_port *ATEN2011_port;
424 struct ATENINTL_serial *ATEN2011_serial;
425 struct usb_serial *serial;
426 __u16 Data;
427 unsigned char *data;
428 __u8 sp[5], st;
429 int i;
430 __u16 wval;
431 int minor;
432
433 dbg("%s", " : Entering");
434
435 ATEN2011_serial = (struct ATENINTL_serial *)urb->context;
436
437 switch (urb->status) {
438 case 0:
439 /* success */
440 break;
441 case -ECONNRESET:
442 case -ENOENT:
443 case -ESHUTDOWN:
444 /* this urb is terminated, clean up */
445 dbg("%s - urb shutting down with status: %d", __func__,
446 urb->status);
447 return;
448 default:
449 dbg("%s - nonzero urb status received: %d", __func__,
450 urb->status);
451 goto exit;
452 }
453 length = urb->actual_length;
454 data = urb->transfer_buffer;
455
456 serial = ATEN2011_serial->serial;
457
458 /* The ATENINTL device sends 5 bytes:
459 * Byte 1 IIR Port 1 (port.number is 0)
460 * Byte 2 IIR Port 2 (port.number is 1)
461 * Byte 3 IIR Port 3 (port.number is 2)
462 * Byte 4 IIR Port 4 (port.number is 3)
463 * Byte 5 FIFO status for both */
464
465 if (length && length > 5) {
466 dbg("%s", "Wrong data !!!");
467 return;
468 }
469
470 /* MATRIX */
471 if (ATEN2011_serial->ATEN2011_spectrum_2or4ports == 4) {
472 sp[0] = (__u8) data[0];
473 sp[1] = (__u8) data[1];
474 sp[2] = (__u8) data[2];
475 sp[3] = (__u8) data[3];
476 st = (__u8) data[4];
477 } else {
478 sp[0] = (__u8) data[0];
479 sp[1] = (__u8) data[2];
480 /* sp[2]=(__u8)data[2]; */
481 /* sp[3]=(__u8)data[3]; */
482 st = (__u8) data[4];
483
484 }
485 for (i = 0; i < serial->num_ports; i++) {
486 ATEN2011_port = usb_get_serial_port_data(serial->port[i]);
487 minor = serial->minor;
488 if (minor == SERIAL_TTY_NO_MINOR)
489 minor = 0;
490 if ((ATEN2011_serial->ATEN2011_spectrum_2or4ports == 2)
491 && (i != 0))
492 wval =
493 (((__u16) serial->port[i]->number -
494 (__u16) (minor)) + 2) << 8;
495 else
496 wval =
497 (((__u16) serial->port[i]->number -
498 (__u16) (minor)) + 1) << 8;
499 if (ATEN2011_port->open != 0) {
500 if (sp[i] & 0x01) {
501 dbg("SP%d No Interrupt !!!", i);
502 } else {
503 switch (sp[i] & 0x0f) {
504 case SERIAL_IIR_RLS:
505 dbg("Serial Port %d: Receiver status error or address bit detected in 9-bit mode", i);
506 ATEN2011_port->MsrLsr = 1;
507 ATEN2011_get_reg(ATEN2011_port, wval,
508 LINE_STATUS_REGISTER,
509 &Data);
510 break;
511 case SERIAL_IIR_MS:
512 dbg("Serial Port %d: Modem status change", i);
513 ATEN2011_port->MsrLsr = 0;
514 ATEN2011_get_reg(ATEN2011_port, wval,
515 MODEM_STATUS_REGISTER,
516 &Data);
517 break;
518 }
519 }
520 }
521
522 }
523exit:
524 if (ATEN2011_serial->status_polling_started == 0)
525 return;
526
527 result = usb_submit_urb(urb, GFP_ATOMIC);
528 if (result) {
529 dev_err(&urb->dev->dev,
530 "%s - Error %d submitting interrupt urb\n",
531 __func__, result);
532 }
533
534 return;
535}
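
/*
 * Worked example of the wval computation above (assuming minor number 0):
 * on a 4-port device, port[0] yields ((0 - 0) + 1) << 8 = 0x0100 and
 * port[3] yields 0x0400; on a 2-port device the second port takes the
 * "+ 2" branch, so port[1] yields ((1 - 0) + 2) << 8 = 0x0300.  That
 * wval is then handed to ATEN2011_get_reg() as the wValue selector for
 * the LSR/MSR read.
 */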
536
537static void ATEN2011_bulk_in_callback(struct urb *urb)
538{
539 int status;
540 unsigned char *data;
541 struct usb_serial *serial;
542 struct usb_serial_port *port;
543 struct ATENINTL_serial *ATEN2011_serial;
544 struct ATENINTL_port *ATEN2011_port;
545 struct tty_struct *tty;
546
547 if (urb->status) {
548 dbg("nonzero read bulk status received: %d", urb->status);
549 return;
550 }
551
552 ATEN2011_port = (struct ATENINTL_port *)urb->context;
553
554 port = (struct usb_serial_port *)ATEN2011_port->port;
555 serial = port->serial;
556
557 dbg("%s", "Entering...");
558
559 data = urb->transfer_buffer;
560 ATEN2011_serial = usb_get_serial_data(serial);
561
562 if (urb->actual_length) {
563 tty = tty_port_tty_get(&ATEN2011_port->port->port);
564 if (tty) {
565 tty_buffer_request_room(tty, urb->actual_length);
566 tty_insert_flip_string(tty, data, urb->actual_length);
567 tty_flip_buffer_push(tty);
568 tty_kref_put(tty);
569 }
570
571 ATEN2011_port->icount.rx += urb->actual_length;
572 dbg("ATEN2011_port->icount.rx is %d:",
573 ATEN2011_port->icount.rx);
574 }
575
576 if (!ATEN2011_port->read_urb) {
577 dbg("%s", "URB KILLED !!!");
578 return;
579 }
580
581 if (ATEN2011_port->read_urb->status != -EINPROGRESS) {
582 ATEN2011_port->read_urb->dev = serial->dev;
583
584 status = usb_submit_urb(ATEN2011_port->read_urb, GFP_ATOMIC);
585 if (status)
586 dbg("usb_submit_urb(read bulk) failed, status = %d", status);
587 }
588}
589
590static void ATEN2011_bulk_out_data_callback(struct urb *urb)
591{
592 struct ATENINTL_port *ATEN2011_port;
593 struct tty_struct *tty;
594
595 if (urb->status) {
596 dbg("nonzero write bulk status received:%d", urb->status);
597 return;
598 }
599
600 ATEN2011_port = (struct ATENINTL_port *)urb->context;
601
602 dbg("%s", "Entering .........");
603
604 tty = tty_port_tty_get(&ATEN2011_port->port->port);
605
606 if (tty && ATEN2011_port->open)
607 /* tell the tty driver that something has changed */
608 tty_wakeup(tty);
609
610 /* schedule_work(&ATEN2011_port->port->work); */
611 tty_kref_put(tty);
612
613}
614
615#ifdef ATENSerialProbe
616static int ATEN2011_serial_probe(struct usb_serial *serial,
617 const struct usb_device_id *id)
618{
619
620 /*
621 * Need to implement reading the mode_reg and updating the usb_serial
622 * structures (i.e. num_ports, num_bulk_in, num_bulk_out etc.); these
623 * changes could also be folded into attach. */
624 return 1;
625}
626#endif
627
628static int ATEN2011_open(struct tty_struct *tty, struct usb_serial_port *port,
629 struct file *filp)
630{
631 int response;
632 int j;
633 struct usb_serial *serial;
634 struct urb *urb;
635 __u16 Data;
636 int status;
637 struct ATENINTL_serial *ATEN2011_serial;
638 struct ATENINTL_port *ATEN2011_port;
639 struct ktermios tmp_termios;
640 int minor;
641
642 serial = port->serial;
643
644 ATEN2011_port = usb_get_serial_port_data(port);
645
646 if (ATEN2011_port == NULL)
647 return -ENODEV;
648
649 ATEN2011_serial = usb_get_serial_data(serial);
650 if (ATEN2011_serial == NULL)
651 return -ENODEV;
652
653 /* increment the number of opened ports counter here */
654 ATEN2011_serial->NoOfOpenPorts++;
655
656 usb_clear_halt(serial->dev, port->write_urb->pipe);
657 usb_clear_halt(serial->dev, port->read_urb->pipe);
658
659 /* Initialising the write urb pool */
660 for (j = 0; j < NUM_URBS; ++j) {
661 urb = usb_alloc_urb(0, GFP_ATOMIC);
662 ATEN2011_port->write_urb_pool[j] = urb;
663
664 if (urb == NULL) {
665 err("No more urbs???");
666 continue;
667 }
668
669 urb->transfer_buffer =
670 kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
672 if (!urb->transfer_buffer) {
673 err("%s-out of memory for urb buffers.", __func__);
674 continue;
675 }
676 }
677
678/*****************************************************************************
679 * Initialize ATEN2011 -- Write Init values to corresponding Registers
680 *
681 * Register Index
682 * 1 : IER
683 * 2 : FCR
684 * 3 : LCR
685 * 4 : MCR
686 *
687 * 0x08 : SP1/2 Control Reg
688 *****************************************************************************/
689
690 /* NEED to check the following block */
691
692 Data = 0x0;
693 status = get_reg_sync(port, ATEN2011_port->SpRegOffset, &Data);
694 if (status < 0) {
695 dbg("Reading Spreg failed");
696 return -1;
697 }
698 Data |= 0x80;
699 status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data);
700 if (status < 0) {
701 dbg("writing Spreg failed");
702 return -1;
703 }
704
705 Data &= ~0x80;
706 status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data);
707 if (status < 0) {
708 dbg("writing Spreg failed");
709 return -1;
710 }
711
712/* End of block to be checked */
713/**************************CHECK***************************/
714
715 if (RS485mode == 0)
716 Data = 0xC0;
717 else
718 Data = 0x00;
719 status = set_uart_reg(port, SCRATCH_PAD_REGISTER, Data);
720 if (status < 0) {
721 dbg("Writing SCRATCH_PAD_REGISTER failed status-0x%x", status);
722 return -1;
723 } else
724 dbg("SCRATCH_PAD_REGISTER Writing success status%d", status);
725
726/**************************CHECK***************************/
727
728 Data = 0x0;
729 status = get_reg_sync(port, ATEN2011_port->ControlRegOffset, &Data);
730 if (status < 0) {
731 dbg("Reading Controlreg failed");
732 return -1;
733 }
734 Data |= 0x08; /* Driver done bit */
735 Data |= 0x20; /* rx_disable */
736 status = set_reg_sync(port, ATEN2011_port->ControlRegOffset, Data);
739 if (status < 0) {
740 dbg("writing Controlreg failed");
741 return -1;
742 }
743 /*
744 * do register settings here
745 * Set all regs to the device default values.
746 * First Disable all interrupts.
747 */
748
749 Data = 0x00;
750 status = set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
751 if (status < 0) {
752 dbg("disableing interrupts failed");
753 return -1;
754 }
755 /* Set FIFO_CONTROL_REGISTER to the default value */
756 Data = 0x00;
757 status = set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
758 if (status < 0) {
759 dbg("Writing FIFO_CONTROL_REGISTER failed");
760 return -1;
761 }
762
763 Data = 0xcf; /* chk */
764 status = set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
765 if (status < 0) {
766 dbg("Writing FIFO_CONTROL_REGISTER failed");
767 return -1;
768 }
769
770 Data = 0x03; /* LCR_BITS_8 */
771 status = set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
772 ATEN2011_port->shadowLCR = Data;
773
774 Data = 0x0b; /* MCR_DTR|MCR_RTS|MCR_MASTER_IE */
775 status = set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
776 ATEN2011_port->shadowMCR = Data;
777
778#ifdef Check
779 Data = 0x00;
780 status = get_uart_reg(port, LINE_CONTROL_REGISTER, &Data);
781 ATEN2011_port->shadowLCR = Data;
782
783 Data |= SERIAL_LCR_DLAB; /* data latch enable in LCR 0x80 */
784 status = set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
785
786 Data = 0x0c;
787 status = set_uart_reg(port, DIVISOR_LATCH_LSB, Data);
788
789 Data = 0x0;
790 status = set_uart_reg(port, DIVISOR_LATCH_MSB, Data);
791
792 Data = 0x00;
793 status = get_uart_reg(port, LINE_CONTROL_REGISTER, &Data);
794
795/* Data = ATEN2011_port->shadowLCR; */ /* data latch disable */
796 Data = Data & ~SERIAL_LCR_DLAB;
797 status = set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
798 ATEN2011_port->shadowLCR = Data;
799#endif
800 /* clearing Bulkin and Bulkout Fifo */
801 Data = 0x0;
802 status = get_reg_sync(port, ATEN2011_port->SpRegOffset, &Data);
803
804 Data = Data | 0x0c;
805 status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data);
806
807 Data = Data & ~0x0c;
808 status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data);
809 /* Finally enable all interrupts */
810 Data = 0x0c;
812 status = set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
813
814 /* clearing rx_disable */
815 Data = 0x0;
816 status = get_reg_sync(port, ATEN2011_port->ControlRegOffset, &Data);
817 Data = Data & ~0x20;
818 status = set_reg_sync(port, ATEN2011_port->ControlRegOffset, Data);
819
820 /* rx_negate */
821 Data = 0x0;
822 status = get_reg_sync(port, ATEN2011_port->ControlRegOffset, &Data);
823 Data = Data | 0x10;
824 status = set_reg_sync(port, ATEN2011_port->ControlRegOffset, Data);
826
827 /*
828 * Check to see if we've set up our endpoint info yet
829 * (can't set it up in ATEN2011_startup as the structures
830 * were not set up at that time.)
831 */
832 if (ATEN2011_serial->NoOfOpenPorts == 1) {
833 /* start the status polling here */
834 ATEN2011_serial->status_polling_started = 1;
835 /* If not yet set, Set here */
836 ATEN2011_serial->interrupt_in_buffer =
837 serial->port[0]->interrupt_in_buffer;
838 ATEN2011_serial->interrupt_in_endpoint =
839 serial->port[0]->interrupt_in_endpointAddress;
840 ATEN2011_serial->interrupt_read_urb =
841 serial->port[0]->interrupt_in_urb;
842
843 /* set up interrupt urb */
844 usb_fill_int_urb(ATEN2011_serial->interrupt_read_urb,
845 serial->dev,
846 usb_rcvintpipe(serial->dev,
847 ATEN2011_serial->
848 interrupt_in_endpoint),
849 ATEN2011_serial->interrupt_in_buffer,
850 ATEN2011_serial->interrupt_read_urb->
851 transfer_buffer_length,
852 ATEN2011_interrupt_callback, ATEN2011_serial,
853 ATEN2011_serial->interrupt_read_urb->interval);
854
855 /* start interrupt read for ATEN2011 *
856 * will continue as long as ATEN2011 is connected */
857
858 response =
859 usb_submit_urb(ATEN2011_serial->interrupt_read_urb,
860 GFP_KERNEL);
861 if (response) {
862 dbg("%s - Error %d submitting interrupt urb",
863 __func__, response);
864 }
865
866 }
867
874 dbg("port number is %d", port->number);
875 dbg("serial number is %d", port->serial->minor);
876 dbg("Bulkin endpoint is %d", port->bulk_in_endpointAddress);
877 dbg("BulkOut endpoint is %d", port->bulk_out_endpointAddress);
878 dbg("Interrupt endpoint is %d",
879 port->interrupt_in_endpointAddress);
880 dbg("port's number in the device is %d", ATEN2011_port->port_num);
881 ATEN2011_port->bulk_in_buffer = port->bulk_in_buffer;
882 ATEN2011_port->bulk_in_endpoint = port->bulk_in_endpointAddress;
883 ATEN2011_port->read_urb = port->read_urb;
884 ATEN2011_port->bulk_out_endpoint = port->bulk_out_endpointAddress;
885
886 minor = port->serial->minor;
887 if (minor == SERIAL_TTY_NO_MINOR)
888 minor = 0;
889
890 /* set up our bulk in urb */
891 if ((ATEN2011_serial->ATEN2011_spectrum_2or4ports == 2)
892 && (((__u16) port->number - (__u16) (minor)) != 0)) {
893 usb_fill_bulk_urb(ATEN2011_port->read_urb, serial->dev,
894 usb_rcvbulkpipe(serial->dev,
895 (port->
896 bulk_in_endpointAddress +
897 2)), port->bulk_in_buffer,
898 ATEN2011_port->read_urb->
899 transfer_buffer_length,
900 ATEN2011_bulk_in_callback, ATEN2011_port);
901 } else
902 usb_fill_bulk_urb(ATEN2011_port->read_urb,
903 serial->dev,
904 usb_rcvbulkpipe(serial->dev,
905 port->
906 bulk_in_endpointAddress),
907 port->bulk_in_buffer,
908 ATEN2011_port->read_urb->
909 transfer_buffer_length,
910 ATEN2011_bulk_in_callback, ATEN2011_port);
911
912 dbg("ATEN2011_open: bulkin endpoint is %d",
913 port->bulk_in_endpointAddress);
914 response = usb_submit_urb(ATEN2011_port->read_urb, GFP_KERNEL);
915 if (response) {
916 err("%s - Error %d submitting control urb", __func__,
917 response);
918 }
919
920 /* initialize our wait queues */
921 init_waitqueue_head(&ATEN2011_port->wait_chase);
922 init_waitqueue_head(&ATEN2011_port->wait_command);
923
924 /* initialize our icount structure */
925 memset(&(ATEN2011_port->icount), 0x00, sizeof(ATEN2011_port->icount));
926
927 /* initialize our port settings */
928 ATEN2011_port->shadowMCR = MCR_MASTER_IE; /* Must set to enable ints! */
929 ATEN2011_port->chaseResponsePending = 0;
930 /* send a open port command */
931 ATEN2011_port->open = 1;
932 /* ATEN2011_change_port_settings(ATEN2011_port,old_termios); */
933 /* Setup termios */
934 ATEN2011_set_termios(tty, port, &tmp_termios);
935 ATEN2011_port->icount.tx = 0;
936 ATEN2011_port->icount.rx = 0;
937
938 dbg("usb_serial serial:%x ATEN2011_port:%x\nATEN2011_serial:%x usb_serial_port port:%x",
939 (unsigned int)serial, (unsigned int)ATEN2011_port,
940 (unsigned int)ATEN2011_serial, (unsigned int)port);
941
942 return 0;
943
944}
945
946static int ATEN2011_chars_in_buffer(struct tty_struct *tty)
947{
948 struct usb_serial_port *port = tty->driver_data;
949 int i;
950 int chars = 0;
951 struct ATENINTL_port *ATEN2011_port;
952
953 /* dbg("%s"," ATEN2011_chars_in_buffer:entering ..........."); */
954
955 ATEN2011_port = usb_get_serial_port_data(port);
956 if (ATEN2011_port == NULL) {
957 dbg("%s", "ATEN2011_break:leaving ...........");
958 return -1;
959 }
960
961 for (i = 0; i < NUM_URBS; ++i)
962 if (ATEN2011_port->write_urb_pool[i]->status == -EINPROGRESS)
963 chars += URB_TRANSFER_BUFFER_SIZE;
964
965 dbg("%s - returns %d", __func__, chars);
966 return chars;
967
968}
969
970static void ATEN2011_block_until_tx_empty(struct tty_struct *tty,
971 struct ATENINTL_port *ATEN2011_port)
972{
973 int timeout = HZ / 10;
974 int wait = 30;
975 int count;
976
977 while (1) {
978 count = ATEN2011_chars_in_buffer(tty);
979
980 /* Check for Buffer status */
981 if (count <= 0)
982 return;
983
984 /* Block the thread for a while */
985 interruptible_sleep_on_timeout(&ATEN2011_port->wait_chase,
986 timeout);
987
988 /* No activity.. count down section */
989 wait--;
990 if (wait == 0) {
991 dbg("%s - TIMEOUT", __func__);
992 return;
993 }
997 }
998}
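
/*
 * The wait above is bounded to roughly 3 seconds: up to 30 iterations
 * of HZ/10 jiffies (about 100 ms each) before the TIMEOUT path gives
 * up, with an early return as soon as the URB pool reports empty.
 */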
999
1000static void ATEN2011_close(struct tty_struct *tty, struct usb_serial_port *port,
1001 struct file *filp)
1002{
1003 struct usb_serial *serial;
1004 struct ATENINTL_serial *ATEN2011_serial;
1005 struct ATENINTL_port *ATEN2011_port;
1006 int no_urbs;
1007 __u16 Data;
1008
1009 dbg("%s", "ATEN2011_close:entering...");
1010 serial = port->serial;
1011
1012 /* get the adapter's and port's private data */
1013 ATEN2011_serial = usb_get_serial_data(serial);
1014 ATEN2011_port = usb_get_serial_port_data(port);
1015 if ((ATEN2011_serial == NULL) || (ATEN2011_port == NULL))
1016 return;
1017
1018 if (serial->dev) {
1019 /* flush and block(wait) until tx is empty */
1020 ATEN2011_block_until_tx_empty(tty, ATEN2011_port);
1021 }
1022 /* kill the ports URB's */
1023 for (no_urbs = 0; no_urbs < NUM_URBS; no_urbs++)
1024 usb_kill_urb(ATEN2011_port->write_urb_pool[no_urbs]);
1025 /* Freeing Write URBs */
1026 for (no_urbs = 0; no_urbs < NUM_URBS; ++no_urbs) {
1027 kfree(ATEN2011_port->write_urb_pool[no_urbs]->transfer_buffer);
1028 usb_free_urb(ATEN2011_port->write_urb_pool[no_urbs]);
1029 }
1030 /* While closing the port, shut down all bulk reads, writes *
1031 * and interrupt reads if they exist */
1032 if (serial->dev) {
1033 if (ATEN2011_port->write_urb) {
1034 dbg("%s", "Shutdown bulk write");
1035 usb_kill_urb(ATEN2011_port->write_urb);
1036 }
1037 if (ATEN2011_port->read_urb) {
1038 dbg("%s", "Shutdown bulk read");
1039 usb_kill_urb(ATEN2011_port->read_urb);
1040 }
1041 if ((&ATEN2011_port->control_urb)) {
1042 dbg("%s", "Shutdown control read");
1043 /* usb_kill_urb (ATEN2011_port->control_urb); */
1044
1045 }
1046 }
1047 /* if(ATEN2011_port->ctrl_buf != NULL) */
1048 /* kfree(ATEN2011_port->ctrl_buf); */
1049 /* decrement the open-port count of this USB-serial adapter */
1050 ATEN2011_serial->NoOfOpenPorts--;
1051 dbg("NoOfOpenPorts in close%d:in port%d",
1052 ATEN2011_serial->NoOfOpenPorts, port->number);
1053 if (ATEN2011_serial->NoOfOpenPorts == 0) {
1054 /* stop the status polling here */
1055 ATEN2011_serial->status_polling_started = 0;
1056 if (ATEN2011_serial->interrupt_read_urb) {
1057 dbg("%s", "Shutdown interrupt_read_urb");
1058 /* ATEN2011_serial->interrupt_in_buffer=NULL; */
1059 /* usb_kill_urb (ATEN2011_serial->interrupt_read_urb); */
1060 }
1061 }
1062 if (ATEN2011_port->write_urb) {
1063 /* if this urb had a transfer buffer already (old tx) free it */
1064 kfree(ATEN2011_port->write_urb->transfer_buffer);
1065 usb_free_urb(ATEN2011_port->write_urb);
1066 }
1067
1068 /* clear the MCR & IER */
1069 Data = 0x00;
1070 set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
1071 Data = 0x00;
1072 set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
1073
1074 ATEN2011_port->open = 0;
1075 dbg("%s", "Leaving ............");
1076
1077}
1078
1079static void ATEN2011_block_until_chase_response(struct tty_struct *tty,
1080 struct ATENINTL_port
1081 *ATEN2011_port)
1082{
1083 int timeout = 1 * HZ;
1084 int wait = 10;
1085 int count;
1086
1087 while (1) {
1088 count = ATEN2011_chars_in_buffer(tty);
1089
1090 /* Check for Buffer status */
1091 if (count <= 0) {
1092 ATEN2011_port->chaseResponsePending = 0;
1093 return;
1094 }
1095
1096 /* Block the thread for a while */
1097 interruptible_sleep_on_timeout(&ATEN2011_port->wait_chase,
1098 timeout);
1099 /* No activity.. count down section */
1100 wait--;
1101 if (wait == 0) {
1102 dbg("%s - TIMEOUT", __func__);
1103 return;
1104 }
1108 }
1109
1110}
1111
1112static void ATEN2011_break(struct tty_struct *tty, int break_state)
1113{
1114 struct usb_serial_port *port = tty->driver_data;
1115 unsigned char data;
1116 struct usb_serial *serial;
1117 struct ATENINTL_serial *ATEN2011_serial;
1118 struct ATENINTL_port *ATEN2011_port;
1119
1120 dbg("%s", "Entering ...........");
1121 dbg("ATEN2011_break: Start");
1122
1123 serial = port->serial;
1124
1125 ATEN2011_serial = usb_get_serial_data(serial);
1126 ATEN2011_port = usb_get_serial_port_data(port);
1127
1128 if ((ATEN2011_serial == NULL) || (ATEN2011_port == NULL))
1129 return;
1130
1131 /* flush and chase */
1132 ATEN2011_port->chaseResponsePending = 1;
1133
1134 if (serial->dev) {
1135 /* flush and block until tx is empty */
1136 ATEN2011_block_until_chase_response(tty, ATEN2011_port);
1137 }
1138
1139 if (break_state == -1)
1140 data = ATEN2011_port->shadowLCR | LCR_SET_BREAK;
1141 else
1142 data = ATEN2011_port->shadowLCR & ~LCR_SET_BREAK;
1143
1144 ATEN2011_port->shadowLCR = data;
1145 dbg("ATEN2011_break ATEN2011_port->shadowLCR is %x",
1146 ATEN2011_port->shadowLCR);
1147 set_uart_reg(port, LINE_CONTROL_REGISTER, ATEN2011_port->shadowLCR);
1148
1149 return;
1150}
1151
1152static int ATEN2011_write_room(struct tty_struct *tty)
1153{
1154 struct usb_serial_port *port = tty->driver_data;
1155 int i;
1156 int room = 0;
1157 struct ATENINTL_port *ATEN2011_port;
1158
1159 ATEN2011_port = usb_get_serial_port_data(port);
1160 if (ATEN2011_port == NULL) {
1161 dbg("%s", "ATEN2011_break:leaving ...........");
1162 return -1;
1163 }
1164
1165 for (i = 0; i < NUM_URBS; ++i)
1166 if (ATEN2011_port->write_urb_pool[i]->status != -EINPROGRESS)
1167 room += URB_TRANSFER_BUFFER_SIZE;
1168
1169 dbg("%s - returns %d", __func__, room);
1170 return room;
1171
1172}
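
/*
 * Note on granularity: ATEN2011_write_room() and
 * ATEN2011_chars_in_buffer() both count whole URB buffers -- every
 * in-flight URB is charged the full URB_TRANSFER_BUFFER_SIZE no matter
 * how much of it is used -- so the values reported to the tty layer
 * are coarse estimates, not exact byte counts.
 */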
1173
1174static int ATEN2011_write(struct tty_struct *tty, struct usb_serial_port *port,
1175 const unsigned char *data, int count)
1176{
1177 int status;
1178 int i;
1179 int bytes_sent = 0;
1180 int transfer_size;
1181 int minor;
1182
1183 struct ATENINTL_port *ATEN2011_port;
1184 struct usb_serial *serial;
1185 struct ATENINTL_serial *ATEN2011_serial;
1186 struct urb *urb;
1187 const unsigned char *current_position = data;
1188 unsigned char *data1;
1189 dbg("%s", "entering ...........");
1190
1191 serial = port->serial;
1192
1193 ATEN2011_port = usb_get_serial_port_data(port);
1194 if (ATEN2011_port == NULL) {
1195 dbg("%s", "ATEN2011_port is NULL");
1196 return -1;
1197 }
1198
1199 ATEN2011_serial = usb_get_serial_data(serial);
1200 if (ATEN2011_serial == NULL) {
1201 dbg("%s", "ATEN2011_serial is NULL");
1202 return -1;
1203 }
1204
1205 /* try to find a free urb in the list */
1206 urb = NULL;
1207
1208 for (i = 0; i < NUM_URBS; ++i) {
1209 if (ATEN2011_port->write_urb_pool[i]->status != -EINPROGRESS) {
1210 urb = ATEN2011_port->write_urb_pool[i];
1211 dbg("URB:%d", i);
1212 break;
1213 }
1214 }
1215
1216 if (urb == NULL) {
1217 dbg("%s - no more free urbs", __func__);
1218 goto exit;
1219 }
1220
1221 if (urb->transfer_buffer == NULL) {
1222 urb->transfer_buffer =
1223 kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
1224
1225 if (urb->transfer_buffer == NULL) {
1226 err("%s no more kernel memory...", __func__);
1227 goto exit;
1228 }
1229 }
1230 transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE);
1231
1232 memcpy(urb->transfer_buffer, current_position, transfer_size);
1233 /* usb_serial_debug_data (__FILE__, __func__, transfer_size, urb->transfer_buffer); */
1234
1235 /* fill urb with data and submit */
1236 minor = port->serial->minor;
1237 if (minor == SERIAL_TTY_NO_MINOR)
1238 minor = 0;
1239 if ((ATEN2011_serial->ATEN2011_spectrum_2or4ports == 2)
1240 && (((__u16) port->number - (__u16) (minor)) != 0)) {
1241 usb_fill_bulk_urb(urb, ATEN2011_serial->serial->dev,
1242 usb_sndbulkpipe(ATEN2011_serial->serial->dev,
1243 (port->
1244 bulk_out_endpointAddress) +
1245 2), urb->transfer_buffer,
1246 transfer_size,
1247 ATEN2011_bulk_out_data_callback,
1248 ATEN2011_port);
1249 } else
1250
1251 usb_fill_bulk_urb(urb,
1252 ATEN2011_serial->serial->dev,
1253 usb_sndbulkpipe(ATEN2011_serial->serial->dev,
1254 port->
1255 bulk_out_endpointAddress),
1256 urb->transfer_buffer, transfer_size,
1257 ATEN2011_bulk_out_data_callback,
1258 ATEN2011_port);
1259
1260 data1 = urb->transfer_buffer;
1261 dbg("bulkout endpoint is %d", port->bulk_out_endpointAddress);
1262 /* for(i=0;i < urb->actual_length;i++) */
1263 /* dbg("Data is %c ",data1[i]); */
1264
1265 /* send it down the pipe */
1266 status = usb_submit_urb(urb, GFP_ATOMIC);
1267
1268 if (status) {
1269 err("%s - usb_submit_urb(write bulk) failed with status = %d",
1270 __func__, status);
1271 bytes_sent = status;
1272 goto exit;
1273 }
1274 bytes_sent = transfer_size;
1275 ATEN2011_port->icount.tx += transfer_size;
1276 dbg("ATEN2011_port->icount.tx is %d:", ATEN2011_port->icount.tx);
1277
1278exit:
1279 return bytes_sent;
1280}
1281
1282static void ATEN2011_throttle(struct tty_struct *tty)
1283{
1284 struct usb_serial_port *port = tty->driver_data;
1285 struct ATENINTL_port *ATEN2011_port;
1286 int status;
1287
1288 dbg("- port %d", port->number);
1289
1290 ATEN2011_port = usb_get_serial_port_data(port);
1291
1292 if (ATEN2011_port == NULL)
1293 return;
1294
1295 if (!ATEN2011_port->open) {
1296 dbg("%s", "port not opened");
1297 return;
1298 }
1299
1300 dbg("%s", "Entering .......... ");
1301
1302 if (!tty) {
1303 dbg("%s - no tty available", __func__);
1304 return;
1305 }
1306
1307 /* if we are implementing XON/XOFF, send the stop character */
1308 if (I_IXOFF(tty)) {
1309 unsigned char stop_char = STOP_CHAR(tty);
1310 status = ATEN2011_write(tty, port, &stop_char, 1);
1311 if (status <= 0)
1312 return;
1313 }
1314
1315 /* if we are implementing RTS/CTS, toggle that line */
1316 if (tty->termios->c_cflag & CRTSCTS) {
1317 ATEN2011_port->shadowMCR &= ~MCR_RTS;
1318 status = set_uart_reg(port, MODEM_CONTROL_REGISTER,
1319 ATEN2011_port->shadowMCR);
1320 if (status < 0)
1321 return;
1322 }
1323
1324 return;
1325}
1326
1327static void ATEN2011_unthrottle(struct tty_struct *tty)
1328{
1329 struct usb_serial_port *port = tty->driver_data;
1330 int status;
1331 struct ATENINTL_port *ATEN2011_port = usb_get_serial_port_data(port);
1332
1333 if (ATEN2011_port == NULL)
1334 return;
1335
1336 if (!ATEN2011_port->open) {
1337 dbg("%s - port not opened", __func__);
1338 return;
1339 }
1340
1341 dbg("%s", "Entering .......... ");
1342
1343 if (!tty) {
1344 dbg("%s - no tty available", __func__);
1345 return;
1346 }
1347
1348 /* if we are implementing XON/XOFF, send the start character */
1349 if (I_IXOFF(tty)) {
1350 unsigned char start_char = START_CHAR(tty);
1351 status = ATEN2011_write(tty, port, &start_char, 1);
1352 if (status <= 0)
1353 return;
1354 }
1355
1356 /* if we are implementing RTS/CTS, toggle that line */
1357 if (tty->termios->c_cflag & CRTSCTS) {
1358 ATEN2011_port->shadowMCR |= MCR_RTS;
1359 status = set_uart_reg(port, MODEM_CONTROL_REGISTER,
1360 ATEN2011_port->shadowMCR);
1361 if (status < 0)
1362 return;
1363 }
1364
1365 return;
1366}
1367
1368static int ATEN2011_tiocmget(struct tty_struct *tty, struct file *file)
1369{
1370 struct usb_serial_port *port = tty->driver_data;
1371 struct ATENINTL_port *ATEN2011_port;
1372 unsigned int result;
1373 __u16 msr;
1374 __u16 mcr;
1375 /* unsigned int mcr; */
1376 int status = 0;
1377 ATEN2011_port = usb_get_serial_port_data(port);
1378
1379 dbg("%s - port %d", __func__, port->number);
1380
1381 if (ATEN2011_port == NULL)
1382 return -ENODEV;
1383
1384 status = get_uart_reg(port, MODEM_STATUS_REGISTER, &msr);
1385 status = get_uart_reg(port, MODEM_CONTROL_REGISTER, &mcr);
1386 /* mcr = ATEN2011_port->shadowMCR; */
1387 /* COMMENT2: the following three lines are commented out so that only the MSR values are updated */
1388 result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0)
1389 | ((mcr & MCR_RTS) ? TIOCM_RTS : 0)
1390 | ((mcr & MCR_LOOPBACK) ? TIOCM_LOOP : 0)
1391 | ((msr & ATEN2011_MSR_CTS) ? TIOCM_CTS : 0)
1392 | ((msr & ATEN2011_MSR_CD) ? TIOCM_CAR : 0)
1393 | ((msr & ATEN2011_MSR_RI) ? TIOCM_RI : 0)
1394 | ((msr & ATEN2011_MSR_DSR) ? TIOCM_DSR : 0);
1395
1396 dbg("%s - 0x%04X", __func__, result);
1397
1398 return result;
1399}
1400
1401static int ATEN2011_tiocmset(struct tty_struct *tty, struct file *file,
1402 unsigned int set, unsigned int clear)
1403{
1404 struct usb_serial_port *port = tty->driver_data;
1405 struct ATENINTL_port *ATEN2011_port;
1406 unsigned int mcr;
1407 unsigned int status;
1408
1409 dbg("%s - port %d", __func__, port->number);
1410
1411 ATEN2011_port = usb_get_serial_port_data(port);
1412
1413 if (ATEN2011_port == NULL)
1414 return -ENODEV;
1415
1416 mcr = ATEN2011_port->shadowMCR;
1417 if (clear & TIOCM_RTS)
1418 mcr &= ~MCR_RTS;
1419 if (clear & TIOCM_DTR)
1420 mcr &= ~MCR_DTR;
1421 if (clear & TIOCM_LOOP)
1422 mcr &= ~MCR_LOOPBACK;
1423
1424 if (set & TIOCM_RTS)
1425 mcr |= MCR_RTS;
1426 if (set & TIOCM_DTR)
1427 mcr |= MCR_DTR;
1428 if (set & TIOCM_LOOP)
1429 mcr |= MCR_LOOPBACK;
1430
1431 ATEN2011_port->shadowMCR = mcr;
1432
1433 status = set_uart_reg(port, MODEM_CONTROL_REGISTER, mcr);
1434 if (status < 0) {
1435 dbg("setting MODEM_CONTROL_REGISTER Failed");
1436 return -1;
1437 }
1438
1439 return 0;
1440}
1441
1442static void ATEN2011_set_termios(struct tty_struct *tty,
1443 struct usb_serial_port *port,
1444 struct ktermios *old_termios)
1445{
1446 int status;
1447 unsigned int cflag;
1448 struct usb_serial *serial;
1449 struct ATENINTL_port *ATEN2011_port;
1450
1451 dbg("ATEN2011_set_termios: START");
1452
1453 serial = port->serial;
1454
1455 ATEN2011_port = usb_get_serial_port_data(port);
1456
1457 if (ATEN2011_port == NULL)
1458 return;
1459
1460 if (!ATEN2011_port->open) {
1461 dbg("%s - port not opened", __func__);
1462 return;
1463 }
1464
1465 dbg("%s", "setting termios - ");
1466
1467 cflag = tty->termios->c_cflag;
1468
1469 dbg("%s - cflag %08x iflag %08x", __func__,
1470 tty->termios->c_cflag, RELEVANT_IFLAG(tty->termios->c_iflag));
1471
1472 if (old_termios) {
1473 dbg("%s - old clfag %08x old iflag %08x", __func__,
1474 old_termios->c_cflag, RELEVANT_IFLAG(old_termios->c_iflag));
1475 }
1476
1477 dbg("%s - port %d", __func__, port->number);
1478
1479 /* change the port settings to the new ones specified */
1480
1481 ATEN2011_change_port_settings(tty, ATEN2011_port, old_termios);
1482
1483 if (!ATEN2011_port->read_urb) {
1484 dbg("%s", "URB KILLED !!!!!");
1485 return;
1486 }
1487
1488 if (ATEN2011_port->read_urb->status != -EINPROGRESS) {
1489 ATEN2011_port->read_urb->dev = serial->dev;
1490 status = usb_submit_urb(ATEN2011_port->read_urb, GFP_ATOMIC);
1491 if (status) {
1492 dbg("usb_submit_urb(read bulk) failed, status = %d",
1493 status);
1494 }
1496 }
1497 return;
1498}
1499
1500static int get_lsr_info(struct tty_struct *tty,
1501 struct ATENINTL_port *ATEN2011_port,
1502 unsigned int __user *value)
1503{
1504 int count;
1505 unsigned int result = 0;
1506
1507 count = ATEN2011_chars_in_buffer(tty);
1508 if (count == 0) {
1509 dbg("%s -- Empty", __func__);
1510 result = TIOCSER_TEMT;
1511 }
1512
1513 if (copy_to_user(value, &result, sizeof(int)))
1514 return -EFAULT;
1515 return 0;
1516}
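
/*
 * TIOCSER_TEMT reports "transmitter empty".  Here it is approximated
 * from the URB pool via ATEN2011_chars_in_buffer() rather than from the
 * hardware LSR, so bytes still sitting in the device FIFO may not be
 * accounted for.
 */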
1517
1518static int get_number_bytes_avail(struct tty_struct *tty,
1519 struct ATENINTL_port *ATEN2011_port,
1520 unsigned int __user *value)
1521{
1522 unsigned int result = 0;
1523
1524 if (!tty)
1525 return -ENOIOCTLCMD;
1526
1527 result = tty->read_cnt;
1528
1529 dbg("%s(%d) = %d", __func__, ATEN2011_port->port->number, result);
1530 if (copy_to_user(value, &result, sizeof(int)))
1531 return -EFAULT;
1532
1533 return -ENOIOCTLCMD;
1534}
1535
1536static int set_modem_info(struct ATENINTL_port *ATEN2011_port, unsigned int cmd,
1537 unsigned int __user *value)
1538{
1539 unsigned int mcr;
1540 unsigned int arg;
1541 __u16 Data;
1542 int status;
1543 struct usb_serial_port *port;
1544
1545 if (ATEN2011_port == NULL)
1546 return -1;
1547
1548 port = (struct usb_serial_port *)ATEN2011_port->port;
1549
1550 mcr = ATEN2011_port->shadowMCR;
1551
1552 if (copy_from_user(&arg, value, sizeof(int)))
1553 return -EFAULT;
1554
1555 switch (cmd) {
1556 case TIOCMBIS:
1557 if (arg & TIOCM_RTS)
1558 mcr |= MCR_RTS;
1559 if (arg & TIOCM_DTR)
1560 mcr |= MCR_DTR;
1561 if (arg & TIOCM_LOOP)
1562 mcr |= MCR_LOOPBACK;
1563 break;
1564
1565 case TIOCMBIC:
1566 if (arg & TIOCM_RTS)
1567 mcr &= ~MCR_RTS;
1568 if (arg & TIOCM_DTR)
1569 mcr &= ~MCR_DTR;
1570 if (arg & TIOCM_LOOP)
1571 mcr &= ~MCR_LOOPBACK;
1572 break;
1573
1574 case TIOCMSET:
1575 /* turn off the RTS and DTR and LOOPBACK
1576 * and then only turn on what was asked to */
1577 mcr &= ~(MCR_RTS | MCR_DTR | MCR_LOOPBACK);
1578 mcr |= ((arg & TIOCM_RTS) ? MCR_RTS : 0);
1579 mcr |= ((arg & TIOCM_DTR) ? MCR_DTR : 0);
1580 mcr |= ((arg & TIOCM_LOOP) ? MCR_LOOPBACK : 0);
1581 break;
1582 }
1583
1584 ATEN2011_port->shadowMCR = mcr;
1585
1586 Data = ATEN2011_port->shadowMCR;
1587 status = set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
1588 if (status < 0) {
1589 dbg("setting MODEM_CONTROL_REGISTER Failed");
1590 return -1;
1591 }
1592
1593 return 0;
1594}
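
/*
 * Hypothetical user-space usage of the three commands handled above:
 * TIOCMBIS raises the bits in the mask, TIOCMBIC drops them, and
 * TIOCMSET makes the lines match the mask exactly, e.g.:
 *
 *	int flags = TIOCM_RTS | TIOCM_DTR;
 *	ioctl(fd, TIOCMBIS, &flags);
 */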
1595
1596static int get_modem_info(struct ATENINTL_port *ATEN2011_port,
1597 unsigned int __user *value)
1598{
1599 unsigned int result = 0;
1600 __u16 msr;
1601 unsigned int mcr = ATEN2011_port->shadowMCR;
1602 int status;
1603
1604 status = get_uart_reg(ATEN2011_port->port, MODEM_STATUS_REGISTER, &msr);
1605 result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0) /* 0x002 */
1606 |((mcr & MCR_RTS) ? TIOCM_RTS : 0) /* 0x004 */
1607 |((msr & ATEN2011_MSR_CTS) ? TIOCM_CTS : 0) /* 0x020 */
1608 |((msr & ATEN2011_MSR_CD) ? TIOCM_CAR : 0) /* 0x040 */
1609 |((msr & ATEN2011_MSR_RI) ? TIOCM_RI : 0) /* 0x080 */
1610 |((msr & ATEN2011_MSR_DSR) ? TIOCM_DSR : 0); /* 0x100 */
1611
1612 dbg("%s -- %x", __func__, result);
1613
1614 if (copy_to_user(value, &result, sizeof(int)))
1615 return -EFAULT;
1616 return 0;
1617}
1618
1619static int get_serial_info(struct ATENINTL_port *ATEN2011_port,
1620 struct serial_struct __user *retinfo)
1621{
1622 struct serial_struct tmp;
1623
1624 if (ATEN2011_port == NULL)
1625 return -1;
1626
1627 if (!retinfo)
1628 return -EFAULT;
1629
1630 memset(&tmp, 0, sizeof(tmp));
1631
1632 tmp.type = PORT_16550A;
1633 tmp.line = ATEN2011_port->port->serial->minor;
1634 if (tmp.line == SERIAL_TTY_NO_MINOR)
1635 tmp.line = 0;
1636 tmp.port = ATEN2011_port->port->number;
1637 tmp.irq = 0;
1638 tmp.flags = ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ;
1639 tmp.xmit_fifo_size = NUM_URBS * URB_TRANSFER_BUFFER_SIZE;
1640 tmp.baud_base = 9600;
1641 tmp.close_delay = 5 * HZ;
1642 tmp.closing_wait = 30 * HZ;
1643
1644 if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
1645 return -EFAULT;
1646 return 0;
1647}
1648
1649static int ATEN2011_ioctl(struct tty_struct *tty, struct file *file,
1650 unsigned int cmd, unsigned long arg)
1651{
1652 struct usb_serial_port *port = tty->driver_data;
1653 struct ATENINTL_port *ATEN2011_port;
1654 struct async_icount cnow;
1655 struct async_icount cprev;
1656 struct serial_icounter_struct icount;
1657 int ATENret = 0;
1658 unsigned int __user *user_arg = (unsigned int __user *)arg;
1659
1660 ATEN2011_port = usb_get_serial_port_data(port);
1661
1662 if (ATEN2011_port == NULL)
1663 return -1;
1664
1665 dbg("%s - port %d, cmd = 0x%x", __func__, port->number, cmd);
1666
1667 switch (cmd) {
1668 /* return number of bytes available */
1669
1670 case TIOCINQ:
1671 dbg("%s (%d) TIOCINQ", __func__, port->number);
1672 return get_number_bytes_avail(tty, ATEN2011_port, user_arg);
1673 break;
1674
1675 case TIOCOUTQ:
1676 dbg("%s (%d) TIOCOUTQ", __func__, port->number);
1677 return put_user(ATEN2011_chars_in_buffer(tty), user_arg);
1678 break;
1679
1680 case TIOCSERGETLSR:
1681 dbg("%s (%d) TIOCSERGETLSR", __func__, port->number);
1682 return get_lsr_info(tty, ATEN2011_port, user_arg);
1684
1685 case TIOCMBIS:
1686 case TIOCMBIC:
1687 case TIOCMSET:
1688 dbg("%s (%d) TIOCMSET/TIOCMBIC/TIOCMSET", __func__,
1689 port->number);
1690 ATENret = set_modem_info(ATEN2011_port, cmd, user_arg);
1691 return ATENret;
1692
1693 case TIOCMGET:
1694 dbg("%s (%d) TIOCMGET", __func__, port->number);
1695 return get_modem_info(ATEN2011_port, user_arg);
1696
1697 case TIOCGSERIAL:
1698 dbg("%s (%d) TIOCGSERIAL", __func__, port->number);
1699 return get_serial_info(ATEN2011_port,
1700 (struct serial_struct __user *)arg);
1701
1702 case TIOCSSERIAL:
1703 dbg("%s (%d) TIOCSSERIAL", __func__, port->number);
1704 break;
1705
1706 case TIOCMIWAIT:
1707 dbg("%s (%d) TIOCMIWAIT", __func__, port->number);
1708 cprev = ATEN2011_port->icount;
1709 while (1) {
1710 /* see if a signal did it */
1711 if (signal_pending(current))
1712 return -ERESTARTSYS;
1713 cnow = ATEN2011_port->icount;
1714 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
1715 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
1716 return -EIO; /* no change => error */
1717 if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
1718 ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
1719 ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
1720 ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) {
1721 return 0;
1722 }
1723 cprev = cnow;
1724 }
1725 /* NOTREACHED */
1726 break;
1727
1728 case TIOCGICOUNT:
1729 cnow = ATEN2011_port->icount;
1730 icount.cts = cnow.cts;
1731 icount.dsr = cnow.dsr;
1732 icount.rng = cnow.rng;
1733 icount.dcd = cnow.dcd;
1734 icount.rx = cnow.rx;
1735 icount.tx = cnow.tx;
1736 icount.frame = cnow.frame;
1737 icount.overrun = cnow.overrun;
1738 icount.parity = cnow.parity;
1739 icount.brk = cnow.brk;
1740 icount.buf_overrun = cnow.buf_overrun;
1741
1742 dbg("%s (%d) TIOCGICOUNT RX=%d, TX=%d", __func__,
1743 port->number, icount.rx, icount.tx);
1744 if (copy_to_user((void __user *)arg, &icount, sizeof(icount)))
1745 return -EFAULT;
1746 return 0;
1747
1748 default:
1749 break;
1750 }
1751
1752 return -ENOIOCTLCMD;
1753}
1754
1755static int ATEN2011_calc_baud_rate_divisor(int baudRate, int *divisor,
1756 __u16 *clk_sel_val)
1757{
1758 dbg("%s - %d", __func__, baudRate);
1759
1760 if (baudRate <= 115200) {
1761 *divisor = 115200 / baudRate;
1762 *clk_sel_val = 0x0;
1763 } else if ((baudRate > 115200) && (baudRate <= 230400)) {
1765 *divisor = 230400 / baudRate;
1766 *clk_sel_val = 0x10;
1767 } else if ((baudRate > 230400) && (baudRate <= 403200)) {
1768 *divisor = 403200 / baudRate;
1769 *clk_sel_val = 0x20;
1770 } else if ((baudRate > 403200) && (baudRate <= 460800)) {
1771 *divisor = 460800 / baudRate;
1772 *clk_sel_val = 0x30;
1773 } else if ((baudRate > 460800) && (baudRate <= 806400)) {
1774 *divisor = 806400 / baudRate;
1775 *clk_sel_val = 0x40;
1776 } else if ((baudRate > 806400) && (baudRate <= 921600)) {
1777 *divisor = 921600 / baudRate;
1778 *clk_sel_val = 0x50;
1779 } else if ((baudRate > 921600) && (baudRate <= 1572864)) {
1780 *divisor = 1572864 / baudRate;
1781 *clk_sel_val = 0x60;
1782 } else if ((baudRate > 1572864) && (baudRate <= 3145728)) {
1783 *divisor = 3145728 / baudRate;
1784 *clk_sel_val = 0x70;
1785 }
1786 return 0;
1787}
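
/*
 * Worked examples for the ranges above: 57600 baud falls in the first
 * bracket, giving *divisor = 115200 / 57600 = 2 and *clk_sel_val = 0x0;
 * 230400 baud gives *divisor = 1 and *clk_sel_val = 0x10.  Rates above
 * 3145728 match no bracket and leave the outputs untouched, so callers
 * should treat them as unsupported.
 */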
1788
1789static int ATEN2011_send_cmd_write_baud_rate(struct ATENINTL_port
1790 *ATEN2011_port, int baudRate)
1791{
1792 int divisor = 0;
1793 int status;
1794 __u16 Data;
1795 unsigned char number;
1796 __u16 clk_sel_val;
1797 struct usb_serial_port *port;
1798 int minor;
1799
1800 if (ATEN2011_port == NULL)
1801 return -1;
1802
1803 port = (struct usb_serial_port *)ATEN2011_port->port;
1804
1805 dbg("%s", "Entering .......... ");
1806
1807 minor = ATEN2011_port->port->serial->minor;
1808 if (minor == SERIAL_TTY_NO_MINOR)
1809 minor = 0;
1810 number = ATEN2011_port->port->number - minor;
1811
1812 dbg("%s - port = %d, baud = %d", __func__,
1813 ATEN2011_port->port->number, baudRate);
1814 /* reset clk_uart_sel in spregOffset */
1815 if (baudRate > 115200) {
1816#ifdef HW_flow_control
1817 /*
1818 * NOTE: need to check the other register when
1819 * setting the h/w flow control bit to 1;
1820 */
1821 /* Data = ATEN2011_port->shadowMCR; */
1822 Data = 0x2b;
1823 ATEN2011_port->shadowMCR = Data;
1824 status = set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
1825 if (status < 0) {
1826 dbg("Writing spreg failed in set_serial_baud");
1827 return -1;
1828 }
1829#endif
1830
1831 } else {
1832#ifdef HW_flow_control
1833 /* setting h/w flow control bit to 0; */
1834 /* Data = ATEN2011_port->shadowMCR; */
1835 Data = 0xb;
1836 ATEN2011_port->shadowMCR = Data;
1837 status = set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
1838 if (status < 0) {
1839 dbg("Writing spreg failed in set_serial_baud");
1840 return -1;
1841 }
1842#endif
1843
1844 }
1845
1846 if (1) /* was: baudRate <= 115200 */ {
1847 clk_sel_val = 0x0;
1848 Data = 0x0;
1849 status =
1850 ATEN2011_calc_baud_rate_divisor(baudRate, &divisor,
1851 &clk_sel_val);
1852 status = get_reg_sync(port, ATEN2011_port->SpRegOffset, &Data);
1853 if (status < 0) {
1854 dbg("reading spreg failed in set_serial_baud");
1855 return -1;
1856 }
1857 Data = (Data & 0x8f) | clk_sel_val;
1858 status = set_reg_sync(port, ATEN2011_port->SpRegOffset, Data);
1859 if (status < 0) {
1860 dbg("Writing spreg failed in set_serial_baud");
1861 return -1;
1862 }
1863 /* Calculate the Divisor */
1864
1865 if (status) {
1866 err("%s - bad baud rate", __func__);
1867 dbg("%s", "bad baud rate");
1868 return status;
1869 }
1870 /* Enable access to divisor latch */
1871 Data = ATEN2011_port->shadowLCR | SERIAL_LCR_DLAB;
1872 ATEN2011_port->shadowLCR = Data;
1873 set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
1874
1875 /* Write the divisor */
1876 Data = (unsigned char)(divisor & 0xff);
1877 dbg("set_serial_baud Value to write DLL is %x", Data);
1878 set_uart_reg(port, DIVISOR_LATCH_LSB, Data);
1879
1880 Data = (unsigned char)((divisor & 0xff00) >> 8);
1881 dbg("set_serial_baud Value to write DLM is %x", Data);
1882 set_uart_reg(port, DIVISOR_LATCH_MSB, Data);
1883
1884 /* Disable access to divisor latch */
1885 Data = ATEN2011_port->shadowLCR & ~SERIAL_LCR_DLAB;
1886 ATEN2011_port->shadowLCR = Data;
1887 set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
1888
1889 }
1890
1891 return status;
1892}
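
/*
 * The divisor write above follows the standard 16550 sequence: set the
 * DLAB bit in the LCR to expose the divisor latch, write the low byte
 * (DLL) and the high byte (DLM), then clear DLAB again so the latch
 * addresses revert to their normal RBR/THR/IER roles.
 */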
1893
1894static void ATEN2011_change_port_settings(struct tty_struct *tty,
1895 struct ATENINTL_port *ATEN2011_port,
1896 struct ktermios *old_termios)
1897{
1898 int baud;
1899 unsigned cflag;
1900 unsigned iflag;
1901 __u8 lData;
1902 __u8 lParity;
1903 __u8 lStop;
1904 int status;
1905 __u16 Data;
1906 struct usb_serial_port *port;
1907 struct usb_serial *serial;
1908
1909 if (ATEN2011_port == NULL)
1910 return;
1911
1912 port = (struct usb_serial_port *)ATEN2011_port->port;
1913
1914 serial = port->serial;
1915
1916 dbg("%s - port %d", __func__, ATEN2011_port->port->number);
1917
1918 if (!ATEN2011_port->open) {
1919 dbg("%s - port not opened", __func__);
1920 return;
1921 }
1922
1923 if ((!tty) || (!tty->termios)) {
1924 dbg("%s - no tty structures", __func__);
1925 return;
1926 }
1927
1928 dbg("%s", "Entering .......... ");
1929
1930 lData = LCR_BITS_8;
1931 lStop = LCR_STOP_1;
1932 lParity = LCR_PAR_NONE;
1933
1934 cflag = tty->termios->c_cflag;
1935 iflag = tty->termios->c_iflag;
1936
1937 /* Change the number of bits */
1938
1939 /* COMMENT1: the "if (cflag & CSIZE)" guard below is commented out because it caused errors in the serial loopback data test (serial_loopback.pl -v) */
1940 /* if(cflag & CSIZE) */
1941 {
1942 switch (cflag & CSIZE) {
1943 case CS5:
1944 lData = LCR_BITS_5;
1945 break;
1946
1947 case CS6:
1948 lData = LCR_BITS_6;
1949 break;
1950
1951 case CS7:
1952 lData = LCR_BITS_7;
1953 break;
1954 default:
1955 case CS8:
1956 lData = LCR_BITS_8;
1957 break;
1958 }
1959 }
1960 /* Change the Parity bit */
1961 if (cflag & PARENB) {
1962 if (cflag & PARODD) {
1963 lParity = LCR_PAR_ODD;
1964 dbg("%s - parity = odd", __func__);
1965 } else {
1966 lParity = LCR_PAR_EVEN;
1967 dbg("%s - parity = even", __func__);
1968 }
1969
1970 } else {
1971 dbg("%s - parity = none", __func__);
1972 }
1973
1974 if (cflag & CMSPAR)
1975 lParity = lParity | 0x20;
1976
1977 /* Change the Stop bit */
1978 if (cflag & CSTOPB) {
1979 lStop = LCR_STOP_2;
1980 dbg("%s - stop bits = 2", __func__);
1981 } else {
1982 lStop = LCR_STOP_1;
1983 dbg("%s - stop bits = 1", __func__);
1984 }
1985
1986 /* Update the LCR with the correct value */
1987 ATEN2011_port->shadowLCR &=
1988 ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK);
1989 ATEN2011_port->shadowLCR |= (lData | lParity | lStop);
1990
1991 dbg("ATEN2011_change_port_settings ATEN2011_port->shadowLCR is %x",
1992 ATEN2011_port->shadowLCR);
1994 /* Disable Interrupts */
1995 Data = 0x00;
1996 set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
1997
1998 Data = 0x00;
1999 set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
2000
2001 Data = 0xcf;
2002 set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
2003
2004 /* Send the updated LCR value to the ATEN2011 */
2005 Data = ATEN2011_port->shadowLCR;
2006
2007 set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
2008
2009 Data = 0x00b;
2010 ATEN2011_port->shadowMCR = Data;
2011 set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
2014
2015 /* set up the MCR register and send it to the ATEN2011 */
2016
2017 ATEN2011_port->shadowMCR = MCR_MASTER_IE;
2018 if (cflag & CBAUD)
2019 ATEN2011_port->shadowMCR |= (MCR_DTR | MCR_RTS);
2020
2021 if (cflag & CRTSCTS)
2022 ATEN2011_port->shadowMCR |= (MCR_XON_ANY);
2023 else
2024 ATEN2011_port->shadowMCR &= ~(MCR_XON_ANY);
2025
2026 Data = ATEN2011_port->shadowMCR;
2027 set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
2028
2029 /* Determine divisor based on baud rate */
2030 baud = tty_get_baud_rate(tty);
2031
2032 if (!baud) {
2033 /* pick a default, any default... */
2034 dbg("%s", "Picked default baud...");
2035 baud = 9600;
2036 }
2037
2038 dbg("%s - baud rate = %d", __func__, baud);
2039 status = ATEN2011_send_cmd_write_baud_rate(ATEN2011_port, baud);
2040
2041 /* Enable Interrupts */
2042 Data = 0x0c;
2043 set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
2044
2045 if (ATEN2011_port->read_urb->status != -EINPROGRESS) {
2046 ATEN2011_port->read_urb->dev = serial->dev;
2047
2048 status = usb_submit_urb(ATEN2011_port->read_urb, GFP_ATOMIC);
2049
2050 if (status) {
2051 dbg("usb_submit_urb(read bulk) failed, status = %d",
2052 status);
2053 }
2055 }
2056 dbg("ATEN2011_change_port_settings: shadowLCR at end is %x",
2057 ATEN2011_port->shadowLCR);
2059
2060 return;
2061}
2062
2063static int ATEN2011_calc_num_ports(struct usb_serial *serial)
2064{
2065
2066 __u16 Data = 0x00;
2067 int ret = 0;
2068 int ATEN2011_2or4ports;
2069 ret = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
2070 ATEN_RDREQ, ATEN_RD_RTYPE, 0, GPIO_REGISTER,
2071 &Data, VENDOR_READ_LENGTH, ATEN_WDR_TIMEOUT);
2072
2073/* ghostgum: here is where the problem appears to be */
2074/* Which of the following are needed? */
2075/* Greg used the serial->type->num_ports=2 */
2076/* But the code in the ATEN2011_open relies on serial->num_ports=2 */
2077 if ((Data & 0x01) == 0) {
2078 ATEN2011_2or4ports = 2;
2079 serial->type->num_ports = 2;
2080 serial->num_ports = 2;
2081 }
2082 /* else if(serial->interface->cur_altsetting->desc.bNumEndpoints == 9) */
2083 else {
2084 ATEN2011_2or4ports = 4;
2085 serial->type->num_ports = 4;
2086 serial->num_ports = 4;
2087
2088 }
2089
2090 return ATEN2011_2or4ports;
2091}
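
/*
 * The port count comes from bit 0 of the GPIO register read above: a
 * cleared bit selects the 2-port variant, a set bit the 4-port one.
 * Note that the result of usb_control_msg() is stored in ret but never
 * checked, so a failed read leaves Data at 0 and silently selects the
 * 2-port path.
 */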
2092
2093static int ATEN2011_startup(struct usb_serial *serial)
2094{
2095 struct ATENINTL_serial *ATEN2011_serial;
2096 struct ATENINTL_port *ATEN2011_port;
2097 struct usb_device *dev;
2098 int i, status;
2099 int minor;
2100
2101 __u16 Data;
2102 dbg("%s", " ATEN2011_startup :entering..........");
2103
2104 if (!serial) {
2105 dbg("%s", "Invalid Handler");
2106 return -1;
2107 }
2108
2109 dev = serial->dev;
2110
2111 dbg("%s", "Entering...");
2112
2113 /* create our private serial structure */
2114 ATEN2011_serial = kzalloc(sizeof(struct ATENINTL_serial), GFP_KERNEL);
2115 if (ATEN2011_serial == NULL) {
2116 err("%s - Out of memory", __func__);
2117 return -ENOMEM;
2118 }
2119
2120 /* kzalloc() has already zeroed the private structure */
2122
2123 ATEN2011_serial->serial = serial;
2124 /* initialize the status polling flag to 0 */
2125 ATEN2011_serial->status_polling_started = 0;
2126
2127 usb_set_serial_data(serial, ATEN2011_serial);
2128 ATEN2011_serial->ATEN2011_spectrum_2or4ports =
2129 ATEN2011_calc_num_ports(serial);
2130 /* we set up the pointers to the endpoints in the ATEN2011_open *
2131 * function, as the structures aren't created yet. */
2132
2133 /* set up port private structures */
2134 for (i = 0; i < serial->num_ports; ++i) {
2135 ATEN2011_port =
2136 kzalloc(sizeof(struct ATENINTL_port), GFP_KERNEL);
2137 if (ATEN2011_port == NULL) {
2138 err("%s - Out of memory", __func__);
2139 usb_set_serial_data(serial, NULL);
2140 kfree(ATEN2011_serial);
2141 return -ENOMEM;
2142 }
2144
2145 /*
2146 * Initialize each port's interrupt endpoint to the port 0
2147 * interrupt endpoint; the device has only one interrupt
2148 * endpoint, common to all ports.
2149 */
2150 /* serial->port[i]->interrupt_in_endpointAddress = serial->port[0]->interrupt_in_endpointAddress; */
2151
2152 ATEN2011_port->port = serial->port[i];
2153 usb_set_serial_port_data(serial->port[i], ATEN2011_port);
2154
2155 minor = serial->port[i]->serial->minor;
2156 if (minor == SERIAL_TTY_NO_MINOR)
2157 minor = 0;
2158 ATEN2011_port->port_num =
2159 ((serial->port[i]->number - minor) + 1);
2160
2161 if (ATEN2011_port->port_num == 1) {
2162 ATEN2011_port->SpRegOffset = 0x0;
2163 ATEN2011_port->ControlRegOffset = 0x1;
2164 ATEN2011_port->DcrRegOffset = 0x4;
2165 } else if ((ATEN2011_port->port_num == 2)
2166 && (ATEN2011_serial->ATEN2011_spectrum_2or4ports ==
2167 4)) {
2168 ATEN2011_port->SpRegOffset = 0x8;
2169 ATEN2011_port->ControlRegOffset = 0x9;
2170 ATEN2011_port->DcrRegOffset = 0x16;
2171 } else if ((ATEN2011_port->port_num == 2)
2172 && (ATEN2011_serial->ATEN2011_spectrum_2or4ports ==
2173 2)) {
2174 ATEN2011_port->SpRegOffset = 0xa;
2175 ATEN2011_port->ControlRegOffset = 0xb;
2176 ATEN2011_port->DcrRegOffset = 0x19;
2177 } else if ((ATEN2011_port->port_num == 3)
2178 && (ATEN2011_serial->ATEN2011_spectrum_2or4ports ==
2179 4)) {
2180 ATEN2011_port->SpRegOffset = 0xa;
2181 ATEN2011_port->ControlRegOffset = 0xb;
2182 ATEN2011_port->DcrRegOffset = 0x19;
2183 } else if ((ATEN2011_port->port_num == 4)
2184 && (ATEN2011_serial->ATEN2011_spectrum_2or4ports ==
2185 4)) {
2186 ATEN2011_port->SpRegOffset = 0xc;
2187 ATEN2011_port->ControlRegOffset = 0xd;
2188 ATEN2011_port->DcrRegOffset = 0x1c;
2189 }
2190
2191 usb_set_serial_port_data(serial->port[i], ATEN2011_port);
2192
2193 /* enable rx_disable bit in control register */
2194
2195 status = get_reg_sync(serial->port[i],
2196 ATEN2011_port->ControlRegOffset, &Data);
2197 if (status < 0) {
2198 dbg("Reading ControlReg failed status-0x%x",
2199 status);
2200 break;
2201 } else
2202 dbg("ControlReg Reading success val is %x, status %d",
2203 Data, status);
2205 Data |= 0x08; /* setting driver done bit */
2206 Data |= 0x04; /* sp1_bit to have cts change reflect in modem status reg */
2207
2208 /* Data |= 0x20; */ /* rx_disable bit */
2209 status = set_reg_sync(serial->port[i],
2210 ATEN2011_port->ControlRegOffset, Data);
2211 if (status < 0) {
2212 dbg("Writing ControlReg failed(rx_disable) status-0x%x",
2213 status);
2214 break;
2215 } else
2216 dbg("ControlReg Writing success(rx_disable) status %d",
2217 status);
2220
2221 /*
2222 * Write default values in DCR (i.e 0x01 in DCR0, 0x05 in DCR2
2223 * and 0x24 in DCR3
2224 */
2225 Data = 0x01;
2226 status = set_reg_sync(serial->port[i],
2227 (__u16)(ATEN2011_port->DcrRegOffset + 0),
2228 Data);
2229 if (status < 0) {
2230 dbg("Writing DCR0 failed status-0x%x", status);
2231 break;
2232 } else
2233 dbg("DCR0 Writing success status%d", status);
2234
2235 Data = 0x05;
2236 status = set_reg_sync(serial->port[i],
2237 (__u16)(ATEN2011_port->DcrRegOffset + 1),
2238 Data);
2239 if (status < 0) {
2240 dbg("Writing DCR1 failed status-0x%x", status);
2241 break;
2242 } else
2243 dbg("DCR1 Writing success status%d", status);
2244
2245 Data = 0x24;
2246 status = set_reg_sync(serial->port[i],
2247 (__u16)(ATEN2011_port->DcrRegOffset + 2),
2248 Data);
2249 if (status < 0) {
2250 dbg("Writing DCR2 failed status-0x%x", status);
2251 break;
2252 } else
2253 dbg("DCR2 Writing success status%d", status);
2254
2255 /* write 0x0 to clkstart and 0x20 to clkmulti */
2256 Data = 0x0;
2257 status = set_reg_sync(serial->port[i], CLK_START_VALUE_REGISTER,
2258 Data);
2259 if (status < 0) {
2260 dbg("Writing CLK_START_VALUE_REGISTER failed status-0x%x",
2261 status);
2262 break;
2263 } else
2264 dbg("CLK_START_VALUE_REGISTER Writing success status %d",
2265 status);
2268
2269 Data = 0x20;
2270 status = set_reg_sync(serial->port[i], CLK_MULTI_REGISTER,
2271 Data);
2272 if (status < 0) {
2273 dbg("Writing CLK_MULTI_REGISTER failed status-0x%x",
2274 status);
2276 break;
2277 } else
2278 dbg("CLK_MULTI_REGISTER Writing success status%d",
2279 status);
2280
2281 /* Zero Length flag register */
2282 if ((ATEN2011_port->port_num != 1)
2283 && (ATEN2011_serial->ATEN2011_spectrum_2or4ports == 2)) {
2284
2285 Data = 0xff;
2286 status = set_reg_sync(serial->port[i],
2287 (__u16)(ZLP_REG1 + ((__u16)ATEN2011_port->port_num)),
2288 Data);
2289 dbg("ZLIP offset%x",
2290 (__u16) (ZLP_REG1 +
2291 ((__u16) ATEN2011_port->port_num)));
2292 if (status < 0) {
2293 dbg("Writing ZLP_REG%d failed status-0x%x",
2294 i + 2, status);
2296 break;
2297 } else
2298 dbg("ZLP_REG%d Writing success status%d",
2299 i + 2, status);
2300 } else {
2301 Data = 0xff;
2302 status = set_reg_sync(serial->port[i],
2303 (__u16)(ZLP_REG1 + ((__u16)ATEN2011_port->port_num) - 0x1),
2304 Data);
2305 dbg("ZLIP offset%x",
2306 (__u16) (ZLP_REG1 +
2307 ((__u16) ATEN2011_port->port_num) -
2308 0x1));
2309 if (status < 0) {
2310 dbg("Writing ZLP_REG%d failed status-0x%x",
2311 i + 1, status);
2313 break;
2314 } else
2315 dbg("ZLP_REG%d Writing success status%d",
2316 i + 1, status);
2317
2318 }
2319 ATEN2011_port->control_urb = usb_alloc_urb(0, GFP_ATOMIC);
2320 ATEN2011_port->ctrl_buf = kmalloc(16, GFP_KERNEL);
2321
2322 }
2323
2324 /* Zero Length flag enable */
2325 Data = 0x0f;
2326 status = set_reg_sync(serial->port[0], ZLP_REG5, Data);
2327 if (status < 0) {
2328 dbg("Writing ZLP_REG5 failed status-0x%x", status);
2329 return -1;
2330 } else
2331 dbg("ZLP_REG5 Writing success status%d", status);
2332
2333 /* setting configuration feature to one */
2334 usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
2335 (__u8) 0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5 * HZ);
2336 return 0;
2337}
2338
2339static void ATEN2011_release(struct usb_serial *serial)
2340{
2341 int i;
2342 struct ATENINTL_port *ATEN2011_port;
2343
2344 /* check for ports to be closed, close the ports and disconnect */
2345
2346 /* free private structure allocated for serial port *
2347 * stop reads and writes on all ports */
2348
2349 for (i = 0; i < serial->num_ports; ++i) {
2350 ATEN2011_port = usb_get_serial_port_data(serial->port[i]);
2351 kfree(ATEN2011_port->ctrl_buf);
2352 usb_kill_urb(ATEN2011_port->control_urb);
2353 kfree(ATEN2011_port);
2354 usb_set_serial_port_data(serial->port[i], NULL);
2355 }
2356
2357 /* free private structure allocated for serial device */
2358
2359 kfree(usb_get_serial_data(serial));
2360 usb_set_serial_data(serial, NULL);
2361}
2362
2363static struct usb_serial_driver aten_serial_driver = {
2364 .driver = {
2365 .owner = THIS_MODULE,
2366 .name = "aten2011",
2367 },
2368 .description = DRIVER_DESC,
2369 .id_table = id_table,
2370 .open = ATEN2011_open,
2371 .close = ATEN2011_close,
2372 .write = ATEN2011_write,
2373 .write_room = ATEN2011_write_room,
2374 .chars_in_buffer = ATEN2011_chars_in_buffer,
2375 .throttle = ATEN2011_throttle,
2376 .unthrottle = ATEN2011_unthrottle,
2377 .calc_num_ports = ATEN2011_calc_num_ports,
2378
2379 .ioctl = ATEN2011_ioctl,
2380 .set_termios = ATEN2011_set_termios,
2381 .break_ctl = ATEN2011_break,
2382 .tiocmget = ATEN2011_tiocmget,
2383 .tiocmset = ATEN2011_tiocmset,
2384 .attach = ATEN2011_startup,
2385 .release = ATEN2011_release,
2386 .read_bulk_callback = ATEN2011_bulk_in_callback,
2387 .read_int_callback = ATEN2011_interrupt_callback,
2388};
2389
2390static struct usb_driver aten_driver = {
2391 .name = "aten2011",
2392 .probe = usb_serial_probe,
2393 .disconnect = usb_serial_disconnect,
2394 .id_table = id_table,
2395};
2396
2397static int __init aten_init(void)
2398{
2399 int retval;
2400
2401 /* Register with the usb serial */
2402 retval = usb_serial_register(&aten_serial_driver);
2403 if (retval)
2404 return retval;
2405
2406 printk(KERN_INFO KBUILD_MODNAME ":"
2407 DRIVER_DESC " " DRIVER_VERSION "\n");
2408
2409 /* Register with the usb */
2410 retval = usb_register(&aten_driver);
2411 if (retval)
2412 usb_serial_deregister(&aten_serial_driver);
2413
2414 return retval;
2415}
2416
2417static void __exit aten_exit(void)
2418{
2419 usb_deregister(&aten_driver);
2420 usb_serial_deregister(&aten_serial_driver);
2421}
2422
2423module_init(aten_init);
2424module_exit(aten_exit);
2425
2426/* Module information */
2427MODULE_DESCRIPTION(DRIVER_DESC);
2428MODULE_LICENSE("GPL");
2429
2430MODULE_PARM_DESC(debug, "Debug enabled or not");
diff --git a/drivers/staging/udlfb/udlfb.c b/drivers/staging/udlfb/udlfb.c
index 0ab9d15f3439..f5416af1e902 100644
--- a/drivers/staging/udlfb/udlfb.c
+++ b/drivers/staging/udlfb/udlfb.c
@@ -21,6 +21,7 @@
 #include <linux/mm.h>
 #include <linux/fb.h>
 #include <linux/mutex.h>
+#include <linux/vmalloc.h>
 
 #include "udlfb.h"
 
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 24dfb33f90cb..a16c538d0132 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -80,38 +80,18 @@ static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
 	int max_tx;
 	int i;
 
-	/* Allocate space for the SS endpoint companion descriptor */
-	ep->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp),
-			GFP_KERNEL);
-	if (!ep->ss_ep_comp)
-		return -ENOMEM;
 	desc = (struct usb_ss_ep_comp_descriptor *) buffer;
 	if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) {
 		dev_warn(ddev, "No SuperSpeed endpoint companion for config %d "
 				" interface %d altsetting %d ep %d: "
 				"using minimum values\n",
 				cfgno, inum, asnum, ep->desc.bEndpointAddress);
-		ep->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE;
-		ep->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
-		ep->ss_ep_comp->desc.bMaxBurst = 0;
-		/*
-		 * Leave bmAttributes as zero, which will mean no streams for
-		 * bulk, and isoc won't support multiple bursts of packets.
-		 * With bursts of only one packet, and a Mult of 1, the max
-		 * amount of data moved per endpoint service interval is one
-		 * packet.
-		 */
-		if (usb_endpoint_xfer_isoc(&ep->desc) ||
-				usb_endpoint_xfer_int(&ep->desc))
-			ep->ss_ep_comp->desc.wBytesPerInterval =
-					ep->desc.wMaxPacketSize;
 		/*
 		 * The next descriptor is for an Endpoint or Interface,
 		 * no extra descriptors to copy into the companion structure,
 		 * and we didn't eat up any of the buffer.
 		 */
-		retval = 0;
-		goto valid;
+		return 0;
 	}
 	memcpy(&ep->ss_ep_comp->desc, desc, USB_DT_SS_EP_COMP_SIZE);
 	desc = &ep->ss_ep_comp->desc;
@@ -320,6 +300,28 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
 	buffer += i;
 	size -= i;
 
+	/* Allocate space for the SS endpoint companion descriptor */
+	endpoint->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp),
+			GFP_KERNEL);
+	if (!endpoint->ss_ep_comp)
+		return -ENOMEM;
+
+	/* Fill in some default values (may be overwritten later) */
+	endpoint->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE;
+	endpoint->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
+	endpoint->ss_ep_comp->desc.bMaxBurst = 0;
+	/*
+	 * Leave bmAttributes as zero, which will mean no streams for
+	 * bulk, and isoc won't support multiple bursts of packets.
+	 * With bursts of only one packet, and a Mult of 1, the max
+	 * amount of data moved per endpoint service interval is one
+	 * packet.
+	 */
+	if (usb_endpoint_xfer_isoc(&endpoint->desc) ||
+			usb_endpoint_xfer_int(&endpoint->desc))
+		endpoint->ss_ep_comp->desc.wBytesPerInterval =
+				endpoint->desc.wMaxPacketSize;
+
 	if (size > 0) {
 		retval = usb_parse_ss_endpoint_companion(ddev, cfgno,
 				inum, asnum, endpoint, num_ep, buffer,
@@ -329,6 +331,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
 			retval = buffer - buffer0;
 		}
 	} else {
+		dev_warn(ddev, "config %d interface %d altsetting %d "
+				"endpoint 0x%X has no "
+				"SuperSpeed companion descriptor\n",
+				cfgno, inum, asnum, d->bEndpointAddress);
 		retval = buffer - buffer0;
 	}
 } else {
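
The config.c hunks move the companion-descriptor bookkeeping so that usb_parse_endpoint() always allocates the structure and fills safe defaults, and the SuperSpeed parser only overrides those defaults when a real companion descriptor follows. A toy sketch of that fill-defaults-then-override pattern; the struct layout and field names are illustrative, not the USB wire format:

    #include <stdio.h>
    #include <string.h>

    struct companion {
        unsigned char max_burst;
        unsigned short bytes_per_interval;
    };

    static void parse_companion(struct companion *c,
                                const unsigned char *buf, int have_desc)
    {
        /* defaults first (may be overwritten later) */
        c->max_burst = 0;
        c->bytes_per_interval = 0;

        if (!have_desc) {
            fprintf(stderr, "no companion descriptor: using minimum values\n");
            return;
        }
        /* override from the wire bytes only when present */
        c->max_burst = buf[0];
        memcpy(&c->bytes_per_interval, buf + 1, sizeof(c->bytes_per_interval));
    }

    int main(void)
    {
        struct companion c;
        unsigned char raw[3] = { 4, 0x00, 0x02 };
        parse_companion(&c, raw, 1);
        printf("burst=%u bpi=%u\n", c.max_burst, c.bytes_per_interval);
        parse_companion(&c, raw, 0);    /* falls back to the defaults */
        return 0;
    }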
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index dc2ac613a9d1..1d283e1b2b8d 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -105,6 +105,7 @@ static int ehci_orion_setup(struct usb_hcd *hcd)
 	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
 	int retval;
 
+	ehci_reset(ehci);
 	retval = ehci_halt(ehci);
 	if (retval)
 		return retval;
@@ -118,7 +119,6 @@ static int ehci_orion_setup(struct usb_hcd *hcd)
 
 	hcd->has_tt = 1;
 
-	ehci_reset(ehci);
 	ehci_port_power(ehci, 0);
 
 	return retval;
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index f3aaba35e912..83cbecd2a1ed 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -282,6 +282,7 @@ static int ohci_omap_init(struct usb_hcd *hcd)
 static void ohci_omap_stop(struct usb_hcd *hcd)
 {
 	dev_dbg(hcd->self.controller, "stopping USB Controller\n");
+	ohci_stop(hcd);
 	omap_ohci_clock_power(0);
 }
 
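
Both host-controller fixes above are ordering fixes against clock and reset state: ehci-orion issues ehci_reset() before the halt during setup, and ohci-omap now calls ohci_stop() before the clock is gated. A minimal sketch of the invariant being enforced; hw_quiesce()/clock_power() are hypothetical stand-ins:

    #include <stdio.h>

    static void hw_quiesce(void) { puts("controller quiesced"); }
    static void clock_power(int on) { printf("clock %s\n", on ? "on" : "off"); }

    /* Touch the hardware only while its clock still runs; gate it last. */
    static void controller_stop(void)
    {
        hw_quiesce();
        clock_power(0);
    }

    int main(void) { controller_stop(); return 0; }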
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 2501c571f855..705e34324156 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -173,6 +173,7 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int
 {
 	void *addr;
 	u32 temp;
+	u64 temp_64;
 
 	addr = &ir_set->irq_pending;
 	temp = xhci_readl(xhci, addr);
@@ -200,25 +201,15 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int
 	xhci_dbg(xhci, " WARN: %p: ir_set.rsvd = 0x%x\n",
 			addr, (unsigned int)temp);
 
-	addr = &ir_set->erst_base[0];
-	temp = xhci_readl(xhci, addr);
-	xhci_dbg(xhci, " %p: ir_set.erst_base[0] = 0x%x\n",
-			addr, (unsigned int) temp);
-
-	addr = &ir_set->erst_base[1];
-	temp = xhci_readl(xhci, addr);
-	xhci_dbg(xhci, " %p: ir_set.erst_base[1] = 0x%x\n",
-			addr, (unsigned int) temp);
+	addr = &ir_set->erst_base;
+	temp_64 = xhci_read_64(xhci, addr);
+	xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n",
+			addr, temp_64);
 
-	addr = &ir_set->erst_dequeue[0];
-	temp = xhci_readl(xhci, addr);
-	xhci_dbg(xhci, " %p: ir_set.erst_dequeue[0] = 0x%x\n",
-			addr, (unsigned int) temp);
-
-	addr = &ir_set->erst_dequeue[1];
-	temp = xhci_readl(xhci, addr);
-	xhci_dbg(xhci, " %p: ir_set.erst_dequeue[1] = 0x%x\n",
-			addr, (unsigned int) temp);
+	addr = &ir_set->erst_dequeue;
+	temp_64 = xhci_read_64(xhci, addr);
+	xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n",
+			addr, temp_64);
 }
 
 void xhci_print_run_regs(struct xhci_hcd *xhci)
@@ -268,8 +259,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 	xhci_dbg(xhci, "Link TRB:\n");
 	xhci_print_trb_offsets(xhci, trb);
 
-	address = trb->link.segment_ptr[0] +
-		(((u64) trb->link.segment_ptr[1]) << 32);
+	address = trb->link.segment_ptr;
 	xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
 
 	xhci_dbg(xhci, "Interrupter target = 0x%x\n",
@@ -282,8 +272,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 			(unsigned int) (trb->link.control & TRB_NO_SNOOP));
 		break;
 	case TRB_TYPE(TRB_TRANSFER):
-		address = trb->trans_event.buffer[0] +
-			(((u64) trb->trans_event.buffer[1]) << 32);
+		address = trb->trans_event.buffer;
 		/*
 		 * FIXME: look at flags to figure out if it's an address or if
 		 * the data is directly in the buffer field.
@@ -291,8 +280,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 		xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
 		break;
 	case TRB_TYPE(TRB_COMPLETION):
-		address = trb->event_cmd.cmd_trb[0] +
-			(((u64) trb->event_cmd.cmd_trb[1]) << 32);
+		address = trb->event_cmd.cmd_trb;
 		xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
 		xhci_dbg(xhci, "Completion status = %u\n",
 				(unsigned int) GET_COMP_CODE(trb->event_cmd.status));
@@ -328,8 +316,8 @@ void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
 	for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
 		trb = &seg->trbs[i];
 		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
-				(unsigned int) trb->link.segment_ptr[0],
-				(unsigned int) trb->link.segment_ptr[1],
+				lower_32_bits(trb->link.segment_ptr),
+				upper_32_bits(trb->link.segment_ptr),
 				(unsigned int) trb->link.intr_target,
 				(unsigned int) trb->link.control);
 		addr += sizeof(*trb);
@@ -386,8 +374,8 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
 		entry = &erst->entries[i];
 		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
 				(unsigned int) addr,
-				(unsigned int) entry->seg_addr[0],
-				(unsigned int) entry->seg_addr[1],
+				lower_32_bits(entry->seg_addr),
+				upper_32_bits(entry->seg_addr),
 				(unsigned int) entry->seg_size,
 				(unsigned int) entry->rsvd);
 		addr += sizeof(*entry);
@@ -396,90 +384,147 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
 
 void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
 {
-	u32 val;
+	u64 val;
 
-	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
-	xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = 0x%x\n", val);
-	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]);
-	xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val);
+	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
+			lower_32_bits(val));
+	xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
+			upper_32_bits(val));
 }
 
-void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep)
+/* Print the last 32 bytes for 64-byte contexts */
+static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
+{
+	int i;
+	for (i = 0; i < 4; ++i) {
+		xhci_dbg(xhci, "@%p (virt) @%08llx "
+				"(dma) %#08llx - rsvd64[%d]\n",
+				&ctx[4 + i], (unsigned long long)dma,
+				ctx[4 + i], i);
+		dma += 8;
+	}
+}
+
+void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
 {
-	int i, j;
-	int last_ep_ctx = 31;
 	/* Fields are 32 bits wide, DMA addresses are in bytes */
 	int field_size = 32 / 8;
+	int i;
 
-	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
-			&ctx->drop_flags, (unsigned long long)dma,
-			ctx->drop_flags);
-	dma += field_size;
-	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
-			&ctx->add_flags, (unsigned long long)dma,
-			ctx->add_flags);
-	dma += field_size;
-	for (i = 0; i > 6; ++i) {
-		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
-				&ctx->rsvd[i], (unsigned long long)dma,
-				ctx->rsvd[i], i);
-		dma += field_size;
-	}
+	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+	dma_addr_t dma = ctx->dma + ((unsigned long)slot_ctx - (unsigned long)ctx);
+	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
 
 	xhci_dbg(xhci, "Slot Context:\n");
 	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
-			&ctx->slot.dev_info,
-			(unsigned long long)dma, ctx->slot.dev_info);
+			&slot_ctx->dev_info,
+			(unsigned long long)dma, slot_ctx->dev_info);
 	dma += field_size;
 	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
-			&ctx->slot.dev_info2,
-			(unsigned long long)dma, ctx->slot.dev_info2);
+			&slot_ctx->dev_info2,
+			(unsigned long long)dma, slot_ctx->dev_info2);
 	dma += field_size;
 	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
-			&ctx->slot.tt_info,
-			(unsigned long long)dma, ctx->slot.tt_info);
+			&slot_ctx->tt_info,
+			(unsigned long long)dma, slot_ctx->tt_info);
 	dma += field_size;
 	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
-			&ctx->slot.dev_state,
-			(unsigned long long)dma, ctx->slot.dev_state);
+			&slot_ctx->dev_state,
+			(unsigned long long)dma, slot_ctx->dev_state);
 	dma += field_size;
-	for (i = 0; i > 4; ++i) {
+	for (i = 0; i < 4; ++i) {
 		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
-				&ctx->slot.reserved[i], (unsigned long long)dma,
-				ctx->slot.reserved[i], i);
+				&slot_ctx->reserved[i], (unsigned long long)dma,
+				slot_ctx->reserved[i], i);
 		dma += field_size;
 	}
 
+	if (csz)
+		dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
+}
+
+void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *ctx,
+		unsigned int last_ep)
+{
+	int i, j;
+	int last_ep_ctx = 31;
+	/* Fields are 32 bits wide, DMA addresses are in bytes */
+	int field_size = 32 / 8;
+	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
+
 	if (last_ep < 31)
 		last_ep_ctx = last_ep + 1;
 	for (i = 0; i < last_ep_ctx; ++i) {
+		struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
+		dma_addr_t dma = ctx->dma +
+				((unsigned long)ep_ctx - (unsigned long)ctx);
+
 		xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
 		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
-				&ctx->ep[i].ep_info,
-				(unsigned long long)dma, ctx->ep[i].ep_info);
+				&ep_ctx->ep_info,
+				(unsigned long long)dma, ep_ctx->ep_info);
 		dma += field_size;
 		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
-				&ctx->ep[i].ep_info2,
-				(unsigned long long)dma, ctx->ep[i].ep_info2);
-		dma += field_size;
-		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[0]\n",
-				&ctx->ep[i].deq[0],
-				(unsigned long long)dma, ctx->ep[i].deq[0]);
-		dma += field_size;
-		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[1]\n",
-				&ctx->ep[i].deq[1],
-				(unsigned long long)dma, ctx->ep[i].deq[1]);
+				&ep_ctx->ep_info2,
+				(unsigned long long)dma, ep_ctx->ep_info2);
 		dma += field_size;
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
+				&ep_ctx->deq,
+				(unsigned long long)dma, ep_ctx->deq);
+		dma += 2*field_size;
 		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
-				&ctx->ep[i].tx_info,
-				(unsigned long long)dma, ctx->ep[i].tx_info);
+				&ep_ctx->tx_info,
+				(unsigned long long)dma, ep_ctx->tx_info);
 		dma += field_size;
 		for (j = 0; j < 3; ++j) {
 			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
-					&ctx->ep[i].reserved[j],
+					&ep_ctx->reserved[j],
 					(unsigned long long)dma,
-					ctx->ep[i].reserved[j], j);
+					ep_ctx->reserved[j], j);
+			dma += field_size;
+		}
+
+		if (csz)
+			dbg_rsvd64(xhci, (u64 *)ep_ctx, dma);
+	}
+}
+
+void xhci_dbg_ctx(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *ctx,
+		unsigned int last_ep)
+{
+	int i;
+	/* Fields are 32 bits wide, DMA addresses are in bytes */
+	int field_size = 32 / 8;
+	struct xhci_slot_ctx *slot_ctx;
+	dma_addr_t dma = ctx->dma;
+	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
+
+	if (ctx->type == XHCI_CTX_TYPE_INPUT) {
+		struct xhci_input_control_ctx *ctrl_ctx =
+				xhci_get_input_control_ctx(xhci, ctx);
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
+				&ctrl_ctx->drop_flags, (unsigned long long)dma,
+				ctrl_ctx->drop_flags);
+		dma += field_size;
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
+				&ctrl_ctx->add_flags, (unsigned long long)dma,
+				ctrl_ctx->add_flags);
+		dma += field_size;
+		for (i = 0; i < 6; ++i) {
+			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
+					&ctrl_ctx->rsvd2[i], (unsigned long long)dma,
+					ctrl_ctx->rsvd2[i], i);
 			dma += field_size;
 		}
+
+		if (csz)
+			dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
 	}
+
+	slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+	xhci_dbg_slot_ctx(xhci, ctx);
+	xhci_dbg_ep_ctx(xhci, ctx, last_ep);
 }
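
Everything in the xhci-dbg.c diff follows from treating erst_base, erst_dequeue, cmd_ring and friends as single 64-bit registers rather than two-element u32 arrays, read through an xhci_read_64() helper and split for printing with lower_32_bits()/upper_32_bits(). A sketch of what such a helper pair boils down to; plain pointers stand in for the MMIO accessors, and the real helpers (defined in xhci.h, not shown in this diff) may differ:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t example_read_64(const volatile uint32_t *regs)
    {
        uint64_t lo = regs[0];              /* low dword first */
        uint64_t hi = regs[1];              /* then high dword */
        return lo + (hi << 32);
    }

    static void example_write_64(uint64_t val, volatile uint32_t *regs)
    {
        regs[0] = (uint32_t)val;            /* lower_32_bits() */
        regs[1] = (uint32_t)(val >> 32);    /* upper_32_bits() */
    }

    int main(void)
    {
        uint32_t regs[2];
        example_write_64(0x1122334455667788ULL, regs);
        printf("%llx\n", (unsigned long long)example_read_64(regs));
        return 0;
    }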
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index dba3e07ccd09..816c39caca1c 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -103,7 +103,10 @@ int xhci_reset(struct xhci_hcd *xhci)
 	u32 state;
 
 	state = xhci_readl(xhci, &xhci->op_regs->status);
-	BUG_ON((state & STS_HALT) == 0);
+	if ((state & STS_HALT) == 0) {
+		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
+		return 0;
+	}
 
 	xhci_dbg(xhci, "// Reset the HC\n");
 	command = xhci_readl(xhci, &xhci->op_regs->command);
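
Swapping BUG_ON() for a warning plus early return is deliberate: an unmet precondition in a driver should refuse the operation, not panic the machine. A small sketch of the same shape; the bit value is illustrative:

    #include <stdio.h>

    #define EXAMPLE_STS_HALT (1u << 0)  /* illustrative bit position */

    /* Refuse the reset gracefully when the controller is not halted. */
    static int example_reset(unsigned int state)
    {
        if ((state & EXAMPLE_STS_HALT) == 0) {
            fprintf(stderr, "not halted, aborting reset\n");
            return 0;
        }
        /* ... proceed with the actual reset ... */
        return 0;
    }

    int main(void)
    {
        example_reset(0);                   /* warns and bails */
        return example_reset(EXAMPLE_STS_HALT);
    }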
@@ -226,6 +229,7 @@ int xhci_init(struct usb_hcd *hcd)
 static void xhci_work(struct xhci_hcd *xhci)
 {
 	u32 temp;
+	u64 temp_64;
 
 	/*
 	 * Clear the op reg interrupt status first,
@@ -248,9 +252,9 @@ static void xhci_work(struct xhci_hcd *xhci)
 	/* FIXME this should be a delayed service routine that clears the EHB */
 	xhci_handle_event(xhci);
 
-	/* Clear the event handler busy flag; the event ring should be empty. */
-	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
-	xhci_writel(xhci, temp & ~ERST_EHB, &xhci->ir_set->erst_dequeue[0]);
+	/* Clear the event handler busy flag (RW1C); the event ring should be empty. */
+	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+	xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
 	/* Flush posted writes -- FIXME is this necessary? */
 	xhci_readl(xhci, &xhci->ir_set->irq_pending);
 }
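
The busy-flag write flips from masking the bit out to ORing it in because ERST_EHB is a write-1-to-clear (RW1C) bit: writing 0 leaves it set, writing 1 clears it, so the old "temp & ~ERST_EHB" never actually cleared anything. A toy model of RW1C semantics; the bit position is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define EXAMPLE_EHB (1u << 3)           /* illustrative bit position */

    static uint32_t hw_reg = EXAMPLE_EHB;   /* pretend device register */

    /* RW1C: a 1 written to the flag clears it; a 0 is ignored. */
    static void hw_write(uint32_t val)
    {
        hw_reg &= ~(val & EXAMPLE_EHB);
    }

    int main(void)
    {
        hw_write(hw_reg & ~EXAMPLE_EHB);    /* old style: bit stays set */
        printf("after masked write: %#x\n", hw_reg);
        hw_write(hw_reg | EXAMPLE_EHB);     /* new style: bit clears */
        printf("after OR write:     %#x\n", hw_reg);
        return 0;
    }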
@@ -266,19 +270,34 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 	u32 temp, temp2;
+	union xhci_trb *trb;
 
 	spin_lock(&xhci->lock);
+	trb = xhci->event_ring->dequeue;
 	/* Check if the xHC generated the interrupt, or the irq is shared */
 	temp = xhci_readl(xhci, &xhci->op_regs->status);
 	temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+	if (temp == 0xffffffff && temp2 == 0xffffffff)
+		goto hw_died;
+
 	if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
 		spin_unlock(&xhci->lock);
 		return IRQ_NONE;
 	}
+	xhci_dbg(xhci, "op reg status = %08x\n", temp);
+	xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
+	xhci_dbg(xhci, "Event ring dequeue ptr:\n");
+	xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
+			(unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
+			lower_32_bits(trb->link.segment_ptr),
+			upper_32_bits(trb->link.segment_ptr),
+			(unsigned int) trb->link.intr_target,
+			(unsigned int) trb->link.control);
 
 	if (temp & STS_FATAL) {
 		xhci_warn(xhci, "WARNING: Host System Error\n");
 		xhci_halt(xhci);
+hw_died:
 		xhci_to_hcd(xhci)->state = HC_STATE_HALT;
 		spin_unlock(&xhci->lock);
 		return -ESHUTDOWN;
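
The new hw_died path keys on both registers reading back as all ones, the usual signature of a PCI read that completed as a master abort because the device fell off the bus. Illustrative check:

    #include <stdint.h>
    #include <stdio.h>

    static int device_vanished(uint32_t status, uint32_t irq_pending)
    {
        return status == 0xffffffff && irq_pending == 0xffffffff;
    }

    int main(void)
    {
        printf("%d\n", device_vanished(0xffffffff, 0xffffffff)); /* 1 */
        return 0;
    }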
@@ -295,6 +314,7 @@ void xhci_event_ring_work(unsigned long arg)
 {
 	unsigned long flags;
 	int temp;
+	u64 temp_64;
 	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
 	int i, j;
 
@@ -311,9 +331,9 @@ void xhci_event_ring_work(unsigned long arg)
 	xhci_dbg(xhci, "Event ring:\n");
 	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
 	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
-	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
-	temp &= ERST_PTR_MASK;
-	xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
+	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+	temp_64 &= ~ERST_PTR_MASK;
+	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
 	xhci_dbg(xhci, "Command ring:\n");
 	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
 	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
@@ -356,6 +376,7 @@ void xhci_event_ring_work(unsigned long arg)
 int xhci_run(struct usb_hcd *hcd)
 {
 	u32 temp;
+	u64 temp_64;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 	void (*doorbell)(struct xhci_hcd *) = NULL;
 
@@ -382,6 +403,20 @@ int xhci_run(struct usb_hcd *hcd)
 	add_timer(&xhci->event_ring_timer);
 #endif
 
+	xhci_dbg(xhci, "Command ring memory map follows:\n");
+	xhci_debug_ring(xhci, xhci->cmd_ring);
+	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
+	xhci_dbg_cmd_ptrs(xhci);
+
+	xhci_dbg(xhci, "ERST memory map follows:\n");
+	xhci_dbg_erst(xhci, &xhci->erst);
+	xhci_dbg(xhci, "Event ring:\n");
+	xhci_debug_ring(xhci, xhci->event_ring);
+	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
+	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+	temp_64 &= ~ERST_PTR_MASK;
+	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
+
 	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
 	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
 	temp &= ~ER_IRQ_INTERVAL_MASK;
@@ -406,22 +441,6 @@ int xhci_run(struct usb_hcd *hcd)
 	if (NUM_TEST_NOOPS > 0)
 		doorbell = xhci_setup_one_noop(xhci);
 
-	xhci_dbg(xhci, "Command ring memory map follows:\n");
-	xhci_debug_ring(xhci, xhci->cmd_ring);
-	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
-	xhci_dbg_cmd_ptrs(xhci);
-
-	xhci_dbg(xhci, "ERST memory map follows:\n");
-	xhci_dbg_erst(xhci, &xhci->erst);
-	xhci_dbg(xhci, "Event ring:\n");
-	xhci_debug_ring(xhci, xhci->event_ring);
-	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
-	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
-	temp &= ERST_PTR_MASK;
-	xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
-	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[1]);
-	xhci_dbg(xhci, "ERST deq upper = 0x%x\n", temp);
-
 	temp = xhci_readl(xhci, &xhci->op_regs->command);
 	temp |= (CMD_RUN);
 	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
@@ -601,10 +620,13 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 		goto exit;
 	}
 	if (usb_endpoint_xfer_control(&urb->ep->desc))
-		ret = xhci_queue_ctrl_tx(xhci, mem_flags, urb,
+		/* We have a spinlock and interrupts disabled, so we must pass
+		 * atomic context to this function, which may allocate memory.
+		 */
+		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
 	else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
-		ret = xhci_queue_bulk_tx(xhci, mem_flags, urb,
+		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
 				slot_id, ep_index);
 	else
 		ret = -EINVAL;
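
The gfp switch is exactly what the new comment says: with xhci->lock held and interrupts off, nothing on this path may sleep, so the caller's mem_flags (which could be GFP_KERNEL) must not be passed through. A hypothetical helper in kernel idiom showing the rule; queue_under_lock() is not a function in this patch:

    /* Under a spinlock with IRQs disabled, only GFP_ATOMIC is safe. */
    static int queue_under_lock(struct xhci_hcd *xhci, gfp_t mem_flags)
    {
        unsigned long flags;
        void *buf;
        int ret = 0;

        (void)mem_flags;    /* deliberately ignored under the lock */
        spin_lock_irqsave(&xhci->lock, flags);
        buf = kmalloc(64, GFP_ATOMIC);  /* GFP_KERNEL here may sleep */
        if (!buf)
            ret = -ENOMEM;
        /* ... queue TRBs using buf; ownership passes to the hardware ... */
        spin_unlock_irqrestore(&xhci->lock, flags);
        return ret;
    }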
@@ -661,8 +683,12 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		goto done;
 
 	xhci_dbg(xhci, "Cancel URB %p\n", urb);
+	xhci_dbg(xhci, "Event ring:\n");
+	xhci_debug_ring(xhci, xhci->event_ring);
 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
 	ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
+	xhci_dbg(xhci, "Endpoint ring:\n");
+	xhci_debug_ring(xhci, ep_ring);
 	td = (struct xhci_td *) urb->hcpriv;
 
 	ep_ring->cancels_pending++;
@@ -696,7 +722,9 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 		struct usb_host_endpoint *ep)
 {
 	struct xhci_hcd *xhci;
-	struct xhci_device_control *in_ctx;
+	struct xhci_container_ctx *in_ctx, *out_ctx;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	struct xhci_slot_ctx *slot_ctx;
 	unsigned int last_ctx;
 	unsigned int ep_index;
 	struct xhci_ep_ctx *ep_ctx;
@@ -724,31 +752,34 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	}
 
 	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
 	ep_index = xhci_get_endpoint_index(&ep->desc);
-	ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
+	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
 	/* If the HC already knows the endpoint is disabled,
 	 * or the HCD has noted it is disabled, ignore this request
 	 */
 	if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
-			in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
+			ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
 		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
 				__func__, ep);
 		return 0;
 	}
 
-	in_ctx->drop_flags |= drop_flag;
-	new_drop_flags = in_ctx->drop_flags;
+	ctrl_ctx->drop_flags |= drop_flag;
+	new_drop_flags = ctrl_ctx->drop_flags;
 
-	in_ctx->add_flags = ~drop_flag;
-	new_add_flags = in_ctx->add_flags;
+	ctrl_ctx->add_flags = ~drop_flag;
+	new_add_flags = ctrl_ctx->add_flags;
 
-	last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags);
+	last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
+	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
 	/* Update the last valid endpoint context, if we deleted the last one */
-	if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
-		in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
-		in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
+	if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
+		slot_ctx->dev_info &= ~LAST_CTX_MASK;
+		slot_ctx->dev_info |= LAST_CTX(last_ctx);
 	}
-	new_slot_info = in_ctx->slot.dev_info;
+	new_slot_info = slot_ctx->dev_info;
 
 	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
 
@@ -778,17 +809,22 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 		struct usb_host_endpoint *ep)
 {
 	struct xhci_hcd *xhci;
-	struct xhci_device_control *in_ctx;
+	struct xhci_container_ctx *in_ctx, *out_ctx;
 	unsigned int ep_index;
 	struct xhci_ep_ctx *ep_ctx;
+	struct xhci_slot_ctx *slot_ctx;
+	struct xhci_input_control_ctx *ctrl_ctx;
 	u32 added_ctxs;
 	unsigned int last_ctx;
 	u32 new_add_flags, new_drop_flags, new_slot_info;
 	int ret = 0;
 
 	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
-	if (ret <= 0)
+	if (ret <= 0) {
+		/* So we won't queue a reset ep command for a root hub */
+		ep->hcpriv = NULL;
 		return ret;
+	}
 	xhci = hcd_to_xhci(hcd);
 
 	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
@@ -810,12 +846,14 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	}
 
 	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
 	ep_index = xhci_get_endpoint_index(&ep->desc);
-	ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
+	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
 	/* If the HCD has already noted the endpoint is enabled,
 	 * ignore this request.
 	 */
-	if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
+	if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
 		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
 				__func__, ep);
 		return 0;
@@ -833,8 +871,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 		return -ENOMEM;
 	}
 
-	in_ctx->add_flags |= added_ctxs;
-	new_add_flags = in_ctx->add_flags;
+	ctrl_ctx->add_flags |= added_ctxs;
+	new_add_flags = ctrl_ctx->add_flags;
 
 	/* If xhci_endpoint_disable() was called for this endpoint, but the
 	 * xHC hasn't been notified yet through the check_bandwidth() call,
@@ -842,14 +880,18 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	 * descriptors. We must drop and re-add this endpoint, so we leave the
 	 * drop flags alone.
 	 */
-	new_drop_flags = in_ctx->drop_flags;
+	new_drop_flags = ctrl_ctx->drop_flags;
 
+	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
 	/* Update the last valid endpoint context, if we just added one past */
-	if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
-		in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
-		in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
+	if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
+		slot_ctx->dev_info &= ~LAST_CTX_MASK;
+		slot_ctx->dev_info |= LAST_CTX(last_ctx);
 	}
-	new_slot_info = in_ctx->slot.dev_info;
+	new_slot_info = slot_ctx->dev_info;
+
+	/* Store the usb_device pointer for later use */
+	ep->hcpriv = udev;
 
 	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
 			(unsigned int) ep->desc.bEndpointAddress,
@@ -860,9 +902,11 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	return 0;
 }
 
-static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
+static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
 {
+	struct xhci_input_control_ctx *ctrl_ctx;
 	struct xhci_ep_ctx *ep_ctx;
+	struct xhci_slot_ctx *slot_ctx;
 	int i;
 
 	/* When a device's add flag and drop flag are zero, any subsequent
@@ -870,17 +914,18 @@ static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
 	 * untouched. Make sure we don't leave any old state in the input
 	 * endpoint contexts.
 	 */
-	virt_dev->in_ctx->drop_flags = 0;
-	virt_dev->in_ctx->add_flags = 0;
-	virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+	ctrl_ctx->drop_flags = 0;
+	ctrl_ctx->add_flags = 0;
+	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+	slot_ctx->dev_info &= ~LAST_CTX_MASK;
 	/* Endpoint 0 is always valid */
-	virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1);
+	slot_ctx->dev_info |= LAST_CTX(1);
 	for (i = 1; i < 31; ++i) {
-		ep_ctx = &virt_dev->in_ctx->ep[i];
+		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
 		ep_ctx->ep_info = 0;
 		ep_ctx->ep_info2 = 0;
-		ep_ctx->deq[0] = 0;
-		ep_ctx->deq[1] = 0;
+		ep_ctx->deq = 0;
 		ep_ctx->tx_info = 0;
 	}
 }
@@ -903,6 +948,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	unsigned long flags;
 	struct xhci_hcd *xhci;
 	struct xhci_virt_device *virt_dev;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	struct xhci_slot_ctx *slot_ctx;
 
 	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
 	if (ret <= 0)
@@ -918,16 +965,18 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	virt_dev = xhci->devs[udev->slot_id];
 
 	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
-	virt_dev->in_ctx->add_flags |= SLOT_FLAG;
-	virt_dev->in_ctx->add_flags &= ~EP0_FLAG;
-	virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG;
-	virt_dev->in_ctx->drop_flags &= ~EP0_FLAG;
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+	ctrl_ctx->add_flags |= SLOT_FLAG;
+	ctrl_ctx->add_flags &= ~EP0_FLAG;
+	ctrl_ctx->drop_flags &= ~SLOT_FLAG;
+	ctrl_ctx->drop_flags &= ~EP0_FLAG;
 	xhci_dbg(xhci, "New Input Control Context:\n");
-	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma,
-			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
+	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
+			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
 
 	spin_lock_irqsave(&xhci->lock, flags);
-	ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma,
+	ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma,
 			udev->slot_id);
 	if (ret < 0) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
@@ -982,10 +1031,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	}
 
 	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
-	xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma,
-			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
+	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
+			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
 
-	xhci_zero_in_ctx(virt_dev);
+	xhci_zero_in_ctx(xhci, virt_dev);
 	/* Free any old rings */
 	for (i = 1; i < 31; ++i) {
 		if (virt_dev->new_ep_rings[i]) {
@@ -1023,7 +1072,67 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 			virt_dev->new_ep_rings[i] = NULL;
 		}
 	}
-	xhci_zero_in_ctx(virt_dev);
+	xhci_zero_in_ctx(xhci, virt_dev);
+}
+
+/* Deal with stalled endpoints. The core should have sent the control message
+ * to clear the halt condition. However, we need to make the xHCI hardware
+ * reset its sequence number, since a device will expect a sequence number of
+ * zero after the halt condition is cleared.
+ * Context: in_interrupt
+ */
+void xhci_endpoint_reset(struct usb_hcd *hcd,
+		struct usb_host_endpoint *ep)
+{
+	struct xhci_hcd *xhci;
+	struct usb_device *udev;
+	unsigned int ep_index;
+	unsigned long flags;
+	int ret;
+	struct xhci_dequeue_state deq_state;
+	struct xhci_ring *ep_ring;
+
+	xhci = hcd_to_xhci(hcd);
+	udev = (struct usb_device *) ep->hcpriv;
+	/* Called with a root hub endpoint (or an endpoint that wasn't added
+	 * with xhci_add_endpoint()
+	 */
+	if (!ep->hcpriv)
+		return;
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+	ep_ring = xhci->devs[udev->slot_id]->ep_rings[ep_index];
+	if (!ep_ring->stopped_td) {
+		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
+				ep->desc.bEndpointAddress);
+		return;
+	}
+
+	xhci_dbg(xhci, "Queueing reset endpoint command\n");
+	spin_lock_irqsave(&xhci->lock, flags);
+	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
+	/*
+	 * Can't change the ring dequeue pointer until it's transitioned to the
+	 * stopped state, which is only upon a successful reset endpoint
+	 * command. Better hope that last command worked!
+	 */
+	if (!ret) {
+		xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
+		/* We need to move the HW's dequeue pointer past this TD,
+		 * or it will attempt to resend it on the next doorbell ring.
+		 */
+		xhci_find_new_dequeue_state(xhci, udev->slot_id,
+				ep_index, ep_ring->stopped_td, &deq_state);
+		xhci_dbg(xhci, "Queueing new dequeue state\n");
+		xhci_queue_new_dequeue_state(xhci, ep_ring,
+				udev->slot_id,
+				ep_index, &deq_state);
+		kfree(ep_ring->stopped_td);
+		xhci_ring_cmd_db(xhci);
+	}
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	if (ret)
+		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
 }
 
 /*
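
xhci_endpoint_reset() pairs the Reset Endpoint command with moving the hardware dequeue pointer past the stalled TD, so the controller never re-executes the transfer that stalled. A toy ring model of that skip; the real logic lives in xhci_find_new_dequeue_state()/xhci_queue_new_dequeue_state():

    #include <stddef.h>
    #include <stdio.h>

    struct ring {
        int entries[16];
        size_t dequeue;     /* consumer index */
    };

    /* Advance the consumer index past the stalled entry, wrapping at
     * the segment end, so it is never presented to the hardware again. */
    static void skip_stalled(struct ring *r, size_t stalled_index)
    {
        r->dequeue = (stalled_index + 1) % 16;
    }

    int main(void)
    {
        struct ring r = { {0}, 3 };
        skip_stalled(&r, 3);
        printf("dequeue now %zu\n", r.dequeue);
        return 0;
    }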
@@ -1120,7 +1229,9 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	struct xhci_virt_device *virt_dev;
 	int ret = 0;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-	u32 temp;
+	struct xhci_slot_ctx *slot_ctx;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	u64 temp_64;
 
 	if (!udev->slot_id) {
 		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
@@ -1133,10 +1244,12 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	if (!udev->config)
 		xhci_setup_addressable_virt_dev(xhci, udev);
 	/* Otherwise, assume the core has the device configured how it wants */
+	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
+	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
 
 	spin_lock_irqsave(&xhci->lock, flags);
-	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma,
+	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
 			udev->slot_id);
 	if (ret) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
@@ -1176,41 +1289,37 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	default:
 		xhci_err(xhci, "ERROR: unexpected command completion "
 				"code 0x%x.\n", virt_dev->cmd_status);
+		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
+		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
 		ret = -EINVAL;
 		break;
 	}
 	if (ret) {
 		return ret;
 	}
-	temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]);
-	xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp);
-	temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]);
-	xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp);
-	xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%p = %#08x\n",
-			udev->slot_id,
-			&xhci->dcbaa->dev_context_ptrs[2*udev->slot_id],
-			xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]);
-	xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%p = %#08x\n",
-			udev->slot_id,
-			&xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1],
-			xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]);
+	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
+	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
+	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
+			udev->slot_id,
+			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
+			(unsigned long long)
+			xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
 	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
-			(unsigned long long)virt_dev->out_ctx_dma);
+			(unsigned long long)virt_dev->out_ctx->dma);
 	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
-	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2);
+	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
 	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
-	xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2);
+	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
 	/*
 	 * USB core uses address 1 for the roothubs, so we add one to the
 	 * address given back to us by the HC.
 	 */
-	udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1;
+	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+	udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
 	/* Zero the input context control for later use */
-	virt_dev->in_ctx->add_flags = 0;
-	virt_dev->in_ctx->drop_flags = 0;
-	/* Mirror flags in the output context for future ep enable/disable */
-	virt_dev->out_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
-	virt_dev->out_ctx->drop_flags = 0;
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+	ctrl_ctx->add_flags = 0;
+	ctrl_ctx->drop_flags = 0;
 
 	xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
 	/* XXX Meh, not sure if anyone else but choose_address uses this. */
@@ -1252,7 +1361,6 @@ static int __init xhci_hcd_init(void)
 	/* xhci_device_control has eight fields, and also
 	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
 	 */
-	BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8+8*31)*32/8);
 	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
 	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
 	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index c8a72de1c508..e6b9a1c6002d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -88,7 +88,7 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 		return;
 	prev->next = next;
 	if (link_trbs) {
-		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma;
+		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;
 
 		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
 		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
@@ -189,6 +189,63 @@ fail:
 	return 0;
 }
 
+#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
+
+struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
+		int type, gfp_t flags)
+{
+	struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
+	if (!ctx)
+		return NULL;
+
+	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
+	ctx->type = type;
+	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
+	if (type == XHCI_CTX_TYPE_INPUT)
+		ctx->size += CTX_SIZE(xhci->hcc_params);
+
+	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
+	memset(ctx->bytes, 0, ctx->size);
+	return ctx;
+}
+
+void xhci_free_container_ctx(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *ctx)
+{
+	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
+	kfree(ctx);
+}
+
+struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *ctx)
+{
+	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
+	return (struct xhci_input_control_ctx *)ctx->bytes;
+}
+
+struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *ctx)
+{
+	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
+		return (struct xhci_slot_ctx *)ctx->bytes;
+
+	return (struct xhci_slot_ctx *)
+		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
+}
+
+struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *ctx,
+		unsigned int ep_index)
+{
+	/* increment ep index by offset of start of ep ctx array */
+	ep_index++;
+	if (ctx->type == XHCI_CTX_TYPE_INPUT)
+		ep_index++;
+
+	return (struct xhci_ep_ctx *)
+		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
+}
+
 /* All the xhci_tds in the ring's TD list should be freed at this point */
 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 {
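
The container-context helpers above centralize the layout rules: contexts are CTX_SIZE (32 or 64) bytes apart depending on HCC_64BYTE_CONTEXT, endpoint contexts start one entry after the slot context, and input containers carry an extra input-control context in front. A compilable mirror of the offset arithmetic in xhci_get_ep_ctx():

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t ep_ctx_offset(unsigned int ep_index, int is_input, int csz_64)
    {
        uint32_t ctx_size = csz_64 ? 64 : 32;   /* CTX_SIZE() */
        /* skip the slot context, plus the input control context if any */
        unsigned int idx = ep_index + 1 + (is_input ? 1 : 0);
        return idx * ctx_size;
    }

    int main(void)
    {
        /* ep 0 of an input container with 64-byte contexts sits at 128 */
        printf("%u\n", ep_ctx_offset(0, 1, 1));
        return 0;
    }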
@@ -200,8 +257,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 		return;
 
 	dev = xhci->devs[slot_id];
-	xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0;
-	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
 	if (!dev)
 		return;
 
@@ -210,11 +266,10 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 			xhci_ring_free(xhci, dev->ep_rings[i]);
 
 	if (dev->in_ctx)
-		dma_pool_free(xhci->device_pool,
-				dev->in_ctx, dev->in_ctx_dma);
+		xhci_free_container_ctx(xhci, dev->in_ctx);
 	if (dev->out_ctx)
-		dma_pool_free(xhci->device_pool,
-				dev->out_ctx, dev->out_ctx_dma);
+		xhci_free_container_ctx(xhci, dev->out_ctx);
+
 	kfree(xhci->devs[slot_id]);
 	xhci->devs[slot_id] = 0;
 }
@@ -222,7 +277,6 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 		struct usb_device *udev, gfp_t flags)
 {
-	dma_addr_t dma;
 	struct xhci_virt_device *dev;
 
 	/* Slot ID 0 is reserved */
@@ -236,23 +290,21 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 		return 0;
 	dev = xhci->devs[slot_id];
 
-	/* Allocate the (output) device context that will be used in the HC */
-	dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
+	/* Allocate the (output) device context that will be used in the HC. */
+	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
 	if (!dev->out_ctx)
 		goto fail;
-	dev->out_ctx_dma = dma;
+
 	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
-			(unsigned long long)dma);
-	memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));
+			(unsigned long long)dev->out_ctx->dma);
 
 	/* Allocate the (input) device context for address device command */
-	dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
+	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
 	if (!dev->in_ctx)
 		goto fail;
-	dev->in_ctx_dma = dma;
+
 	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
-			(unsigned long long)dma);
-	memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));
+			(unsigned long long)dev->in_ctx->dma);
 
 	/* Allocate endpoint 0 ring */
 	dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
@@ -261,17 +313,12 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 
 	init_completion(&dev->cmd_completion);
 
-	/*
-	 * Point to output device context in dcbaa; skip the output control
-	 * context, which is eight 32 bit fields (or 32 bytes long)
-	 */
-	xhci->dcbaa->dev_context_ptrs[2*slot_id] =
-		(u32) dev->out_ctx_dma + (32);
+	/* Point to output device context in dcbaa. */
+	xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
 	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
 			slot_id,
-			&xhci->dcbaa->dev_context_ptrs[2*slot_id],
-			(unsigned long long)dev->out_ctx_dma);
-	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+			&xhci->dcbaa->dev_context_ptrs[slot_id],
+			(unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]);
 
 	return 1;
 fail:
@@ -285,6 +332,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	struct xhci_virt_device *dev;
 	struct xhci_ep_ctx *ep0_ctx;
 	struct usb_device *top_dev;
+	struct xhci_slot_ctx *slot_ctx;
+	struct xhci_input_control_ctx *ctrl_ctx;
 
 	dev = xhci->devs[udev->slot_id];
 	/* Slot ID 0 is reserved */
@@ -293,27 +342,29 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 				udev->slot_id);
 		return -EINVAL;
 	}
-	ep0_ctx = &dev->in_ctx->ep[0];
+	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
+	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
 
 	/* 2) New slot context and endpoint 0 context are valid*/
-	dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
+	ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
 
 	/* 3) Only the control endpoint is valid - one endpoint context */
-	dev->in_ctx->slot.dev_info |= LAST_CTX(1);
+	slot_ctx->dev_info |= LAST_CTX(1);
 
 	switch (udev->speed) {
 	case USB_SPEED_SUPER:
-		dev->in_ctx->slot.dev_info |= (u32) udev->route;
-		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS;
+		slot_ctx->dev_info |= (u32) udev->route;
+		slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
 		break;
 	case USB_SPEED_HIGH:
-		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS;
+		slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
 		break;
 	case USB_SPEED_FULL:
-		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS;
+		slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
 		break;
 	case USB_SPEED_LOW:
-		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS;
+		slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
 		break;
 	case USB_SPEED_VARIABLE:
 		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
@@ -327,7 +378,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
 			top_dev = top_dev->parent)
 		/* Found device below root hub */;
-	dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
+	slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
 	xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
 
 	/* Is this a LS/FS device under a HS hub? */
@@ -337,8 +388,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
337 */ 388 */
338 if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && 389 if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
339 udev->tt) { 390 udev->tt) {
340 dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id; 391 slot_ctx->tt_info = udev->tt->hub->slot_id;
341 dev->in_ctx->slot.tt_info |= udev->ttport << 8; 392 slot_ctx->tt_info |= udev->ttport << 8;
342 } 393 }
343 xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); 394 xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
344 xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); 395 xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
@@ -360,10 +411,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
360 ep0_ctx->ep_info2 |= MAX_BURST(0); 411 ep0_ctx->ep_info2 |= MAX_BURST(0);
361 ep0_ctx->ep_info2 |= ERROR_COUNT(3); 412 ep0_ctx->ep_info2 |= ERROR_COUNT(3);
362 413
363 ep0_ctx->deq[0] = 414 ep0_ctx->deq =
364 dev->ep_rings[0]->first_seg->dma; 415 dev->ep_rings[0]->first_seg->dma;
365 ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state; 416 ep0_ctx->deq |= dev->ep_rings[0]->cycle_state;
366 ep0_ctx->deq[1] = 0;
367 417
368 /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ 418 /* Steps 7 and 8 were done in xhci_alloc_virt_device() */
369 419
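The switch above packs the route string and device speed into the slot context's dev_info word. As a worked sketch of the layout (xHCI spec section 6.2.2; the SLOT_SPEED_* and LAST_CTX macros are assumed from xhci.h): route string sits in bits 19:0, speed in bits 23:20, and the number of valid context entries in bits 31:27. For a SuperSpeed device with route string 0x1 and only ep0 valid:

    slot_ctx->dev_info = 0x1            /* route string, bits 19:0 */
                       | SLOT_SPEED_SS  /* speed, bits 23:20 */
                       | LAST_CTX(1);   /* context entries, bits 31:27 */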
@@ -470,25 +520,26 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
470 unsigned int max_burst; 520 unsigned int max_burst;
471 521
472 ep_index = xhci_get_endpoint_index(&ep->desc); 522 ep_index = xhci_get_endpoint_index(&ep->desc);
473 ep_ctx = &virt_dev->in_ctx->ep[ep_index]; 523 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
474 524
475 /* Set up the endpoint ring */ 525 /* Set up the endpoint ring */
476 virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags); 526 virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
477 if (!virt_dev->new_ep_rings[ep_index]) 527 if (!virt_dev->new_ep_rings[ep_index])
478 return -ENOMEM; 528 return -ENOMEM;
479 ep_ring = virt_dev->new_ep_rings[ep_index]; 529 ep_ring = virt_dev->new_ep_rings[ep_index];
480 ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state; 530 ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
481 ep_ctx->deq[1] = 0;
482 531
483 ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); 532 ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
484 533
485 /* FIXME dig Mult and streams info out of ep companion desc */ 534 /* FIXME dig Mult and streams info out of ep companion desc */
486 535
487 /* Allow 3 retries for everything but isoc */ 536 /* Allow 3 retries for everything but isoc;
537 * error count = 0 means infinite retries.
538 */
488 if (!usb_endpoint_xfer_isoc(&ep->desc)) 539 if (!usb_endpoint_xfer_isoc(&ep->desc))
489 ep_ctx->ep_info2 = ERROR_COUNT(3); 540 ep_ctx->ep_info2 = ERROR_COUNT(3);
490 else 541 else
491 ep_ctx->ep_info2 = ERROR_COUNT(0); 542 ep_ctx->ep_info2 = ERROR_COUNT(1);
492 543
493 ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep); 544 ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);
494 545
@@ -498,7 +549,12 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
498 max_packet = ep->desc.wMaxPacketSize; 549 max_packet = ep->desc.wMaxPacketSize;
499 ep_ctx->ep_info2 |= MAX_PACKET(max_packet); 550 ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
500 /* dig out max burst from ep companion desc */ 551 /* dig out max burst from ep companion desc */
501 max_packet = ep->ss_ep_comp->desc.bMaxBurst; 552 if (!ep->ss_ep_comp) {
553 xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
554 max_packet = 0;
555 } else {
556 max_packet = ep->ss_ep_comp->desc.bMaxBurst;
557 }
502 ep_ctx->ep_info2 |= MAX_BURST(max_packet); 558 ep_ctx->ep_info2 |= MAX_BURST(max_packet);
503 break; 559 break;
504 case USB_SPEED_HIGH: 560 case USB_SPEED_HIGH:
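For reference, the ep_info2 word assembled in this function follows the endpoint context layout of spec section 6.2.3: error count (CErr) in bits 2:1, endpoint type in bits 5:3, max burst in bits 15:8, and max packet size in bits 31:16. A self-contained sketch of the packing the ERROR_COUNT/MAX_BURST/MAX_PACKET macros presumably perform:

    #include <stdint.h>

    static uint32_t make_ep_info2(uint32_t err_count, uint32_t ep_type,
                                  uint32_t max_burst, uint32_t max_packet)
    {
            return ((err_count  & 0x3)    << 1)    /* CErr, bits 2:1 */
                 | ((ep_type    & 0x7)    << 3)    /* EP type, bits 5:3 */
                 | ((max_burst  & 0xff)   << 8)    /* max burst, bits 15:8 */
                 | ((max_packet & 0xffff) << 16);  /* max packet, bits 31:16 */
    }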
@@ -531,18 +587,114 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,
531 struct xhci_ep_ctx *ep_ctx; 587 struct xhci_ep_ctx *ep_ctx;
532 588
533 ep_index = xhci_get_endpoint_index(&ep->desc); 589 ep_index = xhci_get_endpoint_index(&ep->desc);
534 ep_ctx = &virt_dev->in_ctx->ep[ep_index]; 590 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
535 591
536 ep_ctx->ep_info = 0; 592 ep_ctx->ep_info = 0;
537 ep_ctx->ep_info2 = 0; 593 ep_ctx->ep_info2 = 0;
538 ep_ctx->deq[0] = 0; 594 ep_ctx->deq = 0;
539 ep_ctx->deq[1] = 0;
540 ep_ctx->tx_info = 0; 595 ep_ctx->tx_info = 0;
541 /* Don't free the endpoint ring until the set interface or configuration 596 /* Don't free the endpoint ring until the set interface or configuration
542 * request succeeds. 597 * request succeeds.
543 */ 598 */
544} 599}
545 600
601/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
602static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
603{
604 int i;
605 struct device *dev = xhci_to_hcd(xhci)->self.controller;
606 int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
607
608 xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);
609
610 if (!num_sp)
611 return 0;
612
613 xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
614 if (!xhci->scratchpad)
615 goto fail_sp;
616
617 xhci->scratchpad->sp_array =
618 pci_alloc_consistent(to_pci_dev(dev),
619 num_sp * sizeof(u64),
620 &xhci->scratchpad->sp_dma);
621 if (!xhci->scratchpad->sp_array)
622 goto fail_sp2;
623
624 xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
625 if (!xhci->scratchpad->sp_buffers)
626 goto fail_sp3;
627
628 xhci->scratchpad->sp_dma_buffers =
629 kzalloc(sizeof(dma_addr_t) * num_sp, flags);
630
631 if (!xhci->scratchpad->sp_dma_buffers)
632 goto fail_sp4;
633
634 xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma;
635 for (i = 0; i < num_sp; i++) {
636 dma_addr_t dma;
637 void *buf = pci_alloc_consistent(to_pci_dev(dev),
638 xhci->page_size, &dma);
639 if (!buf)
640 goto fail_sp5;
641
642 xhci->scratchpad->sp_array[i] = dma;
643 xhci->scratchpad->sp_buffers[i] = buf;
644 xhci->scratchpad->sp_dma_buffers[i] = dma;
645 }
646
647 return 0;
648
649 fail_sp5:
650 for (i = i - 1; i >= 0; i--) {
651 pci_free_consistent(to_pci_dev(dev), xhci->page_size,
652 xhci->scratchpad->sp_buffers[i],
653 xhci->scratchpad->sp_dma_buffers[i]);
654 }
655 kfree(xhci->scratchpad->sp_dma_buffers);
656
657 fail_sp4:
658 kfree(xhci->scratchpad->sp_buffers);
659
660 fail_sp3:
661 pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
662 xhci->scratchpad->sp_array,
663 xhci->scratchpad->sp_dma);
664
665 fail_sp2:
666 kfree(xhci->scratchpad);
667 xhci->scratchpad = NULL;
668
669 fail_sp:
670 return -ENOMEM;
671}
672
673static void scratchpad_free(struct xhci_hcd *xhci)
674{
675 int num_sp;
676 int i;
677 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
678
679 if (!xhci->scratchpad)
680 return;
681
682 num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
683
684 for (i = 0; i < num_sp; i++) {
685 pci_free_consistent(pdev, xhci->page_size,
686 xhci->scratchpad->sp_buffers[i],
687 xhci->scratchpad->sp_dma_buffers[i]);
688 }
689 kfree(xhci->scratchpad->sp_dma_buffers);
690 kfree(xhci->scratchpad->sp_buffers);
691 pci_free_consistent(pdev, num_sp * sizeof(u64),
692 xhci->scratchpad->sp_array,
693 xhci->scratchpad->sp_dma);
694 kfree(xhci->scratchpad);
695 xhci->scratchpad = NULL;
696}
697
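The scratchpad helpers above imply a bookkeeping structure roughly like the following (the real definition sits in xhci.h, outside this excerpt, so treat this as an inferred sketch). Only sp_array is visible to the hardware, via DCBAA entry 0; the other two arrays exist solely so scratchpad_free() can hand the buffers back:

    struct xhci_scratchpad {
            u64         *sp_array;       /* HW-visible array of buffer DMA addrs */
            dma_addr_t   sp_dma;         /* DMA address of sp_array itself */
            void       **sp_buffers;     /* kernel virtual addrs, for freeing */
            dma_addr_t  *sp_dma_buffers; /* per-buffer DMA addrs, for freeing */
    };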
546void xhci_mem_cleanup(struct xhci_hcd *xhci) 698void xhci_mem_cleanup(struct xhci_hcd *xhci)
547{ 699{
548 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 700 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
@@ -551,10 +703,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
551 703
552 /* Free the Event Ring Segment Table and the actual Event Ring */ 704 /* Free the Event Ring Segment Table and the actual Event Ring */
553 xhci_writel(xhci, 0, &xhci->ir_set->erst_size); 705 xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
554 xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]); 706 xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
555 xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]); 707 xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
556 xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
557 xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
558 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); 708 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
559 if (xhci->erst.entries) 709 if (xhci->erst.entries)
560 pci_free_consistent(pdev, size, 710 pci_free_consistent(pdev, size,
@@ -566,8 +716,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
566 xhci->event_ring = NULL; 716 xhci->event_ring = NULL;
567 xhci_dbg(xhci, "Freed event ring\n"); 717 xhci_dbg(xhci, "Freed event ring\n");
568 718
569 xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]); 719 xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
570 xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
571 if (xhci->cmd_ring) 720 if (xhci->cmd_ring)
572 xhci_ring_free(xhci, xhci->cmd_ring); 721 xhci_ring_free(xhci, xhci->cmd_ring);
573 xhci->cmd_ring = NULL; 722 xhci->cmd_ring = NULL;
@@ -586,8 +735,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
586 xhci->device_pool = NULL; 735 xhci->device_pool = NULL;
587 xhci_dbg(xhci, "Freed device context pool\n"); 736 xhci_dbg(xhci, "Freed device context pool\n");
588 737
589 xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]); 738 xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
590 xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
591 if (xhci->dcbaa) 739 if (xhci->dcbaa)
592 pci_free_consistent(pdev, sizeof(*xhci->dcbaa), 740 pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
593 xhci->dcbaa, xhci->dcbaa->dma); 741 xhci->dcbaa, xhci->dcbaa->dma);
@@ -595,6 +743,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
595 743
596 xhci->page_size = 0; 744 xhci->page_size = 0;
597 xhci->page_shift = 0; 745 xhci->page_shift = 0;
746 scratchpad_free(xhci);
598} 747}
599 748
600int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) 749int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
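The xhci_read_64()/xhci_write_64() helpers replacing the paired 32-bit accesses are not part of this diff. On hosts without native 64-bit MMIO they presumably decompose into two 32-bit accesses, low dword first, which is the ordering rule the deleted comment in xhci.h (below) used to spell out. A standalone sketch, with plain array accesses standing in for readl()/writel():

    #include <stdint.h>

    static inline uint64_t xhci_read_64_sketch(volatile uint32_t *reg)
    {
            uint64_t lo = reg[0];
            uint64_t hi = reg[1];
            return lo | (hi << 32);
    }

    static inline void xhci_write_64_sketch(uint64_t val, volatile uint32_t *reg)
    {
            reg[0] = (uint32_t)val;          /* low dword first */
            reg[1] = (uint32_t)(val >> 32);  /* then the high dword */
    }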
@@ -602,6 +751,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
602 dma_addr_t dma; 751 dma_addr_t dma;
603 struct device *dev = xhci_to_hcd(xhci)->self.controller; 752 struct device *dev = xhci_to_hcd(xhci)->self.controller;
604 unsigned int val, val2; 753 unsigned int val, val2;
754 u64 val_64;
605 struct xhci_segment *seg; 755 struct xhci_segment *seg;
606 u32 page_size; 756 u32 page_size;
607 int i; 757 int i;
@@ -647,8 +797,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
647 xhci->dcbaa->dma = dma; 797 xhci->dcbaa->dma = dma;
648 xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n", 798 xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
649 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); 799 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
650 xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]); 800 xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
651 xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
652 801
653 /* 802 /*
654 * Initialize the ring segment pool. The ring must be a contiguous 803 * Initialize the ring segment pool. The ring must be a contiguous
@@ -658,11 +807,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
658 */ 807 */
659 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, 808 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
660 SEGMENT_SIZE, 64, xhci->page_size); 809 SEGMENT_SIZE, 64, xhci->page_size);
810
661 /* See Table 46 and Note on Figure 55 */ 811 /* See Table 46 and Note on Figure 55 */
662 /* FIXME support 64-byte contexts */
663 xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, 812 xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
664 sizeof(struct xhci_device_control), 813 2112, 64, xhci->page_size);
665 64, xhci->page_size);
666 if (!xhci->segment_pool || !xhci->device_pool) 814 if (!xhci->segment_pool || !xhci->device_pool)
667 goto fail; 815 goto fail;
668 816
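The hard-coded 2112 is the worst case now that 64-byte contexts must fit: an input context holds one input control context, one slot context, and 31 endpoint contexts, i.e. 33 contexts, and 33 * 64 = 2112 bytes.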
@@ -675,14 +823,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
675 (unsigned long long)xhci->cmd_ring->first_seg->dma); 823 (unsigned long long)xhci->cmd_ring->first_seg->dma);
676 824
677 /* Set the address in the Command Ring Control register */ 825 /* Set the address in the Command Ring Control register */
678 val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]); 826 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
679 val = (val & ~CMD_RING_ADDR_MASK) | 827 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
680 (xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) | 828 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
681 xhci->cmd_ring->cycle_state; 829 xhci->cmd_ring->cycle_state;
682 xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val); 830 xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n", (unsigned long long) val_64);
683 xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]); 831 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
684 xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
685 xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
686 xhci_dbg_cmd_ptrs(xhci); 832 xhci_dbg_cmd_ptrs(xhci);
687 833
688 val = xhci_readl(xhci, &xhci->cap_regs->db_off); 834 val = xhci_readl(xhci, &xhci->cap_regs->db_off);
@@ -722,8 +868,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
722 /* set ring base address and size for each segment table entry */ 868 /* set ring base address and size for each segment table entry */
723 for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { 869 for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
724 struct xhci_erst_entry *entry = &xhci->erst.entries[val]; 870 struct xhci_erst_entry *entry = &xhci->erst.entries[val];
725 entry->seg_addr[0] = seg->dma; 871 entry->seg_addr = seg->dma;
726 entry->seg_addr[1] = 0;
727 entry->seg_size = TRBS_PER_SEGMENT; 872 entry->seg_size = TRBS_PER_SEGMENT;
728 entry->rsvd = 0; 873 entry->rsvd = 0;
729 seg = seg->next; 874 seg = seg->next;
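With seg_addr collapsed from a u32 pair into one u64, an ERST entry (spec section 6.5) presumably now reads:

    struct xhci_erst_entry {
            u64 seg_addr;  /* 64-bit ring segment base address */
            u32 seg_size;  /* number of TRBs in this segment */
            u32 rsvd;
    };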
@@ -741,11 +886,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
741 /* set the segment table base address */ 886 /* set the segment table base address */
742 xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n", 887 xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
743 (unsigned long long)xhci->erst.erst_dma_addr); 888 (unsigned long long)xhci->erst.erst_dma_addr);
744 val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]); 889 val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
745 val &= ERST_PTR_MASK; 890 val_64 &= ERST_PTR_MASK;
746 val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK); 891 val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
747 xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]); 892 xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
748 xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
749 893
750 /* Set the event ring dequeue address */ 894 /* Set the event ring dequeue address */
751 xhci_set_hc_event_deq(xhci); 895 xhci_set_hc_event_deq(xhci);
@@ -761,7 +905,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
761 for (i = 0; i < MAX_HC_SLOTS; ++i) 905 for (i = 0; i < MAX_HC_SLOTS; ++i)
762 xhci->devs[i] = 0; 906 xhci->devs[i] = 0;
763 907
908 if (scratchpad_alloc(xhci, flags))
909 goto fail;
910
764 return 0; 911 return 0;
912
765fail: 913fail:
766 xhci_warn(xhci, "Couldn't initialize memory\n"); 914 xhci_warn(xhci, "Couldn't initialize memory\n");
767 xhci_mem_cleanup(xhci); 915 xhci_mem_cleanup(xhci);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 1462709e26c0..592fe7e623f7 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -117,6 +117,7 @@ static const struct hc_driver xhci_pci_hc_driver = {
117 .free_dev = xhci_free_dev, 117 .free_dev = xhci_free_dev,
118 .add_endpoint = xhci_add_endpoint, 118 .add_endpoint = xhci_add_endpoint,
119 .drop_endpoint = xhci_drop_endpoint, 119 .drop_endpoint = xhci_drop_endpoint,
120 .endpoint_reset = xhci_endpoint_reset,
120 .check_bandwidth = xhci_check_bandwidth, 121 .check_bandwidth = xhci_check_bandwidth,
121 .reset_bandwidth = xhci_reset_bandwidth, 122 .reset_bandwidth = xhci_reset_bandwidth,
122 .address_device = xhci_address_device, 123 .address_device = xhci_address_device,
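xhci_endpoint_reset() itself is outside this excerpt; judging from the Reset Endpoint plumbing added to xhci-ring.c below, its core is presumably along these lines (a sketch only, with locking and the Set TR Dequeue Pointer follow-up elided):

    static void endpoint_reset_sketch(struct xhci_hcd *xhci, int slot_id,
                                      unsigned int ep_index)
    {
            /* Queue a Reset Endpoint command for the halted endpoint... */
            xhci_queue_reset_ep(xhci, slot_id, ep_index);
            /* ...and ring the command doorbell so the xHC picks it up. */
            xhci_ring_cmd_db(xhci);
    }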
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 02d81985c454..aa88a067148b 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -135,6 +135,7 @@ static void next_trb(struct xhci_hcd *xhci,
135static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer) 135static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
136{ 136{
137 union xhci_trb *next = ++(ring->dequeue); 137 union xhci_trb *next = ++(ring->dequeue);
138 unsigned long long addr;
138 139
139 ring->deq_updates++; 140 ring->deq_updates++;
140 /* Update the dequeue pointer further if that was a link TRB or we're at 141 /* Update the dequeue pointer further if that was a link TRB or we're at
@@ -152,6 +153,13 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
152 ring->dequeue = ring->deq_seg->trbs; 153 ring->dequeue = ring->deq_seg->trbs;
153 next = ring->dequeue; 154 next = ring->dequeue;
154 } 155 }
156 addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
157 if (ring == xhci->event_ring)
158 xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
159 else if (ring == xhci->cmd_ring)
160 xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
161 else
162 xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr);
155} 163}
156 164
157/* 165/*
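The new debug lines lean on xhci_trb_virt_to_dma(). Its address math, inferred from its uses in this patch, is base-plus-offset within a segment (the real helper also returns 0 for a TRB outside the segment, which queue_set_tr_deq() below relies on):

    #include <stdint.h>
    #include <stddef.h>

    struct trb     { uint32_t field[4]; };               /* 16 bytes */
    struct segment { struct trb *trbs; uint64_t dma; };

    static uint64_t trb_virt_to_dma(struct segment *seg, struct trb *trb)
    {
            /* DMA addr = segment base + TRB offset within the segment. */
            return seg->dma + (uint64_t)(trb - seg->trbs) * sizeof(*trb);
    }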
@@ -171,6 +179,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
171{ 179{
172 u32 chain; 180 u32 chain;
173 union xhci_trb *next; 181 union xhci_trb *next;
182 unsigned long long addr;
174 183
175 chain = ring->enqueue->generic.field[3] & TRB_CHAIN; 184 chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
176 next = ++(ring->enqueue); 185 next = ++(ring->enqueue);
@@ -204,6 +213,13 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
204 ring->enqueue = ring->enq_seg->trbs; 213 ring->enqueue = ring->enq_seg->trbs;
205 next = ring->enqueue; 214 next = ring->enqueue;
206 } 215 }
216 addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
217 if (ring == xhci->event_ring)
218 xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
219 else if (ring == xhci->cmd_ring)
220 xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
221 else
222 xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr);
207} 223}
208 224
209/* 225/*
@@ -237,7 +253,7 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
237 253
238void xhci_set_hc_event_deq(struct xhci_hcd *xhci) 254void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
239{ 255{
240 u32 temp; 256 u64 temp;
241 dma_addr_t deq; 257 dma_addr_t deq;
242 258
243 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, 259 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
@@ -246,13 +262,15 @@ void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
246 xhci_warn(xhci, "WARN something wrong with SW event ring " 262 xhci_warn(xhci, "WARN something wrong with SW event ring "
247 "dequeue ptr.\n"); 263 "dequeue ptr.\n");
248 /* Update HC event ring dequeue pointer */ 264 /* Update HC event ring dequeue pointer */
249 temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]); 265 temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
250 temp &= ERST_PTR_MASK; 266 temp &= ERST_PTR_MASK;
251 if (!in_interrupt()) 267 /* Don't clear the EHB bit (which is RW1C) because
252 xhci_dbg(xhci, "// Write event ring dequeue pointer\n"); 268 * there might be more events to service.
253 xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]); 269 */
254 xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp, 270 temp &= ~ERST_EHB;
255 &xhci->ir_set->erst_dequeue[0]); 271 xhci_dbg(xhci, "// Write event ring dequeue pointer, preserving EHB bit\n");
272 xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
273 &xhci->ir_set->erst_dequeue);
256} 274}
257 275
258/* Ring the host controller doorbell after placing a command on the ring */ 276/* Ring the host controller doorbell after placing a command on the ring */
@@ -279,7 +297,8 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
279 /* Don't ring the doorbell for this endpoint if there are pending 297 /* Don't ring the doorbell for this endpoint if there are pending
280 * cancellations because we don't want to interrupt processing. 298 * cancellations because we don't want to interrupt processing.
281 */ 299 */
282 if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)) { 300 if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)
301 && !(ep_ring->state & EP_HALTED)) {
283 field = xhci_readl(xhci, db_addr) & DB_MASK; 302 field = xhci_readl(xhci, db_addr) & DB_MASK;
284 xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr); 303 xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
285 /* Flush PCI posted writes - FIXME Matthew Wilcox says this 304 /* Flush PCI posted writes - FIXME Matthew Wilcox says this
@@ -316,12 +335,6 @@ static struct xhci_segment *find_trb_seg(
316 return cur_seg; 335 return cur_seg;
317} 336}
318 337
319struct dequeue_state {
320 struct xhci_segment *new_deq_seg;
321 union xhci_trb *new_deq_ptr;
322 int new_cycle_state;
323};
324
325/* 338/*
326 * Move the xHC's endpoint ring dequeue pointer past cur_td. 339 * Move the xHC's endpoint ring dequeue pointer past cur_td.
327 * Record the new state of the xHC's endpoint ring dequeue segment, 340 * Record the new state of the xHC's endpoint ring dequeue segment,
@@ -336,24 +349,30 @@ struct dequeue_state {
336 * - Finally we move the dequeue state one TRB further, toggling the cycle bit 349 * - Finally we move the dequeue state one TRB further, toggling the cycle bit
337 * if we've moved it past a link TRB with the toggle cycle bit set. 350 * if we've moved it past a link TRB with the toggle cycle bit set.
338 */ 351 */
339static void find_new_dequeue_state(struct xhci_hcd *xhci, 352void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
340 unsigned int slot_id, unsigned int ep_index, 353 unsigned int slot_id, unsigned int ep_index,
341 struct xhci_td *cur_td, struct dequeue_state *state) 354 struct xhci_td *cur_td, struct xhci_dequeue_state *state)
342{ 355{
343 struct xhci_virt_device *dev = xhci->devs[slot_id]; 356 struct xhci_virt_device *dev = xhci->devs[slot_id];
344 struct xhci_ring *ep_ring = dev->ep_rings[ep_index]; 357 struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
345 struct xhci_generic_trb *trb; 358 struct xhci_generic_trb *trb;
359 struct xhci_ep_ctx *ep_ctx;
360 dma_addr_t addr;
346 361
347 state->new_cycle_state = 0; 362 state->new_cycle_state = 0;
363 xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
348 state->new_deq_seg = find_trb_seg(cur_td->start_seg, 364 state->new_deq_seg = find_trb_seg(cur_td->start_seg,
349 ep_ring->stopped_trb, 365 ep_ring->stopped_trb,
350 &state->new_cycle_state); 366 &state->new_cycle_state);
351 if (!state->new_deq_seg) 367 if (!state->new_deq_seg)
352 BUG(); 368 BUG();
353 /* Dig out the cycle state saved by the xHC during the stop ep cmd */ 369 /* Dig out the cycle state saved by the xHC during the stop ep cmd */
354 state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0]; 370 xhci_dbg(xhci, "Finding endpoint context\n");
371 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
372 state->new_cycle_state = 0x1 & ep_ctx->deq;
355 373
356 state->new_deq_ptr = cur_td->last_trb; 374 state->new_deq_ptr = cur_td->last_trb;
375 xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
357 state->new_deq_seg = find_trb_seg(state->new_deq_seg, 376 state->new_deq_seg = find_trb_seg(state->new_deq_seg,
358 state->new_deq_ptr, 377 state->new_deq_ptr,
359 &state->new_cycle_state); 378 &state->new_cycle_state);
@@ -367,6 +386,12 @@ static void find_new_dequeue_state(struct xhci_hcd *xhci,
367 next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); 386 next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
368 387
369 /* Don't update the ring cycle state for the producer (us). */ 388 /* Don't update the ring cycle state for the producer (us). */
389 xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
390 state->new_deq_seg);
391 addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
392 xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
393 (unsigned long long) addr);
394 xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n");
370 ep_ring->dequeue = state->new_deq_ptr; 395 ep_ring->dequeue = state->new_deq_ptr;
371 ep_ring->deq_seg = state->new_deq_seg; 396 ep_ring->deq_seg = state->new_deq_seg;
372} 397}
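The 0x1 & ep_ctx->deq above works because of the deq field's layout (spec section 6.2.3): bit 0 is the dequeue cycle state (DCS) the xHC saved when it stopped the endpoint, and bits 63:4 hold the 16-byte-aligned TR dequeue pointer:

    #include <stdint.h>

    static int deq_cycle_state(uint64_t deq)
    {
            return deq & 0x1;      /* DCS, bit 0 */
    }

    static uint64_t deq_pointer(uint64_t deq)
    {
            return deq & ~0xfULL;  /* pointer, bits 63:4 */
    }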
@@ -416,6 +441,30 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
416 unsigned int ep_index, struct xhci_segment *deq_seg, 441 unsigned int ep_index, struct xhci_segment *deq_seg,
417 union xhci_trb *deq_ptr, u32 cycle_state); 442 union xhci_trb *deq_ptr, u32 cycle_state);
418 443
444void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
445 struct xhci_ring *ep_ring, unsigned int slot_id,
446 unsigned int ep_index, struct xhci_dequeue_state *deq_state)
447{
448 xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
449 "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
450 deq_state->new_deq_seg,
451 (unsigned long long)deq_state->new_deq_seg->dma,
452 deq_state->new_deq_ptr,
453 (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
454 deq_state->new_cycle_state);
455 queue_set_tr_deq(xhci, slot_id, ep_index,
456 deq_state->new_deq_seg,
457 deq_state->new_deq_ptr,
458 (u32) deq_state->new_cycle_state);
459 /* Stop the TD queueing code from ringing the doorbell until
460 * this command completes. The HC won't set the dequeue pointer
461 * if the ring is running, and ringing the doorbell starts the
462 * ring running.
463 */
464 ep_ring->state |= SET_DEQ_PENDING;
465 xhci_ring_cmd_db(xhci);
466}
467
419/* 468/*
420 * When we get a command completion for a Stop Endpoint Command, we need to 469 * When we get a command completion for a Stop Endpoint Command, we need to
421 * unlink any cancelled TDs from the ring. There are two ways to do that: 470 * unlink any cancelled TDs from the ring. There are two ways to do that:
@@ -436,7 +485,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
436 struct xhci_td *cur_td = 0; 485 struct xhci_td *cur_td = 0;
437 struct xhci_td *last_unlinked_td; 486 struct xhci_td *last_unlinked_td;
438 487
439 struct dequeue_state deq_state; 488 struct xhci_dequeue_state deq_state;
440#ifdef CONFIG_USB_HCD_STAT 489#ifdef CONFIG_USB_HCD_STAT
441 ktime_t stop_time = ktime_get(); 490 ktime_t stop_time = ktime_get();
442#endif 491#endif
@@ -464,7 +513,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
464 * move the xHC endpoint ring dequeue pointer past this TD. 513 * move the xHC endpoint ring dequeue pointer past this TD.
465 */ 514 */
466 if (cur_td == ep_ring->stopped_td) 515 if (cur_td == ep_ring->stopped_td)
467 find_new_dequeue_state(xhci, slot_id, ep_index, cur_td, 516 xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
468 &deq_state); 517 &deq_state);
469 else 518 else
470 td_to_noop(xhci, ep_ring, cur_td); 519 td_to_noop(xhci, ep_ring, cur_td);
@@ -480,24 +529,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
480 529
481 /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ 530 /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
482 if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { 531 if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
483 xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), " 532 xhci_queue_new_dequeue_state(xhci, ep_ring,
484 "new deq ptr = %p (0x%llx dma), new cycle = %u\n", 533 slot_id, ep_index, &deq_state);
485 deq_state.new_deq_seg,
486 (unsigned long long)deq_state.new_deq_seg->dma,
487 deq_state.new_deq_ptr,
488 (unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
489 deq_state.new_cycle_state);
490 queue_set_tr_deq(xhci, slot_id, ep_index,
491 deq_state.new_deq_seg,
492 deq_state.new_deq_ptr,
493 (u32) deq_state.new_cycle_state);
494 /* Stop the TD queueing code from ringing the doorbell until
495 * this command completes. The HC won't set the dequeue pointer
496 * if the ring is running, and ringing the doorbell starts the
497 * ring running.
498 */
499 ep_ring->state |= SET_DEQ_PENDING;
500 xhci_ring_cmd_db(xhci);
501 } else { 534 } else {
502 /* Otherwise just ring the doorbell to restart the ring */ 535 /* Otherwise just ring the doorbell to restart the ring */
503 ring_ep_doorbell(xhci, slot_id, ep_index); 536 ring_ep_doorbell(xhci, slot_id, ep_index);
@@ -551,11 +584,15 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
551 unsigned int ep_index; 584 unsigned int ep_index;
552 struct xhci_ring *ep_ring; 585 struct xhci_ring *ep_ring;
553 struct xhci_virt_device *dev; 586 struct xhci_virt_device *dev;
587 struct xhci_ep_ctx *ep_ctx;
588 struct xhci_slot_ctx *slot_ctx;
554 589
555 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); 590 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
556 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); 591 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
557 dev = xhci->devs[slot_id]; 592 dev = xhci->devs[slot_id];
558 ep_ring = dev->ep_rings[ep_index]; 593 ep_ring = dev->ep_rings[ep_index];
594 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
595 slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
559 596
560 if (GET_COMP_CODE(event->status) != COMP_SUCCESS) { 597 if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
561 unsigned int ep_state; 598 unsigned int ep_state;
@@ -569,9 +606,9 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
569 case COMP_CTX_STATE: 606 case COMP_CTX_STATE:
570 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due " 607 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
571 "to incorrect slot or ep state.\n"); 608 "to incorrect slot or ep state.\n");
572 ep_state = dev->out_ctx->ep[ep_index].ep_info; 609 ep_state = ep_ctx->ep_info;
573 ep_state &= EP_STATE_MASK; 610 ep_state &= EP_STATE_MASK;
574 slot_state = dev->out_ctx->slot.dev_state; 611 slot_state = slot_ctx->dev_state;
575 slot_state = GET_SLOT_STATE(slot_state); 612 slot_state = GET_SLOT_STATE(slot_state);
576 xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", 613 xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
577 slot_state, ep_state); 614 slot_state, ep_state);
@@ -593,16 +630,33 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
593 * cancelling URBs, which might not be an error... 630 * cancelling URBs, which might not be an error...
594 */ 631 */
595 } else { 632 } else {
596 xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, " 633 xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
597 "deq[1] = 0x%x.\n", 634 ep_ctx->deq);
598 dev->out_ctx->ep[ep_index].deq[0],
599 dev->out_ctx->ep[ep_index].deq[1]);
600 } 635 }
601 636
602 ep_ring->state &= ~SET_DEQ_PENDING; 637 ep_ring->state &= ~SET_DEQ_PENDING;
603 ring_ep_doorbell(xhci, slot_id, ep_index); 638 ring_ep_doorbell(xhci, slot_id, ep_index);
604} 639}
605 640
641static void handle_reset_ep_completion(struct xhci_hcd *xhci,
642 struct xhci_event_cmd *event,
643 union xhci_trb *trb)
644{
645 int slot_id;
646 unsigned int ep_index;
647
648 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
649 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
650 /* This command will only fail if the endpoint wasn't halted,
651 * but we don't care.
652 */
653 xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
654 (unsigned int) GET_COMP_CODE(event->status));
655
656 /* Clear our internal halted state and restart the ring */
657 xhci->devs[slot_id]->ep_rings[ep_index]->state &= ~EP_HALTED;
658 ring_ep_doorbell(xhci, slot_id, ep_index);
659}
606 660
607static void handle_cmd_completion(struct xhci_hcd *xhci, 661static void handle_cmd_completion(struct xhci_hcd *xhci,
608 struct xhci_event_cmd *event) 662 struct xhci_event_cmd *event)
@@ -611,7 +665,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
611 u64 cmd_dma; 665 u64 cmd_dma;
612 dma_addr_t cmd_dequeue_dma; 666 dma_addr_t cmd_dequeue_dma;
613 667
614 cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0]; 668 cmd_dma = event->cmd_trb;
615 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, 669 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
616 xhci->cmd_ring->dequeue); 670 xhci->cmd_ring->dequeue);
617 /* Is the command ring deq ptr out of sync with the deq seg ptr? */ 671 /* Is the command ring deq ptr out of sync with the deq seg ptr? */
@@ -653,6 +707,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
653 case TRB_TYPE(TRB_CMD_NOOP): 707 case TRB_TYPE(TRB_CMD_NOOP):
654 ++xhci->noops_handled; 708 ++xhci->noops_handled;
655 break; 709 break;
710 case TRB_TYPE(TRB_RESET_EP):
711 handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
712 break;
656 default: 713 default:
657 /* Skip over unknown commands on the event ring */ 714 /* Skip over unknown commands on the event ring */
658 xhci->error_bitmask |= 1 << 6; 715 xhci->error_bitmask |= 1 << 6;
@@ -756,7 +813,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
756 union xhci_trb *event_trb; 813 union xhci_trb *event_trb;
757 struct urb *urb = 0; 814 struct urb *urb = 0;
758 int status = -EINPROGRESS; 815 int status = -EINPROGRESS;
816 struct xhci_ep_ctx *ep_ctx;
759 817
818 xhci_dbg(xhci, "In %s\n", __func__);
760 xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)]; 819 xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
761 if (!xdev) { 820 if (!xdev) {
762 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); 821 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
@@ -765,17 +824,17 @@ static int handle_tx_event(struct xhci_hcd *xhci,
765 824
766 /* Endpoint ID is 1 based, our index is zero based */ 825 /* Endpoint ID is 1 based, our index is zero based */
767 ep_index = TRB_TO_EP_ID(event->flags) - 1; 826 ep_index = TRB_TO_EP_ID(event->flags) - 1;
827 xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
768 ep_ring = xdev->ep_rings[ep_index]; 828 ep_ring = xdev->ep_rings[ep_index];
769 if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { 829 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
830 if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
770 xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n"); 831 xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
771 return -ENODEV; 832 return -ENODEV;
772 } 833 }
773 834
774 event_dma = event->buffer[0]; 835 event_dma = event->buffer;
775 if (event->buffer[1] != 0)
776 xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n");
777
778 /* This TRB should be in the TD at the head of this ring's TD list */ 836 /* This TRB should be in the TD at the head of this ring's TD list */
837 xhci_dbg(xhci, "%s - checking for list empty\n", __func__);
779 if (list_empty(&ep_ring->td_list)) { 838 if (list_empty(&ep_ring->td_list)) {
780 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", 839 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
781 TRB_TO_SLOT_ID(event->flags), ep_index); 840 TRB_TO_SLOT_ID(event->flags), ep_index);
@@ -785,11 +844,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
785 urb = NULL; 844 urb = NULL;
786 goto cleanup; 845 goto cleanup;
787 } 846 }
847 xhci_dbg(xhci, "%s - getting list entry\n", __func__);
788 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); 848 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
789 849
790 /* Is this a TRB in the currently executing TD? */ 850 /* Is this a TRB in the currently executing TD? */
851 xhci_dbg(xhci, "%s - looking for TD\n", __func__);
791 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, 852 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
792 td->last_trb, event_dma); 853 td->last_trb, event_dma);
854 xhci_dbg(xhci, "%s - found event_seg = %p\n", __func__, event_seg);
793 if (!event_seg) { 855 if (!event_seg) {
794 /* HC is busted, give up! */ 856 /* HC is busted, give up! */
795 xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n"); 857 xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
@@ -798,10 +860,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
798 event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)]; 860 event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
799 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", 861 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
800 (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10); 862 (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
801 xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n", 863 xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n",
802 (unsigned int) event->buffer[0]); 864 lower_32_bits(event->buffer));
803 xhci_dbg(xhci, "Offset 0x04 (buffer[0]) = 0x%x\n", 865 xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n",
804 (unsigned int) event->buffer[1]); 866 upper_32_bits(event->buffer));
805 xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n", 867 xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
806 (unsigned int) event->transfer_len); 868 (unsigned int) event->transfer_len);
807 xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n", 869 xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
@@ -823,6 +885,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
823 break; 885 break;
824 case COMP_STALL: 886 case COMP_STALL:
825 xhci_warn(xhci, "WARN: Stalled endpoint\n"); 887 xhci_warn(xhci, "WARN: Stalled endpoint\n");
888 ep_ring->state |= EP_HALTED;
826 status = -EPIPE; 889 status = -EPIPE;
827 break; 890 break;
828 case COMP_TRB_ERR: 891 case COMP_TRB_ERR:
@@ -833,6 +896,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
833 xhci_warn(xhci, "WARN: transfer error on endpoint\n"); 896 xhci_warn(xhci, "WARN: transfer error on endpoint\n");
834 status = -EPROTO; 897 status = -EPROTO;
835 break; 898 break;
899 case COMP_BABBLE:
900 xhci_warn(xhci, "WARN: babble error on endpoint\n");
901 status = -EOVERFLOW;
902 break;
836 case COMP_DB_ERR: 903 case COMP_DB_ERR:
837 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n"); 904 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
838 status = -ENOSR; 905 status = -ENOSR;
@@ -874,15 +941,26 @@ static int handle_tx_event(struct xhci_hcd *xhci,
874 if (event_trb != ep_ring->dequeue) { 941 if (event_trb != ep_ring->dequeue) {
875 /* The event was for the status stage */ 942 /* The event was for the status stage */
876 if (event_trb == td->last_trb) { 943 if (event_trb == td->last_trb) {
877 td->urb->actual_length = 944 if (td->urb->actual_length != 0) {
878 td->urb->transfer_buffer_length; 945 /* Don't overwrite a previously set error code */
946 if (status == -EINPROGRESS || status == 0)
947 /* Did we already see a short data stage? */
948 status = -EREMOTEIO;
949 } else {
950 td->urb->actual_length =
951 td->urb->transfer_buffer_length;
952 }
879 } else { 953 } else {
880 /* Maybe the event was for the data stage? */ 954 /* Maybe the event was for the data stage? */
881 if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) 955 if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) {
882 /* We didn't stop on a link TRB in the middle */ 956 /* We didn't stop on a link TRB in the middle */
883 td->urb->actual_length = 957 td->urb->actual_length =
884 td->urb->transfer_buffer_length - 958 td->urb->transfer_buffer_length -
885 TRB_LEN(event->transfer_len); 959 TRB_LEN(event->transfer_len);
960 xhci_dbg(xhci, "Waiting for status stage event\n");
961 urb = NULL;
962 goto cleanup;
963 }
886 } 964 }
887 } 965 }
888 } else { 966 } else {
@@ -929,16 +1007,20 @@ static int handle_tx_event(struct xhci_hcd *xhci,
929 TRB_LEN(event->transfer_len)); 1007 TRB_LEN(event->transfer_len));
930 td->urb->actual_length = 0; 1008 td->urb->actual_length = 0;
931 } 1009 }
932 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1010 /* Don't overwrite a previously set error code */
933 status = -EREMOTEIO; 1011 if (status == -EINPROGRESS) {
934 else 1012 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
935 status = 0; 1013 status = -EREMOTEIO;
1014 else
1015 status = 0;
1016 }
936 } else { 1017 } else {
937 td->urb->actual_length = td->urb->transfer_buffer_length; 1018 td->urb->actual_length = td->urb->transfer_buffer_length;
938 /* Ignore a short packet completion if the 1019 /* Ignore a short packet completion if the
939 * untransferred length was zero. 1020 * untransferred length was zero.
940 */ 1021 */
941 status = 0; 1022 if (status == -EREMOTEIO)
1023 status = 0;
942 } 1024 }
943 } else { 1025 } else {
944 /* Slow path - walk the list, starting from the dequeue 1026 /* Slow path - walk the list, starting from the dequeue
@@ -965,19 +1047,30 @@ static int handle_tx_event(struct xhci_hcd *xhci,
965 TRB_LEN(event->transfer_len); 1047 TRB_LEN(event->transfer_len);
966 } 1048 }
967 } 1049 }
968 /* The Endpoint Stop Command completion will take care of
969 * any stopped TDs. A stopped TD may be restarted, so don't update the
970 * ring dequeue pointer or take this TD off any lists yet.
971 */
972 if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL || 1050 if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL ||
973 GET_COMP_CODE(event->transfer_len) == COMP_STOP) { 1051 GET_COMP_CODE(event->transfer_len) == COMP_STOP) {
1052 /* The Endpoint Stop Command completion will take care of any
1053 * stopped TDs. A stopped TD may be restarted, so don't update
1054 * the ring dequeue pointer or take this TD off any lists yet.
1055 */
974 ep_ring->stopped_td = td; 1056 ep_ring->stopped_td = td;
975 ep_ring->stopped_trb = event_trb; 1057 ep_ring->stopped_trb = event_trb;
976 } else { 1058 } else {
977 /* Update ring dequeue pointer */ 1059 if (GET_COMP_CODE(event->transfer_len) == COMP_STALL) {
978 while (ep_ring->dequeue != td->last_trb) 1060 /* The transfer is completed from the driver's
1061 * perspective, but we need to issue a set dequeue
1062 * command for this stalled endpoint to move the dequeue
1063 * pointer past the TD. We can't do that here because
1064 * the halt condition must be cleared first.
1065 */
1066 ep_ring->stopped_td = td;
1067 ep_ring->stopped_trb = event_trb;
1068 } else {
1069 /* Update ring dequeue pointer */
1070 while (ep_ring->dequeue != td->last_trb)
1071 inc_deq(xhci, ep_ring, false);
979 inc_deq(xhci, ep_ring, false); 1072 inc_deq(xhci, ep_ring, false);
980 inc_deq(xhci, ep_ring, false); 1073 }
981 1074
982 /* Clean up the endpoint's TD list */ 1075 /* Clean up the endpoint's TD list */
983 urb = td->urb; 1076 urb = td->urb;
@@ -987,7 +1080,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
987 list_del(&td->cancelled_td_list); 1080 list_del(&td->cancelled_td_list);
988 ep_ring->cancels_pending--; 1081 ep_ring->cancels_pending--;
989 } 1082 }
990 kfree(td); 1083 /* Leave the TD around for the reset endpoint function to use */
1084 if (GET_COMP_CODE(event->transfer_len) != COMP_STALL) {
1085 kfree(td);
1086 }
991 urb->hcpriv = NULL; 1087 urb->hcpriv = NULL;
992 } 1088 }
993cleanup: 1089cleanup:
@@ -997,6 +1093,8 @@ cleanup:
997 /* FIXME for multi-TD URBs (which have buffers bigger than 64MB) */ 1093 /* FIXME for multi-TD URBs (which have buffers bigger than 64MB) */
998 if (urb) { 1094 if (urb) {
999 usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb); 1095 usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
1096 xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n",
1097 urb, td->urb->actual_length, status);
1000 spin_unlock(&xhci->lock); 1098 spin_unlock(&xhci->lock);
1001 usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status); 1099 usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
1002 spin_lock(&xhci->lock); 1100 spin_lock(&xhci->lock);
@@ -1014,6 +1112,7 @@ void xhci_handle_event(struct xhci_hcd *xhci)
1014 int update_ptrs = 1; 1112 int update_ptrs = 1;
1015 int ret; 1113 int ret;
1016 1114
1115 xhci_dbg(xhci, "In %s\n", __func__);
1017 if (!xhci->event_ring || !xhci->event_ring->dequeue) { 1116 if (!xhci->event_ring || !xhci->event_ring->dequeue) {
1018 xhci->error_bitmask |= 1 << 1; 1117 xhci->error_bitmask |= 1 << 1;
1019 return; 1118 return;
@@ -1026,18 +1125,25 @@ void xhci_handle_event(struct xhci_hcd *xhci)
1026 xhci->error_bitmask |= 1 << 2; 1125 xhci->error_bitmask |= 1 << 2;
1027 return; 1126 return;
1028 } 1127 }
1128 xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
1029 1129
1030 /* FIXME: Handle more event types. */ 1130 /* FIXME: Handle more event types. */
1031 switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) { 1131 switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
1032 case TRB_TYPE(TRB_COMPLETION): 1132 case TRB_TYPE(TRB_COMPLETION):
1133 xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
1033 handle_cmd_completion(xhci, &event->event_cmd); 1134 handle_cmd_completion(xhci, &event->event_cmd);
1135 xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
1034 break; 1136 break;
1035 case TRB_TYPE(TRB_PORT_STATUS): 1137 case TRB_TYPE(TRB_PORT_STATUS):
1138 xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
1036 handle_port_status(xhci, event); 1139 handle_port_status(xhci, event);
1140 xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
1037 update_ptrs = 0; 1141 update_ptrs = 0;
1038 break; 1142 break;
1039 case TRB_TYPE(TRB_TRANSFER): 1143 case TRB_TYPE(TRB_TRANSFER):
1144 xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
1040 ret = handle_tx_event(xhci, &event->trans_event); 1145 ret = handle_tx_event(xhci, &event->trans_event);
1146 xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
1041 if (ret < 0) 1147 if (ret < 0)
1042 xhci->error_bitmask |= 1 << 9; 1148 xhci->error_bitmask |= 1 << 9;
1043 else 1149 else
@@ -1093,13 +1199,13 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
1093 */ 1199 */
1094 xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); 1200 xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
1095 return -ENOENT; 1201 return -ENOENT;
1096 case EP_STATE_HALTED:
1097 case EP_STATE_ERROR: 1202 case EP_STATE_ERROR:
1098 xhci_warn(xhci, "WARN waiting for halt or error on ep " 1203 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
1099 "to be cleared\n");
1100 /* FIXME event handling code for error needs to clear it */ 1204 /* FIXME event handling code for error needs to clear it */
1101 /* XXX not sure if this should be -ENOENT or not */ 1205 /* XXX not sure if this should be -ENOENT or not */
1102 return -EINVAL; 1206 return -EINVAL;
1207 case EP_STATE_HALTED:
1208 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
1103 case EP_STATE_STOPPED: 1209 case EP_STATE_STOPPED:
1104 case EP_STATE_RUNNING: 1210 case EP_STATE_RUNNING:
1105 break; 1211 break;
@@ -1128,9 +1234,9 @@ static int prepare_transfer(struct xhci_hcd *xhci,
1128 gfp_t mem_flags) 1234 gfp_t mem_flags)
1129{ 1235{
1130 int ret; 1236 int ret;
1131 1237 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1132 ret = prepare_ring(xhci, xdev->ep_rings[ep_index], 1238 ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
1133 xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK, 1239 ep_ctx->ep_info & EP_STATE_MASK,
1134 num_trbs, mem_flags); 1240 num_trbs, mem_flags);
1135 if (ret) 1241 if (ret)
1136 return ret; 1242 return ret;
@@ -1285,6 +1391,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1285 /* Queue the first TRB, even if it's zero-length */ 1391 /* Queue the first TRB, even if it's zero-length */
1286 do { 1392 do {
1287 u32 field = 0; 1393 u32 field = 0;
1394 u32 length_field = 0;
1288 1395
1289 /* Don't change the cycle bit of the first TRB until later */ 1396 /* Don't change the cycle bit of the first TRB until later */
1290 if (first_trb) 1397 if (first_trb)
@@ -1314,10 +1421,13 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1314 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), 1421 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
1315 (unsigned int) addr + trb_buff_len); 1422 (unsigned int) addr + trb_buff_len);
1316 } 1423 }
1424 length_field = TRB_LEN(trb_buff_len) |
1425 TD_REMAINDER(urb->transfer_buffer_length - running_total) |
1426 TRB_INTR_TARGET(0);
1317 queue_trb(xhci, ep_ring, false, 1427 queue_trb(xhci, ep_ring, false,
1318 (u32) addr, 1428 lower_32_bits(addr),
1319 (u32) ((u64) addr >> 32), 1429 upper_32_bits(addr),
1320 TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0), 1430 length_field,
1321 /* We always want to know if the TRB was short, 1431 /* We always want to know if the TRB was short,
1322 * or we won't get an event when it completes. 1432 * or we won't get an event when it completes.
1323 * (Unless we use event data TRBs, which are a 1433 * (Unless we use event data TRBs, which are a
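The length_field introduced above adds a TD size hint to the old TRB_LEN | TRB_INTR_TARGET form. Per the transfer TRB layout (spec section 6.4.1), the packing is presumably: transfer length in bits 16:0, TD size (what TD_REMAINDER derives from the bytes still outstanding) in bits 21:17, and interrupter target in bits 31:22:

    #include <stdint.h>

    static uint32_t make_length_field(uint32_t trb_len, uint32_t td_size,
                                      uint32_t intr_target)
    {
            return  (trb_len     & 0x1ffff)         /* length, bits 16:0 */
                 | ((td_size     & 0x1f)    << 17)  /* TD size, bits 21:17 */
                 | ((intr_target & 0x3ff)   << 22); /* interrupter, bits 31:22 */
    }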
@@ -1365,7 +1475,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1365 struct xhci_generic_trb *start_trb; 1475 struct xhci_generic_trb *start_trb;
1366 bool first_trb; 1476 bool first_trb;
1367 int start_cycle; 1477 int start_cycle;
1368 u32 field; 1478 u32 field, length_field;
1369 1479
1370 int running_total, trb_buff_len, ret; 1480 int running_total, trb_buff_len, ret;
1371 u64 addr; 1481 u64 addr;
@@ -1443,10 +1553,13 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1443 td->last_trb = ep_ring->enqueue; 1553 td->last_trb = ep_ring->enqueue;
1444 field |= TRB_IOC; 1554 field |= TRB_IOC;
1445 } 1555 }
1556 length_field = TRB_LEN(trb_buff_len) |
1557 TD_REMAINDER(urb->transfer_buffer_length - running_total) |
1558 TRB_INTR_TARGET(0);
1446 queue_trb(xhci, ep_ring, false, 1559 queue_trb(xhci, ep_ring, false,
1447 (u32) addr, 1560 lower_32_bits(addr),
1448 (u32) ((u64) addr >> 32), 1561 upper_32_bits(addr),
1449 TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0), 1562 length_field,
1450 /* We always want to know if the TRB was short, 1563 /* We always want to know if the TRB was short,
1451 * or we won't get an event when it completes. 1564 * or we won't get an event when it completes.
1452 * (Unless we use event data TRBs, which are a 1565 * (Unless we use event data TRBs, which are a
@@ -1478,7 +1591,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1478 struct usb_ctrlrequest *setup; 1591 struct usb_ctrlrequest *setup;
1479 struct xhci_generic_trb *start_trb; 1592 struct xhci_generic_trb *start_trb;
1480 int start_cycle; 1593 int start_cycle;
1481 u32 field; 1594 u32 field, length_field;
1482 struct xhci_td *td; 1595 struct xhci_td *td;
1483 1596
1484 ep_ring = xhci->devs[slot_id]->ep_rings[ep_index]; 1597 ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
@@ -1528,13 +1641,16 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1528 1641
1529 /* If there's data, queue data TRBs */ 1642 /* If there's data, queue data TRBs */
1530 field = 0; 1643 field = 0;
1644 length_field = TRB_LEN(urb->transfer_buffer_length) |
1645 TD_REMAINDER(urb->transfer_buffer_length) |
1646 TRB_INTR_TARGET(0);
1531 if (urb->transfer_buffer_length > 0) { 1647 if (urb->transfer_buffer_length > 0) {
1532 if (setup->bRequestType & USB_DIR_IN) 1648 if (setup->bRequestType & USB_DIR_IN)
1533 field |= TRB_DIR_IN; 1649 field |= TRB_DIR_IN;
1534 queue_trb(xhci, ep_ring, false, 1650 queue_trb(xhci, ep_ring, false,
1535 lower_32_bits(urb->transfer_dma), 1651 lower_32_bits(urb->transfer_dma),
1536 upper_32_bits(urb->transfer_dma), 1652 upper_32_bits(urb->transfer_dma),
1537 TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0), 1653 length_field,
1538 /* Event on short tx */ 1654 /* Event on short tx */
1539 field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state); 1655 field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
1540 } 1656 }
@@ -1603,7 +1719,8 @@ int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
1603int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, 1719int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1604 u32 slot_id) 1720 u32 slot_id)
1605{ 1721{
1606 return queue_command(xhci, in_ctx_ptr, 0, 0, 1722 return queue_command(xhci, lower_32_bits(in_ctx_ptr),
1723 upper_32_bits(in_ctx_ptr), 0,
1607 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)); 1724 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
1608} 1725}
1609 1726
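Splitting in_ctx_ptr with lower_32_bits()/upper_32_bits() keeps the high dword of 64-bit DMA addresses intact on the command ring; these helpers live in linux/kernel.h, which is why xhci.h gains that include below. For reference they reduce to:

    #define lower_32_bits(n) ((u32)(n))
    #define upper_32_bits(n) ((u32)(((u64)(n)) >> 32))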
@@ -1611,7 +1728,8 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1611int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, 1728int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1612 u32 slot_id) 1729 u32 slot_id)
1613{ 1730{
1614 return queue_command(xhci, in_ctx_ptr, 0, 0, 1731 return queue_command(xhci, lower_32_bits(in_ctx_ptr),
1732 upper_32_bits(in_ctx_ptr), 0,
1615 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id)); 1733 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
1616} 1734}
1617 1735
@@ -1639,10 +1757,23 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
1639 u32 type = TRB_TYPE(TRB_SET_DEQ); 1757 u32 type = TRB_TYPE(TRB_SET_DEQ);
1640 1758
1641 addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); 1759 addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
1642 if (addr == 0) 1760 if (addr == 0) {
1643 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); 1761 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
1644 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n", 1762 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
1645 deq_seg, deq_ptr); 1763 deq_seg, deq_ptr);
1646 return queue_command(xhci, (u32) addr | cycle_state, 0, 0, 1764 return 0;
1765 }
1766 return queue_command(xhci, lower_32_bits(addr) | cycle_state,
1767 upper_32_bits(addr), 0,
1647 trb_slot_id | trb_ep_index | type); 1768 trb_slot_id | trb_ep_index | type);
1648} 1769}
1770
1771int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
1772 unsigned int ep_index)
1773{
1774 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
1775 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
1776 u32 type = TRB_TYPE(TRB_RESET_EP);
1777
1778 return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type);
1779}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 8936eeb5588b..d31d32206ba3 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -25,6 +25,7 @@
25 25
26#include <linux/usb.h> 26#include <linux/usb.h>
27#include <linux/timer.h> 27#include <linux/timer.h>
28#include <linux/kernel.h>
28 29
29#include "../core/hcd.h" 30#include "../core/hcd.h"
30/* Code sharing between pci-quirks and xhci hcd */ 31/* Code sharing between pci-quirks and xhci hcd */
@@ -42,14 +43,6 @@
42 * xHCI register interface. 43 * xHCI register interface.
43 * This corresponds to the eXtensible Host Controller Interface (xHCI) 44 * This corresponds to the eXtensible Host Controller Interface (xHCI)
44 * Revision 0.95 specification 45 * Revision 0.95 specification
45 *
46 * Registers should always be accessed with double word or quad word accesses.
47 *
48 * Some xHCI implementations may support 64-bit address pointers. Registers
49 * with 64-bit address pointers should be written to with dword accesses by
50 * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
51 * xHCI implementations that do not support 64-bit address pointers will ignore
52 * the high dword, and write order is irrelevant.
53 */ 46 */
54 47
55/** 48/**
@@ -96,6 +89,7 @@ struct xhci_cap_regs {
96#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf) 89#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
97/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */ 90/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
98/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */ 91/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */
92#define HCS_MAX_SCRATCHPAD(p) (((p) >> 27) & 0x1f)
99 93
100/* HCSPARAMS3 - hcs_params3 - bitmasks */ 94/* HCSPARAMS3 - hcs_params3 - bitmasks */
101/* bits 0:7, Max U1 to U0 latency for the roothub ports */ 95/* bits 0:7, Max U1 to U0 latency for the roothub ports */
@@ -166,10 +160,10 @@ struct xhci_op_regs {
 	u32	reserved1;
 	u32	reserved2;
 	u32	dev_notification;
-	u32	cmd_ring[2];
+	u64	cmd_ring;
 	/* rsvd: offset 0x20-2F */
 	u32	reserved3[4];
-	u32	dcbaa_ptr[2];
+	u64	dcbaa_ptr;
 	u32	config_reg;
 	/* rsvd: offset 0x3C-3FF */
 	u32	reserved4[241];
@@ -254,7 +248,7 @@ struct xhci_op_regs {
 #define CMD_RING_RUNNING	(1 << 3)
 /* bits 4:5 reserved and should be preserved */
 /* Command Ring pointer - bit mask for the lower 32 bits. */
-#define CMD_RING_ADDR_MASK	(0xffffffc0)
+#define CMD_RING_RSVD_BITS	(0x3f)
 
 /* CONFIG - Configure Register - config_reg bitmasks */
 /* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
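
Once cmd_ring is a 64-bit register, a 32-bit address mask can no longer describe it, so the constant flips polarity: CMD_RING_RSVD_BITS names the reserved/control low bits instead of the valid address bits. A sketch of one plausible call-site pattern (an assumption; the actual call sites are outside this hunk):

    #include <stdint.h>
    #include <stdio.h>

    #define CMD_RING_RSVD_BITS 0x3fULL

    int main(void)
    {
    	uint64_t ring_dma = 0x123456780ULL; /* invented, 64-byte aligned */
    	uint64_t cycle_state = 1;

    	/* Clear the reserved low bits of the address, then fold in the
    	 * ring cycle state, which lives in those low bits. */
    	uint64_t val = (ring_dma & ~CMD_RING_RSVD_BITS) | cycle_state;
    	printf("cmd_ring = 0x%llx\n", (unsigned long long)val);
    	return 0;
    }
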
@@ -382,8 +376,8 @@ struct xhci_intr_reg {
 	u32	irq_control;
 	u32	erst_size;
 	u32	rsvd;
-	u32	erst_base[2];
-	u32	erst_dequeue[2];
+	u64	erst_base;
+	u64	erst_dequeue;
 };
 
 /* irq_pending bitmasks */
@@ -453,6 +447,27 @@ struct xhci_doorbell_array {
 
 
 /**
+ * struct xhci_container_ctx
+ * @type: Type of context.  Used to calculate offsets to contained contexts.
+ * @size: Size of the context data
+ * @bytes: The raw context data given to HW
+ * @dma: dma address of the bytes
+ *
+ * Represents either a Device or Input context.  Holds a pointer to the raw
+ * memory used for the context (bytes) and dma address of it (dma).
+ */
+struct xhci_container_ctx {
+	unsigned type;
+#define XHCI_CTX_TYPE_DEVICE  0x1
+#define XHCI_CTX_TYPE_INPUT   0x2
+
+	int size;
+
+	u8 *bytes;
+	dma_addr_t dma;
+};
+
+/**
  * struct xhci_slot_ctx
  * @dev_info:	Route string, device speed, hub info, and last valid endpoint
  * @dev_info2:	Max exit latency for device number, root hub port number
@@ -538,7 +553,7 @@ struct xhci_slot_ctx {
 struct xhci_ep_ctx {
 	u32	ep_info;
 	u32	ep_info2;
-	u32	deq[2];
+	u64	deq;
 	u32	tx_info;
 	/* offset 0x14 - 0x1f reserved for HC internal use */
 	u32	reserved[3];
@@ -589,18 +604,16 @@ struct xhci_ep_ctx {
 
 
 /**
- * struct xhci_device_control
- * Input/Output context; see section 6.2.5.
+ * struct xhci_input_control_context
+ * Input control context; see section 6.2.5.
  *
  * @drop_context:	set the bit of the endpoint context you want to disable
  * @add_context:	set the bit of the endpoint context you want to enable
  */
-struct xhci_device_control {
+struct xhci_input_control_ctx {
 	u32	drop_flags;
 	u32	add_flags;
-	u32	rsvd[6];
-	struct xhci_slot_ctx	slot;
-	struct xhci_ep_ctx	ep[31];
+	u32	rsvd2[6];
 };
 
 /* drop context bitmasks */
@@ -608,7 +621,6 @@ struct xhci_device_control {
 /* add context bitmasks */
 #define	ADD_EP(x)	(0x1 << x)
 
-
 struct xhci_virt_device {
 	/*
 	 * Commands to the hardware are passed an "input context" that
@@ -618,11 +630,10 @@ struct xhci_virt_device {
 	 * track of input and output contexts separately because
 	 * these commands might fail and we don't trust the hardware.
 	 */
-	struct xhci_device_control	*out_ctx;
-	dma_addr_t			out_ctx_dma;
+	struct xhci_container_ctx	*out_ctx;
 	/* Used for addressing devices and configuration changes */
-	struct xhci_device_control	*in_ctx;
-	dma_addr_t			in_ctx_dma;
+	struct xhci_container_ctx	*in_ctx;
+
 	/* FIXME when stream support is added */
 	struct xhci_ring		*ep_rings[31];
 	/* Temporary storage in case the configure endpoint command fails and we
@@ -641,7 +652,7 @@ struct xhci_virt_device {
  */
 struct xhci_device_context_array {
 	/* 64-bit device addresses; we only write 32-bit addresses */
-	u32			dev_context_ptrs[2*MAX_HC_SLOTS];
+	u64			dev_context_ptrs[MAX_HC_SLOTS];
 	/* private xHCD pointers */
 	dma_addr_t	dma;
 };
@@ -654,7 +665,7 @@ struct xhci_device_context_array {
 
 struct xhci_stream_ctx {
 	/* 64-bit stream ring address, cycle state, and stream type */
-	u32	stream_ring[2];
+	u64	stream_ring;
 	/* offset 0x14 - 0x1f reserved for HC internal use */
 	u32	reserved[2];
 };
@@ -662,7 +673,7 @@ struct xhci_stream_ctx {
 
 struct xhci_transfer_event {
 	/* 64-bit buffer address, or immediate data */
-	u32	buffer[2];
+	u64	buffer;
 	u32	transfer_len;
 	/* This field is interpreted differently based on the type of TRB */
 	u32	flags;
@@ -744,7 +755,7 @@ struct xhci_transfer_event {
 
 struct xhci_link_trb {
 	/* 64-bit segment pointer*/
-	u32 segment_ptr[2];
+	u64 segment_ptr;
 	u32 intr_target;
 	u32 control;
 };
@@ -755,7 +766,7 @@ struct xhci_link_trb {
 /* Command completion event TRB */
 struct xhci_event_cmd {
 	/* Pointer to command TRB, or the value passed by the event data trb */
-	u32 cmd_trb[2];
+	u64 cmd_trb;
 	u32 status;
 	u32 flags;
 };
@@ -848,8 +859,8 @@ union xhci_trb {
 #define TRB_CONFIG_EP		12
 /* Evaluate Context Command */
 #define TRB_EVAL_CONTEXT	13
-/* Reset Transfer Ring Command */
-#define TRB_RESET_RING		14
+/* Reset Endpoint Command */
+#define TRB_RESET_EP		14
 /* Stop Transfer Ring Command */
 #define TRB_STOP_RING		15
 /* Set Transfer Ring Dequeue Pointer Command */
@@ -929,6 +940,7 @@ struct xhci_ring {
 	unsigned int		cancels_pending;
 	unsigned int		state;
 #define SET_DEQ_PENDING		(1 << 0)
+#define EP_HALTED		(1 << 1)
 	/* The TRB that was last reported in a stopped endpoint ring */
 	union xhci_trb		*stopped_trb;
 	struct xhci_td		*stopped_td;
@@ -940,9 +952,15 @@ struct xhci_ring {
 	u32			cycle_state;
 };
 
+struct xhci_dequeue_state {
+	struct xhci_segment *new_deq_seg;
+	union xhci_trb *new_deq_ptr;
+	int new_cycle_state;
+};
+
 struct xhci_erst_entry {
 	/* 64-bit event ring segment address */
-	u32	seg_addr[2];
+	u64	seg_addr;
 	u32	seg_size;
 	/* Set to zero */
 	u32	rsvd;
@@ -957,6 +975,13 @@ struct xhci_erst {
 	unsigned int		erst_size;
 };
 
+struct xhci_scratchpad {
+	u64 *sp_array;
+	dma_addr_t sp_dma;
+	void **sp_buffers;
+	dma_addr_t *sp_dma_buffers;
+};
+
 /*
  * Each segment table entry is 4*32bits long.  1K seems like an ok size:
  * (1K bytes * 8 bits/byte) / (4*32 bits) = 64 segment entries in the table,
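
The scratchpad struct keeps parallel views of the same buffers: sp_array is the DMA-address table the controller reads, while sp_buffers and sp_dma_buffers record the CPU and DMA addresses the driver needs in order to free them later. A userspace model of that bookkeeping (malloc()-family calls stand in for dma_alloc_coherent(), the fake DMA address is just the pointer value, and the setup logic is an illustration, not the driver's code):

    #include <stdint.h>
    #include <stdlib.h>

    struct scratchpad_model {
    	uint64_t *sp_array;	/* table the controller would read */
    	void **sp_buffers;	/* CPU addresses, kept for freeing */
    };

    static int scratchpad_alloc(struct scratchpad_model *sp,
    			    unsigned int num_sp, size_t page_size)
    {
    	sp->sp_array = calloc(num_sp, sizeof(*sp->sp_array));
    	sp->sp_buffers = calloc(num_sp, sizeof(*sp->sp_buffers));
    	if (!sp->sp_array || !sp->sp_buffers)
    		return -1;
    	for (unsigned int i = 0; i < num_sp; i++) {
    		void *buf = aligned_alloc(page_size, page_size);
    		if (!buf)
    			return -1;
    		sp->sp_buffers[i] = buf;		/* driver's record */
    		sp->sp_array[i] = (uintptr_t)buf;	/* fake DMA address */
    	}
    	return 0;
    }

    int main(void)
    {
    	struct scratchpad_model sp;
    	return scratchpad_alloc(&sp, 5, 4096) ? 1 : 0;
    }
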
@@ -1011,6 +1036,9 @@ struct xhci_hcd {
 	struct xhci_ring	*cmd_ring;
 	struct xhci_ring	*event_ring;
 	struct xhci_erst	erst;
+	/* Scratchpad */
+	struct xhci_scratchpad  *scratchpad;
+
 	/* slot enabling and address device helpers */
 	struct completion	addr_dev;
 	int slot_id;
@@ -1071,13 +1099,43 @@ static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
 static inline void xhci_writel(struct xhci_hcd *xhci,
 		const unsigned int val, __u32 __iomem *regs)
 {
-	if (!in_interrupt())
-		xhci_dbg(xhci,
-			 "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
-			 regs, val);
+	xhci_dbg(xhci,
+			"`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
+			regs, val);
 	writel(val, regs);
 }
 
+/*
+ * Registers should always be accessed with double word or quad word accesses.
+ *
+ * Some xHCI implementations may support 64-bit address pointers.  Registers
+ * with 64-bit address pointers should be written to with dword accesses by
+ * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
+ * xHCI implementations that do not support 64-bit address pointers will ignore
+ * the high dword, and write order is irrelevant.
+ */
+static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
+		__u64 __iomem *regs)
+{
+	__u32 __iomem *ptr = (__u32 __iomem *) regs;
+	u64 val_lo = readl(ptr);
+	u64 val_hi = readl(ptr + 1);
+	return val_lo + (val_hi << 32);
+}
+static inline void xhci_write_64(struct xhci_hcd *xhci,
+		const u64 val, __u64 __iomem *regs)
+{
+	__u32 __iomem *ptr = (__u32 __iomem *) regs;
+	u32 val_lo = lower_32_bits(val);
+	u32 val_hi = upper_32_bits(val);
+
+	xhci_dbg(xhci,
+			"`MEM_WRITE_DWORD(3'b000, 64'h%p, 64'h%0lx, 4'hf);\n",
+			regs, (long unsigned int) val);
+	writel(val_lo, ptr);
+	writel(val_hi, ptr + 1);
+}
+
 /* xHCI debugging */
 void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
 void xhci_print_registers(struct xhci_hcd *xhci);
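
xhci_read_64() rebuilds the value as val_lo + (val_hi << 32), and xhci_write_64() issues the low dword first, as the relocated comment requires. A userspace model of just that arithmetic and ordering, with plain array accesses standing in for readl()/writel():

    #include <assert.h>
    #include <stdint.h>

    static uint64_t model_read_64(const uint32_t *ptr)
    {
    	uint64_t val_lo = ptr[0];
    	uint64_t val_hi = ptr[1];
    	return val_lo + (val_hi << 32);
    }

    static void model_write_64(uint64_t val, uint32_t *ptr)
    {
    	ptr[0] = (uint32_t)val;		/* low dword first ... */
    	ptr[1] = (uint32_t)(val >> 32);	/* ... then the high dword */
    }

    int main(void)
    {
    	uint32_t reg[2] = { 0, 0 }; /* stands in for a 64-bit MMIO register */

    	model_write_64(0xdeadbeefcafef00dULL, reg);
    	assert(model_read_64(reg) == 0xdeadbeefcafef00dULL);
    	return 0;
    }
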
@@ -1090,7 +1148,7 @@ void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
 void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
 void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
 void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
-void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep);
+void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
 
 /* xHCI memory management */
 void xhci_mem_cleanup(struct xhci_hcd *xhci);
@@ -1128,6 +1186,7 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
 int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
 int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
 int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
+void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
 
@@ -1148,10 +1207,23 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
 		int slot_id, unsigned int ep_index);
 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 		u32 slot_id);
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
+		unsigned int ep_index);
+void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+		unsigned int slot_id, unsigned int ep_index,
+		struct xhci_td *cur_td, struct xhci_dequeue_state *state);
+void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+		struct xhci_ring *ep_ring, unsigned int slot_id,
+		unsigned int ep_index, struct xhci_dequeue_state *deq_state);
 
 /* xHCI roothub code */
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
 		char *buf, u16 wLength);
 int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
 
+/* xHCI contexts */
+struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
+struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
+struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);
+
 #endif /* __LINUX_XHCI_HCD_H */
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index a68d91a11bee..abe3aa67ed00 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -220,7 +220,7 @@ config USB_IOWARRIOR
 
 config USB_TEST
 	tristate "USB testing driver"
-	depends on USB && USB_DEVICEFS
+	depends on USB
 	help
 	  This driver is for testing host controller software.  It is used
 	  with specialized device firmware for regression and stress testing,
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 1191928902f4..1d26beddf2ca 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1326,7 +1326,6 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
 	int i;
 
 	/* log core options (read using indexed model) */
-	musb_ep_select(mbase, 0);
 	reg = musb_read_configdata(mbase);
 
 	strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
@@ -1990,7 +1989,7 @@ bad_config:
 	if (status < 0)
 		goto fail2;
 
-#ifdef CONFIG_USB_OTG
+#ifdef CONFIG_USB_MUSB_OTG
 	setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
 #endif
 
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 40ed50ecedff..7a6778675ad3 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -407,7 +407,7 @@ stall:
 				csr |= MUSB_RXCSR_P_SENDSTALL
 					| MUSB_RXCSR_FLUSHFIFO
 					| MUSB_RXCSR_CLRDATATOG
-					| MUSB_TXCSR_P_WZC_BITS;
+					| MUSB_RXCSR_P_WZC_BITS;
 				musb_writew(regs, MUSB_RXCSR,
 						csr);
 			}
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index de3b2f18db44..fbfd3fd9ce1f 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -323,6 +323,7 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off)
 
 static inline u8 musb_read_configdata(void __iomem *mbase)
 {
+	musb_writeb(mbase, MUSB_INDEX, 0);
 	return musb_readb(mbase, 0x10 + MUSB_CONFIGDATA);
 }
 
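
musb_read_configdata() now selects endpoint index 0 itself before touching the shared data window, instead of trusting the caller to have done it (note the musb_ep_select() removed from musb_core.c above). A userspace model of why an indexed access breaks without that write (register layout invented for the example):

    #include <assert.h>
    #include <stdint.h>

    /* Invented model: 'index' selects which bank the data window shows. */
    struct indexed_regs {
    	uint8_t index;
    	uint8_t bank[16];
    };

    static uint8_t read_configdata(struct indexed_regs *regs)
    {
    	regs->index = 0;	/* select bank 0 first, as the fix does */
    	return regs->bank[regs->index];
    }

    int main(void)
    {
    	struct indexed_regs regs = { .index = 5 };

    	regs.bank[0] = 0x55;	/* the CONFIGDATA value we want */
    	regs.bank[5] = 0xaa;	/* stale bank left by an earlier caller */
    	assert(read_configdata(&regs) == 0x55);
    	return 0;
    }
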
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index e9a40b820fd4..985cbcf48bda 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -80,6 +80,7 @@ static struct usb_device_id id_table [] = {
 	{ USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */
 	{ USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
 	{ USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
+	{ USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
 	{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
 	{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
 	{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
@@ -96,7 +97,9 @@ static struct usb_device_id id_table [] = {
 	{ USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */
 	{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
 	{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
+	{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
 	{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
+	{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
 	{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 60c64cc5be2a..b574878c78b2 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -698,6 +698,7 @@ static struct usb_device_id id_table_combined [] = {
 	{ USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
+	{ USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) },
 	{ },					/* Optional parameter entry */
 	{ }					/* Terminating entry */
 };
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index c9fbd7415092..24dbd99e87d7 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -947,6 +947,13 @@
 #define FTDI_TURTELIZER_PID	0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */
 
 /*
+ * GN Otometrics (http://www.otometrics.com)
+ * Submitted by Ville Sundberg.
+ */
+#define GN_OTOMETRICS_VID	0x0c33	/* Vendor ID */
+#define AURICAL_USB_PID		0x0010	/* Aurical USB Audiometer */
+
+/*
  * BmRequestType:  1100 0000b
  * bRequest:       FTDI_E2_READ
  * wValue:         0
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index c31940a307f8..270009afdf77 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -124,10 +124,13 @@
 #define BANDB_DEVICE_ID_USOPTL4_4	0xAC44
 #define BANDB_DEVICE_ID_USOPTL4_2	0xAC42
 
-/* This driver also supports the ATEN UC2324 device since it is mos7840 based
- *  - if I knew the device id it would also support the ATEN UC2322 */
+/* This driver also supports
+ * ATEN UC2324 device using Moschip MCS7840
+ * ATEN UC2322 device using Moschip MCS7820
+ */
 #define USB_VENDOR_ID_ATENINTL		0x0557
 #define ATENINTL_DEVICE_ID_UC2324	0x2011
+#define ATENINTL_DEVICE_ID_UC2322	0x7820
 
 /* Interrupt Routine Defines */
 
@@ -177,6 +180,7 @@ static struct usb_device_id moschip_port_id_table[] = {
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
 	{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
+	{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
 	{}			/* terminating entry */
 };
 
@@ -186,6 +190,7 @@ static __devinitdata struct usb_device_id moschip_id_table_combined[] = {
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
 	{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
+	{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
 	{}			/* terminating entry */
 };
 
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 98262dd552bb..c784ddbe7b61 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -66,8 +66,10 @@ static int option_tiocmget(struct tty_struct *tty, struct file *file);
 static int option_tiocmset(struct tty_struct *tty, struct file *file,
 				unsigned int set, unsigned int clear);
 static int option_send_setup(struct usb_serial_port *port);
+#ifdef CONFIG_PM
 static int option_suspend(struct usb_serial *serial, pm_message_t message);
 static int option_resume(struct usb_serial *serial);
+#endif
 
 /* Vendor and product IDs */
 #define OPTION_VENDOR_ID		0x0AF0
@@ -205,6 +207,7 @@ static int option_resume(struct usb_serial *serial);
 #define NOVATELWIRELESS_PRODUCT_MC727		0x4100
 #define NOVATELWIRELESS_PRODUCT_MC950D		0x4400
 #define NOVATELWIRELESS_PRODUCT_U727		0x5010
+#define NOVATELWIRELESS_PRODUCT_MC727_NEW	0x5100
 #define NOVATELWIRELESS_PRODUCT_MC760		0x6000
 #define NOVATELWIRELESS_PRODUCT_OVMC760		0x6002
 
@@ -259,11 +262,6 @@ static int option_resume(struct usb_serial *serial);
 #define AXESSTEL_VENDOR_ID			0x1726
 #define AXESSTEL_PRODUCT_MV110H			0x1000
 
-#define ONDA_VENDOR_ID				0x19d2
-#define ONDA_PRODUCT_MSA501HS			0x0001
-#define ONDA_PRODUCT_ET502HS			0x0002
-#define ONDA_PRODUCT_MT503HS			0x2000
-
 #define BANDRICH_VENDOR_ID			0x1A8D
 #define BANDRICH_PRODUCT_C100_1			0x1002
 #define BANDRICH_PRODUCT_C100_2			0x1003
@@ -301,6 +299,7 @@ static int option_resume(struct usb_serial *serial);
 #define ZTE_PRODUCT_MF628			0x0015
 #define ZTE_PRODUCT_MF626			0x0031
 #define ZTE_PRODUCT_CDMA_TECH			0xfffe
+#define ZTE_PRODUCT_AC8710			0xfff1
 
 #define BENQ_VENDOR_ID				0x04a5
 #define BENQ_PRODUCT_H10			0x4068
@@ -322,6 +321,11 @@ static int option_resume(struct usb_serial *serial);
 #define ALINK_VENDOR_ID				0x1e0e
 #define ALINK_PRODUCT_3GU			0x9200
 
+/* ALCATEL PRODUCTS */
+#define ALCATEL_VENDOR_ID			0x1bbb
+#define ALCATEL_PRODUCT_X060S			0x0000
+
+
 static struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -438,6 +442,7 @@ static struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */
@@ -474,42 +479,6 @@ static struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
 	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
 	{ USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MSA501HS) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0003) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0004) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0005) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0006) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0007) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0008) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0009) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x000a) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x000b) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x000c) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x000d) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x000e) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x000f) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0010) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0011) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0012) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0013) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0014) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0015) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0016) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0017) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0018) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0019) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0020) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0021) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0022) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0023) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0024) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0025) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0026) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0027) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0028) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0029) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MT503HS) },
 	{ USB_DEVICE(YISO_VENDOR_ID, YISO_PRODUCT_U893) },
 	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) },
 	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) },
@@ -534,10 +503,75 @@ static struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
 	{ USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
-	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622) },
-	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626) },
-	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
-	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0006, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0007, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0008, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0009, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000a, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000b, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000c, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000d, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000e, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000f, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0082, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
 	{ USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
 	{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) },
@@ -547,6 +581,7 @@ static struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
 	{ USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) },
 	{ } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
@@ -555,8 +590,10 @@ static struct usb_driver option_driver = {
 	.name       = "option",
 	.probe      = usb_serial_probe,
 	.disconnect = usb_serial_disconnect,
+#ifdef CONFIG_PM
 	.suspend    = usb_serial_suspend,
 	.resume     = usb_serial_resume,
+#endif
 	.id_table   = option_ids,
 	.no_dynamic_id = 	1,
 };
@@ -588,8 +625,10 @@ static struct usb_serial_driver option_1port_device = {
 	.disconnect        = option_disconnect,
 	.release           = option_release,
 	.read_int_callback = option_instat_callback,
+#ifdef CONFIG_PM
 	.suspend           = option_suspend,
 	.resume            = option_resume,
+#endif
 };
 
 static int debug;
@@ -831,7 +870,6 @@ static void option_instat_callback(struct urb *urb)
 	int status = urb->status;
 	struct usb_serial_port *port = urb->context;
 	struct option_port_private *portdata = usb_get_serial_port_data(port);
-	struct usb_serial *serial = port->serial;
 
 	dbg("%s", __func__);
 	dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata);
@@ -927,7 +965,6 @@ static int option_open(struct tty_struct *tty,
 			struct usb_serial_port *port, struct file *filp)
 {
 	struct option_port_private *portdata;
-	struct usb_serial *serial = port->serial;
 	int i, err;
 	struct urb *urb;
 
@@ -1187,6 +1224,7 @@ static void option_release(struct usb_serial *serial)
 	}
 }
 
+#ifdef CONFIG_PM
 static int option_suspend(struct usb_serial *serial, pm_message_t message)
 {
 	dbg("%s entered", __func__);
@@ -1245,6 +1283,7 @@ static int option_resume(struct usb_serial *serial)
 	}
 	return 0;
 }
+#endif
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
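
The same CONFIG_PM guard now covers the forward declarations, the struct initializers, and the definitions of option_suspend()/option_resume(), so a !CONFIG_PM build never sees an unreferenced function. A compressed sketch of the pattern (names invented for the example; compiles with or without -DCONFIG_PM):

    /* Every reference to the PM callback sits under the same guard. */
    #ifdef CONFIG_PM
    static int model_suspend(void) { return 0; }
    #endif

    struct model_ops {
    	int (*suspend)(void);
    };

    static struct model_ops ops = {
    #ifdef CONFIG_PM
    	.suspend = model_suspend,
    #endif
    };

    int main(void)
    {
    	return ops.suspend ? ops.suspend() : 0;
    }
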
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index fcb320217218..e20dc525d177 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -961,7 +961,7 @@ int usb_stor_Bulk_max_lun(struct us_data *us)
 				 US_BULK_GET_MAX_LUN,
 				 USB_DIR_IN | USB_TYPE_CLASS |
 				 USB_RECIP_INTERFACE,
-				 0, us->ifnum, us->iobuf, 1, HZ);
+				 0, us->ifnum, us->iobuf, 1, 10*HZ);
 
 	US_DEBUGP("GetMaxLUN command result is %d, data is %d\n",
 			result, us->iobuf[0]);
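
The Get Max LUN request timeout grows from one second to ten; the argument here appears to be a jiffies count, so 10*HZ scales with whatever the kernel's HZ happens to be. The arithmetic, checked under an assumed HZ:

    #include <assert.h>

    #define HZ 250	/* assumed CONFIG_HZ, only for the example */

    int main(void)
    {
    	unsigned int old_timeout = HZ;		/* 1 s in jiffies */
    	unsigned int new_timeout = 10 * HZ;	/* 10 s in jiffies */

    	assert(new_timeout / HZ == 10);
    	assert(new_timeout == 10 * old_timeout);
    	return 0;
    }
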
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index c3ebb6b41ce1..7aed2565c1bd 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -72,7 +72,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
 	if (jornada_ssp_byte(SETBRIGHTNESS) != TXDUMMY) {
 		printk(KERN_INFO "bl : failed to set brightness\n");
 		ret = -ETIMEDOUT;
-		goto out
+		goto out;
 	}
 
 	/* at this point we expect that the mcu has accepted
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index bb63c07e13de..5a72083dc67c 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -964,7 +964,7 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev)
 	struct s3c_fb *sfb = platform_get_drvdata(pdev);
 	int win;
 
-	for (win = 0; win <= S3C_FB_MAX_WIN; win++)
+	for (win = 0; win < S3C_FB_MAX_WIN; win++)
 		if (sfb->windows[win])
 			s3c_fb_release_win(sfb, sfb->windows[win]);
 
@@ -988,7 +988,7 @@ static int s3c_fb_suspend(struct platform_device *pdev, pm_message_t state)
 	struct s3c_fb_win *win;
 	int win_no;
 
-	for (win_no = S3C_FB_MAX_WIN; win_no >= 0; win_no--) {
+	for (win_no = S3C_FB_MAX_WIN - 1; win_no >= 0; win_no--) {
 		win = sfb->windows[win_no];
 		if (!win)
 			continue;
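
Both fixes address the same off-by-one: sfb->windows has S3C_FB_MAX_WIN elements, so valid indices run 0 .. S3C_FB_MAX_WIN - 1, and both the `<=` forward loop and the reverse loop starting at S3C_FB_MAX_WIN read one slot past the end of the array. The corrected bounds in miniature (array size assumed for the sketch):

    #include <assert.h>

    #define S3C_FB_MAX_WIN 5	/* assumed value, for the sketch only */

    int main(void)
    {
    	int windows[S3C_FB_MAX_WIN] = { 0 };

    	for (int win = 0; win < S3C_FB_MAX_WIN; win++)
    		windows[win] = 1;		/* touches windows[0..4] only */

    	for (int win = S3C_FB_MAX_WIN - 1; win >= 0; win--)
    		assert(windows[win] == 1);	/* same range, top down */

    	return 0;
    }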