author     David S. Miller <davem@davemloft.net>  2009-01-26 20:43:16 -0500
committer  David S. Miller <davem@davemloft.net>  2009-01-26 20:43:16 -0500
commit     3eacdf58c2c0b9507afedfc19108e98b992c31e4 (patch)
tree       d95e7e022ff6e6181edce43fe97cf2883b5a91ed /drivers
parent     dd0a251c8e087bca05e8f9a3657078591ae6e12b (diff)
parent     5376071069ec8a7e6a8112beab16fc24f5139475 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/core.c | 6
-rw-r--r--  drivers/char/hw_random/omap-rng.c | 2
-rw-r--r--  drivers/dma/Kconfig | 19
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/dmaengine.c | 8
-rw-r--r--  drivers/dma/dmatest.c | 35
-rw-r--r--  drivers/dma/fsldma.c | 8
-rw-r--r--  drivers/dma/ipu/Makefile | 1
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c | 1740
-rw-r--r--  drivers/dma/ipu/ipu_intern.h | 176
-rw-r--r--  drivers/dma/ipu/ipu_irq.c | 413
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c | 3
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 14
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 4
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 2
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 13
-rw-r--r--  drivers/i2c/busses/i2c-acorn.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-ali1535.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-ali1563.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-ali15x3.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-amd756.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-amd8111.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-au1550.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-bfin-twi.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-elektor.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-hydra.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-ibm_iic.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-iop3xx.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-ixp2000.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-mpc.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-mv64xxx.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-nforce2.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-parport-light.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-parport.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-pca-isa.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-piix4.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-sibyte.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-sis5595.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-sis630.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-sis96x.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-via.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-viapro.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-voodoo3.c | 2
-rw-r--r--  drivers/i2c/busses/scx200_acb.c | 1
-rw-r--r--  drivers/i2c/busses/scx200_i2c.c | 1
-rw-r--r--  drivers/i2c/chips/Kconfig | 37
-rw-r--r--  drivers/i2c/chips/Makefile | 2
-rw-r--r--  drivers/ide/falconide.c | 2
-rw-r--r--  drivers/ide/ide-probe.c | 3
-rw-r--r--  drivers/ide/palm_bk3710.c | 11
-rw-r--r--  drivers/ieee1394/pcilynx.c | 1
-rw-r--r--  drivers/isdn/i4l/isdn_ppp.c | 2
-rw-r--r--  drivers/misc/Kconfig | 9
-rw-r--r--  drivers/misc/Makefile | 2
-rw-r--r--  drivers/misc/eeprom/Kconfig | 59
-rw-r--r--  drivers/misc/eeprom/Makefile | 4
-rw-r--r--  drivers/misc/eeprom/at24.c (renamed from drivers/i2c/chips/at24.c) | 0
-rw-r--r--  drivers/misc/eeprom/at25.c (renamed from drivers/spi/at25.c) | 0
-rw-r--r--  drivers/misc/eeprom/eeprom.c (renamed from drivers/i2c/chips/eeprom.c) | 0
-rw-r--r--  drivers/misc/eeprom/eeprom_93cx6.c (renamed from drivers/misc/eeprom_93cx6.c) | 0
-rw-r--r--  drivers/mmc/host/Kconfig | 10
-rw-r--r--  drivers/mmc/host/Makefile | 1
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 1242
-rw-r--r--  drivers/mmc/host/s3cmci.c | 2
-rw-r--r--  drivers/mtd/onenand/omap2.c | 6
-rw-r--r--  drivers/net/arm/am79c961a.c | 20
-rw-r--r--  drivers/net/bnx2x_main.c | 217
-rw-r--r--  drivers/net/e1000e/82571.c | 6
-rw-r--r--  drivers/net/e1000e/hw.h | 1
-rw-r--r--  drivers/net/gianfar.c | 6
-rw-r--r--  drivers/net/netxen/netxen_nic.h | 12
-rw-r--r--  drivers/net/netxen/netxen_nic_ethtool.c | 5
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 31
-rw-r--r--  drivers/net/phy/mdio_bus.c | 8
-rw-r--r--  drivers/net/phy/smsc.c | 12
-rw-r--r--  drivers/net/virtio_net.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-rs.c | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-rs.c | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c | 2
-rw-r--r--  drivers/net/wireless/orinoco/orinoco.c | 32
-rw-r--r--  drivers/net/wireless/p54/p54common.c | 24
-rw-r--r--  drivers/net/wireless/p54/p54usb.c | 41
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c | 3
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8187_dev.c | 1
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 1
-rw-r--r--  drivers/oprofile/cpu_buffer.c | 5
-rw-r--r--  drivers/oprofile/cpu_buffer.h | 7
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c | 4
-rw-r--r--  drivers/pci/msi.c | 16
-rw-r--r--  drivers/pci/pci-driver.c | 91
-rw-r--r--  drivers/pci/pci.c | 63
-rw-r--r--  drivers/pci/pci.h | 6
-rw-r--r--  drivers/power/pda_power.c | 2
-rw-r--r--  drivers/s390/net/lcs.c | 8
-rw-r--r--  drivers/serial/sh-sci.h | 5
-rw-r--r--  drivers/spi/Kconfig | 11
-rw-r--r--  drivers/spi/Makefile | 1
-rw-r--r--  drivers/usb/host/ohci-omap.c | 6
-rw-r--r--  drivers/video/Kconfig | 12
-rw-r--r--  drivers/video/Makefile | 1
-rw-r--r--  drivers/video/aty/radeon_i2c.c | 1
-rw-r--r--  drivers/video/i810/i810-i2c.c | 1
-rw-r--r--  drivers/video/intelfb/intelfb_i2c.c | 1
-rw-r--r--  drivers/video/mx3fb.c | 1555
-rw-r--r--  drivers/video/nvidia/nv_i2c.c | 1
-rw-r--r--  drivers/video/omap/lcdc.c | 4
-rw-r--r--  drivers/video/savage/savagefb-i2c.c | 1
-rw-r--r--  drivers/watchdog/Kconfig | 6
-rw-r--r--  drivers/watchdog/at91rm9200_wdt.c | 1
-rw-r--r--  drivers/xen/balloon.c | 8
-rw-r--r--  drivers/xen/xenfs/xenbus.c | 11
117 files changed, 5756 insertions, 420 deletions
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 8079afca4972..55e530942ab0 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -777,10 +777,16 @@ static void device_remove_class_symlinks(struct device *dev)
 int dev_set_name(struct device *dev, const char *fmt, ...)
 {
 	va_list vargs;
+	char *s;
 
 	va_start(vargs, fmt);
 	vsnprintf(dev->bus_id, sizeof(dev->bus_id), fmt, vargs);
 	va_end(vargs);
+
+	/* ewww... some of these buggers have / in the name... */
+	while ((s = strchr(dev->bus_id, '/')))
+		*s = '!';
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dev_set_name);
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index d4e7dca06e4f..ba68a4671cb5 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -102,7 +102,7 @@ static int __init omap_rng_probe(struct platform_device *pdev)
 		return -EBUSY;
 
 	if (cpu_is_omap24xx()) {
-		rng_ick = clk_get(NULL, "rng_ick");
+		rng_ick = clk_get(&pdev->dev, "rng_ick");
 		if (IS_ERR(rng_ick)) {
 			dev_err(&pdev->dev, "Could not get rng_ick\n");
 			ret = PTR_ERR(rng_ick);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index e34b06420816..48ea59e79672 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -62,6 +62,25 @@ config MV_XOR
 	---help---
 	  Enable support for the Marvell XOR engine.
 
+config MX3_IPU
+	bool "MX3x Image Processing Unit support"
+	depends on ARCH_MX3
+	select DMA_ENGINE
+	default y
+	help
+	  If you plan to use the Image Processing unit in the i.MX3x, say
+	  Y here. If unsure, select Y.
+
+config MX3_IPU_IRQS
+	int "Number of dynamically mapped interrupts for IPU"
+	depends on MX3_IPU
+	range 2 137
+	default 4
+	help
+	  Out of 137 interrupt sources on i.MX31 IPU only very few are used.
+	  To avoid bloating the irq_desc[] array we allocate a sufficient
+	  number of IRQ slots and map them dynamically to specific sources.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 14f59527d4f6..2e5dc96700d2 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
 obj-$(CONFIG_MV_XOR) += mv_xor.o
 obj-$(CONFIG_DW_DMAC) += dw_dmac.o
+obj-$(CONFIG_MX3_IPU) += ipu/
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 403dbe781122..a58993011edb 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -329,9 +329,6 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 	struct dma_chan *chan;
 	int cpu;
 
-	WARN_ONCE(dmaengine_ref_count == 0,
-		  "client called %s without a reference", __func__);
-
 	cpu = get_cpu();
 	chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
 	put_cpu();
@@ -348,9 +345,6 @@ void dma_issue_pending_all(void)
 	struct dma_device *device;
 	struct dma_chan *chan;
 
-	WARN_ONCE(dmaengine_ref_count == 0,
-		  "client called %s without a reference", __func__);
-
 	rcu_read_lock();
 	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
@@ -961,6 +955,8 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
 	if (!dep)
 		return;
 
+	/* we'll submit tx->next now, so clear the link */
+	tx->next = NULL;
 	chan = dep->chan;
 
 	/* keep submitting up until a channel switch is detected
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 3603f1ea5b28..732fa1ec36ab 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -217,6 +217,10 @@ static int dmatest_func(void *data)
 	chan = thread->chan;
 
 	while (!kthread_should_stop()) {
+		struct dma_device *dev = chan->device;
+		struct dma_async_tx_descriptor *tx;
+		dma_addr_t dma_src, dma_dest;
+
 		total_tests++;
 
 		len = dmatest_random() % test_buf_size + 1;
@@ -226,10 +230,30 @@ static int dmatest_func(void *data)
 		dmatest_init_srcbuf(thread->srcbuf, src_off, len);
 		dmatest_init_dstbuf(thread->dstbuf, dst_off, len);
 
-		cookie = dma_async_memcpy_buf_to_buf(chan,
-				thread->dstbuf + dst_off,
-				thread->srcbuf + src_off,
-				len);
+		dma_src = dma_map_single(dev->dev, thread->srcbuf + src_off,
+					 len, DMA_TO_DEVICE);
+		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
+		dma_dest = dma_map_single(dev->dev, thread->dstbuf,
+					  test_buf_size, DMA_BIDIRECTIONAL);
+
+		tx = dev->device_prep_dma_memcpy(chan, dma_dest + dst_off,
+						 dma_src, len,
+						 DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP);
+		if (!tx) {
+			dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+			dma_unmap_single(dev->dev, dma_dest,
+					 test_buf_size, DMA_BIDIRECTIONAL);
+			pr_warning("%s: #%u: prep error with src_off=0x%x "
+					"dst_off=0x%x len=0x%x\n",
+					thread_name, total_tests - 1,
+					src_off, dst_off, len);
+			msleep(100);
+			failed_tests++;
+			continue;
+		}
+		tx->callback = NULL;
+		cookie = tx->tx_submit(tx);
+
 		if (dma_submit_error(cookie)) {
 			pr_warning("%s: #%u: submit error %d with src_off=0x%x "
 					"dst_off=0x%x len=0x%x\n",
@@ -253,6 +277,9 @@ static int dmatest_func(void *data)
 			failed_tests++;
 			continue;
 		}
+		/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
+		dma_unmap_single(dev->dev, dma_dest,
+				 test_buf_size, DMA_BIDIRECTIONAL);
 
 		error_count = 0;
 
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index ca70a21afc68..70126a606239 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -822,7 +822,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
 	 */
 	WARN_ON(fdev->feature != new_fsl_chan->feature);
 
-	new_fsl_chan->dev = &new_fsl_chan->common.dev->device;
+	new_fsl_chan->dev = fdev->dev;
 	new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
 			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
 
@@ -875,7 +875,8 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
 	}
 
 	dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
-		 compatible, new_fsl_chan->irq);
+		 compatible,
+		 new_fsl_chan->irq != NO_IRQ ? new_fsl_chan->irq : fdev->irq);
 
 	return 0;
 
@@ -890,7 +891,8 @@ err_no_reg:
 
 static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
 {
-	free_irq(fchan->irq, fchan);
+	if (fchan->irq != NO_IRQ)
+		free_irq(fchan->irq, fchan);
 	list_del(&fchan->common.device_node);
 	iounmap(fchan->reg_base);
 	kfree(fchan);
diff --git a/drivers/dma/ipu/Makefile b/drivers/dma/ipu/Makefile
new file mode 100644
index 000000000000..6704cf48326d
--- /dev/null
+++ b/drivers/dma/ipu/Makefile
@@ -0,0 +1 @@
obj-y += ipu_irq.o ipu_idmac.o
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
new file mode 100644
index 000000000000..1f154d08e98f
--- /dev/null
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -0,0 +1,1740 @@
1/*
2 * Copyright (C) 2008
3 * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
4 *
5 * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/init.h>
13#include <linux/platform_device.h>
14#include <linux/err.h>
15#include <linux/spinlock.h>
16#include <linux/delay.h>
17#include <linux/list.h>
18#include <linux/clk.h>
19#include <linux/vmalloc.h>
20#include <linux/string.h>
21#include <linux/interrupt.h>
22#include <linux/io.h>
23
24#include <mach/ipu.h>
25
26#include "ipu_intern.h"
27
28#define FS_VF_IN_VALID 0x00000002
29#define FS_ENC_IN_VALID 0x00000001
30
31/*
32 * There can be only one, we could allocate it dynamically, but then we'd have
33 * to add an extra parameter to some functions, and use something as ugly as
34 * struct ipu *ipu = to_ipu(to_idmac(ichan->dma_chan.device));
35 * in the ISR
36 */
37static struct ipu ipu_data;
38
39#define to_ipu(id) container_of(id, struct ipu, idmac)
40
41static u32 __idmac_read_icreg(struct ipu *ipu, unsigned long reg)
42{
43 return __raw_readl(ipu->reg_ic + reg);
44}
45
46#define idmac_read_icreg(ipu, reg) __idmac_read_icreg(ipu, reg - IC_CONF)
47
48static void __idmac_write_icreg(struct ipu *ipu, u32 value, unsigned long reg)
49{
50 __raw_writel(value, ipu->reg_ic + reg);
51}
52
53#define idmac_write_icreg(ipu, v, reg) __idmac_write_icreg(ipu, v, reg - IC_CONF)
54
55static u32 idmac_read_ipureg(struct ipu *ipu, unsigned long reg)
56{
57 return __raw_readl(ipu->reg_ipu + reg);
58}
59
60static void idmac_write_ipureg(struct ipu *ipu, u32 value, unsigned long reg)
61{
62 __raw_writel(value, ipu->reg_ipu + reg);
63}
64
65/*****************************************************************************
66 * IPU / IC common functions
67 */
68static void dump_idmac_reg(struct ipu *ipu)
69{
70 dev_dbg(ipu->dev, "IDMAC_CONF 0x%x, IC_CONF 0x%x, IDMAC_CHA_EN 0x%x, "
71 "IDMAC_CHA_PRI 0x%x, IDMAC_CHA_BUSY 0x%x\n",
72 idmac_read_icreg(ipu, IDMAC_CONF),
73 idmac_read_icreg(ipu, IC_CONF),
74 idmac_read_icreg(ipu, IDMAC_CHA_EN),
75 idmac_read_icreg(ipu, IDMAC_CHA_PRI),
76 idmac_read_icreg(ipu, IDMAC_CHA_BUSY));
77 dev_dbg(ipu->dev, "BUF0_RDY 0x%x, BUF1_RDY 0x%x, CUR_BUF 0x%x, "
78 "DB_MODE 0x%x, TASKS_STAT 0x%x\n",
79 idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY),
80 idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY),
81 idmac_read_ipureg(ipu, IPU_CHA_CUR_BUF),
82 idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL),
83 idmac_read_ipureg(ipu, IPU_TASKS_STAT));
84}
85
86static uint32_t bytes_per_pixel(enum pixel_fmt fmt)
87{
88 switch (fmt) {
89 case IPU_PIX_FMT_GENERIC: /* generic data */
90 case IPU_PIX_FMT_RGB332:
91 case IPU_PIX_FMT_YUV420P:
92 case IPU_PIX_FMT_YUV422P:
93 default:
94 return 1;
95 case IPU_PIX_FMT_RGB565:
96 case IPU_PIX_FMT_YUYV:
97 case IPU_PIX_FMT_UYVY:
98 return 2;
99 case IPU_PIX_FMT_BGR24:
100 case IPU_PIX_FMT_RGB24:
101 return 3;
102 case IPU_PIX_FMT_GENERIC_32: /* generic data */
103 case IPU_PIX_FMT_BGR32:
104 case IPU_PIX_FMT_RGB32:
105 case IPU_PIX_FMT_ABGR32:
106 return 4;
107 }
108}
109
110/* Enable / disable direct write to memory by the Camera Sensor Interface */
111static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel)
112{
113 uint32_t ic_conf, mask;
114
115 switch (channel) {
116 case IDMAC_IC_0:
117 mask = IC_CONF_PRPENC_EN;
118 break;
119 case IDMAC_IC_7:
120 mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN;
121 break;
122 default:
123 return;
124 }
125 ic_conf = idmac_read_icreg(ipu, IC_CONF) | mask;
126 idmac_write_icreg(ipu, ic_conf, IC_CONF);
127}
128
129static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel)
130{
131 uint32_t ic_conf, mask;
132
133 switch (channel) {
134 case IDMAC_IC_0:
135 mask = IC_CONF_PRPENC_EN;
136 break;
137 case IDMAC_IC_7:
138 mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN;
139 break;
140 default:
141 return;
142 }
143 ic_conf = idmac_read_icreg(ipu, IC_CONF) & ~mask;
144 idmac_write_icreg(ipu, ic_conf, IC_CONF);
145}
146
147static uint32_t ipu_channel_status(struct ipu *ipu, enum ipu_channel channel)
148{
149 uint32_t stat = TASK_STAT_IDLE;
150 uint32_t task_stat_reg = idmac_read_ipureg(ipu, IPU_TASKS_STAT);
151
152 switch (channel) {
153 case IDMAC_IC_7:
154 stat = (task_stat_reg & TSTAT_CSI2MEM_MASK) >>
155 TSTAT_CSI2MEM_OFFSET;
156 break;
157 case IDMAC_IC_0:
158 case IDMAC_SDC_0:
159 case IDMAC_SDC_1:
160 default:
161 break;
162 }
163 return stat;
164}
165
166struct chan_param_mem_planar {
167 /* Word 0 */
168 u32 xv:10;
169 u32 yv:10;
170 u32 xb:12;
171
172 u32 yb:12;
173 u32 res1:2;
174 u32 nsb:1;
175 u32 lnpb:6;
176 u32 ubo_l:11;
177
178 u32 ubo_h:15;
179 u32 vbo_l:17;
180
181 u32 vbo_h:9;
182 u32 res2:3;
183 u32 fw:12;
184 u32 fh_l:8;
185
186 u32 fh_h:4;
187 u32 res3:28;
188
189 /* Word 1 */
190 u32 eba0;
191
192 u32 eba1;
193
194 u32 bpp:3;
195 u32 sl:14;
196 u32 pfs:3;
197 u32 bam:3;
198 u32 res4:2;
199 u32 npb:6;
200 u32 res5:1;
201
202 u32 sat:2;
203 u32 res6:30;
204} __attribute__ ((packed));
205
206struct chan_param_mem_interleaved {
207 /* Word 0 */
208 u32 xv:10;
209 u32 yv:10;
210 u32 xb:12;
211
212 u32 yb:12;
213 u32 sce:1;
214 u32 res1:1;
215 u32 nsb:1;
216 u32 lnpb:6;
217 u32 sx:10;
218 u32 sy_l:1;
219
220 u32 sy_h:9;
221 u32 ns:10;
222 u32 sm:10;
223 u32 sdx_l:3;
224
225 u32 sdx_h:2;
226 u32 sdy:5;
227 u32 sdrx:1;
228 u32 sdry:1;
229 u32 sdr1:1;
230 u32 res2:2;
231 u32 fw:12;
232 u32 fh_l:8;
233
234 u32 fh_h:4;
235 u32 res3:28;
236
237 /* Word 1 */
238 u32 eba0;
239
240 u32 eba1;
241
242 u32 bpp:3;
243 u32 sl:14;
244 u32 pfs:3;
245 u32 bam:3;
246 u32 res4:2;
247 u32 npb:6;
248 u32 res5:1;
249
250 u32 sat:2;
251 u32 scc:1;
252 u32 ofs0:5;
253 u32 ofs1:5;
254 u32 ofs2:5;
255 u32 ofs3:5;
256 u32 wid0:3;
257 u32 wid1:3;
258 u32 wid2:3;
259
260 u32 wid3:3;
261 u32 dec_sel:1;
262 u32 res6:28;
263} __attribute__ ((packed));
264
265union chan_param_mem {
266 struct chan_param_mem_planar pp;
267 struct chan_param_mem_interleaved ip;
268};
269
270static void ipu_ch_param_set_plane_offset(union chan_param_mem *params,
271 u32 u_offset, u32 v_offset)
272{
273 params->pp.ubo_l = u_offset & 0x7ff;
274 params->pp.ubo_h = u_offset >> 11;
275 params->pp.vbo_l = v_offset & 0x1ffff;
276 params->pp.vbo_h = v_offset >> 17;
277}
278
279static void ipu_ch_param_set_size(union chan_param_mem *params,
280 uint32_t pixel_fmt, uint16_t width,
281 uint16_t height, uint16_t stride)
282{
283 u32 u_offset;
284 u32 v_offset;
285
286 params->pp.fw = width - 1;
287 params->pp.fh_l = height - 1;
288 params->pp.fh_h = (height - 1) >> 8;
289 params->pp.sl = stride - 1;
290
291 switch (pixel_fmt) {
292 case IPU_PIX_FMT_GENERIC:
293 /*Represents 8-bit Generic data */
294 params->pp.bpp = 3;
295 params->pp.pfs = 7;
296 params->pp.npb = 31;
297 params->pp.sat = 2; /* SAT = use 32-bit access */
298 break;
299 case IPU_PIX_FMT_GENERIC_32:
300 /*Represents 32-bit Generic data */
301 params->pp.bpp = 0;
302 params->pp.pfs = 7;
303 params->pp.npb = 7;
304 params->pp.sat = 2; /* SAT = use 32-bit access */
305 break;
306 case IPU_PIX_FMT_RGB565:
307 params->ip.bpp = 2;
308 params->ip.pfs = 4;
309 params->ip.npb = 7;
310 params->ip.sat = 2; /* SAT = 32-bit access */
311 params->ip.ofs0 = 0; /* Red bit offset */
312 params->ip.ofs1 = 5; /* Green bit offset */
313 params->ip.ofs2 = 11; /* Blue bit offset */
314 params->ip.ofs3 = 16; /* Alpha bit offset */
315 params->ip.wid0 = 4; /* Red bit width - 1 */
316 params->ip.wid1 = 5; /* Green bit width - 1 */
317 params->ip.wid2 = 4; /* Blue bit width - 1 */
318 break;
319 case IPU_PIX_FMT_BGR24:
320 params->ip.bpp = 1; /* 24 BPP & RGB PFS */
321 params->ip.pfs = 4;
322 params->ip.npb = 7;
323 params->ip.sat = 2; /* SAT = 32-bit access */
324 params->ip.ofs0 = 0; /* Red bit offset */
325 params->ip.ofs1 = 8; /* Green bit offset */
326 params->ip.ofs2 = 16; /* Blue bit offset */
327 params->ip.ofs3 = 24; /* Alpha bit offset */
328 params->ip.wid0 = 7; /* Red bit width - 1 */
329 params->ip.wid1 = 7; /* Green bit width - 1 */
330 params->ip.wid2 = 7; /* Blue bit width - 1 */
331 break;
332 case IPU_PIX_FMT_RGB24:
333 params->ip.bpp = 1; /* 24 BPP & RGB PFS */
334 params->ip.pfs = 4;
335 params->ip.npb = 7;
336 params->ip.sat = 2; /* SAT = 32-bit access */
337 params->ip.ofs0 = 16; /* Red bit offset */
338 params->ip.ofs1 = 8; /* Green bit offset */
339 params->ip.ofs2 = 0; /* Blue bit offset */
340 params->ip.ofs3 = 24; /* Alpha bit offset */
341 params->ip.wid0 = 7; /* Red bit width - 1 */
342 params->ip.wid1 = 7; /* Green bit width - 1 */
343 params->ip.wid2 = 7; /* Blue bit width - 1 */
344 break;
345 case IPU_PIX_FMT_BGRA32:
346 case IPU_PIX_FMT_BGR32:
347 params->ip.bpp = 0;
348 params->ip.pfs = 4;
349 params->ip.npb = 7;
350 params->ip.sat = 2; /* SAT = 32-bit access */
351 params->ip.ofs0 = 8; /* Red bit offset */
352 params->ip.ofs1 = 16; /* Green bit offset */
353 params->ip.ofs2 = 24; /* Blue bit offset */
354 params->ip.ofs3 = 0; /* Alpha bit offset */
355 params->ip.wid0 = 7; /* Red bit width - 1 */
356 params->ip.wid1 = 7; /* Green bit width - 1 */
357 params->ip.wid2 = 7; /* Blue bit width - 1 */
358 params->ip.wid3 = 7; /* Alpha bit width - 1 */
359 break;
360 case IPU_PIX_FMT_RGBA32:
361 case IPU_PIX_FMT_RGB32:
362 params->ip.bpp = 0;
363 params->ip.pfs = 4;
364 params->ip.npb = 7;
365 params->ip.sat = 2; /* SAT = 32-bit access */
366 params->ip.ofs0 = 24; /* Red bit offset */
367 params->ip.ofs1 = 16; /* Green bit offset */
368 params->ip.ofs2 = 8; /* Blue bit offset */
369 params->ip.ofs3 = 0; /* Alpha bit offset */
370 params->ip.wid0 = 7; /* Red bit width - 1 */
371 params->ip.wid1 = 7; /* Green bit width - 1 */
372 params->ip.wid2 = 7; /* Blue bit width - 1 */
373 params->ip.wid3 = 7; /* Alpha bit width - 1 */
374 break;
375 case IPU_PIX_FMT_ABGR32:
376 params->ip.bpp = 0;
377 params->ip.pfs = 4;
378 params->ip.npb = 7;
379 params->ip.sat = 2; /* SAT = 32-bit access */
380 params->ip.ofs0 = 8; /* Red bit offset */
381 params->ip.ofs1 = 16; /* Green bit offset */
382 params->ip.ofs2 = 24; /* Blue bit offset */
383 params->ip.ofs3 = 0; /* Alpha bit offset */
384 params->ip.wid0 = 7; /* Red bit width - 1 */
385 params->ip.wid1 = 7; /* Green bit width - 1 */
386 params->ip.wid2 = 7; /* Blue bit width - 1 */
387 params->ip.wid3 = 7; /* Alpha bit width - 1 */
388 break;
389 case IPU_PIX_FMT_UYVY:
390 params->ip.bpp = 2;
391 params->ip.pfs = 6;
392 params->ip.npb = 7;
393 params->ip.sat = 2; /* SAT = 32-bit access */
394 break;
395 case IPU_PIX_FMT_YUV420P2:
396 case IPU_PIX_FMT_YUV420P:
397 params->ip.bpp = 3;
398 params->ip.pfs = 3;
399 params->ip.npb = 7;
400 params->ip.sat = 2; /* SAT = 32-bit access */
401 u_offset = stride * height;
402 v_offset = u_offset + u_offset / 4;
403 ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
404 break;
405 case IPU_PIX_FMT_YVU422P:
406 params->ip.bpp = 3;
407 params->ip.pfs = 2;
408 params->ip.npb = 7;
409 params->ip.sat = 2; /* SAT = 32-bit access */
410 v_offset = stride * height;
411 u_offset = v_offset + v_offset / 2;
412 ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
413 break;
414 case IPU_PIX_FMT_YUV422P:
415 params->ip.bpp = 3;
416 params->ip.pfs = 2;
417 params->ip.npb = 7;
418 params->ip.sat = 2; /* SAT = 32-bit access */
419 u_offset = stride * height;
420 v_offset = u_offset + u_offset / 2;
421 ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
422 break;
423 default:
424 dev_err(ipu_data.dev,
425 "mxc ipu: unimplemented pixel format %d\n", pixel_fmt);
426 break;
427 }
428
429 params->pp.nsb = 1;
430}
431
432static void ipu_ch_param_set_burst_size(union chan_param_mem *params,
433 uint16_t burst_pixels)
434{
435 params->pp.npb = burst_pixels - 1;
436};
437
438static void ipu_ch_param_set_buffer(union chan_param_mem *params,
439 dma_addr_t buf0, dma_addr_t buf1)
440{
441 params->pp.eba0 = buf0;
442 params->pp.eba1 = buf1;
443};
444
445static void ipu_ch_param_set_rotation(union chan_param_mem *params,
446 enum ipu_rotate_mode rotate)
447{
448 params->pp.bam = rotate;
449};
450
451static void ipu_write_param_mem(uint32_t addr, uint32_t *data,
452 uint32_t num_words)
453{
454 for (; num_words > 0; num_words--) {
455 dev_dbg(ipu_data.dev,
456 "write param mem - addr = 0x%08X, data = 0x%08X\n",
457 addr, *data);
458 idmac_write_ipureg(&ipu_data, addr, IPU_IMA_ADDR);
459 idmac_write_ipureg(&ipu_data, *data++, IPU_IMA_DATA);
460 addr++;
461 if ((addr & 0x7) == 5) {
462 addr &= ~0x7; /* set to word 0 */
463 addr += 8; /* increment to next row */
464 }
465 }
466}
467
468static int calc_resize_coeffs(uint32_t in_size, uint32_t out_size,
469 uint32_t *resize_coeff,
470 uint32_t *downsize_coeff)
471{
472 uint32_t temp_size;
473 uint32_t temp_downsize;
474
475 *resize_coeff = 1 << 13;
476 *downsize_coeff = 1 << 13;
477
478 /* Cannot downsize more than 8:1 */
479 if (out_size << 3 < in_size)
480 return -EINVAL;
481
482 /* compute downsizing coefficient */
483 temp_downsize = 0;
484 temp_size = in_size;
485 while (temp_size >= out_size * 2 && temp_downsize < 2) {
486 temp_size >>= 1;
487 temp_downsize++;
488 }
489 *downsize_coeff = temp_downsize;
490
491 /*
492 * compute resizing coefficient using the following formula:
493 * resize_coeff = M*(SI -1)/(SO - 1)
494 * where M = 2^13, SI - input size, SO - output size
495 */
496 *resize_coeff = (8192L * (temp_size - 1)) / (out_size - 1);
497 if (*resize_coeff >= 16384L) {
498 dev_err(ipu_data.dev, "Warning! Overflow on resize coeff.\n");
499 *resize_coeff = 0x3FFF;
500 }
501
502 dev_dbg(ipu_data.dev, "resizing from %u -> %u pixels, "
503 "downsize=%u, resize=%u.%lu (reg=%u)\n", in_size, out_size,
504 *downsize_coeff, *resize_coeff >= 8192L ? 1 : 0,
505 ((*resize_coeff & 0x1FFF) * 10000L) / 8192L, *resize_coeff);
506
507 return 0;
508}
509
510static enum ipu_color_space format_to_colorspace(enum pixel_fmt fmt)
511{
512 switch (fmt) {
513 case IPU_PIX_FMT_RGB565:
514 case IPU_PIX_FMT_BGR24:
515 case IPU_PIX_FMT_RGB24:
516 case IPU_PIX_FMT_BGR32:
517 case IPU_PIX_FMT_RGB32:
518 return IPU_COLORSPACE_RGB;
519 default:
520 return IPU_COLORSPACE_YCBCR;
521 }
522}
523
524static int ipu_ic_init_prpenc(struct ipu *ipu,
525 union ipu_channel_param *params, bool src_is_csi)
526{
527 uint32_t reg, ic_conf;
528 uint32_t downsize_coeff, resize_coeff;
529 enum ipu_color_space in_fmt, out_fmt;
530
531 /* Setup vertical resizing */
532 calc_resize_coeffs(params->video.in_height,
533 params->video.out_height,
534 &resize_coeff, &downsize_coeff);
535 reg = (downsize_coeff << 30) | (resize_coeff << 16);
536
537 /* Setup horizontal resizing */
538 calc_resize_coeffs(params->video.in_width,
539 params->video.out_width,
540 &resize_coeff, &downsize_coeff);
541 reg |= (downsize_coeff << 14) | resize_coeff;
542
543 /* Setup color space conversion */
544 in_fmt = format_to_colorspace(params->video.in_pixel_fmt);
545 out_fmt = format_to_colorspace(params->video.out_pixel_fmt);
546
547 /*
548 * Colourspace conversion unsupported yet - see _init_csc() in
549 * Freescale sources
550 */
551 if (in_fmt != out_fmt) {
552 dev_err(ipu->dev, "Colourspace conversion unsupported!\n");
553 return -EOPNOTSUPP;
554 }
555
556 idmac_write_icreg(ipu, reg, IC_PRP_ENC_RSC);
557
558 ic_conf = idmac_read_icreg(ipu, IC_CONF);
559
560 if (src_is_csi)
561 ic_conf &= ~IC_CONF_RWS_EN;
562 else
563 ic_conf |= IC_CONF_RWS_EN;
564
565 idmac_write_icreg(ipu, ic_conf, IC_CONF);
566
567 return 0;
568}
569
570static uint32_t dma_param_addr(uint32_t dma_ch)
571{
572 /* Channel Parameter Memory */
573 return 0x10000 | (dma_ch << 4);
574};
575
576static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel,
577 bool prio)
578{
579 u32 reg = idmac_read_icreg(ipu, IDMAC_CHA_PRI);
580
581 if (prio)
582 reg |= 1UL << channel;
583 else
584 reg &= ~(1UL << channel);
585
586 idmac_write_icreg(ipu, reg, IDMAC_CHA_PRI);
587
588 dump_idmac_reg(ipu);
589}
590
591static uint32_t ipu_channel_conf_mask(enum ipu_channel channel)
592{
593 uint32_t mask;
594
595 switch (channel) {
596 case IDMAC_IC_0:
597 case IDMAC_IC_7:
598 mask = IPU_CONF_CSI_EN | IPU_CONF_IC_EN;
599 break;
600 case IDMAC_SDC_0:
601 case IDMAC_SDC_1:
602 mask = IPU_CONF_SDC_EN | IPU_CONF_DI_EN;
603 break;
604 default:
605 mask = 0;
606 break;
607 }
608
609 return mask;
610}
611
612/**
613 * ipu_enable_channel() - enable an IPU channel.
614 * @channel: channel ID.
615 * @return: 0 on success or negative error code on failure.
616 */
617static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
618{
619 struct ipu *ipu = to_ipu(idmac);
620 enum ipu_channel channel = ichan->dma_chan.chan_id;
621 uint32_t reg;
622 unsigned long flags;
623
624 spin_lock_irqsave(&ipu->lock, flags);
625
626 /* Reset to buffer 0 */
627 idmac_write_ipureg(ipu, 1UL << channel, IPU_CHA_CUR_BUF);
628 ichan->active_buffer = 0;
629 ichan->status = IPU_CHANNEL_ENABLED;
630
631 switch (channel) {
632 case IDMAC_SDC_0:
633 case IDMAC_SDC_1:
634 case IDMAC_IC_7:
635 ipu_channel_set_priority(ipu, channel, true);
636 default:
637 break;
638 }
639
640 reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
641
642 idmac_write_icreg(ipu, reg | (1UL << channel), IDMAC_CHA_EN);
643
644 ipu_ic_enable_task(ipu, channel);
645
646 spin_unlock_irqrestore(&ipu->lock, flags);
647 return 0;
648}
649
650/**
651 * ipu_init_channel_buffer() - initialize a buffer for logical IPU channel.
652 * @channel: channel ID.
653 * @pixel_fmt: pixel format of buffer. Pixel format is a FOURCC ASCII code.
654 * @width: width of buffer in pixels.
655 * @height: height of buffer in pixels.
656 * @stride: stride length of buffer in pixels.
657 * @rot_mode: rotation mode of buffer. A rotation setting other than
658 * IPU_ROTATE_VERT_FLIP should only be used for input buffers of
659 * rotation channels.
660 * @phyaddr_0: buffer 0 physical address.
661 * @phyaddr_1: buffer 1 physical address. Setting this to a value other than
662 * NULL enables double buffering mode.
663 * @return: 0 on success or negative error code on failure.
664 */
665static int ipu_init_channel_buffer(struct idmac_channel *ichan,
666 enum pixel_fmt pixel_fmt,
667 uint16_t width, uint16_t height,
668 uint32_t stride,
669 enum ipu_rotate_mode rot_mode,
670 dma_addr_t phyaddr_0, dma_addr_t phyaddr_1)
671{
672 enum ipu_channel channel = ichan->dma_chan.chan_id;
673 struct idmac *idmac = to_idmac(ichan->dma_chan.device);
674 struct ipu *ipu = to_ipu(idmac);
675 union chan_param_mem params = {};
676 unsigned long flags;
677 uint32_t reg;
678 uint32_t stride_bytes;
679
680 stride_bytes = stride * bytes_per_pixel(pixel_fmt);
681
682 if (stride_bytes % 4) {
683 dev_err(ipu->dev,
684 "Stride length must be 32-bit aligned, stride = %d, bytes = %d\n",
685 stride, stride_bytes);
686 return -EINVAL;
687 }
688
689 /* IC channel's stride must be a multiple of 8 pixels */
690 if ((channel <= 13) && (stride % 8)) {
691 dev_err(ipu->dev, "Stride must be 8 pixel multiple\n");
692 return -EINVAL;
693 }
694
695 /* Build parameter memory data for DMA channel */
696 ipu_ch_param_set_size(&params, pixel_fmt, width, height, stride_bytes);
697 ipu_ch_param_set_buffer(&params, phyaddr_0, phyaddr_1);
698 ipu_ch_param_set_rotation(&params, rot_mode);
699 /* Some channels (rotation) have restriction on burst length */
700 switch (channel) {
701 case IDMAC_IC_7: /* Hangs with burst 8, 16, other values
702 invalid - Table 44-30 */
703/*
704 ipu_ch_param_set_burst_size(&params, 8);
705 */
706 break;
707 case IDMAC_SDC_0:
708 case IDMAC_SDC_1:
709 /* In original code only IPU_PIX_FMT_RGB565 was setting burst */
710 ipu_ch_param_set_burst_size(&params, 16);
711 break;
712 case IDMAC_IC_0:
713 default:
714 break;
715 }
716
717 spin_lock_irqsave(&ipu->lock, flags);
718
719 ipu_write_param_mem(dma_param_addr(channel), (uint32_t *)&params, 10);
720
721 reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL);
722
723 if (phyaddr_1)
724 reg |= 1UL << channel;
725 else
726 reg &= ~(1UL << channel);
727
728 idmac_write_ipureg(ipu, reg, IPU_CHA_DB_MODE_SEL);
729
730 ichan->status = IPU_CHANNEL_READY;
731
 732	spin_unlock_irqrestore(&ipu->lock, flags);
733
734 return 0;
735}
736
737/**
738 * ipu_select_buffer() - mark a channel's buffer as ready.
739 * @channel: channel ID.
740 * @buffer_n: buffer number to mark ready.
741 */
742static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
743{
744 /* No locking - this is a write-one-to-set register, cleared by IPU */
745 if (buffer_n == 0)
746 /* Mark buffer 0 as ready. */
747 idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF0_RDY);
748 else
749 /* Mark buffer 1 as ready. */
750 idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF1_RDY);
751}
752
753/**
754 * ipu_update_channel_buffer() - update physical address of a channel buffer.
755 * @channel: channel ID.
756 * @buffer_n: buffer number to update.
757 * 0 or 1 are the only valid values.
758 * @phyaddr: buffer physical address.
759 * @return: Returns 0 on success or negative error code on failure. This
760 * function will fail if the buffer is set to ready.
761 */
762/* Called under spin_lock(_irqsave)(&ichan->lock) */
763static int ipu_update_channel_buffer(enum ipu_channel channel,
764 int buffer_n, dma_addr_t phyaddr)
765{
766 uint32_t reg;
767 unsigned long flags;
768
769 spin_lock_irqsave(&ipu_data.lock, flags);
770
771 if (buffer_n == 0) {
772 reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
773 if (reg & (1UL << channel)) {
774 spin_unlock_irqrestore(&ipu_data.lock, flags);
775 return -EACCES;
776 }
777
778 /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */
779 idmac_write_ipureg(&ipu_data, dma_param_addr(channel) +
780 0x0008UL, IPU_IMA_ADDR);
781 idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA);
782 } else {
783 reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
784 if (reg & (1UL << channel)) {
785 spin_unlock_irqrestore(&ipu_data.lock, flags);
786 return -EACCES;
787 }
788
789 /* Check if double-buffering is already enabled */
790 reg = idmac_read_ipureg(&ipu_data, IPU_CHA_DB_MODE_SEL);
791
792 if (!(reg & (1UL << channel)))
793 idmac_write_ipureg(&ipu_data, reg | (1UL << channel),
794 IPU_CHA_DB_MODE_SEL);
795
796 /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 1) */
797 idmac_write_ipureg(&ipu_data, dma_param_addr(channel) +
798 0x0009UL, IPU_IMA_ADDR);
799 idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA);
800 }
801
802 spin_unlock_irqrestore(&ipu_data.lock, flags);
803
804 return 0;
805}
806
807/* Called under spin_lock_irqsave(&ichan->lock) */
808static int ipu_submit_channel_buffers(struct idmac_channel *ichan,
809 struct idmac_tx_desc *desc)
810{
811 struct scatterlist *sg;
812 int i, ret = 0;
813
814 for (i = 0, sg = desc->sg; i < 2 && sg; i++) {
815 if (!ichan->sg[i]) {
816 ichan->sg[i] = sg;
817
818 /*
819 * On first invocation this shouldn't be necessary, the
820 * call to ipu_init_channel_buffer() above will set
821 * addresses for us, so we could make it conditional
822 * on status >= IPU_CHANNEL_ENABLED, but doing it again
823 * shouldn't hurt either.
824 */
825 ret = ipu_update_channel_buffer(ichan->dma_chan.chan_id, i,
826 sg_dma_address(sg));
827 if (ret < 0)
828 return ret;
829
830 ipu_select_buffer(ichan->dma_chan.chan_id, i);
831
832 sg = sg_next(sg);
833 }
834 }
835
836 return ret;
837}
838
839static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
840{
841 struct idmac_tx_desc *desc = to_tx_desc(tx);
842 struct idmac_channel *ichan = to_idmac_chan(tx->chan);
843 struct idmac *idmac = to_idmac(tx->chan->device);
844 struct ipu *ipu = to_ipu(idmac);
845 dma_cookie_t cookie;
846 unsigned long flags;
847
848 /* Sanity check */
849 if (!list_empty(&desc->list)) {
850 /* The descriptor doesn't belong to client */
851 dev_err(&ichan->dma_chan.dev->device,
852 "Descriptor %p not prepared!\n", tx);
853 return -EBUSY;
854 }
855
856 mutex_lock(&ichan->chan_mutex);
857
858 if (ichan->status < IPU_CHANNEL_READY) {
859 struct idmac_video_param *video = &ichan->params.video;
860 /*
861 * Initial buffer assignment - the first two sg-entries from
862 * the descriptor will end up in the IDMAC buffers
863 */
864 dma_addr_t dma_1 = sg_is_last(desc->sg) ? 0 :
865 sg_dma_address(&desc->sg[1]);
866
867 WARN_ON(ichan->sg[0] || ichan->sg[1]);
868
869 cookie = ipu_init_channel_buffer(ichan,
870 video->out_pixel_fmt,
871 video->out_width,
872 video->out_height,
873 video->out_stride,
874 IPU_ROTATE_NONE,
875 sg_dma_address(&desc->sg[0]),
876 dma_1);
877 if (cookie < 0)
878 goto out;
879 }
880
881 /* ipu->lock can be taken under ichan->lock, but not v.v. */
882 spin_lock_irqsave(&ichan->lock, flags);
883
884 /* submit_buffers() atomically verifies and fills empty sg slots */
885 cookie = ipu_submit_channel_buffers(ichan, desc);
886
887 spin_unlock_irqrestore(&ichan->lock, flags);
888
889 if (cookie < 0)
890 goto out;
891
892 cookie = ichan->dma_chan.cookie;
893
894 if (++cookie < 0)
895 cookie = 1;
896
897 /* from dmaengine.h: "last cookie value returned to client" */
898 ichan->dma_chan.cookie = cookie;
899 tx->cookie = cookie;
900 spin_lock_irqsave(&ichan->lock, flags);
901 list_add_tail(&desc->list, &ichan->queue);
902 spin_unlock_irqrestore(&ichan->lock, flags);
903
904 if (ichan->status < IPU_CHANNEL_ENABLED) {
905 int ret = ipu_enable_channel(idmac, ichan);
906 if (ret < 0) {
907 cookie = ret;
908 spin_lock_irqsave(&ichan->lock, flags);
909 list_del_init(&desc->list);
910 spin_unlock_irqrestore(&ichan->lock, flags);
911 tx->cookie = cookie;
912 ichan->dma_chan.cookie = cookie;
913 }
914 }
915
916 dump_idmac_reg(ipu);
917
918out:
919 mutex_unlock(&ichan->chan_mutex);
920
921 return cookie;
922}
923
924/* Called with ichan->chan_mutex held */
925static int idmac_desc_alloc(struct idmac_channel *ichan, int n)
926{
927 struct idmac_tx_desc *desc = vmalloc(n * sizeof(struct idmac_tx_desc));
928 struct idmac *idmac = to_idmac(ichan->dma_chan.device);
929
930 if (!desc)
931 return -ENOMEM;
932
933 /* No interrupts, just disable the tasklet for a moment */
934 tasklet_disable(&to_ipu(idmac)->tasklet);
935
936 ichan->n_tx_desc = n;
937 ichan->desc = desc;
938 INIT_LIST_HEAD(&ichan->queue);
939 INIT_LIST_HEAD(&ichan->free_list);
940
941 while (n--) {
942 struct dma_async_tx_descriptor *txd = &desc->txd;
943
944 memset(txd, 0, sizeof(*txd));
945 dma_async_tx_descriptor_init(txd, &ichan->dma_chan);
946 txd->tx_submit = idmac_tx_submit;
947 txd->chan = &ichan->dma_chan;
948 INIT_LIST_HEAD(&txd->tx_list);
949
950 list_add(&desc->list, &ichan->free_list);
951
952 desc++;
953 }
954
955 tasklet_enable(&to_ipu(idmac)->tasklet);
956
957 return 0;
958}
959
960/**
961 * ipu_init_channel() - initialize an IPU channel.
962 * @idmac: IPU DMAC context.
963 * @ichan: pointer to the channel object.
964 * @return 0 on success or negative error code on failure.
965 */
966static int ipu_init_channel(struct idmac *idmac, struct idmac_channel *ichan)
967{
968 union ipu_channel_param *params = &ichan->params;
969 uint32_t ipu_conf;
970 enum ipu_channel channel = ichan->dma_chan.chan_id;
971 unsigned long flags;
972 uint32_t reg;
973 struct ipu *ipu = to_ipu(idmac);
974 int ret = 0, n_desc = 0;
975
976 dev_dbg(ipu->dev, "init channel = %d\n", channel);
977
978 if (channel != IDMAC_SDC_0 && channel != IDMAC_SDC_1 &&
979 channel != IDMAC_IC_7)
980 return -EINVAL;
981
982 spin_lock_irqsave(&ipu->lock, flags);
983
984 switch (channel) {
985 case IDMAC_IC_7:
986 n_desc = 16;
987 reg = idmac_read_icreg(ipu, IC_CONF);
988 idmac_write_icreg(ipu, reg & ~IC_CONF_CSI_MEM_WR_EN, IC_CONF);
989 break;
990 case IDMAC_IC_0:
991 n_desc = 16;
992 reg = idmac_read_ipureg(ipu, IPU_FS_PROC_FLOW);
993 idmac_write_ipureg(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW);
994 ret = ipu_ic_init_prpenc(ipu, params, true);
995 break;
996 case IDMAC_SDC_0:
997 case IDMAC_SDC_1:
998 n_desc = 4;
999 default:
1000 break;
1001 }
1002
1003 ipu->channel_init_mask |= 1L << channel;
1004
1005 /* Enable IPU sub module */
1006 ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) |
1007 ipu_channel_conf_mask(channel);
1008 idmac_write_ipureg(ipu, ipu_conf, IPU_CONF);
1009
1010 spin_unlock_irqrestore(&ipu->lock, flags);
1011
1012 if (n_desc && !ichan->desc)
1013 ret = idmac_desc_alloc(ichan, n_desc);
1014
1015 dump_idmac_reg(ipu);
1016
1017 return ret;
1018}
1019
1020/**
1021 * ipu_uninit_channel() - uninitialize an IPU channel.
1022 * @idmac: IPU DMAC context.
1023 * @ichan: pointer to the channel object.
1024 */
1025static void ipu_uninit_channel(struct idmac *idmac, struct idmac_channel *ichan)
1026{
1027 enum ipu_channel channel = ichan->dma_chan.chan_id;
1028 unsigned long flags;
1029 uint32_t reg;
1030 unsigned long chan_mask = 1UL << channel;
1031 uint32_t ipu_conf;
1032 struct ipu *ipu = to_ipu(idmac);
1033
1034 spin_lock_irqsave(&ipu->lock, flags);
1035
1036 if (!(ipu->channel_init_mask & chan_mask)) {
1037 dev_err(ipu->dev, "Channel already uninitialized %d\n",
1038 channel);
1039 spin_unlock_irqrestore(&ipu->lock, flags);
1040 return;
1041 }
1042
1043 /* Reset the double buffer */
1044 reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL);
1045 idmac_write_ipureg(ipu, reg & ~chan_mask, IPU_CHA_DB_MODE_SEL);
1046
1047 ichan->sec_chan_en = false;
1048
1049 switch (channel) {
1050 case IDMAC_IC_7:
1051 reg = idmac_read_icreg(ipu, IC_CONF);
1052 idmac_write_icreg(ipu, reg & ~(IC_CONF_RWS_EN | IC_CONF_PRPENC_EN),
1053 IC_CONF);
1054 break;
1055 case IDMAC_IC_0:
1056 reg = idmac_read_icreg(ipu, IC_CONF);
1057 idmac_write_icreg(ipu, reg & ~(IC_CONF_PRPENC_EN | IC_CONF_PRPENC_CSC1),
1058 IC_CONF);
1059 break;
1060 case IDMAC_SDC_0:
1061 case IDMAC_SDC_1:
1062 default:
1063 break;
1064 }
1065
1066 ipu->channel_init_mask &= ~(1L << channel);
1067
1068 ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) &
1069 ~ipu_channel_conf_mask(channel);
1070 idmac_write_ipureg(ipu, ipu_conf, IPU_CONF);
1071
1072 spin_unlock_irqrestore(&ipu->lock, flags);
1073
1074 ichan->n_tx_desc = 0;
1075 vfree(ichan->desc);
1076 ichan->desc = NULL;
1077}
1078
1079/**
1080 * ipu_disable_channel() - disable an IPU channel.
1081 * @idmac: IPU DMAC context.
1082 * @ichan: channel object pointer.
1083 * @wait_for_stop: flag to set whether to wait for channel end of frame or
1084 * return immediately.
1085 * @return: 0 on success or negative error code on failure.
1086 */
1087static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
1088 bool wait_for_stop)
1089{
1090 enum ipu_channel channel = ichan->dma_chan.chan_id;
1091 struct ipu *ipu = to_ipu(idmac);
1092 uint32_t reg;
1093 unsigned long flags;
1094 unsigned long chan_mask = 1UL << channel;
1095 unsigned int timeout;
1096
1097 if (wait_for_stop && channel != IDMAC_SDC_1 && channel != IDMAC_SDC_0) {
1098 timeout = 40;
1099 /* This waiting always fails. Related to spurious irq problem */
1100 while ((idmac_read_icreg(ipu, IDMAC_CHA_BUSY) & chan_mask) ||
1101 (ipu_channel_status(ipu, channel) == TASK_STAT_ACTIVE)) {
1102 timeout--;
1103 msleep(10);
1104
1105 if (!timeout) {
1106 dev_dbg(ipu->dev,
1107 "Warning: timeout waiting for channel %u to "
1108 "stop: buf0_rdy = 0x%08X, buf1_rdy = 0x%08X, "
1109 "busy = 0x%08X, tstat = 0x%08X\n", channel,
1110 idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY),
1111 idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY),
1112 idmac_read_icreg(ipu, IDMAC_CHA_BUSY),
1113 idmac_read_ipureg(ipu, IPU_TASKS_STAT));
1114 break;
1115 }
1116 }
1117 dev_dbg(ipu->dev, "timeout = %d * 10ms\n", 40 - timeout);
1118 }
1119 /* SDC BG and FG must be disabled before DMA is disabled */
1120 if (wait_for_stop && (channel == IDMAC_SDC_0 ||
1121 channel == IDMAC_SDC_1)) {
1122 for (timeout = 5;
1123 timeout && !ipu_irq_status(ichan->eof_irq); timeout--)
1124 msleep(5);
1125 }
1126
1127 spin_lock_irqsave(&ipu->lock, flags);
1128
1129 /* Disable IC task */
1130 ipu_ic_disable_task(ipu, channel);
1131
1132 /* Disable DMA channel(s) */
1133 reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
1134 idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN);
1135
1136 /*
1137 * Problem (observed with channel DMAIC_7): after enabling the channel
1138 * and initialising buffers, there comes an interrupt with current still
1139 * pointing at buffer 0, whereas it should use buffer 0 first and only
1140 * generate an interrupt when it is done, then current should already
1141 * point to buffer 1. This spurious interrupt also comes on channel
 1142	 * DMASDC_0. With DMAIC_7 normally, if we just leave the ISR after the
1143 * first interrupt, there comes the second with current correctly
1144 * pointing to buffer 1 this time. But sometimes this second interrupt
1145 * doesn't come and the channel hangs. Clearing BUFx_RDY when disabling
1146 * the channel seems to prevent the channel from hanging, but it doesn't
1147 * prevent the spurious interrupt. This might also be unsafe. Think
1148 * about the IDMAC controller trying to switch to a buffer, when we
1149 * clear the ready bit, and re-enable it a moment later.
1150 */
1151 reg = idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY);
1152 idmac_write_ipureg(ipu, 0, IPU_CHA_BUF0_RDY);
1153 idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF0_RDY);
1154
1155 reg = idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY);
1156 idmac_write_ipureg(ipu, 0, IPU_CHA_BUF1_RDY);
1157 idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF1_RDY);
1158
1159 spin_unlock_irqrestore(&ipu->lock, flags);
1160
1161 return 0;
1162}
1163
1164/*
1165 * We have several possibilities here:
1166 * current BUF next BUF
1167 *
1168 * not last sg next not last sg
1169 * not last sg next last sg
1170 * last sg first sg from next descriptor
1171 * last sg NULL
1172 *
1173 * Besides, the descriptor queue might be empty or not. We process all these
1174 * cases carefully.
1175 */
1176static irqreturn_t idmac_interrupt(int irq, void *dev_id)
1177{
1178 struct idmac_channel *ichan = dev_id;
1179 unsigned int chan_id = ichan->dma_chan.chan_id;
1180 struct scatterlist **sg, *sgnext, *sgnew = NULL;
1181 /* Next transfer descriptor */
1182 struct idmac_tx_desc *desc = NULL, *descnew;
1183 dma_async_tx_callback callback;
1184 void *callback_param;
1185 bool done = false;
1186 u32 ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY),
1187 ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY),
1188 curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
1189
1190 /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */
1191
1192 pr_debug("IDMAC irq %d\n", irq);
1193 /* Other interrupts do not interfere with this channel */
1194 spin_lock(&ichan->lock);
1195
1196 if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
1197 ((curbuf >> chan_id) & 1) == ichan->active_buffer)) {
1198 int i = 100;
1199
1200 /* This doesn't help. See comment in ipu_disable_channel() */
1201 while (--i) {
1202 curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
1203 if (((curbuf >> chan_id) & 1) != ichan->active_buffer)
1204 break;
1205 cpu_relax();
1206 }
1207
1208 if (!i) {
1209 spin_unlock(&ichan->lock);
1210 dev_dbg(ichan->dma_chan.device->dev,
1211 "IRQ on active buffer on channel %x, active "
1212 "%d, ready %x, %x, current %x!\n", chan_id,
1213 ichan->active_buffer, ready0, ready1, curbuf);
1214 return IRQ_NONE;
1215 }
1216 }
1217
1218 if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
1219 (!ichan->active_buffer && (ready0 >> chan_id) & 1)
1220 )) {
1221 spin_unlock(&ichan->lock);
1222 dev_dbg(ichan->dma_chan.device->dev,
1223 "IRQ with active buffer still ready on channel %x, "
1224 "active %d, ready %x, %x!\n", chan_id,
1225 ichan->active_buffer, ready0, ready1);
1226 return IRQ_NONE;
1227 }
1228
1229 if (unlikely(list_empty(&ichan->queue))) {
1230 spin_unlock(&ichan->lock);
1231 dev_err(ichan->dma_chan.device->dev,
1232 "IRQ without queued buffers on channel %x, active %d, "
1233 "ready %x, %x!\n", chan_id,
1234 ichan->active_buffer, ready0, ready1);
1235 return IRQ_NONE;
1236 }
1237
1238 /*
1239 * active_buffer is a software flag, it shows which buffer we are
1240 * currently expecting back from the hardware, IDMAC should be
1241 * processing the other buffer already
1242 */
1243 sg = &ichan->sg[ichan->active_buffer];
1244 sgnext = ichan->sg[!ichan->active_buffer];
1245
1246 /*
1247 * if sgnext == NULL sg must be the last element in a scatterlist and
1248 * queue must be empty
1249 */
1250 if (unlikely(!sgnext)) {
1251 if (unlikely(sg_next(*sg))) {
1252 dev_err(ichan->dma_chan.device->dev,
1253 "Broken buffer-update locking on channel %x!\n",
1254 chan_id);
1255 /* We'll let the user catch up */
1256 } else {
1257 /* Underrun */
1258 ipu_ic_disable_task(&ipu_data, chan_id);
1259 dev_dbg(ichan->dma_chan.device->dev,
1260 "Underrun on channel %x\n", chan_id);
1261 ichan->status = IPU_CHANNEL_READY;
1262 /* Continue to check for complete descriptor */
1263 }
1264 }
1265
1266 desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
1267
1268 /* First calculate and submit the next sg element */
1269 if (likely(sgnext))
1270 sgnew = sg_next(sgnext);
1271
1272 if (unlikely(!sgnew)) {
1273 /* Start a new scatterlist, if any queued */
1274 if (likely(desc->list.next != &ichan->queue)) {
1275 descnew = list_entry(desc->list.next,
1276 struct idmac_tx_desc, list);
1277 sgnew = &descnew->sg[0];
1278 }
1279 }
1280
1281 if (unlikely(!sg_next(*sg)) || !sgnext) {
1282 /*
1283 * Last element in scatterlist done, remove from the queue,
1284 * _init for debugging
1285 */
1286 list_del_init(&desc->list);
1287 done = true;
1288 }
1289
1290 *sg = sgnew;
1291
1292 if (likely(sgnew)) {
1293 int ret;
1294
1295 ret = ipu_update_channel_buffer(chan_id, ichan->active_buffer,
1296 sg_dma_address(*sg));
1297 if (ret < 0)
1298 dev_err(ichan->dma_chan.device->dev,
1299 "Failed to update buffer on channel %x buffer %d!\n",
1300 chan_id, ichan->active_buffer);
1301 else
1302 ipu_select_buffer(chan_id, ichan->active_buffer);
1303 }
1304
1305 /* Flip the active buffer - even if update above failed */
1306 ichan->active_buffer = !ichan->active_buffer;
1307 if (done)
1308 ichan->completed = desc->txd.cookie;
1309
1310 callback = desc->txd.callback;
1311 callback_param = desc->txd.callback_param;
1312
1313 spin_unlock(&ichan->lock);
1314
1315 if (done && (desc->txd.flags & DMA_PREP_INTERRUPT) && callback)
1316 callback(callback_param);
1317
1318 return IRQ_HANDLED;
1319}
1320
1321static void ipu_gc_tasklet(unsigned long arg)
1322{
1323 struct ipu *ipu = (struct ipu *)arg;
1324 int i;
1325
1326 for (i = 0; i < IPU_CHANNELS_NUM; i++) {
1327 struct idmac_channel *ichan = ipu->channel + i;
1328 struct idmac_tx_desc *desc;
1329 unsigned long flags;
1330 int j;
1331
1332 for (j = 0; j < ichan->n_tx_desc; j++) {
1333 desc = ichan->desc + j;
1334 spin_lock_irqsave(&ichan->lock, flags);
1335 if (async_tx_test_ack(&desc->txd)) {
1336 list_move(&desc->list, &ichan->free_list);
1337 async_tx_clear_ack(&desc->txd);
1338 }
1339 spin_unlock_irqrestore(&ichan->lock, flags);
1340 }
1341 }
1342}
1343
1344/*
1345 * At the time .device_alloc_chan_resources() method is called, we cannot know,
1346 * whether the client will accept the channel. Thus we must only check, if we
1347 * can satisfy client's request but the only real criterion to verify, whether
1348 * the client has accepted our offer is the client_count. That's why we have to
1349 * perform the rest of our allocation tasks on the first call to this function.
1350 */
1351static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
1352 struct scatterlist *sgl, unsigned int sg_len,
1353 enum dma_data_direction direction, unsigned long tx_flags)
1354{
1355 struct idmac_channel *ichan = to_idmac_chan(chan);
1356 struct idmac_tx_desc *desc = NULL;
1357 struct dma_async_tx_descriptor *txd = NULL;
1358 unsigned long flags;
1359
1360 /* We only can handle these three channels so far */
1361 if (ichan->dma_chan.chan_id != IDMAC_SDC_0 && ichan->dma_chan.chan_id != IDMAC_SDC_1 &&
1362 ichan->dma_chan.chan_id != IDMAC_IC_7)
1363 return NULL;
1364
1365 if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) {
1366 dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction);
1367 return NULL;
1368 }
1369
1370 mutex_lock(&ichan->chan_mutex);
1371
1372 spin_lock_irqsave(&ichan->lock, flags);
1373 if (!list_empty(&ichan->free_list)) {
1374 desc = list_entry(ichan->free_list.next,
1375 struct idmac_tx_desc, list);
1376
1377 list_del_init(&desc->list);
1378
1379 desc->sg_len = sg_len;
1380 desc->sg = sgl;
1381 txd = &desc->txd;
1382 txd->flags = tx_flags;
1383 }
1384 spin_unlock_irqrestore(&ichan->lock, flags);
1385
1386 mutex_unlock(&ichan->chan_mutex);
1387
1388 tasklet_schedule(&to_ipu(to_idmac(chan->device))->tasklet);
1389
1390 return txd;
1391}
1392
1393/* Re-select the current buffer and re-activate the channel */
1394static void idmac_issue_pending(struct dma_chan *chan)
1395{
1396 struct idmac_channel *ichan = to_idmac_chan(chan);
1397 struct idmac *idmac = to_idmac(chan->device);
1398 struct ipu *ipu = to_ipu(idmac);
1399 unsigned long flags;
1400
1401 /* This is not always needed, but doesn't hurt either */
1402 spin_lock_irqsave(&ipu->lock, flags);
1403 ipu_select_buffer(ichan->dma_chan.chan_id, ichan->active_buffer);
1404 spin_unlock_irqrestore(&ipu->lock, flags);
1405
1406 /*
1407 * Might need to perform some parts of initialisation from
1408 * ipu_enable_channel(), but not all, we do not want to reset to buffer
1409 * 0, don't need to set priority again either, but re-enabling the task
1410 * and the channel might be a good idea.
1411 */
1412}
1413
1414static void __idmac_terminate_all(struct dma_chan *chan)
1415{
1416 struct idmac_channel *ichan = to_idmac_chan(chan);
1417 struct idmac *idmac = to_idmac(chan->device);
1418 unsigned long flags;
1419 int i;
1420
1421 ipu_disable_channel(idmac, ichan,
1422 ichan->status >= IPU_CHANNEL_ENABLED);
1423
1424 tasklet_disable(&to_ipu(idmac)->tasklet);
1425
1426 /* ichan->queue is modified in ISR, have to spinlock */
1427 spin_lock_irqsave(&ichan->lock, flags);
1428 list_splice_init(&ichan->queue, &ichan->free_list);
1429
1430 if (ichan->desc)
1431 for (i = 0; i < ichan->n_tx_desc; i++) {
1432 struct idmac_tx_desc *desc = ichan->desc + i;
1433 if (list_empty(&desc->list))
1434 /* Descriptor was prepared, but not submitted */
1435 list_add(&desc->list,
1436 &ichan->free_list);
1437
1438 async_tx_clear_ack(&desc->txd);
1439 }
1440
1441 ichan->sg[0] = NULL;
1442 ichan->sg[1] = NULL;
1443 spin_unlock_irqrestore(&ichan->lock, flags);
1444
1445 tasklet_enable(&to_ipu(idmac)->tasklet);
1446
1447 ichan->status = IPU_CHANNEL_INITIALIZED;
1448}
1449
1450static void idmac_terminate_all(struct dma_chan *chan)
1451{
1452 struct idmac_channel *ichan = to_idmac_chan(chan);
1453
1454 mutex_lock(&ichan->chan_mutex);
1455
1456 __idmac_terminate_all(chan);
1457
1458 mutex_unlock(&ichan->chan_mutex);
1459}
1460
1461static int idmac_alloc_chan_resources(struct dma_chan *chan)
1462{
1463 struct idmac_channel *ichan = to_idmac_chan(chan);
1464 struct idmac *idmac = to_idmac(chan->device);
1465 int ret;
1466
1467 /* dmaengine.c now guarantees to only offer free channels */
1468 BUG_ON(chan->client_count > 1);
1469 WARN_ON(ichan->status != IPU_CHANNEL_FREE);
1470
1471 chan->cookie = 1;
1472 ichan->completed = -ENXIO;
1473
1474 ret = ipu_irq_map(ichan->dma_chan.chan_id);
1475 if (ret < 0)
1476 goto eimap;
1477
1478 ichan->eof_irq = ret;
1479 ret = request_irq(ichan->eof_irq, idmac_interrupt, 0,
1480 ichan->eof_name, ichan);
1481 if (ret < 0)
1482 goto erirq;
1483
1484 ret = ipu_init_channel(idmac, ichan);
1485 if (ret < 0)
1486 goto eichan;
1487
1488 ichan->status = IPU_CHANNEL_INITIALIZED;
1489
1490 dev_dbg(&ichan->dma_chan.dev->device, "Found channel 0x%x, irq %d\n",
1491 ichan->dma_chan.chan_id, ichan->eof_irq);
1492
1493 return ret;
1494
1495eichan:
1496 free_irq(ichan->eof_irq, ichan);
1497erirq:
1498 ipu_irq_unmap(ichan->dma_chan.chan_id);
1499eimap:
1500 return ret;
1501}
1502
1503static void idmac_free_chan_resources(struct dma_chan *chan)
1504{
1505 struct idmac_channel *ichan = to_idmac_chan(chan);
1506 struct idmac *idmac = to_idmac(chan->device);
1507
1508 mutex_lock(&ichan->chan_mutex);
1509
1510 __idmac_terminate_all(chan);
1511
1512 if (ichan->status > IPU_CHANNEL_FREE) {
1513 free_irq(ichan->eof_irq, ichan);
1514 ipu_irq_unmap(ichan->dma_chan.chan_id);
1515 }
1516
1517 ichan->status = IPU_CHANNEL_FREE;
1518
1519 ipu_uninit_channel(idmac, ichan);
1520
1521 mutex_unlock(&ichan->chan_mutex);
1522
1523 tasklet_schedule(&to_ipu(idmac)->tasklet);
1524}
1525
1526static enum dma_status idmac_is_tx_complete(struct dma_chan *chan,
1527 dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
1528{
1529 struct idmac_channel *ichan = to_idmac_chan(chan);
1530
1531 if (done)
1532 *done = ichan->completed;
1533 if (used)
1534 *used = chan->cookie;
1535 if (cookie != chan->cookie)
1536 return DMA_ERROR;
1537 return DMA_SUCCESS;
1538}
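/*
 * Editorial note, not part of this commit: a client would normally poll
 * completion through the generic dmaengine wrapper rather than calling the
 * method above directly, e.g.
 *
 *	dma_cookie_t done, used;
 *	enum dma_status status;
 *
 *	status = dma_async_is_tx_complete(chan, cookie, &done, &used);
 *	if (status == DMA_SUCCESS)
 *		;	/* the transfer identified by 'cookie' has finished */
 *
 * ('chan' and 'cookie' here are assumed to come from the client's own
 * prepare/submit step.)
 */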
1539
1540static int __init ipu_idmac_init(struct ipu *ipu)
1541{
1542 struct idmac *idmac = &ipu->idmac;
1543 struct dma_device *dma = &idmac->dma;
1544 int i;
1545
1546 dma_cap_set(DMA_SLAVE, dma->cap_mask);
1547 dma_cap_set(DMA_PRIVATE, dma->cap_mask);
1548
1549 /* Compulsory common fields */
1550 dma->dev = ipu->dev;
1551 dma->device_alloc_chan_resources = idmac_alloc_chan_resources;
1552 dma->device_free_chan_resources = idmac_free_chan_resources;
1553 dma->device_is_tx_complete = idmac_is_tx_complete;
1554 dma->device_issue_pending = idmac_issue_pending;
1555
1556 /* Compulsory for DMA_SLAVE fields */
1557 dma->device_prep_slave_sg = idmac_prep_slave_sg;
1558 dma->device_terminate_all = idmac_terminate_all;
1559
1560 INIT_LIST_HEAD(&dma->channels);
1561 for (i = 0; i < IPU_CHANNELS_NUM; i++) {
1562 struct idmac_channel *ichan = ipu->channel + i;
1563 struct dma_chan *dma_chan = &ichan->dma_chan;
1564
1565 spin_lock_init(&ichan->lock);
1566 mutex_init(&ichan->chan_mutex);
1567
1568 ichan->status = IPU_CHANNEL_FREE;
1569 ichan->sec_chan_en = false;
1570 ichan->completed = -ENXIO;
1571 snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i);
1572
1573 dma_chan->device = &idmac->dma;
1574 dma_chan->cookie = 1;
1575 dma_chan->chan_id = i;
1576 list_add_tail(&ichan->dma_chan.device_node, &dma->channels);
1577 }
1578
1579 idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF);
1580
1581 return dma_async_device_register(&idmac->dma);
1582}
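/*
 * Editorial sketch, not part of this commit: because DMA_PRIVATE is set above,
 * a hypothetical client would obtain one of these channels through
 * dma_request_channel() with a filter function. All example_* names are made
 * up; a real filter would normally also check that the channel belongs to the
 * IPU's dma_device before looking at chan_id.
 */
#if 0	/* illustration, not compiled */
static bool example_chan_filter(struct dma_chan *chan, void *arg)
{
	return chan->chan_id == (int)(unsigned long)arg;
}

static struct dma_chan *example_request_sdc0(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_PRIVATE, mask);

	return dma_request_channel(mask, example_chan_filter,
				   (void *)(unsigned long)IDMAC_SDC_0);
}
#endif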
1583
1584static void ipu_idmac_exit(struct ipu *ipu)
1585{
1586 int i;
1587 struct idmac *idmac = &ipu->idmac;
1588
1589 for (i = 0; i < IPU_CHANNELS_NUM; i++) {
1590 struct idmac_channel *ichan = ipu->channel + i;
1591
1592 idmac_terminate_all(&ichan->dma_chan);
1593 idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0);
1594 }
1595
1596 dma_async_device_unregister(&idmac->dma);
1597}
1598
1599/*****************************************************************************
1600 * IPU common probe / remove
1601 */
1602
1603static int ipu_probe(struct platform_device *pdev)
1604{
1605 struct ipu_platform_data *pdata = pdev->dev.platform_data;
1606 struct resource *mem_ipu, *mem_ic;
1607 int ret;
1608
1609 spin_lock_init(&ipu_data.lock);
1610
1611 mem_ipu = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1612 mem_ic = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1613 if (!pdata || !mem_ipu || !mem_ic)
1614 return -EINVAL;
1615
1616 ipu_data.dev = &pdev->dev;
1617
1618 platform_set_drvdata(pdev, &ipu_data);
1619
1620 ret = platform_get_irq(pdev, 0);
1621 if (ret < 0)
1622 goto err_noirq;
1623
1624 ipu_data.irq_fn = ret;
1625 ret = platform_get_irq(pdev, 1);
1626 if (ret < 0)
1627 goto err_noirq;
1628
1629 ipu_data.irq_err = ret;
1630 ipu_data.irq_base = pdata->irq_base;
1631
1632 dev_dbg(&pdev->dev, "fn irq %u, err irq %u, irq-base %u\n",
1633 ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base);
1634
1635 /* Remap IPU common registers */
1636 ipu_data.reg_ipu = ioremap(mem_ipu->start,
1637 mem_ipu->end - mem_ipu->start + 1);
1638 if (!ipu_data.reg_ipu) {
1639 ret = -ENOMEM;
1640 goto err_ioremap_ipu;
1641 }
1642
1643 /* Remap Image Converter and Image DMA Controller registers */
1644 ipu_data.reg_ic = ioremap(mem_ic->start,
1645 mem_ic->end - mem_ic->start + 1);
1646 if (!ipu_data.reg_ic) {
1647 ret = -ENOMEM;
1648 goto err_ioremap_ic;
1649 }
1650
1651 /* Get IPU clock */
1652 ipu_data.ipu_clk = clk_get(&pdev->dev, "ipu_clk");
1653 if (IS_ERR(ipu_data.ipu_clk)) {
1654 ret = PTR_ERR(ipu_data.ipu_clk);
1655 goto err_clk_get;
1656 }
1657
1658 /* Make sure IPU HSP clock is running */
1659 clk_enable(ipu_data.ipu_clk);
1660
1661 /* Disable all interrupts */
1662 idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1);
1663 idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_2);
1664 idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_3);
1665 idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_4);
1666 idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_5);
1667
1668 dev_dbg(&pdev->dev, "%s @ 0x%08lx, fn irq %u, err irq %u\n", pdev->name,
1669 (unsigned long)mem_ipu->start, ipu_data.irq_fn, ipu_data.irq_err);
1670
1671 ret = ipu_irq_attach_irq(&ipu_data, pdev);
1672 if (ret < 0)
1673 goto err_attach_irq;
1674
1675 /* Initialize DMA engine */
1676 ret = ipu_idmac_init(&ipu_data);
1677 if (ret < 0)
1678 goto err_idmac_init;
1679
1680 tasklet_init(&ipu_data.tasklet, ipu_gc_tasklet, (unsigned long)&ipu_data);
1681
1682 ipu_data.dev = &pdev->dev;
1683
1684 dev_dbg(ipu_data.dev, "IPU initialized\n");
1685
1686 return 0;
1687
1688err_idmac_init:
1689err_attach_irq:
1690 ipu_irq_detach_irq(&ipu_data, pdev);
1691 clk_disable(ipu_data.ipu_clk);
1692 clk_put(ipu_data.ipu_clk);
1693err_clk_get:
1694 iounmap(ipu_data.reg_ic);
1695err_ioremap_ic:
1696 iounmap(ipu_data.reg_ipu);
1697err_ioremap_ipu:
1698err_noirq:
1699 dev_err(&pdev->dev, "Failed to probe IPU: %d\n", ret);
1700 return ret;
1701}
1702
1703static int ipu_remove(struct platform_device *pdev)
1704{
1705 struct ipu *ipu = platform_get_drvdata(pdev);
1706
1707 ipu_idmac_exit(ipu);
1708 ipu_irq_detach_irq(ipu, pdev);
1709 clk_disable(ipu->ipu_clk);
1710 clk_put(ipu->ipu_clk);
1711 iounmap(ipu->reg_ic);
1712 iounmap(ipu->reg_ipu);
1713 tasklet_kill(&ipu->tasklet);
1714 platform_set_drvdata(pdev, NULL);
1715
1716 return 0;
1717}
1718
1719/*
1720 * We need two MEM resources - the IPU common and the Image Converter (incl.
1721 * PF_CONF and IDMAC_*) register blocks - and two IRQs: function and error.
1722 */
1723static struct platform_driver ipu_platform_driver = {
1724 .driver = {
1725 .name = "ipu-core",
1726 .owner = THIS_MODULE,
1727 },
1728 .remove = ipu_remove,
1729};
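/*
 * Editorial sketch, not part of this commit: what a board or SoC support file
 * might register for this driver, matching the two MEM + two IRQ resources
 * described above. All EXAMPLE_* addresses and IRQ numbers are placeholders,
 * not real i.MX3x values, and the example_* names are made up.
 */
#if 0	/* illustration, not compiled */
static struct resource example_ipu_resources[] = {
	{	/* IPU common registers */
		.start	= EXAMPLE_IPU_BASE,
		.end	= EXAMPLE_IPU_END,
		.flags	= IORESOURCE_MEM,
	}, {	/* Image Converter and IDMAC registers */
		.start	= EXAMPLE_IC_BASE,
		.end	= EXAMPLE_IC_END,
		.flags	= IORESOURCE_MEM,
	}, {	/* IPU function interrupt */
		.start	= EXAMPLE_IPU_FN_IRQ,
		.end	= EXAMPLE_IPU_FN_IRQ,
		.flags	= IORESOURCE_IRQ,
	}, {	/* IPU error interrupt */
		.start	= EXAMPLE_IPU_ERR_IRQ,
		.end	= EXAMPLE_IPU_ERR_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct ipu_platform_data example_ipu_pdata = {
	.irq_base	= EXAMPLE_IPU_IRQ_BASE,
};

static struct platform_device example_ipu_device = {
	.name		= "ipu-core",
	.id		= -1,
	.resource	= example_ipu_resources,
	.num_resources	= ARRAY_SIZE(example_ipu_resources),
	.dev		= {
		.platform_data	= &example_ipu_pdata,
	},
};
#endif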
1730
1731static int __init ipu_init(void)
1732{
1733 return platform_driver_probe(&ipu_platform_driver, ipu_probe);
1734}
1735subsys_initcall(ipu_init);
1736
1737MODULE_DESCRIPTION("IPU core driver");
1738MODULE_LICENSE("GPL v2");
1739MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
1740MODULE_ALIAS("platform:ipu-core");
diff --git a/drivers/dma/ipu/ipu_intern.h b/drivers/dma/ipu/ipu_intern.h
new file mode 100644
index 000000000000..545cf11a94ab
--- /dev/null
+++ b/drivers/dma/ipu/ipu_intern.h
@@ -0,0 +1,176 @@
1/*
2 * Copyright (C) 2008
3 * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
4 *
5 * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _IPU_INTERN_H_
13#define _IPU_INTERN_H_
14
15#include <linux/dmaengine.h>
16#include <linux/platform_device.h>
17#include <linux/interrupt.h>
18
19/* IPU Common registers */
20#define IPU_CONF 0x00
21#define IPU_CHA_BUF0_RDY 0x04
22#define IPU_CHA_BUF1_RDY 0x08
23#define IPU_CHA_DB_MODE_SEL 0x0C
24#define IPU_CHA_CUR_BUF 0x10
25#define IPU_FS_PROC_FLOW 0x14
26#define IPU_FS_DISP_FLOW 0x18
27#define IPU_TASKS_STAT 0x1C
28#define IPU_IMA_ADDR 0x20
29#define IPU_IMA_DATA 0x24
30#define IPU_INT_CTRL_1 0x28
31#define IPU_INT_CTRL_2 0x2C
32#define IPU_INT_CTRL_3 0x30
33#define IPU_INT_CTRL_4 0x34
34#define IPU_INT_CTRL_5 0x38
35#define IPU_INT_STAT_1 0x3C
36#define IPU_INT_STAT_2 0x40
37#define IPU_INT_STAT_3 0x44
38#define IPU_INT_STAT_4 0x48
39#define IPU_INT_STAT_5 0x4C
40#define IPU_BRK_CTRL_1 0x50
41#define IPU_BRK_CTRL_2 0x54
42#define IPU_BRK_STAT 0x58
43#define IPU_DIAGB_CTRL 0x5C
44
45/* IPU_CONF Register bits */
46#define IPU_CONF_CSI_EN 0x00000001
47#define IPU_CONF_IC_EN 0x00000002
48#define IPU_CONF_ROT_EN 0x00000004
49#define IPU_CONF_PF_EN 0x00000008
50#define IPU_CONF_SDC_EN 0x00000010
51#define IPU_CONF_ADC_EN 0x00000020
52#define IPU_CONF_DI_EN 0x00000040
53#define IPU_CONF_DU_EN 0x00000080
54#define IPU_CONF_PXL_ENDIAN 0x00000100
55
56/* Image Converter Registers */
57#define IC_CONF 0x88
58#define IC_PRP_ENC_RSC 0x8C
59#define IC_PRP_VF_RSC 0x90
60#define IC_PP_RSC 0x94
61#define IC_CMBP_1 0x98
62#define IC_CMBP_2 0x9C
63#define PF_CONF 0xA0
64#define IDMAC_CONF 0xA4
65#define IDMAC_CHA_EN 0xA8
66#define IDMAC_CHA_PRI 0xAC
67#define IDMAC_CHA_BUSY 0xB0
68
69/* Image Converter Register bits */
70#define IC_CONF_PRPENC_EN 0x00000001
71#define IC_CONF_PRPENC_CSC1 0x00000002
72#define IC_CONF_PRPENC_ROT_EN 0x00000004
73#define IC_CONF_PRPVF_EN 0x00000100
74#define IC_CONF_PRPVF_CSC1 0x00000200
75#define IC_CONF_PRPVF_CSC2 0x00000400
76#define IC_CONF_PRPVF_CMB 0x00000800
77#define IC_CONF_PRPVF_ROT_EN 0x00001000
78#define IC_CONF_PP_EN 0x00010000
79#define IC_CONF_PP_CSC1 0x00020000
80#define IC_CONF_PP_CSC2 0x00040000
81#define IC_CONF_PP_CMB 0x00080000
82#define IC_CONF_PP_ROT_EN 0x00100000
83#define IC_CONF_IC_GLB_LOC_A 0x10000000
84#define IC_CONF_KEY_COLOR_EN 0x20000000
85#define IC_CONF_RWS_EN 0x40000000
86#define IC_CONF_CSI_MEM_WR_EN 0x80000000
87
88#define IDMA_CHAN_INVALID 0x000000FF
89#define IDMA_IC_0 0x00000001
90#define IDMA_IC_1 0x00000002
91#define IDMA_IC_2 0x00000004
92#define IDMA_IC_3 0x00000008
93#define IDMA_IC_4 0x00000010
94#define IDMA_IC_5 0x00000020
95#define IDMA_IC_6 0x00000040
96#define IDMA_IC_7 0x00000080
97#define IDMA_IC_8 0x00000100
98#define IDMA_IC_9 0x00000200
99#define IDMA_IC_10 0x00000400
100#define IDMA_IC_11 0x00000800
101#define IDMA_IC_12 0x00001000
102#define IDMA_IC_13 0x00002000
103#define IDMA_SDC_BG 0x00004000
104#define IDMA_SDC_FG 0x00008000
105#define IDMA_SDC_MASK 0x00010000
106#define IDMA_SDC_PARTIAL 0x00020000
107#define IDMA_ADC_SYS1_WR 0x00040000
108#define IDMA_ADC_SYS2_WR 0x00080000
109#define IDMA_ADC_SYS1_CMD 0x00100000
110#define IDMA_ADC_SYS2_CMD 0x00200000
111#define IDMA_ADC_SYS1_RD 0x00400000
112#define IDMA_ADC_SYS2_RD 0x00800000
113#define IDMA_PF_QP 0x01000000
114#define IDMA_PF_BSP 0x02000000
115#define IDMA_PF_Y_IN 0x04000000
116#define IDMA_PF_U_IN 0x08000000
117#define IDMA_PF_V_IN 0x10000000
118#define IDMA_PF_Y_OUT 0x20000000
119#define IDMA_PF_U_OUT 0x40000000
120#define IDMA_PF_V_OUT 0x80000000
121
122#define TSTAT_PF_H264_PAUSE 0x00000001
123#define TSTAT_CSI2MEM_MASK 0x0000000C
124#define TSTAT_CSI2MEM_OFFSET 2
125#define TSTAT_VF_MASK 0x00000600
126#define TSTAT_VF_OFFSET 9
127#define TSTAT_VF_ROT_MASK 0x000C0000
128#define TSTAT_VF_ROT_OFFSET 18
129#define TSTAT_ENC_MASK 0x00000180
130#define TSTAT_ENC_OFFSET 7
131#define TSTAT_ENC_ROT_MASK 0x00030000
132#define TSTAT_ENC_ROT_OFFSET 16
133#define TSTAT_PP_MASK 0x00001800
134#define TSTAT_PP_OFFSET 11
135#define TSTAT_PP_ROT_MASK 0x00300000
136#define TSTAT_PP_ROT_OFFSET 20
137#define TSTAT_PF_MASK 0x00C00000
138#define TSTAT_PF_OFFSET 22
139#define TSTAT_ADCSYS1_MASK 0x03000000
140#define TSTAT_ADCSYS1_OFFSET 24
141#define TSTAT_ADCSYS2_MASK 0x0C000000
142#define TSTAT_ADCSYS2_OFFSET 26
143
144#define TASK_STAT_IDLE 0
145#define TASK_STAT_ACTIVE 1
146#define TASK_STAT_WAIT4READY 2
147
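/*
 * Editorial note, not part of this commit: the TSTAT_*_MASK / TSTAT_*_OFFSET
 * pairs above extract per-task state fields from IPU_TASKS_STAT, e.g.
 * (assuming a suitable register read helper):
 *
 *	u32 tstat = <read of IPU_TASKS_STAT>;
 *	int pp_state = (tstat & TSTAT_PP_MASK) >> TSTAT_PP_OFFSET;
 *
 * where pp_state is then one of TASK_STAT_IDLE, TASK_STAT_ACTIVE or
 * TASK_STAT_WAIT4READY for the post-processing task.
 */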
148struct idmac {
149 struct dma_device dma;
150};
151
152struct ipu {
153 void __iomem *reg_ipu;
154 void __iomem *reg_ic;
155 unsigned int irq_fn; /* IPU Function IRQ to the CPU */
156 unsigned int irq_err; /* IPU Error IRQ to the CPU */
157 unsigned int irq_base; /* Beginning of the IPU IRQ range */
158 unsigned long channel_init_mask;
159 spinlock_t lock;
160 struct clk *ipu_clk;
161 struct device *dev;
162 struct idmac idmac;
163 struct idmac_channel channel[IPU_CHANNELS_NUM];
164 struct tasklet_struct tasklet;
165};
166
167#define to_idmac(d) container_of(d, struct idmac, dma)
168
169extern int ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev);
170extern void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev);
171
172extern bool ipu_irq_status(unsigned int irq);
173extern int ipu_irq_map(unsigned int source);
174extern int ipu_irq_unmap(unsigned int source);
175
176#endif
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
new file mode 100644
index 000000000000..83f532cc767f
--- /dev/null
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -0,0 +1,413 @@
1/*
2 * Copyright (C) 2008
3 * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/init.h>
11#include <linux/err.h>
12#include <linux/spinlock.h>
13#include <linux/delay.h>
14#include <linux/clk.h>
15#include <linux/irq.h>
16#include <linux/io.h>
17
18#include <mach/ipu.h>
19
20#include "ipu_intern.h"
21
22/*
23 * Register read / write - shall be inlined by the compiler
24 */
25static u32 ipu_read_reg(struct ipu *ipu, unsigned long reg)
26{
27 return __raw_readl(ipu->reg_ipu + reg);
28}
29
30static void ipu_write_reg(struct ipu *ipu, u32 value, unsigned long reg)
31{
32 __raw_writel(value, ipu->reg_ipu + reg);
33}
34
35
36/*
37 * IPU IRQ chip driver
38 */
39
40#define IPU_IRQ_NR_FN_BANKS 3
41#define IPU_IRQ_NR_ERR_BANKS 2
42#define IPU_IRQ_NR_BANKS (IPU_IRQ_NR_FN_BANKS + IPU_IRQ_NR_ERR_BANKS)
43
44struct ipu_irq_bank {
45 unsigned int control;
46 unsigned int status;
47 spinlock_t lock;
48 struct ipu *ipu;
49};
50
51static struct ipu_irq_bank irq_bank[IPU_IRQ_NR_BANKS] = {
52 /* 3 groups of functional interrupts */
53 {
54 .control = IPU_INT_CTRL_1,
55 .status = IPU_INT_STAT_1,
56 }, {
57 .control = IPU_INT_CTRL_2,
58 .status = IPU_INT_STAT_2,
59 }, {
60 .control = IPU_INT_CTRL_3,
61 .status = IPU_INT_STAT_3,
62 },
63 /* 2 groups of error interrupts */
64 {
65 .control = IPU_INT_CTRL_4,
66 .status = IPU_INT_STAT_4,
67 }, {
68 .control = IPU_INT_CTRL_5,
69 .status = IPU_INT_STAT_5,
70 },
71};
72
73struct ipu_irq_map {
74 unsigned int irq;
75 int source;
76 struct ipu_irq_bank *bank;
77 struct ipu *ipu;
78};
79
80static struct ipu_irq_map irq_map[CONFIG_MX3_IPU_IRQS];
81/* Protects allocations from the above array of maps */
82static DEFINE_MUTEX(map_lock);
83/* Protects register accesses and individual mappings */
84static DEFINE_SPINLOCK(bank_lock);
85
86static struct ipu_irq_map *src2map(unsigned int src)
87{
88 int i;
89
90 for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++)
91 if (irq_map[i].source == src)
92 return irq_map + i;
93
94 return NULL;
95}
96
97static void ipu_irq_unmask(unsigned int irq)
98{
99 struct ipu_irq_map *map = get_irq_chip_data(irq);
100 struct ipu_irq_bank *bank;
101 uint32_t reg;
102 unsigned long lock_flags;
103
104 spin_lock_irqsave(&bank_lock, lock_flags);
105
106 bank = map->bank;
107 if (!bank) {
108 spin_unlock_irqrestore(&bank_lock, lock_flags);
109 pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq);
110 return;
111 }
112
113 reg = ipu_read_reg(bank->ipu, bank->control);
114 reg |= (1UL << (map->source & 31));
115 ipu_write_reg(bank->ipu, reg, bank->control);
116
117 spin_unlock_irqrestore(&bank_lock, lock_flags);
118}
119
120static void ipu_irq_mask(unsigned int irq)
121{
122 struct ipu_irq_map *map = get_irq_chip_data(irq);
123 struct ipu_irq_bank *bank;
124 uint32_t reg;
125 unsigned long lock_flags;
126
127 spin_lock_irqsave(&bank_lock, lock_flags);
128
129 bank = map->bank;
130 if (!bank) {
131 spin_unlock_irqrestore(&bank_lock, lock_flags);
132 pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq);
133 return;
134 }
135
136 reg = ipu_read_reg(bank->ipu, bank->control);
137 reg &= ~(1UL << (map->source & 31));
138 ipu_write_reg(bank->ipu, reg, bank->control);
139
140 spin_unlock_irqrestore(&bank_lock, lock_flags);
141}
142
143static void ipu_irq_ack(unsigned int irq)
144{
145 struct ipu_irq_map *map = get_irq_chip_data(irq);
146 struct ipu_irq_bank *bank;
147 unsigned long lock_flags;
148
149 spin_lock_irqsave(&bank_lock, lock_flags);
150
151 bank = map->bank;
152 if (!bank) {
153 spin_unlock_irqrestore(&bank_lock, lock_flags);
154 pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq);
155 return;
156 }
157
158 ipu_write_reg(bank->ipu, 1UL << (map->source & 31), bank->status);
159 spin_unlock_irqrestore(&bank_lock, lock_flags);
160}
161
162/**
163 * ipu_irq_status() - returns the current interrupt status of the specified IRQ.
164 * @irq: interrupt line to get status for.
165 * @return: true if the interrupt is pending/asserted or false if the
166 * interrupt is not pending.
167 */
168bool ipu_irq_status(unsigned int irq)
169{
170 struct ipu_irq_map *map = get_irq_chip_data(irq);
171 struct ipu_irq_bank *bank;
172 unsigned long lock_flags;
173 bool ret;
174
175 spin_lock_irqsave(&bank_lock, lock_flags);
176 bank = map->bank;
177 ret = bank && ipu_read_reg(bank->ipu, bank->status) &
178 (1UL << (map->source & 31));
179 spin_unlock_irqrestore(&bank_lock, lock_flags);
180
181 return ret;
182}
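/*
 * Editorial example, not part of this commit: a channel user holding a mapped
 * IRQ (a hypothetical 'eof_irq') could poll it with
 * "if (ipu_irq_status(eof_irq)) ..." to check for a pending interrupt without
 * waiting for its handler to run.
 */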
183
184/**
185 * ipu_irq_map() - map an IPU interrupt source to an IRQ number
186 * @source:	interrupt source bit position (see below)
187 * @return:	mapped IRQ number or negative error code
188 *
189 * The source parameter needs further explanation. On i.MX31 the IPU has 137
190 * IRQ sources, spread over 5 32-bit registers holding 32, 32, 24, 32 and 17
191 * bits respectively. However, the source argument of this function is not the
192 * sequence number of the IRQ, but rather its bit position. So, the first
193 * interrupt in the fourth register has source number 96, and not 88. This
194 * makes calculations easier, and also provides forward compatibility with any
195 * future IPU implementations with different interrupt bit assignments.
196 */
197int ipu_irq_map(unsigned int source)
198{
199 int i, ret = -ENOMEM;
200 struct ipu_irq_map *map;
201
202 might_sleep();
203
204 mutex_lock(&map_lock);
205 map = src2map(source);
206 if (map) {
207 pr_err("IPU: Source %u already mapped to IRQ %u\n", source, map->irq);
208 ret = -EBUSY;
209 goto out;
210 }
211
212 for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) {
213 if (irq_map[i].source < 0) {
214 unsigned long lock_flags;
215
216 spin_lock_irqsave(&bank_lock, lock_flags);
217 irq_map[i].source = source;
218 irq_map[i].bank = irq_bank + source / 32;
219 spin_unlock_irqrestore(&bank_lock, lock_flags);
220
221 ret = irq_map[i].irq;
222 pr_debug("IPU: mapped source %u to IRQ %u\n",
223 source, ret);
224 break;
225 }
226 }
227out:
228 mutex_unlock(&map_lock);
229
230 if (ret < 0)
231 pr_err("IPU: couldn't map source %u: %d\n", source, ret);
232
233 return ret;
234}
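/*
 * Editorial example, not part of this commit: with the bit-position numbering
 * described above, a source in bank 'b' at bit 'n' has source number
 * b * 32 + n, so bit 0 of the fourth (first error) register is source
 * 3 * 32 + 0 = 96, and ipu_irq_map() finds its bank simply as
 * irq_bank + source / 32.
 */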
235
236/**
237 * ipu_irq_unmap() - unmap an IPU interrupt source
238 * @source:	interrupt source bit position (see ipu_irq_map())
239 * @return:	0 or negative error code
240 */
241int ipu_irq_unmap(unsigned int source)
242{
243 int i, ret = -EINVAL;
244
245 might_sleep();
246
247 mutex_lock(&map_lock);
248 for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) {
249 if (irq_map[i].source == source) {
250 unsigned long lock_flags;
251
252 pr_debug("IPU: unmapped source %u from IRQ %u\n",
253 source, irq_map[i].irq);
254
255 spin_lock_irqsave(&bank_lock, lock_flags);
256 irq_map[i].source = -EINVAL;
257 irq_map[i].bank = NULL;
258 spin_unlock_irqrestore(&bank_lock, lock_flags);
259
260 ret = 0;
261 break;
262 }
263 }
264 mutex_unlock(&map_lock);
265
266 return ret;
267}
268
269/* Chained IRQ handler for IPU error interrupt */
270static void ipu_irq_err(unsigned int irq, struct irq_desc *desc)
271{
272 struct ipu *ipu = get_irq_data(irq);
273 u32 status;
274 int i, line;
275
276 for (i = IPU_IRQ_NR_FN_BANKS; i < IPU_IRQ_NR_BANKS; i++) {
277 struct ipu_irq_bank *bank = irq_bank + i;
278
279 spin_lock(&bank_lock);
280 status = ipu_read_reg(ipu, bank->status);
281		/*
282		 * We probably do not have to clear all interrupts here - they
283		 * will be acked by ->handle_irq() (handle_level_irq). However,
284		 * we might want to clear unhandled interrupts after the loop...
285		 */
286 status &= ipu_read_reg(ipu, bank->control);
287 spin_unlock(&bank_lock);
288 while ((line = ffs(status))) {
289 struct ipu_irq_map *map;
290
291 line--;
292 status &= ~(1UL << line);
293
294 spin_lock(&bank_lock);
295 map = src2map(32 * i + line);
296 if (map)
297 irq = map->irq;
298 spin_unlock(&bank_lock);
299
300 if (!map) {
301 pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
302 line, i);
303 continue;
304 }
305 generic_handle_irq(irq);
306 }
307 }
308}
309
310/* Chained IRQ handler for IPU function interrupt */
311static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc)
312{
313 struct ipu *ipu = get_irq_data(irq);
314 u32 status;
315 int i, line;
316
317 for (i = 0; i < IPU_IRQ_NR_FN_BANKS; i++) {
318 struct ipu_irq_bank *bank = irq_bank + i;
319
320 spin_lock(&bank_lock);
321 status = ipu_read_reg(ipu, bank->status);
322 /* Not clearing all interrupts, see above */
323 status &= ipu_read_reg(ipu, bank->control);
324 spin_unlock(&bank_lock);
325 while ((line = ffs(status))) {
326 struct ipu_irq_map *map;
327
328 line--;
329 status &= ~(1UL << line);
330
331 spin_lock(&bank_lock);
332 map = src2map(32 * i + line);
333 if (map)
334 irq = map->irq;
335 spin_unlock(&bank_lock);
336
337 if (!map) {
338 pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
339 line, i);
340 continue;
341 }
342 generic_handle_irq(irq);
343 }
344 }
345}
346
347static struct irq_chip ipu_irq_chip = {
348 .name = "ipu_irq",
349 .ack = ipu_irq_ack,
350 .mask = ipu_irq_mask,
351 .unmask = ipu_irq_unmask,
352};
353
354/* Install the IRQ handler */
355int ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev)
356{
357 struct ipu_platform_data *pdata = dev->dev.platform_data;
358 unsigned int irq, irq_base, i;
359
360 irq_base = pdata->irq_base;
361
362 for (i = 0; i < IPU_IRQ_NR_BANKS; i++)
363 irq_bank[i].ipu = ipu;
364
365 for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) {
366 int ret;
367
368 irq = irq_base + i;
369 ret = set_irq_chip(irq, &ipu_irq_chip);
370 if (ret < 0)
371 return ret;
372 ret = set_irq_chip_data(irq, irq_map + i);
373 if (ret < 0)
374 return ret;
375 irq_map[i].ipu = ipu;
376 irq_map[i].irq = irq;
377 irq_map[i].source = -EINVAL;
378 set_irq_handler(irq, handle_level_irq);
379#ifdef CONFIG_ARM
380 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
381#endif
382 }
383
384 set_irq_data(ipu->irq_fn, ipu);
385 set_irq_chained_handler(ipu->irq_fn, ipu_irq_fn);
386
387 set_irq_data(ipu->irq_err, ipu);
388 set_irq_chained_handler(ipu->irq_err, ipu_irq_err);
389
390 return 0;
391}
392
393void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev)
394{
395 struct ipu_platform_data *pdata = dev->dev.platform_data;
396 unsigned int irq, irq_base;
397
398 irq_base = pdata->irq_base;
399
400 set_irq_chained_handler(ipu->irq_fn, NULL);
401 set_irq_data(ipu->irq_fn, NULL);
402
403 set_irq_chained_handler(ipu->irq_err, NULL);
404 set_irq_data(ipu->irq_err, NULL);
405
406 for (irq = irq_base; irq < irq_base + CONFIG_MX3_IPU_IRQS; irq++) {
407#ifdef CONFIG_ARM
408 set_irq_flags(irq, 0);
409#endif
410 set_irq_chip(irq, NULL);
411 set_irq_chip_data(irq, NULL);
412 }
413}
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 3d33b8252b58..14796594e5d9 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -33,10 +33,11 @@
33 33
34#include "drmP.h" 34#include "drmP.h"
35#include <linux/module.h> 35#include <linux/module.h>
36#include <asm/agp.h>
37 36
38#if __OS_HAS_AGP 37#if __OS_HAS_AGP
39 38
39#include <asm/agp.h>
40
40/** 41/**
41 * Get AGP information. 42 * Get AGP information.
42 * 43 *
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 5b2cbb778162..bfce0992fefb 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -194,7 +194,6 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
194 * @type: object type 194 * @type: object type
195 * 195 *
196 * LOCKING: 196 * LOCKING:
197 * Caller must hold DRM mode_config lock.
198 * 197 *
199 * Create a unique identifier based on @ptr in @dev's identifier space. Used 198 * Create a unique identifier based on @ptr in @dev's identifier space. Used
200 * for tracking modes, CRTCs and connectors. 199 * for tracking modes, CRTCs and connectors.
@@ -209,15 +208,15 @@ static int drm_mode_object_get(struct drm_device *dev,
209 int new_id = 0; 208 int new_id = 0;
210 int ret; 209 int ret;
211 210
212 WARN(!mutex_is_locked(&dev->mode_config.mutex),
213 "%s called w/o mode_config lock\n", __func__);
214again: 211again:
215 if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) { 212 if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
216 DRM_ERROR("Ran out memory getting a mode number\n"); 213 DRM_ERROR("Ran out memory getting a mode number\n");
217 return -EINVAL; 214 return -EINVAL;
218 } 215 }
219 216
217 mutex_lock(&dev->mode_config.idr_mutex);
220 ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id); 218 ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
219 mutex_unlock(&dev->mode_config.idr_mutex);
221 if (ret == -EAGAIN) 220 if (ret == -EAGAIN)
222 goto again; 221 goto again;
223 222
@@ -239,16 +238,20 @@ again:
239static void drm_mode_object_put(struct drm_device *dev, 238static void drm_mode_object_put(struct drm_device *dev,
240 struct drm_mode_object *object) 239 struct drm_mode_object *object)
241{ 240{
241 mutex_lock(&dev->mode_config.idr_mutex);
242 idr_remove(&dev->mode_config.crtc_idr, object->id); 242 idr_remove(&dev->mode_config.crtc_idr, object->id);
243 mutex_unlock(&dev->mode_config.idr_mutex);
243} 244}
244 245
245void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type) 246void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type)
246{ 247{
247 struct drm_mode_object *obj; 248 struct drm_mode_object *obj = NULL;
248 249
250 mutex_lock(&dev->mode_config.idr_mutex);
249 obj = idr_find(&dev->mode_config.crtc_idr, id); 251 obj = idr_find(&dev->mode_config.crtc_idr, id);
250 if (!obj || (obj->type != type) || (obj->id != id)) 252 if (!obj || (obj->type != type) || (obj->id != id))
251 return NULL; 253 obj = NULL;
254 mutex_unlock(&dev->mode_config.idr_mutex);
252 255
253 return obj; 256 return obj;
254} 257}
@@ -786,6 +789,7 @@ EXPORT_SYMBOL(drm_mode_create_dithering_property);
786void drm_mode_config_init(struct drm_device *dev) 789void drm_mode_config_init(struct drm_device *dev)
787{ 790{
788 mutex_init(&dev->mode_config.mutex); 791 mutex_init(&dev->mode_config.mutex);
792 mutex_init(&dev->mode_config.idr_mutex);
789 INIT_LIST_HEAD(&dev->mode_config.fb_list); 793 INIT_LIST_HEAD(&dev->mode_config.fb_list);
790 INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list); 794 INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
791 INIT_LIST_HEAD(&dev->mode_config.crtc_list); 795 INIT_LIST_HEAD(&dev->mode_config.crtc_list);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 5ff88d952226..14c7a23dc157 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -294,6 +294,7 @@ EXPORT_SYMBOL(drm_init);
294 */ 294 */
295static void drm_cleanup(struct drm_device * dev) 295static void drm_cleanup(struct drm_device * dev)
296{ 296{
297 struct drm_map_list *r_list, *list_temp;
297 DRM_DEBUG("\n"); 298 DRM_DEBUG("\n");
298 299
299 if (!dev) { 300 if (!dev) {
@@ -325,6 +326,9 @@ static void drm_cleanup(struct drm_device * dev)
325 drm_ht_remove(&dev->map_hash); 326 drm_ht_remove(&dev->map_hash);
326 drm_ctxbitmap_cleanup(dev); 327 drm_ctxbitmap_cleanup(dev);
327 328
329 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
330 drm_rmmap(dev, r_list->map);
331
328 if (drm_core_check_feature(dev, DRIVER_MODESET)) 332 if (drm_core_check_feature(dev, DRIVER_MODESET))
329 drm_put_minor(&dev->control); 333 drm_put_minor(&dev->control);
330 334
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 0fbb0da342cb..5a4d3244758a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -660,7 +660,7 @@ struct edid *drm_get_edid(struct drm_connector *connector,
660 660
661 edid = (struct edid *)drm_ddc_read(adapter); 661 edid = (struct edid *)drm_ddc_read(adapter);
662 if (!edid) { 662 if (!edid) {
663 dev_warn(&connector->dev->pdev->dev, "%s: no EDID data\n", 663 dev_info(&connector->dev->pdev->dev, "%s: no EDID data\n",
664 drm_get_connector_name(connector)); 664 drm_get_connector_name(connector));
665 return NULL; 665 return NULL;
666 } 666 }
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 5ca132afa4f2..46bb923b097c 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -118,12 +118,20 @@ static void drm_master_destroy(struct kref *kref)
118 struct drm_master *master = container_of(kref, struct drm_master, refcount); 118 struct drm_master *master = container_of(kref, struct drm_master, refcount);
119 struct drm_magic_entry *pt, *next; 119 struct drm_magic_entry *pt, *next;
120 struct drm_device *dev = master->minor->dev; 120 struct drm_device *dev = master->minor->dev;
121 struct drm_map_list *r_list, *list_temp;
121 122
122 list_del(&master->head); 123 list_del(&master->head);
123 124
124 if (dev->driver->master_destroy) 125 if (dev->driver->master_destroy)
125 dev->driver->master_destroy(dev, master); 126 dev->driver->master_destroy(dev, master);
126 127
128 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
129 if (r_list->master == master) {
130 drm_rmmap_locked(dev, r_list->map);
131 r_list = NULL;
132 }
133 }
134
127 if (master->unique) { 135 if (master->unique) {
128 drm_free(master->unique, master->unique_size, DRM_MEM_DRIVER); 136 drm_free(master->unique, master->unique_size, DRM_MEM_DRIVER);
129 master->unique = NULL; 137 master->unique = NULL;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index bbadf1c04142..ee64b7301f67 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -944,13 +944,14 @@ static int i915_load_modeset_init(struct drm_device *dev)
944 dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) & 944 dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
945 0xff000000; 945 0xff000000;
946 946
947 DRM_DEBUG("*** fb base 0x%08lx\n", dev->mode_config.fb_base); 947 if (IS_MOBILE(dev) || IS_I9XX(dev))
948
949 if (IS_MOBILE(dev) || (IS_I9XX(dev) && !IS_I965G(dev) && !IS_G33(dev)))
950 dev_priv->cursor_needs_physical = true; 948 dev_priv->cursor_needs_physical = true;
951 else 949 else
952 dev_priv->cursor_needs_physical = false; 950 dev_priv->cursor_needs_physical = false;
953 951
952 if (IS_I965G(dev) || IS_G33(dev))
953 dev_priv->cursor_needs_physical = false;
954
954 ret = i915_probe_agp(dev, &agp_size, &prealloc_size); 955 ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
955 if (ret) 956 if (ret)
956 goto kfree_devname; 957 goto kfree_devname;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 96316fd47233..debad5c04cc0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3364,7 +3364,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev)
3364{ 3364{
3365 int i; 3365 int i;
3366 3366
3367 for (i = 0; i < I915_MAX_PHYS_OBJECT; i++) 3367 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
3368 i915_gem_free_phys_object(dev, i); 3368 i915_gem_free_phys_object(dev, i);
3369} 3369}
3370 3370
@@ -3427,7 +3427,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
3427 ret = i915_gem_init_phys_object(dev, id, 3427 ret = i915_gem_init_phys_object(dev, id,
3428 obj->size); 3428 obj->size);
3429 if (ret) { 3429 if (ret) {
3430 DRM_ERROR("failed to init phys object %d size: %d\n", id, obj->size); 3430 DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
3431 goto out; 3431 goto out;
3432 } 3432 }
3433 } 3433 }
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index a5a2f5339e9e..5ee9d4c25753 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -137,10 +137,6 @@ struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
137 chan->reg = reg; 137 chan->reg = reg;
138 snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name); 138 snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
139 chan->adapter.owner = THIS_MODULE; 139 chan->adapter.owner = THIS_MODULE;
140#ifndef I2C_HW_B_INTELFB
141#define I2C_HW_B_INTELFB I2C_HW_B_I810
142#endif
143 chan->adapter.id = I2C_HW_B_INTELFB;
144 chan->adapter.algo_data = &chan->algo; 140 chan->adapter.algo_data = &chan->algo;
145 chan->adapter.dev.parent = &dev->pdev->dev; 141 chan->adapter.dev.parent = &dev->pdev->dev;
146 chan->algo.setsda = set_data; 142 chan->algo.setsda = set_data;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 2fafdcc108fe..6b1148fc2cbe 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -340,6 +340,18 @@ static void intel_lvds_destroy(struct drm_connector *connector)
340 kfree(connector); 340 kfree(connector);
341} 341}
342 342
343static int intel_lvds_set_property(struct drm_connector *connector,
344 struct drm_property *property,
345 uint64_t value)
346{
347 struct drm_device *dev = connector->dev;
348
349 if (property == dev->mode_config.dpms_property && connector->encoder)
350 intel_lvds_dpms(connector->encoder, (uint32_t)(value & 0xf));
351
352 return 0;
353}
354
343static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { 355static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
344 .dpms = intel_lvds_dpms, 356 .dpms = intel_lvds_dpms,
345 .mode_fixup = intel_lvds_mode_fixup, 357 .mode_fixup = intel_lvds_mode_fixup,
@@ -359,6 +371,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
359 .restore = intel_lvds_restore, 371 .restore = intel_lvds_restore,
360 .detect = intel_lvds_detect, 372 .detect = intel_lvds_detect,
361 .fill_modes = drm_helper_probe_single_connector_modes, 373 .fill_modes = drm_helper_probe_single_connector_modes,
374 .set_property = intel_lvds_set_property,
362 .destroy = intel_lvds_destroy, 375 .destroy = intel_lvds_destroy,
363}; 376};
364 377
diff --git a/drivers/i2c/busses/i2c-acorn.c b/drivers/i2c/busses/i2c-acorn.c
index 75089febbc13..9fee3ca17344 100644
--- a/drivers/i2c/busses/i2c-acorn.c
+++ b/drivers/i2c/busses/i2c-acorn.c
@@ -83,7 +83,6 @@ static struct i2c_algo_bit_data ioc_data = {
83}; 83};
84 84
85static struct i2c_adapter ioc_ops = { 85static struct i2c_adapter ioc_ops = {
86 .id = I2C_HW_B_IOC,
87 .algo_data = &ioc_data, 86 .algo_data = &ioc_data,
88}; 87};
89 88
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 9cead9b9458e..981e080b32ae 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -476,7 +476,6 @@ static const struct i2c_algorithm smbus_algorithm = {
476 476
477static struct i2c_adapter ali1535_adapter = { 477static struct i2c_adapter ali1535_adapter = {
478 .owner = THIS_MODULE, 478 .owner = THIS_MODULE,
479 .id = I2C_HW_SMBUS_ALI1535,
480 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 479 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
481 .algo = &smbus_algorithm, 480 .algo = &smbus_algorithm,
482}; 481};
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index dd9e796fad69..f70f46582c6c 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -386,7 +386,6 @@ static const struct i2c_algorithm ali1563_algorithm = {
386 386
387static struct i2c_adapter ali1563_adapter = { 387static struct i2c_adapter ali1563_adapter = {
388 .owner = THIS_MODULE, 388 .owner = THIS_MODULE,
389 .id = I2C_HW_SMBUS_ALI1563,
390 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 389 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
391 .algo = &ali1563_algorithm, 390 .algo = &ali1563_algorithm,
392}; 391};
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 234fdde7d40e..39066dee46e3 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -473,7 +473,6 @@ static const struct i2c_algorithm smbus_algorithm = {
473 473
474static struct i2c_adapter ali15x3_adapter = { 474static struct i2c_adapter ali15x3_adapter = {
475 .owner = THIS_MODULE, 475 .owner = THIS_MODULE,
476 .id = I2C_HW_SMBUS_ALI15X3,
477 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 476 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
478 .algo = &smbus_algorithm, 477 .algo = &smbus_algorithm,
479}; 478};
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index 36bee5b9c952..220f4a1eee1d 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -298,7 +298,6 @@ static const struct i2c_algorithm smbus_algorithm = {
298 298
299struct i2c_adapter amd756_smbus = { 299struct i2c_adapter amd756_smbus = {
300 .owner = THIS_MODULE, 300 .owner = THIS_MODULE,
301 .id = I2C_HW_SMBUS_AMD756,
302 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 301 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
303 .algo = &smbus_algorithm, 302 .algo = &smbus_algorithm,
304}; 303};
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 3972208876b3..edab51973bf5 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -387,7 +387,6 @@ static int __devinit amd8111_probe(struct pci_dev *dev,
387 smbus->adapter.owner = THIS_MODULE; 387 smbus->adapter.owner = THIS_MODULE;
388 snprintf(smbus->adapter.name, sizeof(smbus->adapter.name), 388 snprintf(smbus->adapter.name, sizeof(smbus->adapter.name),
389 "SMBus2 AMD8111 adapter at %04x", smbus->base); 389 "SMBus2 AMD8111 adapter at %04x", smbus->base);
390 smbus->adapter.id = I2C_HW_SMBUS_AMD8111;
391 smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 390 smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
392 smbus->adapter.algo = &smbus_algorithm; 391 smbus->adapter.algo = &smbus_algorithm;
393 smbus->adapter.algo_data = smbus; 392 smbus->adapter.algo_data = smbus;
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index 66a04c2c660f..f78ce523e3db 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -400,7 +400,6 @@ i2c_au1550_probe(struct platform_device *pdev)
400 priv->xfer_timeout = 200; 400 priv->xfer_timeout = 200;
401 priv->ack_timeout = 200; 401 priv->ack_timeout = 200;
402 402
403 priv->adap.id = I2C_HW_AU1550_PSC;
404 priv->adap.nr = pdev->id; 403 priv->adap.nr = pdev->id;
405 priv->adap.algo = &au1550_algo; 404 priv->adap.algo = &au1550_algo;
406 priv->adap.algo_data = priv; 405 priv->adap.algo_data = priv;
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index 3fd2c417c1e0..fc548b3d002e 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -651,7 +651,6 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
651 iface->timeout_timer.data = (unsigned long)iface; 651 iface->timeout_timer.data = (unsigned long)iface;
652 652
653 p_adap = &iface->adap; 653 p_adap = &iface->adap;
654 p_adap->id = I2C_HW_BLACKFIN;
655 p_adap->nr = pdev->id; 654 p_adap->nr = pdev->id;
656 strlcpy(p_adap->name, pdev->name, sizeof(p_adap->name)); 655 strlcpy(p_adap->name, pdev->name, sizeof(p_adap->name));
657 p_adap->algo = &bfin_twi_algorithm; 656 p_adap->algo = &bfin_twi_algorithm;
diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c
index 0ed3ccb81b63..448b4bf35eb7 100644
--- a/drivers/i2c/busses/i2c-elektor.c
+++ b/drivers/i2c/busses/i2c-elektor.c
@@ -202,7 +202,6 @@ static struct i2c_algo_pcf_data pcf_isa_data = {
202static struct i2c_adapter pcf_isa_ops = { 202static struct i2c_adapter pcf_isa_ops = {
203 .owner = THIS_MODULE, 203 .owner = THIS_MODULE,
204 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 204 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
205 .id = I2C_HW_P_ELEK,
206 .algo_data = &pcf_isa_data, 205 .algo_data = &pcf_isa_data,
207 .name = "i2c-elektor", 206 .name = "i2c-elektor",
208}; 207};
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index 648aa7baff83..bec9b845dd16 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -102,7 +102,6 @@ static struct i2c_algo_bit_data hydra_bit_data = {
102static struct i2c_adapter hydra_adap = { 102static struct i2c_adapter hydra_adap = {
103 .owner = THIS_MODULE, 103 .owner = THIS_MODULE,
104 .name = "Hydra i2c", 104 .name = "Hydra i2c",
105 .id = I2C_HW_B_HYDRA,
106 .algo_data = &hydra_bit_data, 105 .algo_data = &hydra_bit_data,
107}; 106};
108 107
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 526625eaa84b..230238df56c4 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -556,7 +556,6 @@ static const struct i2c_algorithm smbus_algorithm = {
556 556
557static struct i2c_adapter i801_adapter = { 557static struct i2c_adapter i801_adapter = {
558 .owner = THIS_MODULE, 558 .owner = THIS_MODULE,
559 .id = I2C_HW_SMBUS_I801,
560 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 559 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
561 .algo = &smbus_algorithm, 560 .algo = &smbus_algorithm,
562}; 561};
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 651f2f1ae5b7..88f0db73b364 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -746,7 +746,6 @@ static int __devinit iic_probe(struct of_device *ofdev,
746 adap->dev.parent = &ofdev->dev; 746 adap->dev.parent = &ofdev->dev;
747 strlcpy(adap->name, "IBM IIC", sizeof(adap->name)); 747 strlcpy(adap->name, "IBM IIC", sizeof(adap->name));
748 i2c_set_adapdata(adap, dev); 748 i2c_set_adapdata(adap, dev);
749 adap->id = I2C_HW_OCP;
750 adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 749 adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
751 adap->algo = &iic_algo; 750 adap->algo = &iic_algo;
752 adap->timeout = 1; 751 adap->timeout = 1;
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index fc2714ac0c0f..3190690c26ce 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -480,7 +480,6 @@ iop3xx_i2c_probe(struct platform_device *pdev)
480 } 480 }
481 481
482 memcpy(new_adapter->name, pdev->name, strlen(pdev->name)); 482 memcpy(new_adapter->name, pdev->name, strlen(pdev->name));
483 new_adapter->id = I2C_HW_IOP3XX;
484 new_adapter->owner = THIS_MODULE; 483 new_adapter->owner = THIS_MODULE;
485 new_adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 484 new_adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
486 new_adapter->dev.parent = &pdev->dev; 485 new_adapter->dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-ixp2000.c b/drivers/i2c/busses/i2c-ixp2000.c
index 05d72e981353..8e8467970481 100644
--- a/drivers/i2c/busses/i2c-ixp2000.c
+++ b/drivers/i2c/busses/i2c-ixp2000.c
@@ -116,7 +116,6 @@ static int ixp2000_i2c_probe(struct platform_device *plat_dev)
116 drv_data->algo_data.udelay = 6; 116 drv_data->algo_data.udelay = 6;
117 drv_data->algo_data.timeout = 100; 117 drv_data->algo_data.timeout = 100;
118 118
119 drv_data->adapter.id = I2C_HW_B_IXP2000,
120 strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name, 119 strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name,
121 sizeof(drv_data->adapter.name)); 120 sizeof(drv_data->adapter.name));
122 drv_data->adapter.algo_data = &drv_data->algo_data, 121 drv_data->adapter.algo_data = &drv_data->algo_data,
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index a9a45fcc8544..aedbbe6618db 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -310,7 +310,6 @@ static const struct i2c_algorithm mpc_algo = {
310static struct i2c_adapter mpc_ops = { 310static struct i2c_adapter mpc_ops = {
311 .owner = THIS_MODULE, 311 .owner = THIS_MODULE,
312 .name = "MPC adapter", 312 .name = "MPC adapter",
313 .id = I2C_HW_MPC107,
314 .algo = &mpc_algo, 313 .algo = &mpc_algo,
315 .timeout = 1, 314 .timeout = 1,
316}; 315};
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 9e8118d2fe64..eeda276f8f16 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -527,7 +527,6 @@ mv64xxx_i2c_probe(struct platform_device *pd)
527 goto exit_unmap_regs; 527 goto exit_unmap_regs;
528 } 528 }
529 drv_data->adapter.dev.parent = &pd->dev; 529 drv_data->adapter.dev.parent = &pd->dev;
530 drv_data->adapter.id = I2C_HW_MV64XXX;
531 drv_data->adapter.algo = &mv64xxx_i2c_algo; 530 drv_data->adapter.algo = &mv64xxx_i2c_algo;
532 drv_data->adapter.owner = THIS_MODULE; 531 drv_data->adapter.owner = THIS_MODULE;
533 drv_data->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 532 drv_data->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 3b19bc41a60b..05af6cd7f270 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -355,7 +355,6 @@ static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar,
355 return -EBUSY; 355 return -EBUSY;
356 } 356 }
357 smbus->adapter.owner = THIS_MODULE; 357 smbus->adapter.owner = THIS_MODULE;
358 smbus->adapter.id = I2C_HW_SMBUS_NFORCE2;
359 smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 358 smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
360 smbus->adapter.algo = &smbus_algorithm; 359 smbus->adapter.algo = &smbus_algorithm;
361 smbus->adapter.algo_data = smbus; 360 smbus->adapter.algo_data = smbus;
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index b2b8380f6602..322c5691e38e 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -115,7 +115,6 @@ static struct i2c_algo_bit_data parport_algo_data = {
115static struct i2c_adapter parport_adapter = { 115static struct i2c_adapter parport_adapter = {
116 .owner = THIS_MODULE, 116 .owner = THIS_MODULE,
117 .class = I2C_CLASS_HWMON, 117 .class = I2C_CLASS_HWMON,
118 .id = I2C_HW_B_LP,
119 .algo_data = &parport_algo_data, 118 .algo_data = &parport_algo_data,
120 .name = "Parallel port adapter (light)", 119 .name = "Parallel port adapter (light)",
121}; 120};
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index a257cd5cd134..0d8998610c74 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -164,7 +164,6 @@ static void i2c_parport_attach (struct parport *port)
164 /* Fill the rest of the structure */ 164 /* Fill the rest of the structure */
165 adapter->adapter.owner = THIS_MODULE; 165 adapter->adapter.owner = THIS_MODULE;
166 adapter->adapter.class = I2C_CLASS_HWMON; 166 adapter->adapter.class = I2C_CLASS_HWMON;
167 adapter->adapter.id = I2C_HW_B_LP;
168 strlcpy(adapter->adapter.name, "Parallel port adapter", 167 strlcpy(adapter->adapter.name, "Parallel port adapter",
169 sizeof(adapter->adapter.name)); 168 sizeof(adapter->adapter.name));
170 adapter->algo_data = parport_algo_data; 169 adapter->algo_data = parport_algo_data;
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index 9eb76268ec78..4aa8138cb0a9 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -101,7 +101,6 @@ static struct i2c_algo_pca_data pca_isa_data = {
101 101
102static struct i2c_adapter pca_isa_ops = { 102static struct i2c_adapter pca_isa_ops = {
103 .owner = THIS_MODULE, 103 .owner = THIS_MODULE,
104 .id = I2C_HW_A_ISA,
105 .algo_data = &pca_isa_data, 104 .algo_data = &pca_isa_data,
106 .name = "PCA9564 ISA Adapter", 105 .name = "PCA9564 ISA Adapter",
107 .timeout = 100, 106 .timeout = 100,
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index eaa9b387543e..761f9dd53620 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -403,7 +403,6 @@ static const struct i2c_algorithm smbus_algorithm = {
403 403
404static struct i2c_adapter piix4_adapter = { 404static struct i2c_adapter piix4_adapter = {
405 .owner = THIS_MODULE, 405 .owner = THIS_MODULE,
406 .id = I2C_HW_SMBUS_PIIX4,
407 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 406 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
408 .algo = &smbus_algorithm, 407 .algo = &smbus_algorithm,
409}; 408};
diff --git a/drivers/i2c/busses/i2c-sibyte.c b/drivers/i2c/busses/i2c-sibyte.c
index 4ddefbf238e9..98b1ec489159 100644
--- a/drivers/i2c/busses/i2c-sibyte.c
+++ b/drivers/i2c/busses/i2c-sibyte.c
@@ -155,7 +155,6 @@ static struct i2c_algo_sibyte_data sibyte_board_data[2] = {
155static struct i2c_adapter sibyte_board_adapter[2] = { 155static struct i2c_adapter sibyte_board_adapter[2] = {
156 { 156 {
157 .owner = THIS_MODULE, 157 .owner = THIS_MODULE,
158 .id = I2C_HW_SIBYTE,
159 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 158 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
160 .algo = NULL, 159 .algo = NULL,
161 .algo_data = &sibyte_board_data[0], 160 .algo_data = &sibyte_board_data[0],
@@ -164,7 +163,6 @@ static struct i2c_adapter sibyte_board_adapter[2] = {
164 }, 163 },
165 { 164 {
166 .owner = THIS_MODULE, 165 .owner = THIS_MODULE,
167 .id = I2C_HW_SIBYTE,
168 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 166 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
169 .algo = NULL, 167 .algo = NULL,
170 .algo_data = &sibyte_board_data[1], 168 .algo_data = &sibyte_board_data[1],
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 8ce2daff985c..f320ab27da46 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -365,7 +365,6 @@ static const struct i2c_algorithm smbus_algorithm = {
365 365
366static struct i2c_adapter sis5595_adapter = { 366static struct i2c_adapter sis5595_adapter = {
367 .owner = THIS_MODULE, 367 .owner = THIS_MODULE,
368 .id = I2C_HW_SMBUS_SIS5595,
369 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 368 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
370 .algo = &smbus_algorithm, 369 .algo = &smbus_algorithm,
371}; 370};
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 9c9c016ff2b5..50c3610e6028 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -464,7 +464,6 @@ static const struct i2c_algorithm smbus_algorithm = {
464 464
465static struct i2c_adapter sis630_adapter = { 465static struct i2c_adapter sis630_adapter = {
466 .owner = THIS_MODULE, 466 .owner = THIS_MODULE,
467 .id = I2C_HW_SMBUS_SIS630,
468 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 467 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
469 .algo = &smbus_algorithm, 468 .algo = &smbus_algorithm,
470}; 469};
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index f1bba6396641..7e1594b40579 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -241,7 +241,6 @@ static const struct i2c_algorithm smbus_algorithm = {
241 241
242static struct i2c_adapter sis96x_adapter = { 242static struct i2c_adapter sis96x_adapter = {
243 .owner = THIS_MODULE, 243 .owner = THIS_MODULE,
244 .id = I2C_HW_SMBUS_SIS96X,
245 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 244 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
246 .algo = &smbus_algorithm, 245 .algo = &smbus_algorithm,
247}; 246};
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index 29cef0433f34..8b24f192103a 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -83,7 +83,6 @@ static struct i2c_algo_bit_data bit_data = {
83 83
84static struct i2c_adapter vt586b_adapter = { 84static struct i2c_adapter vt586b_adapter = {
85 .owner = THIS_MODULE, 85 .owner = THIS_MODULE,
86 .id = I2C_HW_B_VIA,
87 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 86 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
88 .name = "VIA i2c", 87 .name = "VIA i2c",
89 .algo_data = &bit_data, 88 .algo_data = &bit_data,
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 9f194d9efd91..02e6f724b05f 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -321,7 +321,6 @@ static const struct i2c_algorithm smbus_algorithm = {
321 321
322static struct i2c_adapter vt596_adapter = { 322static struct i2c_adapter vt596_adapter = {
323 .owner = THIS_MODULE, 323 .owner = THIS_MODULE,
324 .id = I2C_HW_SMBUS_VIA2,
325 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 324 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
326 .algo = &smbus_algorithm, 325 .algo = &smbus_algorithm,
327}; 326};
diff --git a/drivers/i2c/busses/i2c-voodoo3.c b/drivers/i2c/busses/i2c-voodoo3.c
index 1d4ae26ba73d..1a474acc0ddd 100644
--- a/drivers/i2c/busses/i2c-voodoo3.c
+++ b/drivers/i2c/busses/i2c-voodoo3.c
@@ -163,7 +163,6 @@ static struct i2c_algo_bit_data voo_i2c_bit_data = {
163 163
164static struct i2c_adapter voodoo3_i2c_adapter = { 164static struct i2c_adapter voodoo3_i2c_adapter = {
165 .owner = THIS_MODULE, 165 .owner = THIS_MODULE,
166 .id = I2C_HW_B_VOO,
167 .class = I2C_CLASS_TV_ANALOG, 166 .class = I2C_CLASS_TV_ANALOG,
168 .name = "I2C Voodoo3/Banshee adapter", 167 .name = "I2C Voodoo3/Banshee adapter",
169 .algo_data = &voo_i2c_bit_data, 168 .algo_data = &voo_i2c_bit_data,
@@ -180,7 +179,6 @@ static struct i2c_algo_bit_data voo_ddc_bit_data = {
180 179
181static struct i2c_adapter voodoo3_ddc_adapter = { 180static struct i2c_adapter voodoo3_ddc_adapter = {
182 .owner = THIS_MODULE, 181 .owner = THIS_MODULE,
183 .id = I2C_HW_B_VOO,
184 .class = I2C_CLASS_DDC, 182 .class = I2C_CLASS_DDC,
185 .name = "DDC Voodoo3/Banshee adapter", 183 .name = "DDC Voodoo3/Banshee adapter",
186 .algo_data = &voo_ddc_bit_data, 184 .algo_data = &voo_ddc_bit_data,
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index ed794b145a11..648ecc6f60e6 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -440,7 +440,6 @@ static __init struct scx200_acb_iface *scx200_create_iface(const char *text,
440 i2c_set_adapdata(adapter, iface); 440 i2c_set_adapdata(adapter, iface);
441 snprintf(adapter->name, sizeof(adapter->name), "%s ACB%d", text, index); 441 snprintf(adapter->name, sizeof(adapter->name), "%s ACB%d", text, index);
442 adapter->owner = THIS_MODULE; 442 adapter->owner = THIS_MODULE;
443 adapter->id = I2C_HW_SMBUS_SCX200;
444 adapter->algo = &scx200_acb_algorithm; 443 adapter->algo = &scx200_acb_algorithm;
445 adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 444 adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
446 adapter->dev.parent = dev; 445 adapter->dev.parent = dev;
diff --git a/drivers/i2c/busses/scx200_i2c.c b/drivers/i2c/busses/scx200_i2c.c
index e4c98539c517..162b74a04886 100644
--- a/drivers/i2c/busses/scx200_i2c.c
+++ b/drivers/i2c/busses/scx200_i2c.c
@@ -82,7 +82,6 @@ static struct i2c_algo_bit_data scx200_i2c_data = {
82static struct i2c_adapter scx200_i2c_ops = { 82static struct i2c_adapter scx200_i2c_ops = {
83 .owner = THIS_MODULE, 83 .owner = THIS_MODULE,
84 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 84 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
85 .id = I2C_HW_B_SCX200,
86 .algo_data = &scx200_i2c_data, 85 .algo_data = &scx200_i2c_data,
87 .name = "NatSemi SCx200 I2C", 86 .name = "NatSemi SCx200 I2C",
88}; 87};
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
index b9bef04b7be4..c80312c1f382 100644
--- a/drivers/i2c/chips/Kconfig
+++ b/drivers/i2c/chips/Kconfig
@@ -16,43 +16,6 @@ config DS1682
16 This driver can also be built as a module. If so, the module 16 This driver can also be built as a module. If so, the module
17 will be called ds1682. 17 will be called ds1682.
18 18
19config AT24
20 tristate "EEPROMs from most vendors"
21 depends on SYSFS && EXPERIMENTAL
22 help
23 Enable this driver to get read/write support to most I2C EEPROMs,
24 after you configure the driver to know about each EEPROM on
25 your target board. Use these generic chip names, instead of
26 vendor-specific ones like at24c64 or 24lc02:
27
28 24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08,
29 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024
30
31 Unless you like data loss puzzles, always be sure that any chip
32 you configure as a 24c32 (32 kbit) or larger is NOT really a
33 24c16 (16 kbit) or smaller, and vice versa. Marking the chip
34 as read-only won't help recover from this. Also, if your chip
35 has any software write-protect mechanism you may want to review the
36 code to make sure this driver won't turn it on by accident.
37
38 If you use this with an SMBus adapter instead of an I2C adapter,
39 full functionality is not available. Only smaller devices are
40 supported (24c16 and below, max 4 kByte).
41
42 This driver can also be built as a module. If so, the module
43 will be called at24.
44
45config SENSORS_EEPROM
46 tristate "EEPROM reader"
47 depends on EXPERIMENTAL
48 help
49 If you say yes here you get read-only access to the EEPROM data
50 available on modern memory DIMMs and Sony Vaio laptops. Such
51 EEPROMs could theoretically be available on other devices as well.
52
53 This driver can also be built as a module. If so, the module
54 will be called eeprom.
55
56config SENSORS_PCF8574 19config SENSORS_PCF8574
57 tristate "Philips PCF8574 and PCF8574A (DEPRECATED)" 20 tristate "Philips PCF8574 and PCF8574A (DEPRECATED)"
58 depends on EXPERIMENTAL && GPIO_PCF857X = "n" 21 depends on EXPERIMENTAL && GPIO_PCF857X = "n"
diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile
index 00fcb5193ac2..d142f238a2de 100644
--- a/drivers/i2c/chips/Makefile
+++ b/drivers/i2c/chips/Makefile
@@ -11,8 +11,6 @@
11# 11#
12 12
13obj-$(CONFIG_DS1682) += ds1682.o 13obj-$(CONFIG_DS1682) += ds1682.o
14obj-$(CONFIG_AT24) += at24.o
15obj-$(CONFIG_SENSORS_EEPROM) += eeprom.o
16obj-$(CONFIG_SENSORS_MAX6875) += max6875.o 14obj-$(CONFIG_SENSORS_MAX6875) += max6875.o
17obj-$(CONFIG_SENSORS_PCA9539) += pca9539.o 15obj-$(CONFIG_SENSORS_PCA9539) += pca9539.o
18obj-$(CONFIG_SENSORS_PCF8574) += pcf8574.o 16obj-$(CONFIG_SENSORS_PCF8574) += pcf8574.o
diff --git a/drivers/ide/falconide.c b/drivers/ide/falconide.c
index a5ba820d69bb..a638e952d67a 100644
--- a/drivers/ide/falconide.c
+++ b/drivers/ide/falconide.c
@@ -82,7 +82,7 @@ static const struct ide_tp_ops falconide_tp_ops = {
82 82
83static const struct ide_port_info falconide_port_info = { 83static const struct ide_port_info falconide_port_info = {
84 .tp_ops = &falconide_tp_ops, 84 .tp_ops = &falconide_tp_ops,
85 .host_flags = IDE_HFLAG_NO_DMA, 85 .host_flags = IDE_HFLAG_NO_DMA | IDE_HFLAG_SERIALIZE,
86}; 86};
87 87
88static void __init falconide_setup_ports(hw_regs_t *hw) 88static void __init falconide_setup_ports(hw_regs_t *hw)
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 312127ea443a..0db1ed9f5fc2 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -649,7 +649,8 @@ static int ide_register_port(ide_hwif_t *hwif)
649 /* register with global device tree */ 649 /* register with global device tree */
650 dev_set_name(&hwif->gendev, hwif->name); 650 dev_set_name(&hwif->gendev, hwif->name);
651 hwif->gendev.driver_data = hwif; 651 hwif->gendev.driver_data = hwif;
652 hwif->gendev.parent = hwif->dev; 652 if (hwif->gendev.parent == NULL)
653 hwif->gendev.parent = hwif->dev;
653 hwif->gendev.release = hwif_release_dev; 654 hwif->gendev.release = hwif_release_dev;
654 655
655 ret = device_register(&hwif->gendev); 656 ret = device_register(&hwif->gendev);
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index a7ac490c9ae3..f38aac78044c 100644
--- a/drivers/ide/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
@@ -346,7 +346,8 @@ static int __init palm_bk3710_probe(struct platform_device *pdev)
346{ 346{
347 struct clk *clk; 347 struct clk *clk;
348 struct resource *mem, *irq; 348 struct resource *mem, *irq;
349 unsigned long base, rate; 349 void __iomem *base;
350 unsigned long rate;
350 int i, rc; 351 int i, rc;
351 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 352 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
352 353
@@ -382,11 +383,13 @@ static int __init palm_bk3710_probe(struct platform_device *pdev)
382 base = IO_ADDRESS(mem->start); 383 base = IO_ADDRESS(mem->start);
383 384
384 /* Configure the Palm Chip controller */ 385 /* Configure the Palm Chip controller */
385 palm_bk3710_chipinit((void __iomem *)base); 386 palm_bk3710_chipinit(base);
386 387
387 for (i = 0; i < IDE_NR_PORTS - 2; i++) 388 for (i = 0; i < IDE_NR_PORTS - 2; i++)
388 hw.io_ports_array[i] = base + IDE_PALM_ATA_PRI_REG_OFFSET + i; 389 hw.io_ports_array[i] = (unsigned long)
389 hw.io_ports.ctl_addr = base + IDE_PALM_ATA_PRI_CTL_OFFSET; 390 (base + IDE_PALM_ATA_PRI_REG_OFFSET + i);
391 hw.io_ports.ctl_addr = (unsigned long)
392 (base + IDE_PALM_ATA_PRI_CTL_OFFSET);
390 hw.irq = irq->start; 393 hw.irq = irq->start;
391 hw.dev = &pdev->dev; 394 hw.dev = &pdev->dev;
392 hw.chipset = ide_palm3710; 395 hw.chipset = ide_palm3710;
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c
index dc15cadb06ef..38f712036201 100644
--- a/drivers/ieee1394/pcilynx.c
+++ b/drivers/ieee1394/pcilynx.c
@@ -1419,7 +1419,6 @@ static int __devinit add_card(struct pci_dev *dev,
1419 i2c_ad = kzalloc(sizeof(*i2c_ad), GFP_KERNEL); 1419 i2c_ad = kzalloc(sizeof(*i2c_ad), GFP_KERNEL);
1420 if (!i2c_ad) FAIL("failed to allocate I2C adapter memory"); 1420 if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
1421 1421
1422 i2c_ad->id = I2C_HW_B_PCILYNX;
1423 strlcpy(i2c_ad->name, "PCILynx I2C", sizeof(i2c_ad->name)); 1422 strlcpy(i2c_ad->name, "PCILynx I2C", sizeof(i2c_ad->name));
1424 i2c_adapter_data = bit_data; 1423 i2c_adapter_data = bit_data;
1425 i2c_ad->algo_data = &i2c_adapter_data; 1424 i2c_ad->algo_data = &i2c_adapter_data;
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index a3551dd0324d..aa30b5cb3513 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -431,6 +431,7 @@ set_arg(void __user *b, void *val,int len)
431 return 0; 431 return 0;
432} 432}
433 433
434#ifdef CONFIG_IPPP_FILTER
434static int get_filter(void __user *arg, struct sock_filter **p) 435static int get_filter(void __user *arg, struct sock_filter **p)
435{ 436{
436 struct sock_fprog uprog; 437 struct sock_fprog uprog;
@@ -465,6 +466,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
465 *p = code; 466 *p = code;
466 return uprog.len; 467 return uprog.len;
467} 468}
469#endif /* CONFIG_IPPP_FILTER */
468 470
469/* 471/*
470 * ippp device ioctl 472 * ippp device ioctl
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 419c378bd24b..56073199ceba 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -87,14 +87,6 @@ config PHANTOM
87 If you choose to build module, its name will be phantom. If unsure, 87 If you choose to build module, its name will be phantom. If unsure,
88 say N here. 88 say N here.
89 89
90config EEPROM_93CX6
91 tristate "EEPROM 93CX6 support"
92 ---help---
93 This is a driver for the EEPROM chipsets 93c46 and 93c66.
94 The driver supports both read as well as write commands.
95
96 If unsure, say N.
97
98config SGI_IOC4 90config SGI_IOC4
99 tristate "SGI IOC4 Base IO support" 91 tristate "SGI IOC4 Base IO support"
100 depends on PCI 92 depends on PCI
@@ -231,5 +223,6 @@ config DELL_LAPTOP
231 laptops. 223 laptops.
232 224
233source "drivers/misc/c2port/Kconfig" 225source "drivers/misc/c2port/Kconfig"
226source "drivers/misc/eeprom/Kconfig"
234 227
235endif # MISC_DEVICES 228endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index d5749a7bc777..bc1199830554 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -13,10 +13,10 @@ obj-$(CONFIG_TIFM_CORE) += tifm_core.o
13obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o 13obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
14obj-$(CONFIG_PHANTOM) += phantom.o 14obj-$(CONFIG_PHANTOM) += phantom.o
15obj-$(CONFIG_SGI_IOC4) += ioc4.o 15obj-$(CONFIG_SGI_IOC4) += ioc4.o
16obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
17obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o 16obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
18obj-$(CONFIG_KGDB_TESTS) += kgdbts.o 17obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
19obj-$(CONFIG_SGI_XP) += sgi-xp/ 18obj-$(CONFIG_SGI_XP) += sgi-xp/
20obj-$(CONFIG_SGI_GRU) += sgi-gru/ 19obj-$(CONFIG_SGI_GRU) += sgi-gru/
21obj-$(CONFIG_HP_ILO) += hpilo.o 20obj-$(CONFIG_HP_ILO) += hpilo.o
22obj-$(CONFIG_C2PORT) += c2port/ 21obj-$(CONFIG_C2PORT) += c2port/
22obj-y += eeprom/
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
new file mode 100644
index 000000000000..c76df8cda5ef
--- /dev/null
+++ b/drivers/misc/eeprom/Kconfig
@@ -0,0 +1,59 @@
1menu "EEPROM support"
2
3config EEPROM_AT24
4 tristate "I2C EEPROMs from most vendors"
5 depends on I2C && SYSFS && EXPERIMENTAL
6 help
7 Enable this driver to get read/write support to most I2C EEPROMs,
8 after you configure the driver to know about each EEPROM on
9 your target board. Use these generic chip names, instead of
10 vendor-specific ones like at24c64 or 24lc02:
11
12 24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08,
13 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024
14
15 Unless you like data loss puzzles, always be sure that any chip
16 you configure as a 24c32 (32 kbit) or larger is NOT really a
17 24c16 (16 kbit) or smaller, and vice versa. Marking the chip
18 as read-only won't help recover from this. Also, if your chip
19 has any software write-protect mechanism you may want to review the
20 code to make sure this driver won't turn it on by accident.
21
22 If you use this with an SMBus adapter instead of an I2C adapter,
23 full functionality is not available. Only smaller devices are
24 supported (24c16 and below, max 4 kByte).
25
26 This driver can also be built as a module. If so, the module
27 will be called at24.
28
29config EEPROM_AT25
30 tristate "SPI EEPROMs from most vendors"
31 depends on SPI && SYSFS
32 help
33 Enable this driver to get read/write support to most SPI EEPROMs,
34 after you configure the board init code to know about each eeprom
35 on your target board.
36
37 This driver can also be built as a module. If so, the module
38 will be called at25.
39
40config EEPROM_LEGACY
41 tristate "Old I2C EEPROM reader"
42 depends on I2C && SYSFS
43 help
44 If you say yes here you get read-only access to the EEPROM data
45 available on modern memory DIMMs and Sony Vaio laptops via I2C. Such
46 EEPROMs could theoretically be available on other devices as well.
47
48 This driver can also be built as a module. If so, the module
49 will be called eeprom.
50
51config EEPROM_93CX6
52 tristate "EEPROM 93CX6 support"
53 help
54 This is a driver for the EEPROM chipsets 93c46 and 93c66.
55 The driver supports both read as well as write commands.
56
57 If unsure, say N.
58
59endmenu
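
The EEPROM_AT24 help above notes that the driver only attaches after board code describes each EEPROM. A minimal sketch of such a declaration, assuming an I2C bus number, slave address and at24_platform_data values that are illustrative only and not part of this patch:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/at24.h>

/* Hypothetical board data: a 2 kbit 24c02 at address 0x50 on bus 1. */
static struct at24_platform_data board_eeprom = {
	.byte_len	= 256,	/* a 24c02 holds 256 bytes */
	.page_size	= 8,	/* write page size of the part */
	.flags		= 0,	/* AT24_FLAG_READONLY would model an "spd" */
};

static struct i2c_board_info board_i2c1_devices[] __initdata = {
	{
		I2C_BOARD_INFO("24c02", 0x50),	/* generic name, per the help text */
		.platform_data	= &board_eeprom,
	},
};

/* Registered from board init code, before the bus driver probes: */
static void __init board_register_eeprom(void)
{
	i2c_register_board_info(1, board_i2c1_devices,
				ARRAY_SIZE(board_i2c1_devices));
}

The SPI-attached EEPROM_AT25 case is analogous, presumably going through spi_register_board_info() with a struct spi_eeprom as chip data rather than an I2C board info entry.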
diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile
new file mode 100644
index 000000000000..539dd8f88128
--- /dev/null
+++ b/drivers/misc/eeprom/Makefile
@@ -0,0 +1,4 @@
1obj-$(CONFIG_EEPROM_AT24) += at24.o
2obj-$(CONFIG_EEPROM_AT25) += at25.o
3obj-$(CONFIG_EEPROM_LEGACY) += eeprom.o
4obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
diff --git a/drivers/i2c/chips/at24.c b/drivers/misc/eeprom/at24.c
index d4775528abc6..d4775528abc6 100644
--- a/drivers/i2c/chips/at24.c
+++ b/drivers/misc/eeprom/at24.c
diff --git a/drivers/spi/at25.c b/drivers/misc/eeprom/at25.c
index 290dbe99647a..290dbe99647a 100644
--- a/drivers/spi/at25.c
+++ b/drivers/misc/eeprom/at25.c
diff --git a/drivers/i2c/chips/eeprom.c b/drivers/misc/eeprom/eeprom.c
index 2c27193aeaa0..2c27193aeaa0 100644
--- a/drivers/i2c/chips/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
diff --git a/drivers/misc/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c
index 15b1780025c8..15b1780025c8 100644
--- a/drivers/misc/eeprom_93cx6.c
+++ b/drivers/misc/eeprom/eeprom_93cx6.c
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index dfa585f7feaf..0efa390978bd 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -76,6 +76,16 @@ config MMC_OMAP
76 76
77 If unsure, say N. 77 If unsure, say N.
78 78
79config MMC_OMAP_HS
80 tristate "TI OMAP High Speed Multimedia Card Interface support"
81 depends on ARCH_OMAP2430 || ARCH_OMAP3
82 help
 83	  This selects the TI OMAP High Speed Multimedia Card Interface.
84 If you have an OMAP2430 or OMAP3 board with a Multimedia Card slot,
85 say Y or M here.
86
87 If unsure, say N.
88
79config MMC_WBSD 89config MMC_WBSD
80 tristate "Winbond W83L51xD SD/MMC Card Interface support" 90 tristate "Winbond W83L51xD SD/MMC Card Interface support"
81 depends on ISA_DMA_API 91 depends on ISA_DMA_API
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index f4853288bbb1..98cab84829b8 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o
15obj-$(CONFIG_MMC_WBSD) += wbsd.o 15obj-$(CONFIG_MMC_WBSD) += wbsd.o
16obj-$(CONFIG_MMC_AU1X) += au1xmmc.o 16obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
17obj-$(CONFIG_MMC_OMAP) += omap.o 17obj-$(CONFIG_MMC_OMAP) += omap.o
18obj-$(CONFIG_MMC_OMAP_HS) += omap_hsmmc.o
18obj-$(CONFIG_MMC_AT91) += at91_mci.o 19obj-$(CONFIG_MMC_AT91) += at91_mci.o
19obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o 20obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o
20obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o 21obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
new file mode 100644
index 000000000000..db37490f67ec
--- /dev/null
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -0,0 +1,1242 @@
1/*
2 * drivers/mmc/host/omap_hsmmc.c
3 *
4 * Driver for OMAP2430/3430 MMC controller.
5 *
6 * Copyright (C) 2007 Texas Instruments.
7 *
8 * Authors:
9 * Syed Mohammed Khasim <x0khasim@ti.com>
10 * Madhusudhan <madhu.cr@ti.com>
11 * Mohit Jalori <mjalori@ti.com>
12 *
13 * This file is licensed under the terms of the GNU General Public License
14 * version 2. This program is licensed "as is" without any warranty of any
15 * kind, whether express or implied.
16 */
17
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/interrupt.h>
21#include <linux/delay.h>
22#include <linux/dma-mapping.h>
23#include <linux/platform_device.h>
24#include <linux/workqueue.h>
25#include <linux/timer.h>
26#include <linux/clk.h>
27#include <linux/mmc/host.h>
28#include <linux/io.h>
29#include <linux/semaphore.h>
30#include <mach/dma.h>
31#include <mach/hardware.h>
32#include <mach/board.h>
33#include <mach/mmc.h>
34#include <mach/cpu.h>
35
36/* OMAP HSMMC Host Controller Registers */
37#define OMAP_HSMMC_SYSCONFIG 0x0010
38#define OMAP_HSMMC_CON 0x002C
39#define OMAP_HSMMC_BLK 0x0104
40#define OMAP_HSMMC_ARG 0x0108
41#define OMAP_HSMMC_CMD 0x010C
42#define OMAP_HSMMC_RSP10 0x0110
43#define OMAP_HSMMC_RSP32 0x0114
44#define OMAP_HSMMC_RSP54 0x0118
45#define OMAP_HSMMC_RSP76 0x011C
46#define OMAP_HSMMC_DATA 0x0120
47#define OMAP_HSMMC_HCTL 0x0128
48#define OMAP_HSMMC_SYSCTL 0x012C
49#define OMAP_HSMMC_STAT 0x0130
50#define OMAP_HSMMC_IE 0x0134
51#define OMAP_HSMMC_ISE 0x0138
52#define OMAP_HSMMC_CAPA 0x0140
53
54#define VS18 (1 << 26)
55#define VS30 (1 << 25)
56#define SDVS18 (0x5 << 9)
57#define SDVS30 (0x6 << 9)
58#define SDVSCLR 0xFFFFF1FF
59#define SDVSDET 0x00000400
60#define AUTOIDLE 0x1
61#define SDBP (1 << 8)
62#define DTO 0xe
63#define ICE 0x1
64#define ICS 0x2
65#define CEN (1 << 2)
66#define CLKD_MASK 0x0000FFC0
67#define CLKD_SHIFT 6
68#define DTO_MASK 0x000F0000
69#define DTO_SHIFT 16
70#define INT_EN_MASK 0x307F0033
71#define INIT_STREAM (1 << 1)
72#define DP_SELECT (1 << 21)
73#define DDIR (1 << 4)
74#define DMA_EN 0x1
75#define MSBS (1 << 5)
76#define BCE (1 << 1)
77#define FOUR_BIT (1 << 1)
78#define CC 0x1
79#define TC 0x02
80#define OD 0x1
81#define ERR (1 << 15)
82#define CMD_TIMEOUT (1 << 16)
83#define DATA_TIMEOUT (1 << 20)
84#define CMD_CRC (1 << 17)
85#define DATA_CRC (1 << 21)
86#define CARD_ERR (1 << 28)
87#define STAT_CLEAR 0xFFFFFFFF
88#define INIT_STREAM_CMD 0x00000000
89#define DUAL_VOLT_OCR_BIT 7
90#define SRC (1 << 25)
91#define SRD (1 << 26)
92
93/*
94 * FIXME: Most likely all the data using these _DEVID defines should come
 95 * from the platform_data, or be implemented in controller- and slot-specific
96 * functions.
97 */
98#define OMAP_MMC1_DEVID 0
99#define OMAP_MMC2_DEVID 1
100
101#define OMAP_MMC_DATADIR_NONE 0
102#define OMAP_MMC_DATADIR_READ 1
103#define OMAP_MMC_DATADIR_WRITE 2
104#define MMC_TIMEOUT_MS 20
105#define OMAP_MMC_MASTER_CLOCK 96000000
106#define DRIVER_NAME "mmci-omap-hs"
107
108/*
 109 * One controller can have multiple slots, like on some OMAP boards using
 110 * the omap.c controller driver. Luckily this is not currently done on any known
111 * omap_hsmmc.c device.
112 */
113#define mmc_slot(host) (host->pdata->slots[host->slot_id])
114
115/*
116 * MMC Host controller read/write API's
117 */
118#define OMAP_HSMMC_READ(base, reg) \
119 __raw_readl((base) + OMAP_HSMMC_##reg)
120
121#define OMAP_HSMMC_WRITE(base, reg, val) \
122 __raw_writel((val), (base) + OMAP_HSMMC_##reg)
123
124struct mmc_omap_host {
125 struct device *dev;
126 struct mmc_host *mmc;
127 struct mmc_request *mrq;
128 struct mmc_command *cmd;
129 struct mmc_data *data;
130 struct clk *fclk;
131 struct clk *iclk;
132 struct clk *dbclk;
133 struct semaphore sem;
134 struct work_struct mmc_carddetect_work;
135 void __iomem *base;
136 resource_size_t mapbase;
137 unsigned int id;
138 unsigned int dma_len;
139 unsigned int dma_dir;
140 unsigned char bus_mode;
141 unsigned char datadir;
142 u32 *buffer;
143 u32 bytesleft;
144 int suspended;
145 int irq;
146 int carddetect;
147 int use_dma, dma_ch;
148 int initstr;
149 int slot_id;
150 int dbclk_enabled;
151 struct omap_mmc_platform_data *pdata;
152};
153
154/*
155 * Stop clock to the card
156 */
157static void omap_mmc_stop_clock(struct mmc_omap_host *host)
158{
159 OMAP_HSMMC_WRITE(host->base, SYSCTL,
160 OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN);
161 if ((OMAP_HSMMC_READ(host->base, SYSCTL) & CEN) != 0x0)
 162		dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n");
163}
164
165/*
166 * Send init stream sequence to card
167 * before sending IDLE command
168 */
169static void send_init_stream(struct mmc_omap_host *host)
170{
171 int reg = 0;
172 unsigned long timeout;
173
174 disable_irq(host->irq);
175 OMAP_HSMMC_WRITE(host->base, CON,
176 OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
177 OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD);
178
179 timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
180 while ((reg != CC) && time_before(jiffies, timeout))
181 reg = OMAP_HSMMC_READ(host->base, STAT) & CC;
182
183 OMAP_HSMMC_WRITE(host->base, CON,
184 OMAP_HSMMC_READ(host->base, CON) & ~INIT_STREAM);
185 enable_irq(host->irq);
186}
187
188static inline
189int mmc_omap_cover_is_closed(struct mmc_omap_host *host)
190{
191 int r = 1;
192
193 if (host->pdata->slots[host->slot_id].get_cover_state)
194 r = host->pdata->slots[host->slot_id].get_cover_state(host->dev,
195 host->slot_id);
196 return r;
197}
198
199static ssize_t
200mmc_omap_show_cover_switch(struct device *dev, struct device_attribute *attr,
201 char *buf)
202{
203 struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
204 struct mmc_omap_host *host = mmc_priv(mmc);
205
206 return sprintf(buf, "%s\n", mmc_omap_cover_is_closed(host) ? "closed" :
207 "open");
208}
209
210static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);
211
212static ssize_t
213mmc_omap_show_slot_name(struct device *dev, struct device_attribute *attr,
214 char *buf)
215{
216 struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
217 struct mmc_omap_host *host = mmc_priv(mmc);
218 struct omap_mmc_slot_data slot = host->pdata->slots[host->slot_id];
219
220 return sprintf(buf, "slot:%s\n", slot.name);
221}
222
223static DEVICE_ATTR(slot_name, S_IRUGO, mmc_omap_show_slot_name, NULL);
224
225/*
226 * Configure the response type and send the cmd.
227 */
228static void
229mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd,
230 struct mmc_data *data)
231{
232 int cmdreg = 0, resptype = 0, cmdtype = 0;
233
234 dev_dbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n",
235 mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
236 host->cmd = cmd;
237
238 /*
239 * Clear status bits and enable interrupts
240 */
241 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
242 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
243 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
244
245 if (cmd->flags & MMC_RSP_PRESENT) {
246 if (cmd->flags & MMC_RSP_136)
247 resptype = 1;
248 else
249 resptype = 2;
250 }
251
252 /*
 253	 * Unlike the OMAP1 controller, the cmdtype does not seem to be based on
 254	 * ac, bc, adtc, bcr. Only commands ending an open-ended transfer need
 255	 * a value of 0x3; the rest use 0x0.
256 */
257 if (cmd == host->mrq->stop)
258 cmdtype = 0x3;
259
260 cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);
261
262 if (data) {
263 cmdreg |= DP_SELECT | MSBS | BCE;
264 if (data->flags & MMC_DATA_READ)
265 cmdreg |= DDIR;
266 else
267 cmdreg &= ~(DDIR);
268 }
269
270 if (host->use_dma)
271 cmdreg |= DMA_EN;
272
273 OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
274 OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
275}
276
277/*
 278 * Notify the MMC core that the transfer is complete
279 */
280static void
281mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
282{
283 host->data = NULL;
284
285 if (host->use_dma && host->dma_ch != -1)
286 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
287 host->dma_dir);
288
289 host->datadir = OMAP_MMC_DATADIR_NONE;
290
291 if (!data->error)
292 data->bytes_xfered += data->blocks * (data->blksz);
293 else
294 data->bytes_xfered = 0;
295
296 if (!data->stop) {
297 host->mrq = NULL;
298 mmc_request_done(host->mmc, data->mrq);
299 return;
300 }
301 mmc_omap_start_command(host, data->stop, NULL);
302}
303
304/*
305 * Notify the core about command completion
306 */
307static void
308mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
309{
310 host->cmd = NULL;
311
312 if (cmd->flags & MMC_RSP_PRESENT) {
313 if (cmd->flags & MMC_RSP_136) {
314 /* response type 2 */
315 cmd->resp[3] = OMAP_HSMMC_READ(host->base, RSP10);
316 cmd->resp[2] = OMAP_HSMMC_READ(host->base, RSP32);
317 cmd->resp[1] = OMAP_HSMMC_READ(host->base, RSP54);
318 cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP76);
319 } else {
320 /* response types 1, 1b, 3, 4, 5, 6 */
321 cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10);
322 }
323 }
324 if (host->data == NULL || cmd->error) {
325 host->mrq = NULL;
326 mmc_request_done(host->mmc, cmd->mrq);
327 }
328}
329
330/*
331 * DMA clean up for command errors
332 */
333static void mmc_dma_cleanup(struct mmc_omap_host *host)
334{
335 host->data->error = -ETIMEDOUT;
336
337 if (host->use_dma && host->dma_ch != -1) {
338 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
339 host->dma_dir);
340 omap_free_dma(host->dma_ch);
341 host->dma_ch = -1;
342 up(&host->sem);
343 }
344 host->data = NULL;
345 host->datadir = OMAP_MMC_DATADIR_NONE;
346}
347
348/*
349 * Readable error output
350 */
351#ifdef CONFIG_MMC_DEBUG
352static void mmc_omap_report_irq(struct mmc_omap_host *host, u32 status)
353{
354 /* --- means reserved bit without definition at documentation */
355 static const char *mmc_omap_status_bits[] = {
356 "CC", "TC", "BGE", "---", "BWR", "BRR", "---", "---", "CIRQ",
357 "OBI", "---", "---", "---", "---", "---", "ERRI", "CTO", "CCRC",
358 "CEB", "CIE", "DTO", "DCRC", "DEB", "---", "ACE", "---",
359 "---", "---", "---", "CERR", "CERR", "BADA", "---", "---", "---"
360 };
361 char res[256];
362 char *buf = res;
363 int len, i;
364
365 len = sprintf(buf, "MMC IRQ 0x%x :", status);
366 buf += len;
367
368 for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
369 if (status & (1 << i)) {
370 len = sprintf(buf, " %s", mmc_omap_status_bits[i]);
371 buf += len;
372 }
373
374 dev_dbg(mmc_dev(host->mmc), "%s\n", res);
375}
376#endif /* CONFIG_MMC_DEBUG */
377
378
379/*
380 * MMC controller IRQ handler
381 */
382static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
383{
384 struct mmc_omap_host *host = dev_id;
385 struct mmc_data *data;
386 int end_cmd = 0, end_trans = 0, status;
387
388 if (host->cmd == NULL && host->data == NULL) {
389 OMAP_HSMMC_WRITE(host->base, STAT,
390 OMAP_HSMMC_READ(host->base, STAT));
391 return IRQ_HANDLED;
392 }
393
394 data = host->data;
395 status = OMAP_HSMMC_READ(host->base, STAT);
396 dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
397
398 if (status & ERR) {
399#ifdef CONFIG_MMC_DEBUG
400 mmc_omap_report_irq(host, status);
401#endif
402 if ((status & CMD_TIMEOUT) ||
403 (status & CMD_CRC)) {
404 if (host->cmd) {
405 if (status & CMD_TIMEOUT) {
406 OMAP_HSMMC_WRITE(host->base, SYSCTL,
407 OMAP_HSMMC_READ(host->base,
408 SYSCTL) | SRC);
409 while (OMAP_HSMMC_READ(host->base,
410 SYSCTL) & SRC)
411 ;
412
413 host->cmd->error = -ETIMEDOUT;
414 } else {
415 host->cmd->error = -EILSEQ;
416 }
417 end_cmd = 1;
418 }
419 if (host->data)
420 mmc_dma_cleanup(host);
421 }
422 if ((status & DATA_TIMEOUT) ||
423 (status & DATA_CRC)) {
424 if (host->data) {
425 if (status & DATA_TIMEOUT)
426 mmc_dma_cleanup(host);
427 else
428 host->data->error = -EILSEQ;
429 OMAP_HSMMC_WRITE(host->base, SYSCTL,
430 OMAP_HSMMC_READ(host->base,
431 SYSCTL) | SRD);
432 while (OMAP_HSMMC_READ(host->base,
433 SYSCTL) & SRD)
434 ;
435 end_trans = 1;
436 }
437 }
438 if (status & CARD_ERR) {
439 dev_dbg(mmc_dev(host->mmc),
440 "Ignoring card err CMD%d\n", host->cmd->opcode);
441 if (host->cmd)
442 end_cmd = 1;
443 if (host->data)
444 end_trans = 1;
445 }
446 }
447
448 OMAP_HSMMC_WRITE(host->base, STAT, status);
449
450 if (end_cmd || (status & CC))
451 mmc_omap_cmd_done(host, host->cmd);
452 if (end_trans || (status & TC))
453 mmc_omap_xfer_done(host, data);
454
455 return IRQ_HANDLED;
456}
457
458/*
459 * Switch MMC operating voltage
460 */
461static int omap_mmc_switch_opcond(struct mmc_omap_host *host, int vdd)
462{
463 u32 reg_val = 0;
464 int ret;
465
466 /* Disable the clocks */
467 clk_disable(host->fclk);
468 clk_disable(host->iclk);
469 clk_disable(host->dbclk);
470
471 /* Turn the power off */
472 ret = mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0);
473 if (ret != 0)
474 goto err;
475
476 /* Turn the power ON with given VDD 1.8 or 3.0v */
477 ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1, vdd);
478 if (ret != 0)
479 goto err;
480
481 clk_enable(host->fclk);
482 clk_enable(host->iclk);
483 clk_enable(host->dbclk);
484
485 OMAP_HSMMC_WRITE(host->base, HCTL,
486 OMAP_HSMMC_READ(host->base, HCTL) & SDVSCLR);
487 reg_val = OMAP_HSMMC_READ(host->base, HCTL);
488 /*
489 * If a MMC dual voltage card is detected, the set_ios fn calls
490 * this fn with VDD bit set for 1.8V. Upon card removal from the
491 * slot, omap_mmc_set_ios sets the VDD back to 3V on MMC_POWER_OFF.
492 *
493 * Only MMC1 supports 3.0V. MMC2 will not function if SDVS30 is
494 * set in HCTL.
495 */
496 if (host->id == OMAP_MMC1_DEVID && (((1 << vdd) == MMC_VDD_32_33) ||
497 ((1 << vdd) == MMC_VDD_33_34)))
498 reg_val |= SDVS30;
499 if ((1 << vdd) == MMC_VDD_165_195)
500 reg_val |= SDVS18;
501
502 OMAP_HSMMC_WRITE(host->base, HCTL, reg_val);
503
504 OMAP_HSMMC_WRITE(host->base, HCTL,
505 OMAP_HSMMC_READ(host->base, HCTL) | SDBP);
506
507 return 0;
508err:
509 dev_dbg(mmc_dev(host->mmc), "Unable to switch operating voltage\n");
510 return ret;
511}
512
513/*
514 * Work Item to notify the core about card insertion/removal
515 */
516static void mmc_omap_detect(struct work_struct *work)
517{
518 struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
519 mmc_carddetect_work);
520
521 sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");
522 if (host->carddetect) {
523 mmc_detect_change(host->mmc, (HZ * 200) / 1000);
524 } else {
525 OMAP_HSMMC_WRITE(host->base, SYSCTL,
526 OMAP_HSMMC_READ(host->base, SYSCTL) | SRD);
527 while (OMAP_HSMMC_READ(host->base, SYSCTL) & SRD)
528 ;
529
530 mmc_detect_change(host->mmc, (HZ * 50) / 1000);
531 }
532}
533
534/*
535 * ISR for handling card insertion and removal
536 */
537static irqreturn_t omap_mmc_cd_handler(int irq, void *dev_id)
538{
539 struct mmc_omap_host *host = (struct mmc_omap_host *)dev_id;
540
541 host->carddetect = mmc_slot(host).card_detect(irq);
542 schedule_work(&host->mmc_carddetect_work);
543
544 return IRQ_HANDLED;
545}
546
547/*
 548 * DMA callback function
549 */
550static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
551{
552 struct mmc_omap_host *host = data;
553
554 if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ)
555 dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n");
556
557 if (host->dma_ch < 0)
558 return;
559
560 omap_free_dma(host->dma_ch);
561 host->dma_ch = -1;
562 /*
 563	 * DMA callback: runs in interrupt context.
 564	 * mutex_unlock will throw a kernel warning if used.
565 */
566 up(&host->sem);
567}
568
569/*
 570 * Configure DMA source and destination parameters
571 */
572static int mmc_omap_config_dma_param(int sync_dir, struct mmc_omap_host *host,
573 struct mmc_data *data)
574{
575 if (sync_dir == 0) {
576 omap_set_dma_dest_params(host->dma_ch, 0,
577 OMAP_DMA_AMODE_CONSTANT,
578 (host->mapbase + OMAP_HSMMC_DATA), 0, 0);
579 omap_set_dma_src_params(host->dma_ch, 0,
580 OMAP_DMA_AMODE_POST_INC,
581 sg_dma_address(&data->sg[0]), 0, 0);
582 } else {
583 omap_set_dma_src_params(host->dma_ch, 0,
584 OMAP_DMA_AMODE_CONSTANT,
585 (host->mapbase + OMAP_HSMMC_DATA), 0, 0);
586 omap_set_dma_dest_params(host->dma_ch, 0,
587 OMAP_DMA_AMODE_POST_INC,
588 sg_dma_address(&data->sg[0]), 0, 0);
589 }
590 return 0;
591}
592/*
593 * Routine to configure and start DMA for the MMC card
594 */
595static int
596mmc_omap_start_dma_transfer(struct mmc_omap_host *host, struct mmc_request *req)
597{
598 int sync_dev, sync_dir = 0;
599 int dma_ch = 0, ret = 0, err = 1;
600 struct mmc_data *data = req->data;
601
602 /*
603 * If for some reason the DMA transfer is still active,
 604	 * we wait for a timeout period and free the DMA channel
605 */
606 if (host->dma_ch != -1) {
607 set_current_state(TASK_UNINTERRUPTIBLE);
608 schedule_timeout(100);
609 if (down_trylock(&host->sem)) {
610 omap_free_dma(host->dma_ch);
611 host->dma_ch = -1;
612 up(&host->sem);
613 return err;
614 }
615 } else {
616 if (down_trylock(&host->sem))
617 return err;
618 }
619
620 if (!(data->flags & MMC_DATA_WRITE)) {
621 host->dma_dir = DMA_FROM_DEVICE;
622 if (host->id == OMAP_MMC1_DEVID)
623 sync_dev = OMAP24XX_DMA_MMC1_RX;
624 else
625 sync_dev = OMAP24XX_DMA_MMC2_RX;
626 } else {
627 host->dma_dir = DMA_TO_DEVICE;
628 if (host->id == OMAP_MMC1_DEVID)
629 sync_dev = OMAP24XX_DMA_MMC1_TX;
630 else
631 sync_dev = OMAP24XX_DMA_MMC2_TX;
632 }
633
634 ret = omap_request_dma(sync_dev, "MMC/SD", mmc_omap_dma_cb,
635 host, &dma_ch);
636 if (ret != 0) {
637 dev_dbg(mmc_dev(host->mmc),
638 "%s: omap_request_dma() failed with %d\n",
639 mmc_hostname(host->mmc), ret);
640 return ret;
641 }
642
643 host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
644 data->sg_len, host->dma_dir);
645 host->dma_ch = dma_ch;
646
647 if (!(data->flags & MMC_DATA_WRITE))
648 mmc_omap_config_dma_param(1, host, data);
649 else
650 mmc_omap_config_dma_param(0, host, data);
651
652 if ((data->blksz % 4) == 0)
653 omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
654 (data->blksz / 4), data->blocks, OMAP_DMA_SYNC_FRAME,
655 sync_dev, sync_dir);
656 else
657 /* REVISIT: The MMC buffer increments only when MSB is written.
 658		 * Return an error for a blksz which is not a multiple of four.
659 */
660 return -EINVAL;
661
662 omap_start_dma(dma_ch);
663 return 0;
664}
665
666static void set_data_timeout(struct mmc_omap_host *host,
667 struct mmc_request *req)
668{
669 unsigned int timeout, cycle_ns;
670 uint32_t reg, clkd, dto = 0;
671
672 reg = OMAP_HSMMC_READ(host->base, SYSCTL);
673 clkd = (reg & CLKD_MASK) >> CLKD_SHIFT;
674 if (clkd == 0)
675 clkd = 1;
676
677 cycle_ns = 1000000000 / (clk_get_rate(host->fclk) / clkd);
678 timeout = req->data->timeout_ns / cycle_ns;
679 timeout += req->data->timeout_clks;
680 if (timeout) {
681 while ((timeout & 0x80000000) == 0) {
682 dto += 1;
683 timeout <<= 1;
684 }
685 dto = 31 - dto;
686 timeout <<= 1;
687 if (timeout && dto)
688 dto += 1;
689 if (dto >= 13)
690 dto -= 13;
691 else
692 dto = 0;
693 if (dto > 14)
694 dto = 14;
695 }
696
697 reg &= ~DTO_MASK;
698 reg |= dto << DTO_SHIFT;
699 OMAP_HSMMC_WRITE(host->base, SYSCTL, reg);
700}
701
702/*
703 * Configure block length for MMC/SD cards and initiate the transfer.
704 */
705static int
706mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
707{
708 int ret;
709 host->data = req->data;
710
711 if (req->data == NULL) {
712 host->datadir = OMAP_MMC_DATADIR_NONE;
713 OMAP_HSMMC_WRITE(host->base, BLK, 0);
714 return 0;
715 }
716
717 OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz)
718 | (req->data->blocks << 16));
719 set_data_timeout(host, req);
720
721 host->datadir = (req->data->flags & MMC_DATA_WRITE) ?
722 OMAP_MMC_DATADIR_WRITE : OMAP_MMC_DATADIR_READ;
723
724 if (host->use_dma) {
725 ret = mmc_omap_start_dma_transfer(host, req);
726 if (ret != 0) {
727 dev_dbg(mmc_dev(host->mmc), "MMC start dma failure\n");
728 return ret;
729 }
730 }
731 return 0;
732}
733
734/*
 735 * Request function for read/write operations
736 */
737static void omap_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
738{
739 struct mmc_omap_host *host = mmc_priv(mmc);
740
741 WARN_ON(host->mrq != NULL);
742 host->mrq = req;
743 mmc_omap_prepare_data(host, req);
744 mmc_omap_start_command(host, req->cmd, req->data);
745}
746
747
748/* Routine to configure clock values. Exposed API to core */
749static void omap_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
750{
751 struct mmc_omap_host *host = mmc_priv(mmc);
752 u16 dsor = 0;
753 unsigned long regval;
754 unsigned long timeout;
755
756 switch (ios->power_mode) {
757 case MMC_POWER_OFF:
758 mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0);
759 /*
760 * Reset bus voltage to 3V if it got set to 1.8V earlier.
761 * REVISIT: If we are able to detect cards after unplugging
762 * a 1.8V card, this code should not be needed.
763 */
764 if (!(OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET)) {
765 int vdd = fls(host->mmc->ocr_avail) - 1;
766 if (omap_mmc_switch_opcond(host, vdd) != 0)
767 host->mmc->ios.vdd = vdd;
768 }
769 break;
770 case MMC_POWER_UP:
771 mmc_slot(host).set_power(host->dev, host->slot_id, 1, ios->vdd);
772 break;
773 }
774
775 switch (mmc->ios.bus_width) {
776 case MMC_BUS_WIDTH_4:
777 OMAP_HSMMC_WRITE(host->base, HCTL,
778 OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
779 break;
780 case MMC_BUS_WIDTH_1:
781 OMAP_HSMMC_WRITE(host->base, HCTL,
782 OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
783 break;
784 }
785
786 if (host->id == OMAP_MMC1_DEVID) {
787 /* Only MMC1 can operate at 3V/1.8V */
788 if ((OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET) &&
789 (ios->vdd == DUAL_VOLT_OCR_BIT)) {
790 /*
791 * The mmc_select_voltage fn of the core does
792 * not seem to set the power_mode to
793 * MMC_POWER_UP upon recalculating the voltage.
794 * vdd 1.8v.
795 */
796 if (omap_mmc_switch_opcond(host, ios->vdd) != 0)
797 dev_dbg(mmc_dev(host->mmc),
798 "Switch operation failed\n");
799 }
800 }
801
802 if (ios->clock) {
803 dsor = OMAP_MMC_MASTER_CLOCK / ios->clock;
804 if (dsor < 1)
805 dsor = 1;
806
807 if (OMAP_MMC_MASTER_CLOCK / dsor > ios->clock)
808 dsor++;
809
810 if (dsor > 250)
811 dsor = 250;
812 }
813 omap_mmc_stop_clock(host);
814 regval = OMAP_HSMMC_READ(host->base, SYSCTL);
815 regval = regval & ~(CLKD_MASK);
816 regval = regval | (dsor << 6) | (DTO << 16);
817 OMAP_HSMMC_WRITE(host->base, SYSCTL, regval);
818 OMAP_HSMMC_WRITE(host->base, SYSCTL,
819 OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);
820
821 /* Wait till the ICS bit is set */
822 timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
823 while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != 0x2
824 && time_before(jiffies, timeout))
825 msleep(1);
826
827 OMAP_HSMMC_WRITE(host->base, SYSCTL,
828 OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
829
830 if (ios->power_mode == MMC_POWER_ON)
831 send_init_stream(host);
832
833 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
834 OMAP_HSMMC_WRITE(host->base, CON,
835 OMAP_HSMMC_READ(host->base, CON) | OD);
836}
837
838static int omap_hsmmc_get_cd(struct mmc_host *mmc)
839{
840 struct mmc_omap_host *host = mmc_priv(mmc);
841 struct omap_mmc_platform_data *pdata = host->pdata;
842
843 if (!pdata->slots[0].card_detect)
844 return -ENOSYS;
845 return pdata->slots[0].card_detect(pdata->slots[0].card_detect_irq);
846}
847
848static int omap_hsmmc_get_ro(struct mmc_host *mmc)
849{
850 struct mmc_omap_host *host = mmc_priv(mmc);
851 struct omap_mmc_platform_data *pdata = host->pdata;
852
853 if (!pdata->slots[0].get_ro)
854 return -ENOSYS;
855 return pdata->slots[0].get_ro(host->dev, 0);
856}
857
858static struct mmc_host_ops mmc_omap_ops = {
859 .request = omap_mmc_request,
860 .set_ios = omap_mmc_set_ios,
861 .get_cd = omap_hsmmc_get_cd,
862 .get_ro = omap_hsmmc_get_ro,
863 /* NYET -- enable_sdio_irq */
864};
865
866static int __init omap_mmc_probe(struct platform_device *pdev)
867{
868 struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
869 struct mmc_host *mmc;
870 struct mmc_omap_host *host = NULL;
871 struct resource *res;
872 int ret = 0, irq;
873 u32 hctl, capa;
874
875 if (pdata == NULL) {
876 dev_err(&pdev->dev, "Platform Data is missing\n");
877 return -ENXIO;
878 }
879
880 if (pdata->nr_slots == 0) {
881 dev_err(&pdev->dev, "No Slots\n");
882 return -ENXIO;
883 }
884
885 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
886 irq = platform_get_irq(pdev, 0);
887 if (res == NULL || irq < 0)
888 return -ENXIO;
889
890 res = request_mem_region(res->start, res->end - res->start + 1,
891 pdev->name);
892 if (res == NULL)
893 return -EBUSY;
894
895 mmc = mmc_alloc_host(sizeof(struct mmc_omap_host), &pdev->dev);
896 if (!mmc) {
897 ret = -ENOMEM;
898 goto err;
899 }
900
901 host = mmc_priv(mmc);
902 host->mmc = mmc;
903 host->pdata = pdata;
904 host->dev = &pdev->dev;
905 host->use_dma = 1;
906 host->dev->dma_mask = &pdata->dma_mask;
907 host->dma_ch = -1;
908 host->irq = irq;
909 host->id = pdev->id;
910 host->slot_id = 0;
911 host->mapbase = res->start;
912 host->base = ioremap(host->mapbase, SZ_4K);
913
914 platform_set_drvdata(pdev, host);
915 INIT_WORK(&host->mmc_carddetect_work, mmc_omap_detect);
916
917 mmc->ops = &mmc_omap_ops;
918 mmc->f_min = 400000;
919 mmc->f_max = 52000000;
920
921 sema_init(&host->sem, 1);
922
923 host->iclk = clk_get(&pdev->dev, "mmchs_ick");
924 if (IS_ERR(host->iclk)) {
925 ret = PTR_ERR(host->iclk);
926 host->iclk = NULL;
927 goto err1;
928 }
929 host->fclk = clk_get(&pdev->dev, "mmchs_fck");
930 if (IS_ERR(host->fclk)) {
931 ret = PTR_ERR(host->fclk);
932 host->fclk = NULL;
933 clk_put(host->iclk);
934 goto err1;
935 }
936
937 if (clk_enable(host->fclk) != 0) {
938 clk_put(host->iclk);
939 clk_put(host->fclk);
940 goto err1;
941 }
942
943 if (clk_enable(host->iclk) != 0) {
944 clk_disable(host->fclk);
945 clk_put(host->iclk);
946 clk_put(host->fclk);
947 goto err1;
948 }
949
950 host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
951 /*
952 * MMC can still work without debounce clock.
953 */
954 if (IS_ERR(host->dbclk))
955 dev_warn(mmc_dev(host->mmc), "Failed to get debounce clock\n");
956 else
957 if (clk_enable(host->dbclk) != 0)
958 dev_dbg(mmc_dev(host->mmc), "Enabling debounce"
959 " clk failed\n");
960 else
961 host->dbclk_enabled = 1;
962
963#ifdef CONFIG_MMC_BLOCK_BOUNCE
964 mmc->max_phys_segs = 1;
965 mmc->max_hw_segs = 1;
966#endif
967 mmc->max_blk_size = 512; /* Block Length at max can be 1024 */
968 mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */
969 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
970 mmc->max_seg_size = mmc->max_req_size;
971
972 mmc->ocr_avail = mmc_slot(host).ocr_mask;
973 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
974
975 if (pdata->slots[host->slot_id].wires >= 4)
976 mmc->caps |= MMC_CAP_4_BIT_DATA;
977
978 /* Only MMC1 supports 3.0V */
979 if (host->id == OMAP_MMC1_DEVID) {
980 hctl = SDVS30;
981 capa = VS30 | VS18;
982 } else {
983 hctl = SDVS18;
984 capa = VS18;
985 }
986
987 OMAP_HSMMC_WRITE(host->base, HCTL,
988 OMAP_HSMMC_READ(host->base, HCTL) | hctl);
989
990 OMAP_HSMMC_WRITE(host->base, CAPA,
991 OMAP_HSMMC_READ(host->base, CAPA) | capa);
992
993 /* Set the controller to AUTO IDLE mode */
994 OMAP_HSMMC_WRITE(host->base, SYSCONFIG,
995 OMAP_HSMMC_READ(host->base, SYSCONFIG) | AUTOIDLE);
996
997 /* Set SD bus power bit */
998 OMAP_HSMMC_WRITE(host->base, HCTL,
999 OMAP_HSMMC_READ(host->base, HCTL) | SDBP);
1000
1001 /* Request IRQ for MMC operations */
1002 ret = request_irq(host->irq, mmc_omap_irq, IRQF_DISABLED,
1003 mmc_hostname(mmc), host);
1004 if (ret) {
1005 dev_dbg(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n");
1006 goto err_irq;
1007 }
1008
1009 if (pdata->init != NULL) {
1010 if (pdata->init(&pdev->dev) != 0) {
1011 dev_dbg(mmc_dev(host->mmc),
1012 "Unable to configure MMC IRQs\n");
1013 goto err_irq_cd_init;
1014 }
1015 }
1016
1017 /* Request IRQ for card detect */
1018 if ((mmc_slot(host).card_detect_irq) && (mmc_slot(host).card_detect)) {
1019 ret = request_irq(mmc_slot(host).card_detect_irq,
1020 omap_mmc_cd_handler,
1021 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
1022 | IRQF_DISABLED,
1023 mmc_hostname(mmc), host);
1024 if (ret) {
1025 dev_dbg(mmc_dev(host->mmc),
1026 "Unable to grab MMC CD IRQ\n");
1027 goto err_irq_cd;
1028 }
1029 }
1030
1031 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
1032 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
1033
1034 mmc_add_host(mmc);
1035
1036 if (host->pdata->slots[host->slot_id].name != NULL) {
1037 ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name);
1038 if (ret < 0)
1039 goto err_slot_name;
1040 }
1041 if (mmc_slot(host).card_detect_irq && mmc_slot(host).card_detect &&
1042 host->pdata->slots[host->slot_id].get_cover_state) {
1043 ret = device_create_file(&mmc->class_dev,
1044 &dev_attr_cover_switch);
1045 if (ret < 0)
1046 goto err_cover_switch;
1047 }
1048
1049 return 0;
1050
1051err_cover_switch:
1052 device_remove_file(&mmc->class_dev, &dev_attr_cover_switch);
1053err_slot_name:
1054 mmc_remove_host(mmc);
1055err_irq_cd:
1056 free_irq(mmc_slot(host).card_detect_irq, host);
1057err_irq_cd_init:
1058 free_irq(host->irq, host);
1059err_irq:
1060 clk_disable(host->fclk);
1061 clk_disable(host->iclk);
1062 clk_put(host->fclk);
1063 clk_put(host->iclk);
1064 if (host->dbclk_enabled) {
1065 clk_disable(host->dbclk);
1066 clk_put(host->dbclk);
1067 }
1068
1069err1:
1070 iounmap(host->base);
1071err:
1072 dev_dbg(mmc_dev(host->mmc), "Probe Failed\n");
1073 release_mem_region(res->start, res->end - res->start + 1);
1074 if (host)
1075 mmc_free_host(mmc);
1076 return ret;
1077}
1078
1079static int omap_mmc_remove(struct platform_device *pdev)
1080{
1081 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1082 struct resource *res;
1083
1084 if (host) {
1085 mmc_remove_host(host->mmc);
1086 if (host->pdata->cleanup)
1087 host->pdata->cleanup(&pdev->dev);
1088 free_irq(host->irq, host);
1089 if (mmc_slot(host).card_detect_irq)
1090 free_irq(mmc_slot(host).card_detect_irq, host);
1091 flush_scheduled_work();
1092
1093 clk_disable(host->fclk);
1094 clk_disable(host->iclk);
1095 clk_put(host->fclk);
1096 clk_put(host->iclk);
1097 if (host->dbclk_enabled) {
1098 clk_disable(host->dbclk);
1099 clk_put(host->dbclk);
1100 }
1101
1102 mmc_free_host(host->mmc);
1103 iounmap(host->base);
1104 }
1105
1106 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1107 if (res)
1108 release_mem_region(res->start, res->end - res->start + 1);
1109 platform_set_drvdata(pdev, NULL);
1110
1111 return 0;
1112}
1113
1114#ifdef CONFIG_PM
1115static int omap_mmc_suspend(struct platform_device *pdev, pm_message_t state)
1116{
1117 int ret = 0;
1118 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1119
1120 if (host && host->suspended)
1121 return 0;
1122
1123 if (host) {
1124 ret = mmc_suspend_host(host->mmc, state);
1125 if (ret == 0) {
1126 host->suspended = 1;
1127
1128 OMAP_HSMMC_WRITE(host->base, ISE, 0);
1129 OMAP_HSMMC_WRITE(host->base, IE, 0);
1130
1131 if (host->pdata->suspend) {
1132 ret = host->pdata->suspend(&pdev->dev,
1133 host->slot_id);
1134 if (ret)
1135 dev_dbg(mmc_dev(host->mmc),
1136 "Unable to handle MMC board"
1137 " level suspend\n");
1138 }
1139
1140 if (!(OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET)) {
1141 OMAP_HSMMC_WRITE(host->base, HCTL,
1142 OMAP_HSMMC_READ(host->base, HCTL)
1143 & SDVSCLR);
1144 OMAP_HSMMC_WRITE(host->base, HCTL,
1145 OMAP_HSMMC_READ(host->base, HCTL)
1146 | SDVS30);
1147 OMAP_HSMMC_WRITE(host->base, HCTL,
1148 OMAP_HSMMC_READ(host->base, HCTL)
1149 | SDBP);
1150 }
1151
1152 clk_disable(host->fclk);
1153 clk_disable(host->iclk);
1154 clk_disable(host->dbclk);
1155 }
1156
1157 }
1158 return ret;
1159}
1160
1161/* Routine to resume the MMC device */
1162static int omap_mmc_resume(struct platform_device *pdev)
1163{
1164 int ret = 0;
1165 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1166
1167 if (host && !host->suspended)
1168 return 0;
1169
1170 if (host) {
1171
1172 ret = clk_enable(host->fclk);
1173 if (ret)
1174 goto clk_en_err;
1175
1176 ret = clk_enable(host->iclk);
1177 if (ret) {
1178 clk_disable(host->fclk);
1179 clk_put(host->fclk);
1180 goto clk_en_err;
1181 }
1182
1183 if (clk_enable(host->dbclk) != 0)
1184 dev_dbg(mmc_dev(host->mmc),
1185 "Enabling debounce clk failed\n");
1186
1187 if (host->pdata->resume) {
1188 ret = host->pdata->resume(&pdev->dev, host->slot_id);
1189 if (ret)
1190 dev_dbg(mmc_dev(host->mmc),
1191 "Unmask interrupt failed\n");
1192 }
1193
1194 /* Notify the core to resume the host */
1195 ret = mmc_resume_host(host->mmc);
1196 if (ret == 0)
1197 host->suspended = 0;
1198 }
1199
1200 return ret;
1201
1202clk_en_err:
1203 dev_dbg(mmc_dev(host->mmc),
1204 "Failed to enable MMC clocks during resume\n");
1205 return ret;
1206}
1207
1208#else
1209#define omap_mmc_suspend NULL
1210#define omap_mmc_resume NULL
1211#endif
1212
1213static struct platform_driver omap_mmc_driver = {
1214 .probe = omap_mmc_probe,
1215 .remove = omap_mmc_remove,
1216 .suspend = omap_mmc_suspend,
1217 .resume = omap_mmc_resume,
1218 .driver = {
1219 .name = DRIVER_NAME,
1220 .owner = THIS_MODULE,
1221 },
1222};
1223
1224static int __init omap_mmc_init(void)
1225{
1226 /* Register the MMC driver */
1227 return platform_driver_register(&omap_mmc_driver);
1228}
1229
1230static void __exit omap_mmc_cleanup(void)
1231{
1232 /* Unregister MMC driver */
1233 platform_driver_unregister(&omap_mmc_driver);
1234}
1235
1236module_init(omap_mmc_init);
1237module_exit(omap_mmc_cleanup);
1238
1239MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver");
1240MODULE_LICENSE("GPL");
1241MODULE_ALIAS("platform:" DRIVER_NAME);
1242MODULE_AUTHOR("Texas Instruments Inc");
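
omap_mmc_probe() above consumes everything from omap_mmc_platform_data: nr_slots, dma_mask, and the per-slot set_power, ocr_mask, wires and name fields it dereferences. A minimal board-side sketch, assuming a made-up power helper and taking the field names only from what the driver reads; the real OMAP board files of this period are the authoritative reference:

#include <linux/mmc/host.h>
#include <mach/mmc.h>

/* Hypothetical helper: a real board would switch a regulator or GPIO here. */
static int board_mmc1_set_power(struct device *dev, int slot, int power_on,
				int vdd)
{
	return 0;
}

static struct omap_mmc_platform_data board_mmc1_data = {
	.nr_slots	= 1,
	.dma_mask	= 0xffffffff,
	.slots[0] = {
		.set_power	= board_mmc1_set_power,
		.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
		.wires		= 4,	/* >= 4 enables MMC_CAP_4_BIT_DATA above */
		.name		= "external",
	},
};

The platform device carrying this data would be named "mmci-omap-hs" (DRIVER_NAME) with .id = 0 so the probe treats it as OMAP_MMC1_DEVID, plus the controller's MEM and IRQ resources that omap_mmc_probe() requests.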
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index fcc98a4cce3c..35a98eec7414 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -20,7 +20,7 @@
20#include <linux/irq.h> 20#include <linux/irq.h>
21#include <linux/io.h> 21#include <linux/io.h>
22 22
23#include <asm/dma.h> 23#include <mach/dma.h>
24 24
25#include <mach/regs-sdi.h> 25#include <mach/regs-sdi.h>
26#include <mach/regs-gpio.h> 26#include <mach/regs-gpio.h>
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 96ecc1766fa8..77a4f1446156 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -629,7 +629,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
629 } 629 }
630 630
631 if (c->gpio_irq) { 631 if (c->gpio_irq) {
632 if ((r = omap_request_gpio(c->gpio_irq)) < 0) { 632 if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
633 dev_err(&pdev->dev, "Failed to request GPIO%d for " 633 dev_err(&pdev->dev, "Failed to request GPIO%d for "
634 "OneNAND\n", c->gpio_irq); 634 "OneNAND\n", c->gpio_irq);
635 goto err_iounmap; 635 goto err_iounmap;
@@ -726,7 +726,7 @@ err_release_dma:
726 free_irq(gpio_to_irq(c->gpio_irq), c); 726 free_irq(gpio_to_irq(c->gpio_irq), c);
727err_release_gpio: 727err_release_gpio:
728 if (c->gpio_irq) 728 if (c->gpio_irq)
729 omap_free_gpio(c->gpio_irq); 729 gpio_free(c->gpio_irq);
730err_iounmap: 730err_iounmap:
731 iounmap(c->onenand.base); 731 iounmap(c->onenand.base);
732err_release_mem_region: 732err_release_mem_region:
@@ -761,7 +761,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
761 platform_set_drvdata(pdev, NULL); 761 platform_set_drvdata(pdev, NULL);
762 if (c->gpio_irq) { 762 if (c->gpio_irq) {
763 free_irq(gpio_to_irq(c->gpio_irq), c); 763 free_irq(gpio_to_irq(c->gpio_irq), c);
764 omap_free_gpio(c->gpio_irq); 764 gpio_free(c->gpio_irq);
765 } 765 }
766 iounmap(c->onenand.base); 766 iounmap(c->onenand.base);
767 release_mem_region(c->phys_base, ONENAND_IO_SIZE); 767 release_mem_region(c->phys_base, ONENAND_IO_SIZE);
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 0c628a9e5339..c2d012fcc29b 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -208,9 +208,9 @@ am79c961_init_for_open(struct net_device *dev)
208 /* 208 /*
209 * Stop the chip. 209 * Stop the chip.
210 */ 210 */
211 spin_lock_irqsave(priv->chip_lock, flags); 211 spin_lock_irqsave(&priv->chip_lock, flags);
212 write_rreg (dev->base_addr, CSR0, CSR0_BABL|CSR0_CERR|CSR0_MISS|CSR0_MERR|CSR0_TINT|CSR0_RINT|CSR0_STOP); 212 write_rreg (dev->base_addr, CSR0, CSR0_BABL|CSR0_CERR|CSR0_MISS|CSR0_MERR|CSR0_TINT|CSR0_RINT|CSR0_STOP);
213 spin_unlock_irqrestore(priv->chip_lock, flags); 213 spin_unlock_irqrestore(&priv->chip_lock, flags);
214 214
215 write_ireg (dev->base_addr, 5, 0x00a0); /* Receive address LED */ 215 write_ireg (dev->base_addr, 5, 0x00a0); /* Receive address LED */
216 write_ireg (dev->base_addr, 6, 0x0081); /* Collision LED */ 216 write_ireg (dev->base_addr, 6, 0x0081); /* Collision LED */
@@ -332,10 +332,10 @@ am79c961_close(struct net_device *dev)
332 netif_stop_queue(dev); 332 netif_stop_queue(dev);
333 netif_carrier_off(dev); 333 netif_carrier_off(dev);
334 334
335 spin_lock_irqsave(priv->chip_lock, flags); 335 spin_lock_irqsave(&priv->chip_lock, flags);
336 write_rreg (dev->base_addr, CSR0, CSR0_STOP); 336 write_rreg (dev->base_addr, CSR0, CSR0_STOP);
337 write_rreg (dev->base_addr, CSR3, CSR3_MASKALL); 337 write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
338 spin_unlock_irqrestore(priv->chip_lock, flags); 338 spin_unlock_irqrestore(&priv->chip_lock, flags);
339 339
340 free_irq (dev->irq, dev); 340 free_irq (dev->irq, dev);
341 341
@@ -391,7 +391,7 @@ static void am79c961_setmulticastlist (struct net_device *dev)
391 am79c961_mc_hash(dmi, multi_hash); 391 am79c961_mc_hash(dmi, multi_hash);
392 } 392 }
393 393
394 spin_lock_irqsave(priv->chip_lock, flags); 394 spin_lock_irqsave(&priv->chip_lock, flags);
395 395
396 stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP; 396 stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP;
397 397
@@ -405,9 +405,9 @@ static void am79c961_setmulticastlist (struct net_device *dev)
405 * Spin waiting for chip to report suspend mode 405 * Spin waiting for chip to report suspend mode
406 */ 406 */
407 while ((read_rreg(dev->base_addr, CTRL1) & CTRL1_SPND) == 0) { 407 while ((read_rreg(dev->base_addr, CTRL1) & CTRL1_SPND) == 0) {
408 spin_unlock_irqrestore(priv->chip_lock, flags); 408 spin_unlock_irqrestore(&priv->chip_lock, flags);
409 nop(); 409 nop();
410 spin_lock_irqsave(priv->chip_lock, flags); 410 spin_lock_irqsave(&priv->chip_lock, flags);
411 } 411 }
412 } 412 }
413 413
@@ -429,7 +429,7 @@ static void am79c961_setmulticastlist (struct net_device *dev)
429 write_rreg(dev->base_addr, CTRL1, 0); 429 write_rreg(dev->base_addr, CTRL1, 0);
430 } 430 }
431 431
432 spin_unlock_irqrestore(priv->chip_lock, flags); 432 spin_unlock_irqrestore(&priv->chip_lock, flags);
433} 433}
434 434
435static void am79c961_timeout(struct net_device *dev) 435static void am79c961_timeout(struct net_device *dev)
@@ -467,10 +467,10 @@ am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
467 am_writeword (dev, hdraddr + 2, TMD_OWN|TMD_STP|TMD_ENP); 467 am_writeword (dev, hdraddr + 2, TMD_OWN|TMD_STP|TMD_ENP);
468 priv->txhead = head; 468 priv->txhead = head;
469 469
470 spin_lock_irqsave(priv->chip_lock, flags); 470 spin_lock_irqsave(&priv->chip_lock, flags);
471 write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA); 471 write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA);
472 dev->trans_start = jiffies; 472 dev->trans_start = jiffies;
473 spin_unlock_irqrestore(priv->chip_lock, flags); 473 spin_unlock_irqrestore(&priv->chip_lock, flags);
474 474
475 /* 475 /*
476 * If the next packet is owned by the ethernet device, 476 * If the next packet is owned by the ethernet device,
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 21764bfc048e..71f81c79d638 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -57,8 +57,8 @@
57#include "bnx2x.h" 57#include "bnx2x.h"
58#include "bnx2x_init.h" 58#include "bnx2x_init.h"
59 59
60#define DRV_MODULE_VERSION "1.45.24" 60#define DRV_MODULE_VERSION "1.45.26"
61#define DRV_MODULE_RELDATE "2009/01/14" 61#define DRV_MODULE_RELDATE "2009/01/26"
62#define BNX2X_BC_VER 0x040200 62#define BNX2X_BC_VER 0x040200
63 63
64/* Time in jiffies before concluding the transmitter is hung */ 64/* Time in jiffies before concluding the transmitter is hung */
@@ -740,8 +740,15 @@ static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
740 /* Tell compiler that status block fields can change */ 740 /* Tell compiler that status block fields can change */
741 barrier(); 741 barrier();
742 tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb); 742 tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
743 return ((fp->tx_pkt_prod != tx_cons_sb) || 743 return (fp->tx_pkt_cons != tx_cons_sb);
744 (fp->tx_pkt_prod != fp->tx_pkt_cons)); 744}
745
746static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
747{
748 /* Tell compiler that consumer and producer can change */
749 barrier();
750 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
751
745} 752}
746 753
747/* free skb in the packet ring at pos idx 754/* free skb in the packet ring at pos idx
@@ -5148,12 +5155,21 @@ static void enable_blocks_attention(struct bnx2x *bp)
5148} 5155}
5149 5156
5150 5157
5158static void bnx2x_reset_common(struct bnx2x *bp)
5159{
5160 /* reset_common */
5161 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5162 0xd3ffff7f);
5163 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5164}
5165
5151static int bnx2x_init_common(struct bnx2x *bp) 5166static int bnx2x_init_common(struct bnx2x *bp)
5152{ 5167{
5153 u32 val, i; 5168 u32 val, i;
5154 5169
5155 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp)); 5170 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5156 5171
5172 bnx2x_reset_common(bp);
5157 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); 5173 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5158 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc); 5174 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5159 5175
@@ -6134,8 +6150,8 @@ static void bnx2x_netif_start(struct bnx2x *bp)
6134static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) 6150static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6135{ 6151{
6136 bnx2x_int_disable_sync(bp, disable_hw); 6152 bnx2x_int_disable_sync(bp, disable_hw);
6153 bnx2x_napi_disable(bp);
6137 if (netif_running(bp->dev)) { 6154 if (netif_running(bp->dev)) {
6138 bnx2x_napi_disable(bp);
6139 netif_tx_disable(bp->dev); 6155 netif_tx_disable(bp->dev);
6140 bp->dev->trans_start = jiffies; /* prevent tx timeout */ 6156 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6141 } 6157 }
@@ -6319,7 +6335,7 @@ static void bnx2x_set_rx_mode(struct net_device *dev);
6319static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) 6335static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6320{ 6336{
6321 u32 load_code; 6337 u32 load_code;
6322 int i, rc; 6338 int i, rc = 0;
6323#ifdef BNX2X_STOP_ON_ERROR 6339#ifdef BNX2X_STOP_ON_ERROR
6324 if (unlikely(bp->panic)) 6340 if (unlikely(bp->panic))
6325 return -EPERM; 6341 return -EPERM;
@@ -6327,48 +6343,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6327 6343
6328 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; 6344 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6329 6345
6330 /* Send LOAD_REQUEST command to MCP
6331 Returns the type of LOAD command:
6332 if it is the first port to be initialized
6333 common blocks should be initialized, otherwise - not
6334 */
6335 if (!BP_NOMCP(bp)) {
6336 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6337 if (!load_code) {
6338 BNX2X_ERR("MCP response failure, aborting\n");
6339 return -EBUSY;
6340 }
6341 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6342 return -EBUSY; /* other port in diagnostic mode */
6343
6344 } else {
6345 int port = BP_PORT(bp);
6346
6347 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6348 load_count[0], load_count[1], load_count[2]);
6349 load_count[0]++;
6350 load_count[1 + port]++;
6351 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6352 load_count[0], load_count[1], load_count[2]);
6353 if (load_count[0] == 1)
6354 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6355 else if (load_count[1 + port] == 1)
6356 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6357 else
6358 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6359 }
6360
6361 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6362 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6363 bp->port.pmf = 1;
6364 else
6365 bp->port.pmf = 0;
6366 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6367
6368 /* if we can't use MSI-X we only need one fp,
6369 * so try to enable MSI-X with the requested number of fp's
6370 * and fallback to inta with one fp
6371 */
6372 if (use_inta) { 6346 if (use_inta) {
6373 bp->num_queues = 1; 6347 bp->num_queues = 1;
6374 6348
@@ -6383,7 +6357,15 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6383 else 6357 else
6384 bp->num_queues = 1; 6358 bp->num_queues = 1;
6385 6359
6386 if (bnx2x_enable_msix(bp)) { 6360 DP(NETIF_MSG_IFUP,
6361 "set number of queues to %d\n", bp->num_queues);
6362
6363 /* if we can't use MSI-X we only need one fp,
6364 * so try to enable MSI-X with the requested number of fp's
6365 * and fallback to MSI or legacy INTx with one fp
6366 */
6367 rc = bnx2x_enable_msix(bp);
6368 if (rc) {
6387 /* failed to enable MSI-X */ 6369 /* failed to enable MSI-X */
6388 bp->num_queues = 1; 6370 bp->num_queues = 1;
6389 if (use_multi) 6371 if (use_multi)
@@ -6391,8 +6373,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6391 " to enable MSI-X\n"); 6373 " to enable MSI-X\n");
6392 } 6374 }
6393 } 6375 }
6394 DP(NETIF_MSG_IFUP,
6395 "set number of queues to %d\n", bp->num_queues);
6396 6376
6397 if (bnx2x_alloc_mem(bp)) 6377 if (bnx2x_alloc_mem(bp))
6398 return -ENOMEM; 6378 return -ENOMEM;
@@ -6401,30 +6381,85 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6401 bnx2x_fp(bp, i, disable_tpa) = 6381 bnx2x_fp(bp, i, disable_tpa) =
6402 ((bp->flags & TPA_ENABLE_FLAG) == 0); 6382 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6403 6383
6384 for_each_queue(bp, i)
6385 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6386 bnx2x_poll, 128);
6387
6388#ifdef BNX2X_STOP_ON_ERROR
6389 for_each_queue(bp, i) {
6390 struct bnx2x_fastpath *fp = &bp->fp[i];
6391
6392 fp->poll_no_work = 0;
6393 fp->poll_calls = 0;
6394 fp->poll_max_calls = 0;
6395 fp->poll_complete = 0;
6396 fp->poll_exit = 0;
6397 }
6398#endif
6399 bnx2x_napi_enable(bp);
6400
6404 if (bp->flags & USING_MSIX_FLAG) { 6401 if (bp->flags & USING_MSIX_FLAG) {
6405 rc = bnx2x_req_msix_irqs(bp); 6402 rc = bnx2x_req_msix_irqs(bp);
6406 if (rc) { 6403 if (rc) {
6407 pci_disable_msix(bp->pdev); 6404 pci_disable_msix(bp->pdev);
6408 goto load_error; 6405 goto load_error1;
6409 } 6406 }
6407 printk(KERN_INFO PFX "%s: using MSI-X\n", bp->dev->name);
6410 } else { 6408 } else {
6411 bnx2x_ack_int(bp); 6409 bnx2x_ack_int(bp);
6412 rc = bnx2x_req_irq(bp); 6410 rc = bnx2x_req_irq(bp);
6413 if (rc) { 6411 if (rc) {
6414 BNX2X_ERR("IRQ request failed, aborting\n"); 6412 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6415 goto load_error; 6413 goto load_error1;
6416 } 6414 }
6417 } 6415 }
6418 6416
6419 for_each_queue(bp, i) 6417 /* Send LOAD_REQUEST command to MCP
6420 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 6418 Returns the type of LOAD command:
6421 bnx2x_poll, 128); 6419 if it is the first port to be initialized
6420 common blocks should be initialized, otherwise - not
6421 */
6422 if (!BP_NOMCP(bp)) {
6423 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6424 if (!load_code) {
6425 BNX2X_ERR("MCP response failure, aborting\n");
6426 rc = -EBUSY;
6427 goto load_error2;
6428 }
6429 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6430 rc = -EBUSY; /* other port in diagnostic mode */
6431 goto load_error2;
6432 }
6433
6434 } else {
6435 int port = BP_PORT(bp);
6436
6437 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6438 load_count[0], load_count[1], load_count[2]);
6439 load_count[0]++;
6440 load_count[1 + port]++;
6441 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6442 load_count[0], load_count[1], load_count[2]);
6443 if (load_count[0] == 1)
6444 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6445 else if (load_count[1 + port] == 1)
6446 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6447 else
6448 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6449 }
6450
6451 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6452 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6453 bp->port.pmf = 1;
6454 else
6455 bp->port.pmf = 0;
6456 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6422 6457
6423 /* Initialize HW */ 6458 /* Initialize HW */
6424 rc = bnx2x_init_hw(bp, load_code); 6459 rc = bnx2x_init_hw(bp, load_code);
6425 if (rc) { 6460 if (rc) {
6426 BNX2X_ERR("HW init failed, aborting\n"); 6461 BNX2X_ERR("HW init failed, aborting\n");
6427 goto load_int_disable; 6462 goto load_error2;
6428 } 6463 }
6429 6464
6430 /* Setup NIC internals and enable interrupts */ 6465 /* Setup NIC internals and enable interrupts */
@@ -6436,7 +6471,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6436 if (!load_code) { 6471 if (!load_code) {
6437 BNX2X_ERR("MCP response failure, aborting\n"); 6472 BNX2X_ERR("MCP response failure, aborting\n");
6438 rc = -EBUSY; 6473 rc = -EBUSY;
6439 goto load_rings_free; 6474 goto load_error3;
6440 } 6475 }
6441 } 6476 }
6442 6477
@@ -6445,7 +6480,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6445 rc = bnx2x_setup_leading(bp); 6480 rc = bnx2x_setup_leading(bp);
6446 if (rc) { 6481 if (rc) {
6447 BNX2X_ERR("Setup leading failed!\n"); 6482 BNX2X_ERR("Setup leading failed!\n");
6448 goto load_netif_stop; 6483 goto load_error3;
6449 } 6484 }
6450 6485
6451 if (CHIP_IS_E1H(bp)) 6486 if (CHIP_IS_E1H(bp))
@@ -6458,7 +6493,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6458 for_each_nondefault_queue(bp, i) { 6493 for_each_nondefault_queue(bp, i) {
6459 rc = bnx2x_setup_multi(bp, i); 6494 rc = bnx2x_setup_multi(bp, i);
6460 if (rc) 6495 if (rc)
6461 goto load_netif_stop; 6496 goto load_error3;
6462 } 6497 }
6463 6498
6464 if (CHIP_IS_E1(bp)) 6499 if (CHIP_IS_E1(bp))
@@ -6474,18 +6509,18 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6474 case LOAD_NORMAL: 6509 case LOAD_NORMAL:
6475 /* Tx queue should be only reenabled */ 6510 /* Tx queue should be only reenabled */
6476 netif_wake_queue(bp->dev); 6511 netif_wake_queue(bp->dev);
6512 /* Initialize the receive filter. */
6477 bnx2x_set_rx_mode(bp->dev); 6513 bnx2x_set_rx_mode(bp->dev);
6478 break; 6514 break;
6479 6515
6480 case LOAD_OPEN: 6516 case LOAD_OPEN:
6481 netif_start_queue(bp->dev); 6517 netif_start_queue(bp->dev);
6518 /* Initialize the receive filter. */
6482 bnx2x_set_rx_mode(bp->dev); 6519 bnx2x_set_rx_mode(bp->dev);
6483 if (bp->flags & USING_MSIX_FLAG)
6484 printk(KERN_INFO PFX "%s: using MSI-X\n",
6485 bp->dev->name);
6486 break; 6520 break;
6487 6521
6488 case LOAD_DIAG: 6522 case LOAD_DIAG:
6523 /* Initialize the receive filter. */
6489 bnx2x_set_rx_mode(bp->dev); 6524 bnx2x_set_rx_mode(bp->dev);
6490 bp->state = BNX2X_STATE_DIAG; 6525 bp->state = BNX2X_STATE_DIAG;
6491 break; 6526 break;
@@ -6503,20 +6538,25 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6503 6538
6504 return 0; 6539 return 0;
6505 6540
6506load_netif_stop: 6541load_error3:
6507 bnx2x_napi_disable(bp); 6542 bnx2x_int_disable_sync(bp, 1);
6508load_rings_free: 6543 if (!BP_NOMCP(bp)) {
6544 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6545 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6546 }
6547 bp->port.pmf = 0;
6509 /* Free SKBs, SGEs, TPA pool and driver internals */ 6548 /* Free SKBs, SGEs, TPA pool and driver internals */
6510 bnx2x_free_skbs(bp); 6549 bnx2x_free_skbs(bp);
6511 for_each_queue(bp, i) 6550 for_each_queue(bp, i)
6512 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 6551 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6513load_int_disable: 6552load_error2:
6514 bnx2x_int_disable_sync(bp, 1);
6515 /* Release IRQs */ 6553 /* Release IRQs */
6516 bnx2x_free_irq(bp); 6554 bnx2x_free_irq(bp);
6517load_error: 6555load_error1:
6556 bnx2x_napi_disable(bp);
6557 for_each_queue(bp, i)
6558 netif_napi_del(&bnx2x_fp(bp, i, napi));
6518 bnx2x_free_mem(bp); 6559 bnx2x_free_mem(bp);
6519 bp->port.pmf = 0;
6520 6560
6521 /* TBD we really need to reset the chip 6561 /* TBD we really need to reset the chip
6522 if we want to recover from this */ 6562 if we want to recover from this */
@@ -6589,6 +6629,7 @@ static int bnx2x_stop_leading(struct bnx2x *bp)
6589 } 6629 }
6590 cnt--; 6630 cnt--;
6591 msleep(1); 6631 msleep(1);
6632 rmb(); /* Refresh the dsb_sp_prod */
6592 } 6633 }
6593 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; 6634 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6594 bp->fp[0].state = BNX2X_FP_STATE_CLOSED; 6635 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
@@ -6640,14 +6681,6 @@ static void bnx2x_reset_port(struct bnx2x *bp)
6640 /* TODO: Close Doorbell port? */ 6681 /* TODO: Close Doorbell port? */
6641} 6682}
6642 6683
6643static void bnx2x_reset_common(struct bnx2x *bp)
6644{
6645 /* reset_common */
6646 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6647 0xd3ffff7f);
6648 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6649}
6650
6651static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) 6684static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6652{ 6685{
6653 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n", 6686 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
@@ -6688,8 +6721,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6688 bnx2x_set_storm_rx_mode(bp); 6721 bnx2x_set_storm_rx_mode(bp);
6689 6722
6690 bnx2x_netif_stop(bp, 1); 6723 bnx2x_netif_stop(bp, 1);
6691 if (!netif_running(bp->dev)) 6724
6692 bnx2x_napi_disable(bp);
6693 del_timer_sync(&bp->timer); 6725 del_timer_sync(&bp->timer);
6694 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, 6726 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6695 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); 6727 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
@@ -6704,7 +6736,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6704 6736
6705 cnt = 1000; 6737 cnt = 1000;
6706 smp_rmb(); 6738 smp_rmb();
6707 while (bnx2x_has_tx_work(fp)) { 6739 while (bnx2x_has_tx_work_unload(fp)) {
6708 6740
6709 bnx2x_tx_int(fp, 1000); 6741 bnx2x_tx_int(fp, 1000);
6710 if (!cnt) { 6742 if (!cnt) {
@@ -6833,6 +6865,8 @@ unload_error:
6833 bnx2x_free_skbs(bp); 6865 bnx2x_free_skbs(bp);
6834 for_each_queue(bp, i) 6866 for_each_queue(bp, i)
6835 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 6867 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6868 for_each_queue(bp, i)
6869 netif_napi_del(&bnx2x_fp(bp, i, napi));
6836 bnx2x_free_mem(bp); 6870 bnx2x_free_mem(bp);
6837 6871
6838 bp->state = BNX2X_STATE_CLOSED; 6872 bp->state = BNX2X_STATE_CLOSED;
@@ -8723,18 +8757,17 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8723 8757
8724 if (loopback_mode == BNX2X_MAC_LOOPBACK) { 8758 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8725 bp->link_params.loopback_mode = LOOPBACK_BMAC; 8759 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8726 bnx2x_acquire_phy_lock(bp);
8727 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 8760 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8728 bnx2x_release_phy_lock(bp);
8729 8761
8730 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) { 8762 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8763 u16 cnt = 1000;
8731 bp->link_params.loopback_mode = LOOPBACK_XGXS_10; 8764 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8732 bnx2x_acquire_phy_lock(bp);
8733 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 8765 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8734 bnx2x_release_phy_lock(bp);
8735 /* wait until link state is restored */ 8766 /* wait until link state is restored */
8736 bnx2x_wait_for_link(bp, link_up); 8767 if (link_up)
8737 8768 while (cnt-- && bnx2x_test_link(&bp->link_params,
8769 &bp->link_vars))
8770 msleep(10);
8738 } else 8771 } else
8739 return -EINVAL; 8772 return -EINVAL;
8740 8773
@@ -8840,6 +8873,7 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8840 return BNX2X_LOOPBACK_FAILED; 8873 return BNX2X_LOOPBACK_FAILED;
8841 8874
8842 bnx2x_netif_stop(bp, 1); 8875 bnx2x_netif_stop(bp, 1);
8876 bnx2x_acquire_phy_lock(bp);
8843 8877
8844 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) { 8878 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8845 DP(NETIF_MSG_PROBE, "MAC loopback failed\n"); 8879 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
@@ -8851,6 +8885,7 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8851 rc |= BNX2X_PHY_LOOPBACK_FAILED; 8885 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8852 } 8886 }
8853 8887
8888 bnx2x_release_phy_lock(bp);
8854 bnx2x_netif_start(bp); 8889 bnx2x_netif_start(bp);
8855 8890
8856 return rc; 8891 return rc;
@@ -9805,6 +9840,8 @@ static int bnx2x_open(struct net_device *dev)
9805{ 9840{
9806 struct bnx2x *bp = netdev_priv(dev); 9841 struct bnx2x *bp = netdev_priv(dev);
9807 9842
9843 netif_carrier_off(dev);
9844
9808 bnx2x_set_power_state(bp, PCI_D0); 9845 bnx2x_set_power_state(bp, PCI_D0);
9809 9846
9810 return bnx2x_nic_load(bp, LOAD_OPEN); 9847 return bnx2x_nic_load(bp, LOAD_OPEN);
@@ -10310,8 +10347,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10310 goto init_one_exit; 10347 goto init_one_exit;
10311 } 10348 }
10312 10349
10313 netif_carrier_off(dev);
10314
10315 bp->common.name = board_info[ent->driver_data].name; 10350 bp->common.name = board_info[ent->driver_data].name;
10316 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx," 10351 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10317 " IRQ %d, ", dev->name, bp->common.name, 10352 " IRQ %d, ", dev->name, bp->common.name,
@@ -10459,6 +10494,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10459 bnx2x_free_skbs(bp); 10494 bnx2x_free_skbs(bp);
10460 for_each_queue(bp, i) 10495 for_each_queue(bp, i)
10461 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 10496 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10497 for_each_queue(bp, i)
10498 netif_napi_del(&bnx2x_fp(bp, i, napi));
10462 bnx2x_free_mem(bp); 10499 bnx2x_free_mem(bp);
10463 10500
10464 bp->state = BNX2X_STATE_CLOSED; 10501 bp->state = BNX2X_STATE_CLOSED;
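
The reworked bnx2x_nic_load() error handling above uses the kernel's staged-unwind idiom: each later failure jumps to a numbered label (load_error3/2/1) that releases everything acquired so far and then falls through to the earlier labels. A minimal standalone sketch of that pattern, with hypothetical resource names rather than the driver's own state:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical resources standing in for memory, IRQs and rings. */
static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int load(void)
{
	int rc;

	rc = acquire("memory");
	if (rc)
		return rc;

	rc = acquire("irqs");
	if (rc)
		goto err_free_mem;

	rc = acquire("rings");
	if (rc)
		goto err_free_irqs;

	return 0;

err_free_irqs:		/* later failures fall through the earlier labels */
	release("irqs");
err_free_mem:
	release("memory");
	return rc;
}

int main(void)
{
	return load() ? EXIT_FAILURE : EXIT_SUCCESS;
}

Keeping the labels in reverse acquisition order means every exit path frees exactly what was taken so far, which is why the NAPI setup now happens before the IRQ request in the hunk above.
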
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index cf43ee743b3c..0890162953e9 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -981,11 +981,15 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
981 ew32(PBA_ECC, reg); 981 ew32(PBA_ECC, reg);
982 } 982 }
983 983
984 /* PCI-Ex Control Register */ 984 /* PCI-Ex Control Registers */
985 if (hw->mac.type == e1000_82574) { 985 if (hw->mac.type == e1000_82574) {
986 reg = er32(GCR); 986 reg = er32(GCR);
987 reg |= (1 << 22); 987 reg |= (1 << 22);
988 ew32(GCR, reg); 988 ew32(GCR, reg);
989
990 reg = er32(GCR2);
991 reg |= 1;
992 ew32(GCR2, reg);
989 } 993 }
990 994
991 return; 995 return;
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index f25e961c6b3b..2d4ce0492df0 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -206,6 +206,7 @@ enum e1e_registers {
206 E1000_MANC2H = 0x05860, /* Management Control To Host - RW */ 206 E1000_MANC2H = 0x05860, /* Management Control To Host - RW */
207 E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */ 207 E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */
208 E1000_GCR = 0x05B00, /* PCI-Ex Control */ 208 E1000_GCR = 0x05B00, /* PCI-Ex Control */
209 E1000_GCR2 = 0x05B64, /* PCI-Ex Control #2 */
209 E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */ 210 E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */
210 E1000_SWSM = 0x05B50, /* SW Semaphore */ 211 E1000_SWSM = 0x05B50, /* SW Semaphore */
211 E1000_FWSM = 0x05B54, /* FW Semaphore */ 212 E1000_FWSM = 0x05B54, /* FW Semaphore */
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 2e76699f8104..f5e6068f9b07 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1423,15 +1423,11 @@ static void gfar_vlan_rx_register(struct net_device *dev,
1423{ 1423{
1424 struct gfar_private *priv = netdev_priv(dev); 1424 struct gfar_private *priv = netdev_priv(dev);
1425 unsigned long flags; 1425 unsigned long flags;
1426 struct vlan_group *old_grp;
1427 u32 tempval; 1426 u32 tempval;
1428 1427
1429 spin_lock_irqsave(&priv->rxlock, flags); 1428 spin_lock_irqsave(&priv->rxlock, flags);
1430 1429
1431 old_grp = priv->vlgrp; 1430 priv->vlgrp = grp;
1432
1433 if (old_grp == grp)
1434 return;
1435 1431
1436 if (grp) { 1432 if (grp) {
1437 /* Enable VLAN tag insertion */ 1433 /* Enable VLAN tag insertion */
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index c11c568fd7db..a75a31005fd3 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -146,7 +146,7 @@
146 146
147#define MAX_RX_BUFFER_LENGTH 1760 147#define MAX_RX_BUFFER_LENGTH 1760
148#define MAX_RX_JUMBO_BUFFER_LENGTH 8062 148#define MAX_RX_JUMBO_BUFFER_LENGTH 8062
149#define MAX_RX_LRO_BUFFER_LENGTH ((48*1024)-512) 149#define MAX_RX_LRO_BUFFER_LENGTH (8062)
150#define RX_DMA_MAP_LEN (MAX_RX_BUFFER_LENGTH - 2) 150#define RX_DMA_MAP_LEN (MAX_RX_BUFFER_LENGTH - 2)
151#define RX_JUMBO_DMA_MAP_LEN \ 151#define RX_JUMBO_DMA_MAP_LEN \
152 (MAX_RX_JUMBO_BUFFER_LENGTH - 2) 152 (MAX_RX_JUMBO_BUFFER_LENGTH - 2)
@@ -207,11 +207,11 @@
207 207
208#define MAX_CMD_DESCRIPTORS 4096 208#define MAX_CMD_DESCRIPTORS 4096
209#define MAX_RCV_DESCRIPTORS 16384 209#define MAX_RCV_DESCRIPTORS 16384
210#define MAX_CMD_DESCRIPTORS_HOST (MAX_CMD_DESCRIPTORS / 4) 210#define MAX_CMD_DESCRIPTORS_HOST 1024
211#define MAX_RCV_DESCRIPTORS_1G (MAX_RCV_DESCRIPTORS / 4) 211#define MAX_RCV_DESCRIPTORS_1G 2048
212#define MAX_RCV_DESCRIPTORS_10G 8192 212#define MAX_RCV_DESCRIPTORS_10G 4096
213#define MAX_JUMBO_RCV_DESCRIPTORS 1024 213#define MAX_JUMBO_RCV_DESCRIPTORS 512
214#define MAX_LRO_RCV_DESCRIPTORS 64 214#define MAX_LRO_RCV_DESCRIPTORS 8
215#define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS 215#define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS
216#define MAX_JUMBO_RCV_DESC MAX_JUMBO_RCV_DESCRIPTORS 216#define MAX_JUMBO_RCV_DESC MAX_JUMBO_RCV_DESCRIPTORS
217#define MAX_RCV_DESC MAX_RCV_DESCRIPTORS 217#define MAX_RCV_DESC MAX_RCV_DESCRIPTORS
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index c0bd40fcf708..0894a7be0225 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -561,7 +561,10 @@ netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
561 } 561 }
562 ring->tx_pending = adapter->max_tx_desc_count; 562 ring->tx_pending = adapter->max_tx_desc_count;
563 563
564 ring->rx_max_pending = MAX_RCV_DESCRIPTORS; 564 if (adapter->ahw.board_type == NETXEN_NIC_GBE)
565 ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
566 else
567 ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
565 ring->tx_max_pending = MAX_CMD_DESCRIPTORS_HOST; 568 ring->tx_max_pending = MAX_CMD_DESCRIPTORS_HOST;
566 ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS; 569 ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS;
567 ring->rx_mini_max_pending = 0; 570 ring->rx_mini_max_pending = 0;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 1139e637f5da..cc06cc5429fe 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -735,17 +735,18 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
735 735
736 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops); 736 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
737 737
738 /* ScatterGather support */ 738 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
739 netdev->features = NETIF_F_SG; 739 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
740 netdev->features |= NETIF_F_IP_CSUM; 740
741 netdev->features |= NETIF_F_TSO;
742 if (NX_IS_REVISION_P3(revision_id)) { 741 if (NX_IS_REVISION_P3(revision_id)) {
743 netdev->features |= NETIF_F_IPV6_CSUM; 742 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
744 netdev->features |= NETIF_F_TSO6; 743 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
745 } 744 }
746 745
747 if (adapter->pci_using_dac) 746 if (adapter->pci_using_dac) {
748 netdev->features |= NETIF_F_HIGHDMA; 747 netdev->features |= NETIF_F_HIGHDMA;
748 netdev->vlan_features |= NETIF_F_HIGHDMA;
749 }
749 750
750 /* 751 /*
751 * Set the CRB window to invalid. If any register in window 0 is 752 * Set the CRB window to invalid. If any register in window 0 is
@@ -1166,6 +1167,14 @@ static bool netxen_tso_check(struct net_device *netdev,
1166{ 1167{
1167 bool tso = false; 1168 bool tso = false;
1168 u8 opcode = TX_ETHER_PKT; 1169 u8 opcode = TX_ETHER_PKT;
1170 __be16 protocol = skb->protocol;
1171 u16 flags = 0;
1172
1173 if (protocol == __constant_htons(ETH_P_8021Q)) {
1174 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)skb->data;
1175 protocol = vh->h_vlan_encapsulated_proto;
1176 flags = FLAGS_VLAN_TAGGED;
1177 }
1169 1178
1170 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && 1179 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1171 skb_shinfo(skb)->gso_size > 0) { 1180 skb_shinfo(skb)->gso_size > 0) {
@@ -1174,21 +1183,21 @@ static bool netxen_tso_check(struct net_device *netdev,
1174 desc->total_hdr_length = 1183 desc->total_hdr_length =
1175 skb_transport_offset(skb) + tcp_hdrlen(skb); 1184 skb_transport_offset(skb) + tcp_hdrlen(skb);
1176 1185
1177 opcode = (skb->protocol == htons(ETH_P_IPV6)) ? 1186 opcode = (protocol == __constant_htons(ETH_P_IPV6)) ?
1178 TX_TCP_LSO6 : TX_TCP_LSO; 1187 TX_TCP_LSO6 : TX_TCP_LSO;
1179 tso = true; 1188 tso = true;
1180 1189
1181 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 1190 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1182 u8 l4proto; 1191 u8 l4proto;
1183 1192
1184 if (skb->protocol == htons(ETH_P_IP)) { 1193 if (protocol == __constant_htons(ETH_P_IP)) {
1185 l4proto = ip_hdr(skb)->protocol; 1194 l4proto = ip_hdr(skb)->protocol;
1186 1195
1187 if (l4proto == IPPROTO_TCP) 1196 if (l4proto == IPPROTO_TCP)
1188 opcode = TX_TCP_PKT; 1197 opcode = TX_TCP_PKT;
1189 else if(l4proto == IPPROTO_UDP) 1198 else if(l4proto == IPPROTO_UDP)
1190 opcode = TX_UDP_PKT; 1199 opcode = TX_UDP_PKT;
1191 } else if (skb->protocol == htons(ETH_P_IPV6)) { 1200 } else if (protocol == __constant_htons(ETH_P_IPV6)) {
1192 l4proto = ipv6_hdr(skb)->nexthdr; 1201 l4proto = ipv6_hdr(skb)->nexthdr;
1193 1202
1194 if (l4proto == IPPROTO_TCP) 1203 if (l4proto == IPPROTO_TCP)
@@ -1199,7 +1208,7 @@ static bool netxen_tso_check(struct net_device *netdev,
1199 } 1208 }
1200 desc->tcp_hdr_offset = skb_transport_offset(skb); 1209 desc->tcp_hdr_offset = skb_transport_offset(skb);
1201 desc->ip_hdr_offset = skb_network_offset(skb); 1210 desc->ip_hdr_offset = skb_network_offset(skb);
1202 netxen_set_tx_flags_opcode(desc, 0, opcode); 1211 netxen_set_tx_flags_opcode(desc, flags, opcode);
1203 return tso; 1212 return tso;
1204} 1213}
1205 1214
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 11adf6ed4628..811a637695ca 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -296,9 +296,8 @@ static int mdio_bus_suspend(struct device * dev, pm_message_t state)
296 struct phy_driver *phydrv = to_phy_driver(drv); 296 struct phy_driver *phydrv = to_phy_driver(drv);
297 struct phy_device *phydev = to_phy_device(dev); 297 struct phy_device *phydev = to_phy_device(dev);
298 298
299 if ((!device_may_wakeup(phydev->dev.parent)) && 299 if (drv && phydrv->suspend && !device_may_wakeup(phydev->dev.parent))
300 (phydrv && phydrv->suspend)) 300 ret = phydrv->suspend(phydev);
301 ret = phydrv->suspend(phydev);
302 301
303 return ret; 302 return ret;
304} 303}
@@ -310,8 +309,7 @@ static int mdio_bus_resume(struct device * dev)
310 struct phy_driver *phydrv = to_phy_driver(drv); 309 struct phy_driver *phydrv = to_phy_driver(drv);
311 struct phy_device *phydev = to_phy_device(dev); 310 struct phy_device *phydev = to_phy_device(dev);
312 311
313 if ((!device_may_wakeup(phydev->dev.parent)) && 312 if (drv && phydrv->resume && !device_may_wakeup(phydev->dev.parent))
314 (phydrv && phydrv->resume))
315 ret = phydrv->resume(phydev); 313 ret = phydrv->resume(phydev);
316 314
317 return ret; 315 return ret;
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index c05d38d46350..1387187543e4 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -81,6 +81,9 @@ static struct phy_driver lan83c185_driver = {
81 .ack_interrupt = smsc_phy_ack_interrupt, 81 .ack_interrupt = smsc_phy_ack_interrupt,
82 .config_intr = smsc_phy_config_intr, 82 .config_intr = smsc_phy_config_intr,
83 83
84 .suspend = genphy_suspend,
85 .resume = genphy_resume,
86
84 .driver = { .owner = THIS_MODULE, } 87 .driver = { .owner = THIS_MODULE, }
85}; 88};
86 89
@@ -102,6 +105,9 @@ static struct phy_driver lan8187_driver = {
102 .ack_interrupt = smsc_phy_ack_interrupt, 105 .ack_interrupt = smsc_phy_ack_interrupt,
103 .config_intr = smsc_phy_config_intr, 106 .config_intr = smsc_phy_config_intr,
104 107
108 .suspend = genphy_suspend,
109 .resume = genphy_resume,
110
105 .driver = { .owner = THIS_MODULE, } 111 .driver = { .owner = THIS_MODULE, }
106}; 112};
107 113
@@ -123,6 +129,9 @@ static struct phy_driver lan8700_driver = {
123 .ack_interrupt = smsc_phy_ack_interrupt, 129 .ack_interrupt = smsc_phy_ack_interrupt,
124 .config_intr = smsc_phy_config_intr, 130 .config_intr = smsc_phy_config_intr,
125 131
132 .suspend = genphy_suspend,
133 .resume = genphy_resume,
134
126 .driver = { .owner = THIS_MODULE, } 135 .driver = { .owner = THIS_MODULE, }
127}; 136};
128 137
@@ -144,6 +153,9 @@ static struct phy_driver lan911x_int_driver = {
144 .ack_interrupt = smsc_phy_ack_interrupt, 153 .ack_interrupt = smsc_phy_ack_interrupt,
145 .config_intr = smsc_phy_config_intr, 154 .config_intr = smsc_phy_config_intr,
146 155
156 .suspend = genphy_suspend,
157 .resume = genphy_resume,
158
147 .driver = { .owner = THIS_MODULE, } 159 .driver = { .owner = THIS_MODULE, }
148}; 160};
149 161
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9b33d6ebf542..3b6225a2c7d2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -24,6 +24,7 @@
24#include <linux/virtio.h> 24#include <linux/virtio.h>
25#include <linux/virtio_net.h> 25#include <linux/virtio_net.h>
26#include <linux/scatterlist.h> 26#include <linux/scatterlist.h>
27#include <linux/if_vlan.h>
27 28
28static int napi_weight = 128; 29static int napi_weight = 128;
29module_param(napi_weight, int, 0444); 30module_param(napi_weight, int, 0444);
@@ -33,7 +34,7 @@ module_param(csum, bool, 0444);
33module_param(gso, bool, 0444); 34module_param(gso, bool, 0444);
34 35
35/* FIXME: MTU in config. */ 36/* FIXME: MTU in config. */
36#define MAX_PACKET_LEN (ETH_HLEN+ETH_DATA_LEN) 37#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
37#define GOOD_COPY_LEN 128 38#define GOOD_COPY_LEN 128
38 39
39struct virtnet_info 40struct virtnet_info
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 9b60a0c5de5f..21c841847d88 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -638,12 +638,16 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
638 s8 scale_action = 0; 638 s8 scale_action = 0;
639 unsigned long flags; 639 unsigned long flags;
640 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 640 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
641 u16 fc, rate_mask; 641 u16 fc;
642 u16 rate_mask = 0;
642 struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_r; 643 struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_r;
643 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 644 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
644 645
645 IWL_DEBUG_RATE("enter\n"); 646 IWL_DEBUG_RATE("enter\n");
646 647
648 if (sta)
649 rate_mask = sta->supp_rates[sband->band];
650
647 /* Send management frames and broadcast/multicast data using lowest 651 /* Send management frames and broadcast/multicast data using lowest
648 * rate. */ 652 * rate. */
649 fc = le16_to_cpu(hdr->frame_control); 653 fc = le16_to_cpu(hdr->frame_control);
@@ -651,11 +655,15 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
651 is_multicast_ether_addr(hdr->addr1) || 655 is_multicast_ether_addr(hdr->addr1) ||
652 !sta || !priv_sta) { 656 !sta || !priv_sta) {
653 IWL_DEBUG_RATE("leave: No STA priv data to update!\n"); 657 IWL_DEBUG_RATE("leave: No STA priv data to update!\n");
654 info->control.rates[0].idx = rate_lowest_index(sband, sta); 658 if (!rate_mask)
659 info->control.rates[0].idx =
660 rate_lowest_index(sband, NULL);
661 else
662 info->control.rates[0].idx =
663 rate_lowest_index(sband, sta);
655 return; 664 return;
656 } 665 }
657 666
658 rate_mask = sta->supp_rates[sband->band];
659 index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT - 1); 667 index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT - 1);
660 668
661 if (sband->band == IEEE80211_BAND_5GHZ) 669 if (sband->band == IEEE80211_BAND_5GHZ)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index f3f17929ca0b..27f50471aed8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -944,7 +944,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
944 } 944 }
945 945
946 /* See if there's a better rate or modulation mode to try. */ 946 /* See if there's a better rate or modulation mode to try. */
947 rs_rate_scale_perform(priv, hdr, sta, lq_sta); 947 if (sta && sta->supp_rates[sband->band])
948 rs_rate_scale_perform(priv, hdr, sta, lq_sta);
948out: 949out:
949 return; 950 return;
950} 951}
@@ -2101,14 +2102,23 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
2101 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2102 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2102 struct iwl_lq_sta *lq_sta = priv_sta; 2103 struct iwl_lq_sta *lq_sta = priv_sta;
2103 int rate_idx; 2104 int rate_idx;
2105 u64 mask_bit = 0;
2104 2106
2105 IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n"); 2107 IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n");
2106 2108
2109 if (sta)
2110 mask_bit = sta->supp_rates[sband->band];
2111
2107 /* Send management frames and broadcast/multicast data using lowest 2112 /* Send management frames and broadcast/multicast data using lowest
2108 * rate. */ 2113 * rate. */
2109 if (!ieee80211_is_data(hdr->frame_control) || 2114 if (!ieee80211_is_data(hdr->frame_control) ||
2110 is_multicast_ether_addr(hdr->addr1) || !sta || !lq_sta) { 2115 is_multicast_ether_addr(hdr->addr1) || !sta || !lq_sta) {
2111 info->control.rates[0].idx = rate_lowest_index(sband, sta); 2116 if (!mask_bit)
2117 info->control.rates[0].idx =
2118 rate_lowest_index(sband, NULL);
2119 else
2120 info->control.rates[0].idx =
2121 rate_lowest_index(sband, sta);
2112 return; 2122 return;
2113 } 2123 }
2114 2124
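
The iwlwifi rate-scaling hunks above guard the station dereference: the supported-rate mask is read only when sta is non-NULL, and an empty mask falls back to the band's lowest rate. A minimal sketch of that fallback, with hypothetical types rather than the mac80211/iwlwifi structures:

#include <stdio.h>
#include <stdint.h>

struct sta  { uint16_t supp_rates; };
struct band { int n_rates; };

/* Pick the lowest rate the station supports, or the band's lowest
 * rate when no station/mask information is available. */
static int pick_lowest_rate(const struct band *band, const struct sta *sta)
{
	uint16_t mask = sta ? sta->supp_rates : 0;
	int i;

	if (!mask)
		return 0;		/* band's lowest rate */

	for (i = 0; i < band->n_rates; i++)
		if (mask & (1 << i))
			return i;
	return 0;
}

int main(void)
{
	struct band band = { .n_rates = 12 };
	struct sta sta = { .supp_rates = 0x0ff0 };	/* rates 4..11 supported */

	printf("with sta: idx %d\n", pick_lowest_rate(&band, &sta));	/* 4 */
	printf("no sta:   idx %d\n", pick_lowest_rate(&band, NULL));	/* 0 */
	return 0;
}
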
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 5da6b35cd26d..0dc8eed16404 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2482,7 +2482,7 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2482 dev_kfree_skb_any(skb); 2482 dev_kfree_skb_any(skb);
2483 2483
2484 IWL_DEBUG_MACDUMP("leave\n"); 2484 IWL_DEBUG_MACDUMP("leave\n");
2485 return 0; 2485 return NETDEV_TX_OK;
2486} 2486}
2487 2487
2488static int iwl_mac_add_interface(struct ieee80211_hw *hw, 2488static int iwl_mac_add_interface(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 15f5655c636e..95d01984c80e 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -6538,7 +6538,7 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
6538 dev_kfree_skb_any(skb); 6538 dev_kfree_skb_any(skb);
6539 6539
6540 IWL_DEBUG_MAC80211("leave\n"); 6540 IWL_DEBUG_MAC80211("leave\n");
6541 return 0; 6541 return NETDEV_TX_OK;
6542} 6542}
6543 6543
6544static int iwl3945_mac_add_interface(struct ieee80211_hw *hw, 6544static int iwl3945_mac_add_interface(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/orinoco/orinoco.c b/drivers/net/wireless/orinoco/orinoco.c
index c3bb85e0251e..45a04faa7818 100644
--- a/drivers/net/wireless/orinoco/orinoco.c
+++ b/drivers/net/wireless/orinoco/orinoco.c
@@ -1673,7 +1673,7 @@ static void print_linkstatus(struct net_device *dev, u16 status)
1673 s = "UNKNOWN"; 1673 s = "UNKNOWN";
1674 } 1674 }
1675 1675
1676 printk(KERN_INFO "%s: New link status: %s (%04x)\n", 1676 printk(KERN_DEBUG "%s: New link status: %s (%04x)\n",
1677 dev->name, s, status); 1677 dev->name, s, status);
1678} 1678}
1679 1679
@@ -5068,33 +5068,30 @@ static int orinoco_ioctl_set_genie(struct net_device *dev,
5068 struct orinoco_private *priv = netdev_priv(dev); 5068 struct orinoco_private *priv = netdev_priv(dev);
5069 u8 *buf; 5069 u8 *buf;
5070 unsigned long flags; 5070 unsigned long flags;
5071 int err = 0;
5072 5071
5073 /* cut off at IEEE80211_MAX_DATA_LEN */ 5072 /* cut off at IEEE80211_MAX_DATA_LEN */
5074 if ((wrqu->data.length > IEEE80211_MAX_DATA_LEN) || 5073 if ((wrqu->data.length > IEEE80211_MAX_DATA_LEN) ||
5075 (wrqu->data.length && (extra == NULL))) 5074 (wrqu->data.length && (extra == NULL)))
5076 return -EINVAL; 5075 return -EINVAL;
5077 5076
5078 if (orinoco_lock(priv, &flags) != 0)
5079 return -EBUSY;
5080
5081 if (wrqu->data.length) { 5077 if (wrqu->data.length) {
5082 buf = kmalloc(wrqu->data.length, GFP_KERNEL); 5078 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
5083 if (buf == NULL) { 5079 if (buf == NULL)
5084 err = -ENOMEM; 5080 return -ENOMEM;
5085 goto out;
5086 }
5087 5081
5088 memcpy(buf, extra, wrqu->data.length); 5082 memcpy(buf, extra, wrqu->data.length);
5089 kfree(priv->wpa_ie); 5083 } else
5090 priv->wpa_ie = buf; 5084 buf = NULL;
5091 priv->wpa_ie_len = wrqu->data.length; 5085
5092 } else { 5086 if (orinoco_lock(priv, &flags) != 0) {
5093 kfree(priv->wpa_ie); 5087 kfree(buf);
5094 priv->wpa_ie = NULL; 5088 return -EBUSY;
5095 priv->wpa_ie_len = 0;
5096 } 5089 }
5097 5090
5091 kfree(priv->wpa_ie);
5092 priv->wpa_ie = buf;
5093 priv->wpa_ie_len = wrqu->data.length;
5094
5098 if (priv->wpa_ie) { 5095 if (priv->wpa_ie) {
5099 /* Looks like wl_lkm wants to check the auth alg, and 5096 /* Looks like wl_lkm wants to check the auth alg, and
5100 * somehow pass it to the firmware. 5097 * somehow pass it to the firmware.
@@ -5103,9 +5100,8 @@ static int orinoco_ioctl_set_genie(struct net_device *dev,
5103 */ 5100 */
5104 } 5101 }
5105 5102
5106out:
5107 orinoco_unlock(priv, &flags); 5103 orinoco_unlock(priv, &flags);
5108 return err; 5104 return 0;
5109} 5105}
5110 5106
5111static int orinoco_ioctl_get_genie(struct net_device *dev, 5107static int orinoco_ioctl_get_genie(struct net_device *dev,
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index 12d0717c3992..34561e6e816b 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -451,8 +451,8 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
451 } 451 }
452 if (err) 452 if (err)
453 goto err; 453 goto err;
454 454 }
455 } 455 break;
456 case PDR_PRISM_ZIF_TX_IQ_CALIBRATION: 456 case PDR_PRISM_ZIF_TX_IQ_CALIBRATION:
457 priv->iq_autocal = kmalloc(data_len, GFP_KERNEL); 457 priv->iq_autocal = kmalloc(data_len, GFP_KERNEL);
458 if (!priv->iq_autocal) { 458 if (!priv->iq_autocal) {
@@ -745,7 +745,7 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
745 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry); 745 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry);
746 struct p54_hdr *entry_hdr; 746 struct p54_hdr *entry_hdr;
747 struct p54_tx_data *entry_data; 747 struct p54_tx_data *entry_data;
748 int pad = 0; 748 unsigned int pad = 0, frame_len;
749 749
750 range = (void *)info->rate_driver_data; 750 range = (void *)info->rate_driver_data;
751 if (range->start_addr != addr) { 751 if (range->start_addr != addr) {
@@ -768,6 +768,7 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
768 __skb_unlink(entry, &priv->tx_queue); 768 __skb_unlink(entry, &priv->tx_queue);
769 spin_unlock_irqrestore(&priv->tx_queue.lock, flags); 769 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
770 770
771 frame_len = entry->len;
771 entry_hdr = (struct p54_hdr *) entry->data; 772 entry_hdr = (struct p54_hdr *) entry->data;
772 entry_data = (struct p54_tx_data *) entry_hdr->data; 773 entry_data = (struct p54_tx_data *) entry_hdr->data;
773 priv->tx_stats[entry_data->hw_queue].len--; 774 priv->tx_stats[entry_data->hw_queue].len--;
@@ -814,15 +815,28 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
814 info->status.ack_signal = p54_rssi_to_dbm(dev, 815 info->status.ack_signal = p54_rssi_to_dbm(dev,
815 (int)payload->ack_rssi); 816 (int)payload->ack_rssi);
816 817
817 if (entry_data->key_type == P54_CRYPTO_TKIPMICHAEL) { 818 /* Undo all changes to the frame. */
819 switch (entry_data->key_type) {
820 case P54_CRYPTO_TKIPMICHAEL: {
818 u8 *iv = (u8 *)(entry_data->align + pad + 821 u8 *iv = (u8 *)(entry_data->align + pad +
819 entry_data->crypt_offset); 822 entry_data->crypt_offset);
820 823
821 /* Restore the original TKIP IV. */ 824 /* Restore the original TKIP IV. */
822 iv[2] = iv[0]; 825 iv[2] = iv[0];
823 iv[0] = iv[1]; 826 iv[0] = iv[1];
824 iv[1] = (iv[0] | 0x20) & 0x7f; /* WEPSeed - 8.3.2.2 */ 827 iv[1] = (iv[0] | 0x20) & 0x7f; /* WEPSeed - 8.3.2.2 */
828
829 frame_len -= 12; /* remove TKIP_MMIC + TKIP_ICV */
830 break;
831 }
832 case P54_CRYPTO_AESCCMP:
833 frame_len -= 8; /* remove CCMP_MIC */
834 break;
835 case P54_CRYPTO_WEP:
836 frame_len -= 4; /* remove WEP_ICV */
837 break;
825 } 838 }
839 skb_trim(entry, frame_len);
826 skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data)); 840 skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data));
827 ieee80211_tx_status_irqsafe(dev, entry); 841 ieee80211_tx_status_irqsafe(dev, entry);
828 goto out; 842 goto out;
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 6a6a72f6f82c..5de2ebfb28c7 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -144,11 +144,8 @@ static void p54u_tx_cb(struct urb *urb)
144 struct sk_buff *skb = urb->context; 144 struct sk_buff *skb = urb->context;
145 struct ieee80211_hw *dev = (struct ieee80211_hw *) 145 struct ieee80211_hw *dev = (struct ieee80211_hw *)
146 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); 146 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
147 struct p54u_priv *priv = dev->priv;
148 147
149 skb_pull(skb, priv->common.tx_hdr_len); 148 p54_free_skb(dev, skb);
150 if (FREE_AFTER_TX(skb))
151 p54_free_skb(dev, skb);
152} 149}
153 150
154static void p54u_tx_dummy_cb(struct urb *urb) { } 151static void p54u_tx_dummy_cb(struct urb *urb) { }
@@ -230,7 +227,10 @@ static void p54u_tx_3887(struct ieee80211_hw *dev, struct sk_buff *skb)
230 p54u_tx_dummy_cb, dev); 227 p54u_tx_dummy_cb, dev);
231 usb_fill_bulk_urb(data_urb, priv->udev, 228 usb_fill_bulk_urb(data_urb, priv->udev,
232 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), 229 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA),
233 skb->data, skb->len, p54u_tx_cb, skb); 230 skb->data, skb->len, FREE_AFTER_TX(skb) ?
231 p54u_tx_cb : p54u_tx_dummy_cb, skb);
232 addr_urb->transfer_flags |= URB_ZERO_PACKET;
233 data_urb->transfer_flags |= URB_ZERO_PACKET;
234 234
235 usb_anchor_urb(addr_urb, &priv->submitted); 235 usb_anchor_urb(addr_urb, &priv->submitted);
236 err = usb_submit_urb(addr_urb, GFP_ATOMIC); 236 err = usb_submit_urb(addr_urb, GFP_ATOMIC);
@@ -239,7 +239,7 @@ static void p54u_tx_3887(struct ieee80211_hw *dev, struct sk_buff *skb)
239 goto out; 239 goto out;
240 } 240 }
241 241
242 usb_anchor_urb(addr_urb, &priv->submitted); 242 usb_anchor_urb(data_urb, &priv->submitted);
243 err = usb_submit_urb(data_urb, GFP_ATOMIC); 243 err = usb_submit_urb(data_urb, GFP_ATOMIC);
244 if (err) 244 if (err)
245 usb_unanchor_urb(data_urb); 245 usb_unanchor_urb(data_urb);
@@ -269,28 +269,24 @@ static void p54u_tx_lm87(struct ieee80211_hw *dev, struct sk_buff *skb)
269{ 269{
270 struct p54u_priv *priv = dev->priv; 270 struct p54u_priv *priv = dev->priv;
271 struct urb *data_urb; 271 struct urb *data_urb;
272 struct lm87_tx_hdr *hdr; 272 struct lm87_tx_hdr *hdr = (void *)skb->data - sizeof(*hdr);
273 __le32 checksum;
274 __le32 addr = ((struct p54_hdr *)skb->data)->req_id;
275 273
276 data_urb = usb_alloc_urb(0, GFP_ATOMIC); 274 data_urb = usb_alloc_urb(0, GFP_ATOMIC);
277 if (!data_urb) 275 if (!data_urb)
278 return; 276 return;
279 277
280 checksum = p54u_lm87_chksum((__le32 *)skb->data, skb->len); 278 hdr->chksum = p54u_lm87_chksum((__le32 *)skb->data, skb->len);
281 hdr = (struct lm87_tx_hdr *)skb_push(skb, sizeof(*hdr)); 279 hdr->device_addr = ((struct p54_hdr *)skb->data)->req_id;
282 hdr->chksum = checksum;
283 hdr->device_addr = addr;
284 280
285 usb_fill_bulk_urb(data_urb, priv->udev, 281 usb_fill_bulk_urb(data_urb, priv->udev,
286 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), 282 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA),
287 skb->data, skb->len, p54u_tx_cb, skb); 283 hdr, skb->len + sizeof(*hdr), FREE_AFTER_TX(skb) ?
284 p54u_tx_cb : p54u_tx_dummy_cb, skb);
288 data_urb->transfer_flags |= URB_ZERO_PACKET; 285 data_urb->transfer_flags |= URB_ZERO_PACKET;
289 286
290 usb_anchor_urb(data_urb, &priv->submitted); 287 usb_anchor_urb(data_urb, &priv->submitted);
291 if (usb_submit_urb(data_urb, GFP_ATOMIC)) { 288 if (usb_submit_urb(data_urb, GFP_ATOMIC)) {
292 usb_unanchor_urb(data_urb); 289 usb_unanchor_urb(data_urb);
293 skb_pull(skb, sizeof(*hdr));
294 p54_free_skb(dev, skb); 290 p54_free_skb(dev, skb);
295 } 291 }
296 usb_free_urb(data_urb); 292 usb_free_urb(data_urb);
@@ -300,11 +296,9 @@ static void p54u_tx_net2280(struct ieee80211_hw *dev, struct sk_buff *skb)
300{ 296{
301 struct p54u_priv *priv = dev->priv; 297 struct p54u_priv *priv = dev->priv;
302 struct urb *int_urb, *data_urb; 298 struct urb *int_urb, *data_urb;
303 struct net2280_tx_hdr *hdr; 299 struct net2280_tx_hdr *hdr = (void *)skb->data - sizeof(*hdr);
304 struct net2280_reg_write *reg; 300 struct net2280_reg_write *reg;
305 int err = 0; 301 int err = 0;
306 __le32 addr = ((struct p54_hdr *) skb->data)->req_id;
307 __le16 len = cpu_to_le16(skb->len);
308 302
309 reg = kmalloc(sizeof(*reg), GFP_ATOMIC); 303 reg = kmalloc(sizeof(*reg), GFP_ATOMIC);
310 if (!reg) 304 if (!reg)
@@ -327,10 +321,9 @@ static void p54u_tx_net2280(struct ieee80211_hw *dev, struct sk_buff *skb)
327 reg->addr = cpu_to_le32(P54U_DEV_BASE); 321 reg->addr = cpu_to_le32(P54U_DEV_BASE);
328 reg->val = cpu_to_le32(ISL38XX_DEV_INT_DATA); 322 reg->val = cpu_to_le32(ISL38XX_DEV_INT_DATA);
329 323
330 hdr = (void *)skb_push(skb, sizeof(*hdr));
331 memset(hdr, 0, sizeof(*hdr)); 324 memset(hdr, 0, sizeof(*hdr));
332 hdr->len = len; 325 hdr->len = cpu_to_le16(skb->len);
333 hdr->device_addr = addr; 326 hdr->device_addr = ((struct p54_hdr *) skb->data)->req_id;
334 327
335 usb_fill_bulk_urb(int_urb, priv->udev, 328 usb_fill_bulk_urb(int_urb, priv->udev,
336 usb_sndbulkpipe(priv->udev, P54U_PIPE_DEV), reg, sizeof(*reg), 329 usb_sndbulkpipe(priv->udev, P54U_PIPE_DEV), reg, sizeof(*reg),
@@ -341,11 +334,13 @@ static void p54u_tx_net2280(struct ieee80211_hw *dev, struct sk_buff *skb)
341 * free what's inside the transfer_buffer after the callback routine 334 * free what's inside the transfer_buffer after the callback routine
342 * has completed. 335 * has completed.
343 */ 336 */
344 int_urb->transfer_flags |= URB_FREE_BUFFER; 337 int_urb->transfer_flags |= URB_FREE_BUFFER | URB_ZERO_PACKET;
345 338
346 usb_fill_bulk_urb(data_urb, priv->udev, 339 usb_fill_bulk_urb(data_urb, priv->udev,
347 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), 340 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA),
348 skb->data, skb->len, p54u_tx_cb, skb); 341 hdr, skb->len + sizeof(*hdr), FREE_AFTER_TX(skb) ?
342 p54u_tx_cb : p54u_tx_dummy_cb, skb);
343 data_urb->transfer_flags |= URB_ZERO_PACKET;
349 344
350 usb_anchor_urb(int_urb, &priv->submitted); 345 usb_anchor_urb(int_urb, &priv->submitted);
351 err = usb_submit_urb(int_urb, GFP_ATOMIC); 346 err = usb_submit_urb(int_urb, GFP_ATOMIC);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 746a8f36b931..0709decec9c2 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -154,6 +154,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
154 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 154 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
155 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 155 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
156 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; 156 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
157 struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
157 struct ieee80211_rate *rate = 158 struct ieee80211_rate *rate =
158 ieee80211_get_tx_rate(rt2x00dev->hw, tx_info); 159 ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
159 const struct rt2x00_rate *hwrate; 160 const struct rt2x00_rate *hwrate;
@@ -313,7 +314,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
313 * When preamble is enabled we should set the 314 * When preamble is enabled we should set the
314 * preamble bit for the signal. 315 * preamble bit for the signal.
315 */ 316 */
316 if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 317 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
317 txdesc->signal |= 0x08; 318 txdesc->signal |= 0x08;
318 } 319 }
319} 320}
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 6ad6bac37706..22bc07ef2f37 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -273,6 +273,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
273 273
274 usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, ep), 274 usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, ep),
275 buf, skb->len, rtl8187_tx_cb, skb); 275 buf, skb->len, rtl8187_tx_cb, skb);
276 urb->transfer_flags |= URB_ZERO_PACKET;
276 usb_anchor_urb(urb, &priv->anchored); 277 usb_anchor_urb(urb, &priv->anchored);
277 rc = usb_submit_urb(urb, GFP_ATOMIC); 278 rc = usb_submit_urb(urb, GFP_ATOMIC);
278 if (rc < 0) { 279 if (rc < 0) {
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index b5db57d2fcf5..17527f765b39 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -84,6 +84,7 @@ static struct usb_device_id usb_ids[] = {
84 { USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B }, 84 { USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B },
85 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B }, 85 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },
86 { USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B }, 86 { USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B },
87 { USB_DEVICE(0x0df6, 0x0036), .driver_info = DEVICE_ZD1211B },
87 /* "Driverless" devices that need ejecting */ 88 /* "Driverless" devices that need ejecting */
88 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, 89 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
89 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER }, 90 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 2e03b6d796d3..e76d715e4342 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -393,16 +393,21 @@ oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
393 return; 393 return;
394 394
395fail: 395fail:
396 entry->event = NULL;
396 cpu_buf->sample_lost_overflow++; 397 cpu_buf->sample_lost_overflow++;
397} 398}
398 399
399int oprofile_add_data(struct op_entry *entry, unsigned long val) 400int oprofile_add_data(struct op_entry *entry, unsigned long val)
400{ 401{
402 if (!entry->event)
403 return 0;
401 return op_cpu_buffer_add_data(entry, val); 404 return op_cpu_buffer_add_data(entry, val);
402} 405}
403 406
404int oprofile_write_commit(struct op_entry *entry) 407int oprofile_write_commit(struct op_entry *entry)
405{ 408{
409 if (!entry->event)
410 return -EINVAL;
406 return op_cpu_buffer_write_commit(entry); 411 return op_cpu_buffer_write_commit(entry);
407} 412}
408 413
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 63f81c44846a..272995d20293 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -66,6 +66,13 @@ static inline void op_cpu_buffer_reset(int cpu)
66 cpu_buf->last_task = NULL; 66 cpu_buf->last_task = NULL;
67} 67}
68 68
69/*
70 * op_cpu_buffer_add_data() and op_cpu_buffer_write_commit() may be
71 * called only if op_cpu_buffer_write_reserve() did not return NULL or
72 * entry->event != NULL, otherwise entry->size or entry->event will be
73 * used uninitialized.
74 */
75
69struct op_sample 76struct op_sample
70*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size); 77*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
71int op_cpu_buffer_write_commit(struct op_entry *entry); 78int op_cpu_buffer_write_commit(struct op_entry *entry);
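
The new comment in cpu_buffer.h above states the contract the cpu_buffer.c hunk enforces: add and commit are valid only when the reservation succeeded, i.e. entry->event is non-NULL. A standalone sketch of that guarded reserve/add/commit pattern, with a hypothetical entry type rather than the oprofile structures:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical ring-buffer entry: event stays NULL when the reserve failed. */
struct entry { int *event; size_t size; size_t used; };

static int reserve(struct entry *e, int *slot, size_t size)
{
	if (!slot)			/* buffer full: leave e->event NULL */
		return -1;
	e->event = slot;
	e->size = size;
	e->used = 0;
	return 0;
}

static int add_data(struct entry *e, int val)
{
	if (!e->event || e->used >= e->size)	/* tolerate a failed reserve */
		return 0;
	e->event[e->used++] = val;
	return 1;
}

static int commit(struct entry *e)
{
	if (!e->event)
		return -1;
	printf("committed %zu values\n", e->used);
	return 0;
}

int main(void)
{
	int slots[4];
	struct entry e = { 0 };

	if (reserve(&e, slots, 4) == 0) {
		add_data(&e, 42);
		commit(&e);
	}
	return 0;
}
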
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 5482d4ed8256..c2485542f543 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -126,8 +126,10 @@ static int set_lock_status(struct hotplug_slot *hotplug_slot, u8 status)
126 mutex_lock(&slot->ctrl->crit_sect); 126 mutex_lock(&slot->ctrl->crit_sect);
127 127
128 /* has it been >1 sec since our last toggle? */ 128 /* has it been >1 sec since our last toggle? */
129 if ((get_seconds() - slot->last_emi_toggle) < 1) 129 if ((get_seconds() - slot->last_emi_toggle) < 1) {
130 mutex_unlock(&slot->ctrl->crit_sect);
130 return -EINVAL; 131 return -EINVAL;
132 }
131 133
132 /* see what our current state is */ 134 /* see what our current state is */
133 retval = get_lock_status(hotplug_slot, &value); 135 retval = get_lock_status(hotplug_slot, &value);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index b4a90badd0a6..896a15d70f5b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -398,21 +398,19 @@ static int msi_capability_init(struct pci_dev *dev)
398 entry->msi_attrib.masked = 1; 398 entry->msi_attrib.masked = 1;
399 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ 399 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
400 entry->msi_attrib.pos = pos; 400 entry->msi_attrib.pos = pos;
401 if (entry->msi_attrib.maskbit) {
402 entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
403 entry->msi_attrib.is_64);
404 }
405 entry->dev = dev; 401 entry->dev = dev;
406 if (entry->msi_attrib.maskbit) { 402 if (entry->msi_attrib.maskbit) {
407 unsigned int maskbits, temp; 403 unsigned int base, maskbits, temp;
404
405 base = msi_mask_bits_reg(pos, entry->msi_attrib.is_64);
406 entry->mask_base = (void __iomem *)(long)base;
407
408 /* All MSIs are unmasked by default, Mask them all */ 408 /* All MSIs are unmasked by default, Mask them all */
409 pci_read_config_dword(dev, 409 pci_read_config_dword(dev, base, &maskbits);
410 msi_mask_bits_reg(pos, entry->msi_attrib.is_64),
411 &maskbits);
412 temp = (1 << multi_msi_capable(control)); 410 temp = (1 << multi_msi_capable(control));
413 temp = ((temp - 1) & ~temp); 411 temp = ((temp - 1) & ~temp);
414 maskbits |= temp; 412 maskbits |= temp;
415 pci_write_config_dword(dev, entry->msi_attrib.is_64, maskbits); 413 pci_write_config_dword(dev, base, maskbits);
416 entry->msi_attrib.maskbits_mask = temp; 414 entry->msi_attrib.maskbits_mask = temp;
417 } 415 }
418 list_add_tail(&entry->list, &dev->msi_list); 416 list_add_tail(&entry->list, &dev->msi_list);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index c697f2680856..9de07b75b993 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -355,17 +355,27 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
355 int i = 0; 355 int i = 0;
356 356
357 if (drv && drv->suspend) { 357 if (drv && drv->suspend) {
358 pci_dev->state_saved = false;
359
358 i = drv->suspend(pci_dev, state); 360 i = drv->suspend(pci_dev, state);
359 suspend_report_result(drv->suspend, i); 361 suspend_report_result(drv->suspend, i);
360 } else { 362 if (i)
361 pci_save_state(pci_dev); 363 return i;
362 /* 364
363 * This is for compatibility with existing code with legacy PM 365 if (pci_dev->state_saved)
364 * support. 366 goto Fixup;
365 */ 367
366 pci_pm_set_unknown_state(pci_dev); 368 if (WARN_ON_ONCE(pci_dev->current_state != PCI_D0))
369 goto Fixup;
367 } 370 }
368 371
372 pci_save_state(pci_dev);
373 /*
374 * This is for compatibility with existing code with legacy PM support.
375 */
376 pci_pm_set_unknown_state(pci_dev);
377
378 Fixup:
369 pci_fixup_device(pci_fixup_suspend, pci_dev); 379 pci_fixup_device(pci_fixup_suspend, pci_dev);
370 380
371 return i; 381 return i;
@@ -386,81 +396,34 @@ static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
 
 static int pci_legacy_resume_early(struct device *dev)
 {
-	int error = 0;
 	struct pci_dev * pci_dev = to_pci_dev(dev);
 	struct pci_driver * drv = pci_dev->driver;
 
-	pci_fixup_device(pci_fixup_resume_early, pci_dev);
-
-	if (drv && drv->resume_early)
-		error = drv->resume_early(pci_dev);
-	return error;
+	return drv && drv->resume_early ?
+			drv->resume_early(pci_dev) : 0;
 }
 
 static int pci_legacy_resume(struct device *dev)
 {
-	int error;
 	struct pci_dev * pci_dev = to_pci_dev(dev);
 	struct pci_driver * drv = pci_dev->driver;
 
 	pci_fixup_device(pci_fixup_resume, pci_dev);
 
-	if (drv && drv->resume) {
-		error = drv->resume(pci_dev);
-	} else {
-		/* restore the PCI config space */
-		pci_restore_state(pci_dev);
-		error = pci_pm_reenable_device(pci_dev);
-	}
-	return error;
+	return drv && drv->resume ?
+			drv->resume(pci_dev) : pci_pm_reenable_device(pci_dev);
 }
 
 /* Auxiliary functions used by the new power management framework */
 
-static int pci_restore_standard_config(struct pci_dev *pci_dev)
-{
-	struct pci_dev *parent = pci_dev->bus->self;
-	int error = 0;
-
-	/* Check if the device's bus is operational */
-	if (!parent || parent->current_state == PCI_D0) {
-		pci_restore_state(pci_dev);
-		pci_update_current_state(pci_dev, PCI_D0);
-	} else {
-		dev_warn(&pci_dev->dev, "unable to restore config, "
-			"bridge %s in low power state D%d\n", pci_name(parent),
-			parent->current_state);
-		pci_dev->current_state = PCI_UNKNOWN;
-		error = -EAGAIN;
-	}
-
-	return error;
-}
-
-static bool pci_is_bridge(struct pci_dev *pci_dev)
-{
-	return !!(pci_dev->subordinate);
-}
-
 static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev)
 {
-	if (pci_restore_standard_config(pci_dev))
-		pci_fixup_device(pci_fixup_resume_early, pci_dev);
+	pci_restore_standard_config(pci_dev);
+	pci_fixup_device(pci_fixup_resume_early, pci_dev);
 }
 
 static int pci_pm_default_resume(struct pci_dev *pci_dev)
 {
-	/*
-	 * pci_restore_standard_config() should have been called once already,
-	 * but it would have failed if the device's parent bridge had not been
-	 * in power state D0 at that time. Check it and try again if necessary.
-	 */
-	if (pci_dev->current_state == PCI_UNKNOWN) {
-		int error = pci_restore_standard_config(pci_dev);
-		if (error)
-			return error;
-	}
-
 	pci_fixup_device(pci_fixup_resume, pci_dev);
 
 	if (!pci_is_bridge(pci_dev))
@@ -575,11 +538,11 @@ static int pci_pm_resume_noirq(struct device *dev)
 	struct device_driver *drv = dev->driver;
 	int error = 0;
 
+	pci_pm_default_resume_noirq(pci_dev);
+
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_resume_early(dev);
 
-	pci_pm_default_resume_noirq(pci_dev);
-
 	if (drv && drv->pm && drv->pm->resume_noirq)
 		error = drv->pm->resume_noirq(dev);
 
@@ -730,11 +693,11 @@ static int pci_pm_restore_noirq(struct device *dev)
 	struct device_driver *drv = dev->driver;
 	int error = 0;
 
+	pci_pm_default_resume_noirq(pci_dev);
+
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_resume_early(dev);
 
-	pci_pm_default_resume_noirq(pci_dev);
-
 	if (drv && drv->pm && drv->pm->restore_noirq)
 		error = drv->pm->restore_noirq(dev);
 
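The two hunks above move pci_pm_default_resume_noirq() in front of the legacy-PM early return, so the standard config space is restored for legacy drivers as well. A hedged, stand-alone sketch of the resulting ordering, modelled with stand-in stubs rather than the actual kernel symbols:

	/* Illustrative only: the control flow established above, with stub helpers. */
	#include <stdio.h>
	#include <stdbool.h>

	static void default_resume_noirq(void)       { puts("restore standard config"); }
	static bool has_legacy_pm_support(void)       { return true; /* hypothetical */ }
	static int  legacy_resume_early(void)         { puts("legacy ->resume_early()"); return 0; }
	static int  new_framework_resume_noirq(void)  { puts("->pm->resume_noirq()"); return 0; }

	static int resume_noirq_flow(void)
	{
		default_resume_noirq();			/* now runs for every device */

		if (has_legacy_pm_support())
			return legacy_resume_early();	/* legacy drivers return here */

		return new_framework_resume_noirq();
	}

	int main(void)
	{
		return resume_noirq_flow();
	}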
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e491fdedf705..17bd9325a245 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -22,7 +22,7 @@
 #include <asm/dma.h>	/* isa_dma_bridge_buggy */
 #include "pci.h"
 
-unsigned int pci_pm_d3_delay = 10;
+unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
 
 #ifdef CONFIG_PCI_DOMAINS
 int pci_domains_supported = 1;
@@ -426,6 +426,7 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
  * given PCI device
  * @dev: PCI device to handle.
  * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
+ * @wait: If 'true', wait for the device to change its power state
  *
  * RETURN VALUE:
  * -EINVAL if the requested state is invalid.
@@ -435,7 +436,7 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
  * 0 if device's power state has been successfully changed.
  */
 static int
-pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
+pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
 {
 	u16 pmcsr;
 	bool need_restore = false;
@@ -480,8 +481,10 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 		break;
 	case PCI_UNKNOWN: /* Boot-up */
 		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
-		    && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
+		    && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) {
 			need_restore = true;
+			wait = true;
+		}
 		/* Fall-through: force to D0 */
 	default:
 		pmcsr = 0;
@@ -491,12 +494,15 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 	/* enter specified state */
 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
 
+	if (!wait)
+		return 0;
+
 	/* Mandatory power management transition delays */
 	/* see PCI PM 1.1 5.6.1 table 18 */
 	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
 		msleep(pci_pm_d3_delay);
 	else if (state == PCI_D2 || dev->current_state == PCI_D2)
-		udelay(200);
+		udelay(PCI_PM_D2_DELAY);
 
 	dev->current_state = state;
 
@@ -515,7 +521,7 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 	if (need_restore)
 		pci_restore_bars(dev);
 
-	if (dev->bus->self)
+	if (wait && dev->bus->self)
 		pcie_aspm_pm_state_change(dev->bus->self);
 
 	return 0;
@@ -585,7 +591,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
 		return 0;
 
-	error = pci_raw_set_power_state(dev, state);
+	error = pci_raw_set_power_state(dev, state, true);
 
 	if (state > PCI_D0 && platform_pci_power_manageable(dev)) {
 		/* Allow the platform to finalize the transition */
@@ -730,6 +736,7 @@ pci_save_state(struct pci_dev *dev)
 	/* XXX: 100% dword access ok here? */
 	for (i = 0; i < 16; i++)
 		pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]);
+	dev->state_saved = true;
 	if ((i = pci_save_pcie_state(dev)) != 0)
 		return i;
 	if ((i = pci_save_pcix_state(dev)) != 0)
@@ -1374,6 +1381,50 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
 }
 
 /**
+ * pci_restore_standard_config - restore standard config registers of PCI device
+ * @dev: PCI device to handle
+ *
+ * This function assumes that the device's configuration space is accessible.
+ * If the device needs to be powered up, the function will wait for it to
+ * change the state.
+ */
+int pci_restore_standard_config(struct pci_dev *dev)
+{
+	pci_power_t prev_state;
+	int error;
+
+	pci_restore_state(dev);
+	pci_update_current_state(dev, PCI_D0);
+
+	prev_state = dev->current_state;
+	if (prev_state == PCI_D0)
+		return 0;
+
+	error = pci_raw_set_power_state(dev, PCI_D0, false);
+	if (error)
+		return error;
+
+	if (pci_is_bridge(dev)) {
+		if (prev_state > PCI_D1)
+			mdelay(PCI_PM_BUS_WAIT);
+	} else {
+		switch(prev_state) {
+		case PCI_D3cold:
+		case PCI_D3hot:
+			mdelay(pci_pm_d3_delay);
+			break;
+		case PCI_D2:
+			udelay(PCI_PM_D2_DELAY);
+			break;
+		}
+	}
+
+	dev->current_state = PCI_D0;
+
+	return 0;
+}
+
+/**
  * pci_enable_ari - enable ARI forwarding if hardware support it
  * @dev: the PCI device
  */
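The new pci_restore_standard_config() above restores the saved config space and then, if the device was not already in D0, forces the transition without pci_raw_set_power_state()'s internal wait and applies the state-dependent delay itself. A condensed, stand-alone sketch of that delay choice (illustrative only; the numeric values are hypothetical placeholders for PCI_PM_BUS_WAIT, pci_pm_d3_delay and PCI_PM_D2_DELAY):

	#include <stdio.h>
	#include <stdbool.h>

	enum pci_power { D0, D1, D2, D3hot, D3cold };

	static unsigned int d0_transition_delay_us(bool is_bridge, enum pci_power prev)
	{
		if (is_bridge)
			return prev > D1 ? 50000 : 0;	/* let the bus behind the bridge settle */
		if (prev == D3hot || prev == D3cold)
			return 10000;			/* see PCI PM 1.1 5.6.1 table 18 */
		if (prev == D2)
			return 200;
		return 0;
	}

	int main(void)
	{
		printf("%u us\n", d0_transition_delay_us(false, D3hot));
		return 0;
	}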
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 1351bb4addde..26ddf78ac300 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -49,6 +49,12 @@ extern void pci_disable_enabled_device(struct pci_dev *dev);
 extern void pci_pm_init(struct pci_dev *dev);
 extern void platform_pci_wakeup_init(struct pci_dev *dev);
 extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
+extern int pci_restore_standard_config(struct pci_dev *dev);
+
+static inline bool pci_is_bridge(struct pci_dev *pci_dev)
+{
+	return !!(pci_dev->subordinate);
+}
 
 extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
 extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
index d30bb766fcef..b56a704409d2 100644
--- a/drivers/power/pda_power.c
+++ b/drivers/power/pda_power.c
@@ -20,7 +20,7 @@
 
 static inline unsigned int get_irq_flags(struct resource *res)
 {
-	unsigned int flags = IRQF_DISABLED | IRQF_SHARED;
+	unsigned int flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
 
 	flags |= res->flags & IRQF_TRIGGER_MASK;
 
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index bca08eff4a77..083f787d260d 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -70,7 +70,9 @@ static char debug_buffer[255];
 static void lcs_tasklet(unsigned long);
 static void lcs_start_kernel_thread(struct work_struct *);
 static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *);
+#ifdef CONFIG_IP_MULTICAST
 static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
+#endif /* CONFIG_IP_MULTICAST */
 static int lcs_recovery(void *ptr);
 
 /**
@@ -1285,6 +1287,8 @@ out:
 	lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD);
 	return 0;
 }
+#endif /* CONFIG_IP_MULTICAST */
+
 /**
  * function called by net device to
  * handle multicast address relevant things
@@ -1292,6 +1296,7 @@ out:
 static void
 lcs_set_multicast_list(struct net_device *dev)
 {
+#ifdef CONFIG_IP_MULTICAST
 	struct lcs_card *card;
 
 	LCS_DBF_TEXT(4, trace, "setmulti");
@@ -1299,9 +1304,8 @@ lcs_set_multicast_list(struct net_device *dev)
 
 	if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD))
 		schedule_work(&card->kernel_thread_starter);
-}
-
 #endif /* CONFIG_IP_MULTICAST */
+}
 
 static long
 lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb)
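Taken together, the lcs.c hunks wrap the multicast-only helpers in CONFIG_IP_MULTICAST while keeping lcs_set_multicast_list() itself defined unconditionally, presumably so it can stay wired up as the net_device multicast hook. A sketch of the resulting pattern (illustrative names, not the actual driver code):

	/* Illustrative pattern only: the function shell always exists, its body
	 * compiles to a no-op when CONFIG_IP_MULTICAST is not set. */
	static void example_set_multicast_list(void)
	{
	#ifdef CONFIG_IP_MULTICAST
		/* schedule the multicast worker thread here */
	#endif /* CONFIG_IP_MULTICAST */
	}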
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index 38c600c0dbbf..3599828b9766 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -32,7 +32,9 @@
 #elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \
       defined(CONFIG_CPU_SUBTYPE_SH7721)
 # define SCSCR_INIT(port)  0x0030 /* TIE=0,RIE=0,TE=1,RE=1 */
-#define SCIF_ORER    0x0200   /* overrun error bit */
+# define PORT_PTCR	   0xA405011EUL
+# define PORT_PVCR	   0xA4050122UL
+# define SCIF_ORER	   0x0200   /* overrun error bit */
 #elif defined(CONFIG_SH_RTS7751R2D)
 # define SCSPTR1 0xFFE0001C /* 8  bit SCIF */
 # define SCSPTR2 0xFFE80020 /* 16 bit SCIF */
@@ -393,6 +395,7 @@ SCIx_FNS(SCSCR, 0x08, 16, 0x08, 16)
 SCIx_FNS(SCxTDR, 0x20, 8, 0x0c, 8)
 SCIx_FNS(SCxSR, 0x14, 16, 0x10, 16)
 SCIx_FNS(SCxRDR, 0x24, 8, 0x14, 8)
+SCIx_FNS(SCSPTR, 0, 0, 0, 0)
 SCIF_FNS(SCTDSR, 0x0c, 8)
 SCIF_FNS(SCFER, 0x10, 16)
 SCIF_FNS(SCFCR, 0x18, 16)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 4a6fe01831a8..83a185d52961 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -230,17 +230,6 @@ config SPI_XILINX
 #
 comment "SPI Protocol Masters"
 
-config SPI_AT25
-	tristate "SPI EEPROMs from most vendors"
-	depends on SYSFS
-	help
-	  Enable this driver to get read/write support to most SPI EEPROMs,
-	  after you configure the board init code to know about each eeprom
-	  on your target board.
-
-	  This driver can also be built as a module. If so, the module
-	  will be called at25.
-
 config SPI_SPIDEV
 	tristate "User mode SPI device driver support"
 	depends on EXPERIMENTAL
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 5e9f521b8844..5d0451936d86 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -33,7 +33,6 @@ obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o
 # 	... add above this line ...
 
 # SPI protocol drivers (device/link on bus)
-obj-$(CONFIG_SPI_AT25)		+= at25.o
 obj-$(CONFIG_SPI_SPIDEV)	+= spidev.o
 obj-$(CONFIG_SPI_TLE62X0)	+= tle62x0.o
 # 	... add above this line ...
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 4bbddb73abd9..f3aaba35e912 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -315,14 +315,14 @@ static int usb_hcd_omap_probe (const struct hc_driver *driver,
 		return -ENODEV;
 	}
 
-	usb_host_ck = clk_get(0, "usb_hhc_ck");
+	usb_host_ck = clk_get(&pdev->dev, "usb_hhc_ck");
 	if (IS_ERR(usb_host_ck))
 		return PTR_ERR(usb_host_ck);
 
 	if (!cpu_is_omap15xx())
-		usb_dc_ck = clk_get(0, "usb_dc_ck");
+		usb_dc_ck = clk_get(&pdev->dev, "usb_dc_ck");
 	else
-		usb_dc_ck = clk_get(0, "lb_ck");
+		usb_dc_ck = clk_get(&pdev->dev, "lb_ck");
 
 	if (IS_ERR(usb_dc_ck)) {
 		clk_put(usb_host_ck);
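The hunk above passes the probing device to clk_get() instead of a NULL device, so the clock framework can resolve the clock relative to that device. A kernel-style sketch of the usual pairing (illustrative only, not meant to build outside a kernel tree; the clock name is taken from the hunk):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int example_get_host_clock(struct platform_device *pdev)
	{
		struct clk *ck = clk_get(&pdev->dev, "usb_hhc_ck");

		if (IS_ERR(ck))
			return PTR_ERR(ck);

		clk_enable(ck);
		/* ... use the clock ... */
		clk_disable(ck);
		clk_put(ck);

		return 0;
	}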
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 6372f8b17b45..c94f71980c1b 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2123,6 +2123,18 @@ config FB_PRE_INIT_FB
 	  Select this option if display contents should be inherited as set by
 	  the bootloader.
 
+config FB_MX3
+	tristate "MX3 Framebuffer support"
+	depends on FB && MX3_IPU
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	default y
+	help
+	  This is a framebuffer device for the i.MX31 LCD Controller. So
+	  far only synchronous displays are supported. If you plan to use
+	  an LCD display with your i.MX31 system, say Y here.
+
 source "drivers/video/omap/Kconfig"
 
 source "drivers/video/backlight/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index be2b657546ef..2a998ca6181d 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -132,6 +132,7 @@ obj-$(CONFIG_FB_VGA16) += vga16fb.o
 obj-$(CONFIG_FB_OF)               += offb.o
 obj-$(CONFIG_FB_BF54X_LQ043)	  += bf54x-lq043fb.o
 obj-$(CONFIG_FB_BFIN_T350MCQB)	  += bfin-t350mcqb-fb.o
+obj-$(CONFIG_FB_MX3)		  += mx3fb.o
 
 # the test framebuffer is last
 obj-$(CONFIG_FB_VIRTUAL)          += vfb.o
diff --git a/drivers/video/aty/radeon_i2c.c b/drivers/video/aty/radeon_i2c.c
index 2c5567175dca..359fc64e761a 100644
--- a/drivers/video/aty/radeon_i2c.c
+++ b/drivers/video/aty/radeon_i2c.c
@@ -72,7 +72,6 @@ static int radeon_setup_i2c_bus(struct radeon_i2c_chan *chan, const char *name)
 	snprintf(chan->adapter.name, sizeof(chan->adapter.name),
 		 "radeonfb %s", name);
 	chan->adapter.owner		= THIS_MODULE;
-	chan->adapter.id		= I2C_HW_B_RADEON;
 	chan->adapter.algo_data		= &chan->algo;
 	chan->adapter.dev.parent	= &chan->rinfo->pdev->dev;
 	chan->algo.setsda		= radeon_gpio_setsda;
diff --git a/drivers/video/i810/i810-i2c.c b/drivers/video/i810/i810-i2c.c
index 7787c3322ffb..9dd55e5324a1 100644
--- a/drivers/video/i810/i810-i2c.c
+++ b/drivers/video/i810/i810-i2c.c
@@ -90,7 +90,6 @@ static int i810_setup_i2c_bus(struct i810fb_i2c_chan *chan, const char *name)
 	chan->adapter.owner		= THIS_MODULE;
 	chan->adapter.algo_data		= &chan->algo;
 	chan->adapter.dev.parent	= &chan->par->dev->dev;
-	chan->adapter.id		= I2C_HW_B_I810;
 	chan->algo.setsda		= i810i2c_setsda;
 	chan->algo.setscl		= i810i2c_setscl;
 	chan->algo.getsda		= i810i2c_getsda;
diff --git a/drivers/video/intelfb/intelfb_i2c.c b/drivers/video/intelfb/intelfb_i2c.c
index 5d896b81f4e0..b3065492bb20 100644
--- a/drivers/video/intelfb/intelfb_i2c.c
+++ b/drivers/video/intelfb/intelfb_i2c.c
@@ -111,7 +111,6 @@ static int intelfb_setup_i2c_bus(struct intelfb_info *dinfo,
111 "intelfb %s", name); 111 "intelfb %s", name);
112 chan->adapter.class = class; 112 chan->adapter.class = class;
113 chan->adapter.owner = THIS_MODULE; 113 chan->adapter.owner = THIS_MODULE;
114 chan->adapter.id = I2C_HW_B_INTELFB;
115 chan->adapter.algo_data = &chan->algo; 114 chan->adapter.algo_data = &chan->algo;
116 chan->adapter.dev.parent = &chan->dinfo->pdev->dev; 115 chan->adapter.dev.parent = &chan->dinfo->pdev->dev;
117 chan->algo.setsda = intelfb_gpio_setsda; 116 chan->algo.setsda = intelfb_gpio_setsda;
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
new file mode 100644
index 000000000000..8a75d05f4334
--- /dev/null
+++ b/drivers/video/mx3fb.c
@@ -0,0 +1,1555 @@
1/*
2 * Copyright (C) 2008
3 * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
4 *
5 * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/platform_device.h>
15#include <linux/sched.h>
16#include <linux/errno.h>
17#include <linux/string.h>
18#include <linux/interrupt.h>
19#include <linux/slab.h>
20#include <linux/fb.h>
21#include <linux/delay.h>
22#include <linux/init.h>
23#include <linux/ioport.h>
24#include <linux/dma-mapping.h>
25#include <linux/dmaengine.h>
26#include <linux/console.h>
27#include <linux/clk.h>
28#include <linux/mutex.h>
29
30#include <mach/hardware.h>
31#include <mach/ipu.h>
32#include <mach/mx3fb.h>
33
34#include <asm/io.h>
35#include <asm/uaccess.h>
36
37#define MX3FB_NAME "mx3_sdc_fb"
38
39#define MX3FB_REG_OFFSET 0xB4
40
41/* SDC Registers */
42#define SDC_COM_CONF (0xB4 - MX3FB_REG_OFFSET)
43#define SDC_GW_CTRL (0xB8 - MX3FB_REG_OFFSET)
44#define SDC_FG_POS (0xBC - MX3FB_REG_OFFSET)
45#define SDC_BG_POS (0xC0 - MX3FB_REG_OFFSET)
46#define SDC_CUR_POS (0xC4 - MX3FB_REG_OFFSET)
47#define SDC_PWM_CTRL (0xC8 - MX3FB_REG_OFFSET)
48#define SDC_CUR_MAP (0xCC - MX3FB_REG_OFFSET)
49#define SDC_HOR_CONF (0xD0 - MX3FB_REG_OFFSET)
50#define SDC_VER_CONF (0xD4 - MX3FB_REG_OFFSET)
51#define SDC_SHARP_CONF_1 (0xD8 - MX3FB_REG_OFFSET)
52#define SDC_SHARP_CONF_2 (0xDC - MX3FB_REG_OFFSET)
53
54/* Register bits */
55#define SDC_COM_TFT_COLOR 0x00000001UL
56#define SDC_COM_FG_EN 0x00000010UL
57#define SDC_COM_GWSEL 0x00000020UL
58#define SDC_COM_GLB_A 0x00000040UL
59#define SDC_COM_KEY_COLOR_G 0x00000080UL
60#define SDC_COM_BG_EN 0x00000200UL
61#define SDC_COM_SHARP 0x00001000UL
62
63#define SDC_V_SYNC_WIDTH_L 0x00000001UL
64
65/* Display Interface registers */
66#define DI_DISP_IF_CONF (0x0124 - MX3FB_REG_OFFSET)
67#define DI_DISP_SIG_POL (0x0128 - MX3FB_REG_OFFSET)
68#define DI_SER_DISP1_CONF (0x012C - MX3FB_REG_OFFSET)
69#define DI_SER_DISP2_CONF (0x0130 - MX3FB_REG_OFFSET)
70#define DI_HSP_CLK_PER (0x0134 - MX3FB_REG_OFFSET)
71#define DI_DISP0_TIME_CONF_1 (0x0138 - MX3FB_REG_OFFSET)
72#define DI_DISP0_TIME_CONF_2 (0x013C - MX3FB_REG_OFFSET)
73#define DI_DISP0_TIME_CONF_3 (0x0140 - MX3FB_REG_OFFSET)
74#define DI_DISP1_TIME_CONF_1 (0x0144 - MX3FB_REG_OFFSET)
75#define DI_DISP1_TIME_CONF_2 (0x0148 - MX3FB_REG_OFFSET)
76#define DI_DISP1_TIME_CONF_3 (0x014C - MX3FB_REG_OFFSET)
77#define DI_DISP2_TIME_CONF_1 (0x0150 - MX3FB_REG_OFFSET)
78#define DI_DISP2_TIME_CONF_2 (0x0154 - MX3FB_REG_OFFSET)
79#define DI_DISP2_TIME_CONF_3 (0x0158 - MX3FB_REG_OFFSET)
80#define DI_DISP3_TIME_CONF (0x015C - MX3FB_REG_OFFSET)
81#define DI_DISP0_DB0_MAP (0x0160 - MX3FB_REG_OFFSET)
82#define DI_DISP0_DB1_MAP (0x0164 - MX3FB_REG_OFFSET)
83#define DI_DISP0_DB2_MAP (0x0168 - MX3FB_REG_OFFSET)
84#define DI_DISP0_CB0_MAP (0x016C - MX3FB_REG_OFFSET)
85#define DI_DISP0_CB1_MAP (0x0170 - MX3FB_REG_OFFSET)
86#define DI_DISP0_CB2_MAP (0x0174 - MX3FB_REG_OFFSET)
87#define DI_DISP1_DB0_MAP (0x0178 - MX3FB_REG_OFFSET)
88#define DI_DISP1_DB1_MAP (0x017C - MX3FB_REG_OFFSET)
89#define DI_DISP1_DB2_MAP (0x0180 - MX3FB_REG_OFFSET)
90#define DI_DISP1_CB0_MAP (0x0184 - MX3FB_REG_OFFSET)
91#define DI_DISP1_CB1_MAP (0x0188 - MX3FB_REG_OFFSET)
92#define DI_DISP1_CB2_MAP (0x018C - MX3FB_REG_OFFSET)
93#define DI_DISP2_DB0_MAP (0x0190 - MX3FB_REG_OFFSET)
94#define DI_DISP2_DB1_MAP (0x0194 - MX3FB_REG_OFFSET)
95#define DI_DISP2_DB2_MAP (0x0198 - MX3FB_REG_OFFSET)
96#define DI_DISP2_CB0_MAP (0x019C - MX3FB_REG_OFFSET)
97#define DI_DISP2_CB1_MAP (0x01A0 - MX3FB_REG_OFFSET)
98#define DI_DISP2_CB2_MAP (0x01A4 - MX3FB_REG_OFFSET)
99#define DI_DISP3_B0_MAP (0x01A8 - MX3FB_REG_OFFSET)
100#define DI_DISP3_B1_MAP (0x01AC - MX3FB_REG_OFFSET)
101#define DI_DISP3_B2_MAP (0x01B0 - MX3FB_REG_OFFSET)
102#define DI_DISP_ACC_CC (0x01B4 - MX3FB_REG_OFFSET)
103#define DI_DISP_LLA_CONF (0x01B8 - MX3FB_REG_OFFSET)
104#define DI_DISP_LLA_DATA (0x01BC - MX3FB_REG_OFFSET)
105
106/* DI_DISP_SIG_POL bits */
107#define DI_D3_VSYNC_POL_SHIFT 28
108#define DI_D3_HSYNC_POL_SHIFT 27
109#define DI_D3_DRDY_SHARP_POL_SHIFT 26
110#define DI_D3_CLK_POL_SHIFT 25
111#define DI_D3_DATA_POL_SHIFT 24
112
113/* DI_DISP_IF_CONF bits */
114#define DI_D3_CLK_IDLE_SHIFT 26
115#define DI_D3_CLK_SEL_SHIFT 25
116#define DI_D3_DATAMSK_SHIFT 24
117
118enum ipu_panel {
119 IPU_PANEL_SHARP_TFT,
120 IPU_PANEL_TFT,
121};
122
123struct ipu_di_signal_cfg {
124 unsigned datamask_en:1;
125 unsigned clksel_en:1;
126 unsigned clkidle_en:1;
127 unsigned data_pol:1; /* true = inverted */
128 unsigned clk_pol:1; /* true = rising edge */
129 unsigned enable_pol:1;
130 unsigned Hsync_pol:1; /* true = active high */
131 unsigned Vsync_pol:1;
132};
133
134static const struct fb_videomode mx3fb_modedb[] = {
135 {
136 /* 240x320 @ 60 Hz */
137 .name = "Sharp-QVGA",
138 .refresh = 60,
139 .xres = 240,
140 .yres = 320,
141 .pixclock = 185925,
142 .left_margin = 9,
143 .right_margin = 16,
144 .upper_margin = 7,
145 .lower_margin = 9,
146 .hsync_len = 1,
147 .vsync_len = 1,
148 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_SHARP_MODE |
149 FB_SYNC_CLK_INVERT | FB_SYNC_DATA_INVERT |
150 FB_SYNC_CLK_IDLE_EN,
151 .vmode = FB_VMODE_NONINTERLACED,
152 .flag = 0,
153 }, {
154 /* 240x33 @ 60 Hz */
155 .name = "Sharp-CLI",
156 .refresh = 60,
157 .xres = 240,
158 .yres = 33,
159 .pixclock = 185925,
160 .left_margin = 9,
161 .right_margin = 16,
162 .upper_margin = 7,
163 .lower_margin = 9 + 287,
164 .hsync_len = 1,
165 .vsync_len = 1,
166 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_SHARP_MODE |
167 FB_SYNC_CLK_INVERT | FB_SYNC_DATA_INVERT |
168 FB_SYNC_CLK_IDLE_EN,
169 .vmode = FB_VMODE_NONINTERLACED,
170 .flag = 0,
171 }, {
172 /* 640x480 @ 60 Hz */
173 .name = "NEC-VGA",
174 .refresh = 60,
175 .xres = 640,
176 .yres = 480,
177 .pixclock = 38255,
178 .left_margin = 144,
179 .right_margin = 0,
180 .upper_margin = 34,
181 .lower_margin = 40,
182 .hsync_len = 1,
183 .vsync_len = 1,
184 .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_OE_ACT_HIGH,
185 .vmode = FB_VMODE_NONINTERLACED,
186 .flag = 0,
187 }, {
188 /* NTSC TV output */
189 .name = "TV-NTSC",
190 .refresh = 60,
191 .xres = 640,
192 .yres = 480,
193 .pixclock = 37538,
194 .left_margin = 38,
195 .right_margin = 858 - 640 - 38 - 3,
196 .upper_margin = 36,
197 .lower_margin = 518 - 480 - 36 - 1,
198 .hsync_len = 3,
199 .vsync_len = 1,
200 .sync = 0,
201 .vmode = FB_VMODE_NONINTERLACED,
202 .flag = 0,
203 }, {
204 /* PAL TV output */
205 .name = "TV-PAL",
206 .refresh = 50,
207 .xres = 640,
208 .yres = 480,
209 .pixclock = 37538,
210 .left_margin = 38,
211 .right_margin = 960 - 640 - 38 - 32,
212 .upper_margin = 32,
213 .lower_margin = 555 - 480 - 32 - 3,
214 .hsync_len = 32,
215 .vsync_len = 3,
216 .sync = 0,
217 .vmode = FB_VMODE_NONINTERLACED,
218 .flag = 0,
219 }, {
220 /* TV output VGA mode, 640x480 @ 65 Hz */
221 .name = "TV-VGA",
222 .refresh = 60,
223 .xres = 640,
224 .yres = 480,
225 .pixclock = 40574,
226 .left_margin = 35,
227 .right_margin = 45,
228 .upper_margin = 9,
229 .lower_margin = 1,
230 .hsync_len = 46,
231 .vsync_len = 5,
232 .sync = 0,
233 .vmode = FB_VMODE_NONINTERLACED,
234 .flag = 0,
235 },
236};
237
238struct mx3fb_data {
239 struct fb_info *fbi;
240 int backlight_level;
241 void __iomem *reg_base;
242 spinlock_t lock;
243 struct device *dev;
244
245 uint32_t h_start_width;
246 uint32_t v_start_width;
247};
248
249struct dma_chan_request {
250 struct mx3fb_data *mx3fb;
251 enum ipu_channel id;
252};
253
254/* MX3 specific framebuffer information. */
255struct mx3fb_info {
256 int blank;
257 enum ipu_channel ipu_ch;
258 uint32_t cur_ipu_buf;
259
260 u32 pseudo_palette[16];
261
262 struct completion flip_cmpl;
263 struct mutex mutex; /* Protects fb-ops */
264 struct mx3fb_data *mx3fb;
265 struct idmac_channel *idmac_channel;
266 struct dma_async_tx_descriptor *txd;
267 dma_cookie_t cookie;
268 struct scatterlist sg[2];
269
270 u32 sync; /* preserve var->sync flags */
271};
272
273static void mx3fb_dma_done(void *);
274
275/* Used fb-mode and bpp. Can be set on kernel command line, therefore file-static. */
276static const char *fb_mode;
277static unsigned long default_bpp = 16;
278
279static u32 mx3fb_read_reg(struct mx3fb_data *mx3fb, unsigned long reg)
280{
281 return __raw_readl(mx3fb->reg_base + reg);
282}
283
284static void mx3fb_write_reg(struct mx3fb_data *mx3fb, u32 value, unsigned long reg)
285{
286 __raw_writel(value, mx3fb->reg_base + reg);
287}
288
289static const uint32_t di_mappings[] = {
290 0x1600AAAA, 0x00E05555, 0x00070000, 3, /* RGB888 */
291 0x0005000F, 0x000B000F, 0x0011000F, 1, /* RGB666 */
292 0x0011000F, 0x000B000F, 0x0005000F, 1, /* BGR666 */
293 0x0004003F, 0x000A000F, 0x000F003F, 1 /* RGB565 */
294};
295
296static void sdc_fb_init(struct mx3fb_info *fbi)
297{
298 struct mx3fb_data *mx3fb = fbi->mx3fb;
299 uint32_t reg;
300
301 reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF);
302
303 mx3fb_write_reg(mx3fb, reg | SDC_COM_BG_EN, SDC_COM_CONF);
304}
305
306/* Returns enabled flag before uninit */
307static uint32_t sdc_fb_uninit(struct mx3fb_info *fbi)
308{
309 struct mx3fb_data *mx3fb = fbi->mx3fb;
310 uint32_t reg;
311
312 reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF);
313
314 mx3fb_write_reg(mx3fb, reg & ~SDC_COM_BG_EN, SDC_COM_CONF);
315
316 return reg & SDC_COM_BG_EN;
317}
318
319static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
320{
321 struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
322 struct idmac_channel *ichan = mx3_fbi->idmac_channel;
323 struct dma_chan *dma_chan = &ichan->dma_chan;
324 unsigned long flags;
325 dma_cookie_t cookie;
326
327 dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi,
328 to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg);
329
330 /* This enables the channel */
331 if (mx3_fbi->cookie < 0) {
332 mx3_fbi->txd = dma_chan->device->device_prep_slave_sg(dma_chan,
333 &mx3_fbi->sg[0], 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
334 if (!mx3_fbi->txd) {
335 dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n",
336 dma_chan->chan_id);
337 return;
338 }
339
340 mx3_fbi->txd->callback_param = mx3_fbi->txd;
341 mx3_fbi->txd->callback = mx3fb_dma_done;
342
343 cookie = mx3_fbi->txd->tx_submit(mx3_fbi->txd);
344 dev_dbg(mx3fb->dev, "%d: Submit %p #%d [%c]\n", __LINE__,
345 mx3_fbi->txd, cookie, list_empty(&ichan->queue) ? '-' : '+');
346 } else {
347 if (!mx3_fbi->txd || !mx3_fbi->txd->tx_submit) {
348 dev_err(mx3fb->dev, "Cannot enable channel %d\n",
349 dma_chan->chan_id);
350 return;
351 }
352
353 /* Just re-activate the same buffer */
354 dma_async_issue_pending(dma_chan);
355 cookie = mx3_fbi->cookie;
356 dev_dbg(mx3fb->dev, "%d: Re-submit %p #%d [%c]\n", __LINE__,
357 mx3_fbi->txd, cookie, list_empty(&ichan->queue) ? '-' : '+');
358 }
359
360 if (cookie >= 0) {
361 spin_lock_irqsave(&mx3fb->lock, flags);
362 sdc_fb_init(mx3_fbi);
363 mx3_fbi->cookie = cookie;
364 spin_unlock_irqrestore(&mx3fb->lock, flags);
365 }
366
367 /*
368 * Attention! Without this msleep the channel keeps generating
369 * interrupts. Next sdc_set_brightness() is going to be called
370 * from mx3fb_blank().
371 */
372 msleep(2);
373}
374
375static void sdc_disable_channel(struct mx3fb_info *mx3_fbi)
376{
377 struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
378 uint32_t enabled;
379 unsigned long flags;
380
381 spin_lock_irqsave(&mx3fb->lock, flags);
382
383 enabled = sdc_fb_uninit(mx3_fbi);
384
385 spin_unlock_irqrestore(&mx3fb->lock, flags);
386
387 mx3_fbi->txd->chan->device->device_terminate_all(mx3_fbi->txd->chan);
388 mx3_fbi->txd = NULL;
389 mx3_fbi->cookie = -EINVAL;
390}
391
392/**
393 * sdc_set_window_pos() - set window position of the respective plane.
394 * @mx3fb: mx3fb context.
395 * @channel: IPU DMAC channel ID.
396 * @x_pos: X coordinate relative to the top left corner to place window at.
397 * @y_pos: Y coordinate relative to the top left corner to place window at.
398 * @return: 0 on success or negative error code on failure.
399 */
400static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel,
401 int16_t x_pos, int16_t y_pos)
402{
403 x_pos += mx3fb->h_start_width;
404 y_pos += mx3fb->v_start_width;
405
406 if (channel != IDMAC_SDC_0)
407 return -EINVAL;
408
409 mx3fb_write_reg(mx3fb, (x_pos << 16) | y_pos, SDC_BG_POS);
410 return 0;
411}
412
413/**
414 * sdc_init_panel() - initialize a synchronous LCD panel.
415 * @mx3fb: mx3fb context.
416 * @panel: panel type.
417 * @pixel_clk: desired pixel clock frequency in Hz.
418 * @width: width of panel in pixels.
419 * @height: height of panel in pixels.
420 * @pixel_fmt: pixel format of buffer as FOURCC ASCII code.
421 * @h_start_width: number of pixel clocks between the HSYNC signal pulse
422 * and the start of valid data.
423 * @h_sync_width: width of the HSYNC signal in units of pixel clocks.
424 * @h_end_width: number of pixel clocks between the end of valid data
425 * and the HSYNC signal for next line.
426 * @v_start_width: number of lines between the VSYNC signal pulse and the
427 * start of valid data.
428 * @v_sync_width: width of the VSYNC signal in units of lines
429 * @v_end_width: number of lines between the end of valid data and the
430 * VSYNC signal for next frame.
431 * @sig: bitfield of signal polarities for LCD interface.
432 * @return: 0 on success or negative error code on failure.
433 */
434static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
435 uint32_t pixel_clk,
436 uint16_t width, uint16_t height,
437 enum pixel_fmt pixel_fmt,
438 uint16_t h_start_width, uint16_t h_sync_width,
439 uint16_t h_end_width, uint16_t v_start_width,
440 uint16_t v_sync_width, uint16_t v_end_width,
441 struct ipu_di_signal_cfg sig)
442{
443 unsigned long lock_flags;
444 uint32_t reg;
445 uint32_t old_conf;
446 uint32_t div;
447 struct clk *ipu_clk;
448
449 dev_dbg(mx3fb->dev, "panel size = %d x %d", width, height);
450
451 if (v_sync_width == 0 || h_sync_width == 0)
452 return -EINVAL;
453
454 /* Init panel size and blanking periods */
455 reg = ((uint32_t) (h_sync_width - 1) << 26) |
456 ((uint32_t) (width + h_start_width + h_end_width - 1) << 16);
457 mx3fb_write_reg(mx3fb, reg, SDC_HOR_CONF);
458
459#ifdef DEBUG
460 printk(KERN_CONT " hor_conf %x,", reg);
461#endif
462
463 reg = ((uint32_t) (v_sync_width - 1) << 26) | SDC_V_SYNC_WIDTH_L |
464 ((uint32_t) (height + v_start_width + v_end_width - 1) << 16);
465 mx3fb_write_reg(mx3fb, reg, SDC_VER_CONF);
466
467#ifdef DEBUG
468 printk(KERN_CONT " ver_conf %x\n", reg);
469#endif
470
471 mx3fb->h_start_width = h_start_width;
472 mx3fb->v_start_width = v_start_width;
473
474 switch (panel) {
475 case IPU_PANEL_SHARP_TFT:
476 mx3fb_write_reg(mx3fb, 0x00FD0102L, SDC_SHARP_CONF_1);
477 mx3fb_write_reg(mx3fb, 0x00F500F4L, SDC_SHARP_CONF_2);
478 mx3fb_write_reg(mx3fb, SDC_COM_SHARP | SDC_COM_TFT_COLOR, SDC_COM_CONF);
479 break;
480 case IPU_PANEL_TFT:
481 mx3fb_write_reg(mx3fb, SDC_COM_TFT_COLOR, SDC_COM_CONF);
482 break;
483 default:
484 return -EINVAL;
485 }
486
487 /* Init clocking */
488
489 /*
 490	 * Calculate divider: fractional part is 4 bits, so simply multiply by
 491	 * 2^4 to get the fractional part, as long as we stay under ~250MHz and on
492 * i.MX31 it (HSP_CLK) is <= 178MHz. Currently 128.267MHz
493 */
494 dev_dbg(mx3fb->dev, "pixel clk = %d\n", pixel_clk);
495
496 ipu_clk = clk_get(mx3fb->dev, "ipu_clk");
497 div = clk_get_rate(ipu_clk) * 16 / pixel_clk;
498 clk_put(ipu_clk);
499
500 if (div < 0x40) { /* Divider less than 4 */
501 dev_dbg(mx3fb->dev,
502 "InitPanel() - Pixel clock divider less than 4\n");
503 div = 0x40;
504 }
505
506 spin_lock_irqsave(&mx3fb->lock, lock_flags);
507
508 /*
509 * DISP3_IF_CLK_DOWN_WR is half the divider value and 2 fraction bits
510 * fewer. Subtract 1 extra from DISP3_IF_CLK_DOWN_WR based on timing
511 * debug. DISP3_IF_CLK_UP_WR is 0
512 */
513 mx3fb_write_reg(mx3fb, (((div / 8) - 1) << 22) | div, DI_DISP3_TIME_CONF);
514
515 /* DI settings */
516 old_conf = mx3fb_read_reg(mx3fb, DI_DISP_IF_CONF) & 0x78FFFFFF;
517 old_conf |= sig.datamask_en << DI_D3_DATAMSK_SHIFT |
518 sig.clksel_en << DI_D3_CLK_SEL_SHIFT |
519 sig.clkidle_en << DI_D3_CLK_IDLE_SHIFT;
520 mx3fb_write_reg(mx3fb, old_conf, DI_DISP_IF_CONF);
521
522 old_conf = mx3fb_read_reg(mx3fb, DI_DISP_SIG_POL) & 0xE0FFFFFF;
523 old_conf |= sig.data_pol << DI_D3_DATA_POL_SHIFT |
524 sig.clk_pol << DI_D3_CLK_POL_SHIFT |
525 sig.enable_pol << DI_D3_DRDY_SHARP_POL_SHIFT |
526 sig.Hsync_pol << DI_D3_HSYNC_POL_SHIFT |
527 sig.Vsync_pol << DI_D3_VSYNC_POL_SHIFT;
528 mx3fb_write_reg(mx3fb, old_conf, DI_DISP_SIG_POL);
529
530 switch (pixel_fmt) {
531 case IPU_PIX_FMT_RGB24:
532 mx3fb_write_reg(mx3fb, di_mappings[0], DI_DISP3_B0_MAP);
533 mx3fb_write_reg(mx3fb, di_mappings[1], DI_DISP3_B1_MAP);
534 mx3fb_write_reg(mx3fb, di_mappings[2], DI_DISP3_B2_MAP);
535 mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
536 ((di_mappings[3] - 1) << 12), DI_DISP_ACC_CC);
537 break;
538 case IPU_PIX_FMT_RGB666:
539 mx3fb_write_reg(mx3fb, di_mappings[4], DI_DISP3_B0_MAP);
540 mx3fb_write_reg(mx3fb, di_mappings[5], DI_DISP3_B1_MAP);
541 mx3fb_write_reg(mx3fb, di_mappings[6], DI_DISP3_B2_MAP);
542 mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
543 ((di_mappings[7] - 1) << 12), DI_DISP_ACC_CC);
544 break;
545 case IPU_PIX_FMT_BGR666:
546 mx3fb_write_reg(mx3fb, di_mappings[8], DI_DISP3_B0_MAP);
547 mx3fb_write_reg(mx3fb, di_mappings[9], DI_DISP3_B1_MAP);
548 mx3fb_write_reg(mx3fb, di_mappings[10], DI_DISP3_B2_MAP);
549 mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
550 ((di_mappings[11] - 1) << 12), DI_DISP_ACC_CC);
551 break;
552 default:
553 mx3fb_write_reg(mx3fb, di_mappings[12], DI_DISP3_B0_MAP);
554 mx3fb_write_reg(mx3fb, di_mappings[13], DI_DISP3_B1_MAP);
555 mx3fb_write_reg(mx3fb, di_mappings[14], DI_DISP3_B2_MAP);
556 mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
557 ((di_mappings[15] - 1) << 12), DI_DISP_ACC_CC);
558 break;
559 }
560
561 spin_unlock_irqrestore(&mx3fb->lock, lock_flags);
562
563 dev_dbg(mx3fb->dev, "DI_DISP_IF_CONF = 0x%08X\n",
564 mx3fb_read_reg(mx3fb, DI_DISP_IF_CONF));
565 dev_dbg(mx3fb->dev, "DI_DISP_SIG_POL = 0x%08X\n",
566 mx3fb_read_reg(mx3fb, DI_DISP_SIG_POL));
567 dev_dbg(mx3fb->dev, "DI_DISP3_TIME_CONF = 0x%08X\n",
568 mx3fb_read_reg(mx3fb, DI_DISP3_TIME_CONF));
569
570 return 0;
571}
572
573/**
574 * sdc_set_color_key() - set the transparent color key for SDC graphic plane.
575 * @mx3fb: mx3fb context.
576 * @channel: IPU DMAC channel ID.
 577 * @enable: boolean to enable or disable color key.
578 * @color_key: 24-bit RGB color to use as transparent color key.
579 * @return: 0 on success or negative error code on failure.
580 */
581static int sdc_set_color_key(struct mx3fb_data *mx3fb, enum ipu_channel channel,
582 bool enable, uint32_t color_key)
583{
584 uint32_t reg, sdc_conf;
585 unsigned long lock_flags;
586
587 spin_lock_irqsave(&mx3fb->lock, lock_flags);
588
589 sdc_conf = mx3fb_read_reg(mx3fb, SDC_COM_CONF);
590 if (channel == IDMAC_SDC_0)
591 sdc_conf &= ~SDC_COM_GWSEL;
592 else
593 sdc_conf |= SDC_COM_GWSEL;
594
595 if (enable) {
596 reg = mx3fb_read_reg(mx3fb, SDC_GW_CTRL) & 0xFF000000L;
597 mx3fb_write_reg(mx3fb, reg | (color_key & 0x00FFFFFFL),
598 SDC_GW_CTRL);
599
600 sdc_conf |= SDC_COM_KEY_COLOR_G;
601 } else {
602 sdc_conf &= ~SDC_COM_KEY_COLOR_G;
603 }
604 mx3fb_write_reg(mx3fb, sdc_conf, SDC_COM_CONF);
605
606 spin_unlock_irqrestore(&mx3fb->lock, lock_flags);
607
608 return 0;
609}
610
611/**
612 * sdc_set_global_alpha() - set global alpha blending modes.
613 * @mx3fb: mx3fb context.
614 * @enable: boolean to enable or disable global alpha blending. If disabled,
615 * per pixel blending is used.
616 * @alpha: global alpha value.
617 * @return: 0 on success or negative error code on failure.
618 */
619static int sdc_set_global_alpha(struct mx3fb_data *mx3fb, bool enable, uint8_t alpha)
620{
621 uint32_t reg;
622 unsigned long lock_flags;
623
624 spin_lock_irqsave(&mx3fb->lock, lock_flags);
625
626 if (enable) {
627 reg = mx3fb_read_reg(mx3fb, SDC_GW_CTRL) & 0x00FFFFFFL;
628 mx3fb_write_reg(mx3fb, reg | ((uint32_t) alpha << 24), SDC_GW_CTRL);
629
630 reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF);
631 mx3fb_write_reg(mx3fb, reg | SDC_COM_GLB_A, SDC_COM_CONF);
632 } else {
633 reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF);
634 mx3fb_write_reg(mx3fb, reg & ~SDC_COM_GLB_A, SDC_COM_CONF);
635 }
636
637 spin_unlock_irqrestore(&mx3fb->lock, lock_flags);
638
639 return 0;
640}
641
642static void sdc_set_brightness(struct mx3fb_data *mx3fb, uint8_t value)
643{
644 /* This might be board-specific */
645 mx3fb_write_reg(mx3fb, 0x03000000UL | value << 16, SDC_PWM_CTRL);
646 return;
647}
648
649static uint32_t bpp_to_pixfmt(int bpp)
650{
651 uint32_t pixfmt = 0;
652 switch (bpp) {
653 case 24:
654 pixfmt = IPU_PIX_FMT_BGR24;
655 break;
656 case 32:
657 pixfmt = IPU_PIX_FMT_BGR32;
658 break;
659 case 16:
660 pixfmt = IPU_PIX_FMT_RGB565;
661 break;
662 }
663 return pixfmt;
664}
665
666static int mx3fb_blank(int blank, struct fb_info *fbi);
667static int mx3fb_map_video_memory(struct fb_info *fbi);
668static int mx3fb_unmap_video_memory(struct fb_info *fbi);
669
670/**
671 * mx3fb_set_fix() - set fixed framebuffer parameters from variable settings.
672 * @info: framebuffer information pointer
673 * @return: 0 on success or negative error code on failure.
674 */
675static int mx3fb_set_fix(struct fb_info *fbi)
676{
677 struct fb_fix_screeninfo *fix = &fbi->fix;
678 struct fb_var_screeninfo *var = &fbi->var;
679
680 strncpy(fix->id, "DISP3 BG", 8);
681
682 fix->line_length = var->xres_virtual * var->bits_per_pixel / 8;
683
684 fix->type = FB_TYPE_PACKED_PIXELS;
685 fix->accel = FB_ACCEL_NONE;
686 fix->visual = FB_VISUAL_TRUECOLOR;
687 fix->xpanstep = 1;
688 fix->ypanstep = 1;
689
690 return 0;
691}
692
693static void mx3fb_dma_done(void *arg)
694{
695 struct idmac_tx_desc *tx_desc = to_tx_desc(arg);
696 struct dma_chan *chan = tx_desc->txd.chan;
697 struct idmac_channel *ichannel = to_idmac_chan(chan);
698 struct mx3fb_data *mx3fb = ichannel->client;
699 struct mx3fb_info *mx3_fbi = mx3fb->fbi->par;
700
701 dev_dbg(mx3fb->dev, "irq %d callback\n", ichannel->eof_irq);
702
703 /* We only need one interrupt, it will be re-enabled as needed */
704 disable_irq(ichannel->eof_irq);
705
706 complete(&mx3_fbi->flip_cmpl);
707}
708
709/**
710 * mx3fb_set_par() - set framebuffer parameters and change the operating mode.
711 * @fbi: framebuffer information pointer.
712 * @return: 0 on success or negative error code on failure.
713 */
714static int mx3fb_set_par(struct fb_info *fbi)
715{
716 u32 mem_len;
717 struct ipu_di_signal_cfg sig_cfg;
718 enum ipu_panel mode = IPU_PANEL_TFT;
719 struct mx3fb_info *mx3_fbi = fbi->par;
720 struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
721 struct idmac_channel *ichan = mx3_fbi->idmac_channel;
722 struct idmac_video_param *video = &ichan->params.video;
723 struct scatterlist *sg = mx3_fbi->sg;
724 size_t screen_size;
725
726 dev_dbg(mx3fb->dev, "%s [%c]\n", __func__, list_empty(&ichan->queue) ? '-' : '+');
727
728 mutex_lock(&mx3_fbi->mutex);
729
730 /* Total cleanup */
731 if (mx3_fbi->txd)
732 sdc_disable_channel(mx3_fbi);
733
734 mx3fb_set_fix(fbi);
735
736 mem_len = fbi->var.yres_virtual * fbi->fix.line_length;
737 if (mem_len > fbi->fix.smem_len) {
738 if (fbi->fix.smem_start)
739 mx3fb_unmap_video_memory(fbi);
740
741 fbi->fix.smem_len = mem_len;
742 if (mx3fb_map_video_memory(fbi) < 0) {
743 mutex_unlock(&mx3_fbi->mutex);
744 return -ENOMEM;
745 }
746 }
747
748 screen_size = fbi->fix.line_length * fbi->var.yres;
749
750 sg_init_table(&sg[0], 1);
751 sg_init_table(&sg[1], 1);
752
753 sg_dma_address(&sg[0]) = fbi->fix.smem_start;
754 sg_set_page(&sg[0], virt_to_page(fbi->screen_base),
755 fbi->fix.smem_len,
756 offset_in_page(fbi->screen_base));
757
758 if (mx3_fbi->ipu_ch == IDMAC_SDC_0) {
759 memset(&sig_cfg, 0, sizeof(sig_cfg));
760 if (fbi->var.sync & FB_SYNC_HOR_HIGH_ACT)
761 sig_cfg.Hsync_pol = true;
762 if (fbi->var.sync & FB_SYNC_VERT_HIGH_ACT)
763 sig_cfg.Vsync_pol = true;
764 if (fbi->var.sync & FB_SYNC_CLK_INVERT)
765 sig_cfg.clk_pol = true;
766 if (fbi->var.sync & FB_SYNC_DATA_INVERT)
767 sig_cfg.data_pol = true;
768 if (fbi->var.sync & FB_SYNC_OE_ACT_HIGH)
769 sig_cfg.enable_pol = true;
770 if (fbi->var.sync & FB_SYNC_CLK_IDLE_EN)
771 sig_cfg.clkidle_en = true;
772 if (fbi->var.sync & FB_SYNC_CLK_SEL_EN)
773 sig_cfg.clksel_en = true;
774 if (fbi->var.sync & FB_SYNC_SHARP_MODE)
775 mode = IPU_PANEL_SHARP_TFT;
776
777 dev_dbg(fbi->device, "pixclock = %ul Hz\n",
778 (u32) (PICOS2KHZ(fbi->var.pixclock) * 1000UL));
779
780 if (sdc_init_panel(mx3fb, mode,
781 (PICOS2KHZ(fbi->var.pixclock)) * 1000UL,
782 fbi->var.xres, fbi->var.yres,
783 (fbi->var.sync & FB_SYNC_SWAP_RGB) ?
784 IPU_PIX_FMT_BGR666 : IPU_PIX_FMT_RGB666,
785 fbi->var.left_margin,
786 fbi->var.hsync_len,
787 fbi->var.right_margin +
788 fbi->var.hsync_len,
789 fbi->var.upper_margin,
790 fbi->var.vsync_len,
791 fbi->var.lower_margin +
792 fbi->var.vsync_len, sig_cfg) != 0) {
793 mutex_unlock(&mx3_fbi->mutex);
794 dev_err(fbi->device,
795 "mx3fb: Error initializing panel.\n");
796 return -EINVAL;
797 }
798 }
799
800 sdc_set_window_pos(mx3fb, mx3_fbi->ipu_ch, 0, 0);
801
802 mx3_fbi->cur_ipu_buf = 0;
803
804 video->out_pixel_fmt = bpp_to_pixfmt(fbi->var.bits_per_pixel);
805 video->out_width = fbi->var.xres;
806 video->out_height = fbi->var.yres;
807 video->out_stride = fbi->var.xres_virtual;
808
809 if (mx3_fbi->blank == FB_BLANK_UNBLANK)
810 sdc_enable_channel(mx3_fbi);
811
812 mutex_unlock(&mx3_fbi->mutex);
813
814 return 0;
815}
816
817/**
818 * mx3fb_check_var() - check and adjust framebuffer variable parameters.
819 * @var: framebuffer variable parameters
820 * @fbi: framebuffer information pointer
821 */
822static int mx3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi)
823{
824 struct mx3fb_info *mx3_fbi = fbi->par;
825 u32 vtotal;
826 u32 htotal;
827
828 dev_dbg(fbi->device, "%s\n", __func__);
829
830 if (var->xres_virtual < var->xres)
831 var->xres_virtual = var->xres;
832 if (var->yres_virtual < var->yres)
833 var->yres_virtual = var->yres;
834
835 if ((var->bits_per_pixel != 32) && (var->bits_per_pixel != 24) &&
836 (var->bits_per_pixel != 16))
837 var->bits_per_pixel = default_bpp;
838
839 switch (var->bits_per_pixel) {
840 case 16:
841 var->red.length = 5;
842 var->red.offset = 11;
843 var->red.msb_right = 0;
844
845 var->green.length = 6;
846 var->green.offset = 5;
847 var->green.msb_right = 0;
848
849 var->blue.length = 5;
850 var->blue.offset = 0;
851 var->blue.msb_right = 0;
852
853 var->transp.length = 0;
854 var->transp.offset = 0;
855 var->transp.msb_right = 0;
856 break;
857 case 24:
858 var->red.length = 8;
859 var->red.offset = 16;
860 var->red.msb_right = 0;
861
862 var->green.length = 8;
863 var->green.offset = 8;
864 var->green.msb_right = 0;
865
866 var->blue.length = 8;
867 var->blue.offset = 0;
868 var->blue.msb_right = 0;
869
870 var->transp.length = 0;
871 var->transp.offset = 0;
872 var->transp.msb_right = 0;
873 break;
874 case 32:
875 var->red.length = 8;
876 var->red.offset = 16;
877 var->red.msb_right = 0;
878
879 var->green.length = 8;
880 var->green.offset = 8;
881 var->green.msb_right = 0;
882
883 var->blue.length = 8;
884 var->blue.offset = 0;
885 var->blue.msb_right = 0;
886
887 var->transp.length = 8;
888 var->transp.offset = 24;
889 var->transp.msb_right = 0;
890 break;
891 }
892
893 if (var->pixclock < 1000) {
894 htotal = var->xres + var->right_margin + var->hsync_len +
895 var->left_margin;
896 vtotal = var->yres + var->lower_margin + var->vsync_len +
897 var->upper_margin;
898 var->pixclock = (vtotal * htotal * 6UL) / 100UL;
899 var->pixclock = KHZ2PICOS(var->pixclock);
900 dev_dbg(fbi->device, "pixclock set for 60Hz refresh = %u ps\n",
901 var->pixclock);
902 }
903
904 var->height = -1;
905 var->width = -1;
906 var->grayscale = 0;
907
908 /* Preserve sync flags */
909 var->sync |= mx3_fbi->sync;
910 mx3_fbi->sync |= var->sync;
911
912 return 0;
913}
914
915static u32 chan_to_field(unsigned int chan, struct fb_bitfield *bf)
916{
917 chan &= 0xffff;
918 chan >>= 16 - bf->length;
919 return chan << bf->offset;
920}
921
922static int mx3fb_setcolreg(unsigned int regno, unsigned int red,
923 unsigned int green, unsigned int blue,
924 unsigned int trans, struct fb_info *fbi)
925{
926 struct mx3fb_info *mx3_fbi = fbi->par;
927 u32 val;
928 int ret = 1;
929
930 dev_dbg(fbi->device, "%s\n", __func__);
931
932 mutex_lock(&mx3_fbi->mutex);
933 /*
934 * If greyscale is true, then we convert the RGB value
935 * to greyscale no matter what visual we are using.
936 */
937 if (fbi->var.grayscale)
938 red = green = blue = (19595 * red + 38470 * green +
939 7471 * blue) >> 16;
940 switch (fbi->fix.visual) {
941 case FB_VISUAL_TRUECOLOR:
942 /*
943 * 16-bit True Colour. We encode the RGB value
944 * according to the RGB bitfield information.
945 */
946 if (regno < 16) {
947 u32 *pal = fbi->pseudo_palette;
948
949 val = chan_to_field(red, &fbi->var.red);
950 val |= chan_to_field(green, &fbi->var.green);
951 val |= chan_to_field(blue, &fbi->var.blue);
952
953 pal[regno] = val;
954
955 ret = 0;
956 }
957 break;
958
959 case FB_VISUAL_STATIC_PSEUDOCOLOR:
960 case FB_VISUAL_PSEUDOCOLOR:
961 break;
962 }
963 mutex_unlock(&mx3_fbi->mutex);
964
965 return ret;
966}
967
968/**
969 * mx3fb_blank() - blank the display.
970 */
971static int mx3fb_blank(int blank, struct fb_info *fbi)
972{
973 struct mx3fb_info *mx3_fbi = fbi->par;
974 struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
975
976 dev_dbg(fbi->device, "%s\n", __func__);
977
978 dev_dbg(fbi->device, "blank = %d\n", blank);
979
980 if (mx3_fbi->blank == blank)
981 return 0;
982
983 mutex_lock(&mx3_fbi->mutex);
984 mx3_fbi->blank = blank;
985
986 switch (blank) {
987 case FB_BLANK_POWERDOWN:
988 case FB_BLANK_VSYNC_SUSPEND:
989 case FB_BLANK_HSYNC_SUSPEND:
990 case FB_BLANK_NORMAL:
991 sdc_disable_channel(mx3_fbi);
992 sdc_set_brightness(mx3fb, 0);
993 break;
994 case FB_BLANK_UNBLANK:
995 sdc_enable_channel(mx3_fbi);
996 sdc_set_brightness(mx3fb, mx3fb->backlight_level);
997 break;
998 }
999 mutex_unlock(&mx3_fbi->mutex);
1000
1001 return 0;
1002}
1003
1004/**
1005 * mx3fb_pan_display() - pan or wrap the display
1006 * @var: variable screen buffer information.
1007 * @info: framebuffer information pointer.
1008 *
1009 * We look only at xoffset, yoffset and the FB_VMODE_YWRAP flag
1010 */
1011static int mx3fb_pan_display(struct fb_var_screeninfo *var,
1012 struct fb_info *fbi)
1013{
1014 struct mx3fb_info *mx3_fbi = fbi->par;
1015 u32 y_bottom;
1016 unsigned long base;
1017 off_t offset;
1018 dma_cookie_t cookie;
1019 struct scatterlist *sg = mx3_fbi->sg;
1020 struct dma_chan *dma_chan = &mx3_fbi->idmac_channel->dma_chan;
1021 struct dma_async_tx_descriptor *txd;
1022 int ret;
1023
1024 dev_dbg(fbi->device, "%s [%c]\n", __func__,
1025 list_empty(&mx3_fbi->idmac_channel->queue) ? '-' : '+');
1026
1027 if (var->xoffset > 0) {
1028 dev_dbg(fbi->device, "x panning not supported\n");
1029 return -EINVAL;
1030 }
1031
1032 if (fbi->var.xoffset == var->xoffset &&
1033 fbi->var.yoffset == var->yoffset)
1034 return 0; /* No change, do nothing */
1035
1036 y_bottom = var->yoffset;
1037
1038 if (!(var->vmode & FB_VMODE_YWRAP))
1039 y_bottom += var->yres;
1040
1041 if (y_bottom > fbi->var.yres_virtual)
1042 return -EINVAL;
1043
1044 mutex_lock(&mx3_fbi->mutex);
1045
1046 offset = (var->yoffset * var->xres_virtual + var->xoffset) *
1047 (var->bits_per_pixel / 8);
1048 base = fbi->fix.smem_start + offset;
1049
1050 dev_dbg(fbi->device, "Updating SDC BG buf %d address=0x%08lX\n",
1051 mx3_fbi->cur_ipu_buf, base);
1052
1053 /*
1054 * We enable the End of Frame interrupt, which will free a tx-descriptor,
1055 * which we will need for the next device_prep_slave_sg(). The
1056 * IRQ-handler will disable the IRQ again.
1057 */
1058 init_completion(&mx3_fbi->flip_cmpl);
1059 enable_irq(mx3_fbi->idmac_channel->eof_irq);
1060
1061 ret = wait_for_completion_timeout(&mx3_fbi->flip_cmpl, HZ / 10);
1062 if (ret <= 0) {
1063 mutex_unlock(&mx3_fbi->mutex);
1064 dev_info(fbi->device, "Panning failed due to %s\n", ret < 0 ?
1065 "user interrupt" : "timeout");
1066 return ret ? : -ETIMEDOUT;
1067 }
1068
1069 mx3_fbi->cur_ipu_buf = !mx3_fbi->cur_ipu_buf;
1070
1071 sg_dma_address(&sg[mx3_fbi->cur_ipu_buf]) = base;
1072 sg_set_page(&sg[mx3_fbi->cur_ipu_buf],
1073 virt_to_page(fbi->screen_base + offset), fbi->fix.smem_len,
1074 offset_in_page(fbi->screen_base + offset));
1075
1076 txd = dma_chan->device->device_prep_slave_sg(dma_chan, sg +
1077 mx3_fbi->cur_ipu_buf, 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
1078 if (!txd) {
1079 dev_err(fbi->device,
1080 "Error preparing a DMA transaction descriptor.\n");
1081 mutex_unlock(&mx3_fbi->mutex);
1082 return -EIO;
1083 }
1084
1085 txd->callback_param = txd;
1086 txd->callback = mx3fb_dma_done;
1087
1088 /*
1089 * Emulate original mx3fb behaviour: each new call to idmac_tx_submit()
1090 * should switch to another buffer
1091 */
1092 cookie = txd->tx_submit(txd);
1093 dev_dbg(fbi->device, "%d: Submit %p #%d\n", __LINE__, txd, cookie);
1094 if (cookie < 0) {
1095 dev_err(fbi->device,
1096 "Error updating SDC buf %d to address=0x%08lX\n",
1097 mx3_fbi->cur_ipu_buf, base);
1098 mutex_unlock(&mx3_fbi->mutex);
1099 return -EIO;
1100 }
1101
1102 if (mx3_fbi->txd)
1103 async_tx_ack(mx3_fbi->txd);
1104 mx3_fbi->txd = txd;
1105
1106 fbi->var.xoffset = var->xoffset;
1107 fbi->var.yoffset = var->yoffset;
1108
1109 if (var->vmode & FB_VMODE_YWRAP)
1110 fbi->var.vmode |= FB_VMODE_YWRAP;
1111 else
1112 fbi->var.vmode &= ~FB_VMODE_YWRAP;
1113
1114 mutex_unlock(&mx3_fbi->mutex);
1115
1116 dev_dbg(fbi->device, "Update complete\n");
1117
1118 return 0;
1119}
1120
1121/*
1122 * This structure contains the pointers to the control functions that are
1123 * invoked by the core framebuffer driver to perform operations like
1124 * blitting, rectangle filling, copy regions and cursor definition.
1125 */
1126static struct fb_ops mx3fb_ops = {
1127 .owner = THIS_MODULE,
1128 .fb_set_par = mx3fb_set_par,
1129 .fb_check_var = mx3fb_check_var,
1130 .fb_setcolreg = mx3fb_setcolreg,
1131 .fb_pan_display = mx3fb_pan_display,
1132 .fb_fillrect = cfb_fillrect,
1133 .fb_copyarea = cfb_copyarea,
1134 .fb_imageblit = cfb_imageblit,
1135 .fb_blank = mx3fb_blank,
1136};
1137
1138#ifdef CONFIG_PM
1139/*
1140 * Power management hooks. Note that we won't be called from IRQ context,
1141 * unlike the blank functions above, so we may sleep.
1142 */
1143
1144/*
1145 * Suspends the framebuffer and blanks the screen. Power management support
1146 */
1147static int mx3fb_suspend(struct platform_device *pdev, pm_message_t state)
1148{
1149 struct mx3fb_data *drv_data = platform_get_drvdata(pdev);
1150 struct mx3fb_info *mx3_fbi = drv_data->fbi->par;
1151
1152 acquire_console_sem();
1153 fb_set_suspend(drv_data->fbi, 1);
1154 release_console_sem();
1155
1156 if (mx3_fbi->blank == FB_BLANK_UNBLANK) {
1157 sdc_disable_channel(mx3_fbi);
 1158		sdc_set_brightness(drv_data, 0);
1159
1160 }
1161 return 0;
1162}
1163
1164/*
 1165 * Power management support: resume the framebuffer and unblank the screen.
1166 */
1167static int mx3fb_resume(struct platform_device *pdev)
1168{
1169 struct mx3fb_data *drv_data = platform_get_drvdata(pdev);
1170 struct mx3fb_info *mx3_fbi = drv_data->fbi->par;
1171
1172 if (mx3_fbi->blank == FB_BLANK_UNBLANK) {
1173 sdc_enable_channel(mx3_fbi);
 1174		sdc_set_brightness(drv_data, drv_data->backlight_level);
1175 }
1176
1177 acquire_console_sem();
1178 fb_set_suspend(drv_data->fbi, 0);
1179 release_console_sem();
1180
1181 return 0;
1182}
1183#else
1184#define mx3fb_suspend NULL
1185#define mx3fb_resume NULL
1186#endif
1187
1188/*
1189 * Main framebuffer functions
1190 */
1191
1192/**
1193 * mx3fb_map_video_memory() - allocates the DRAM memory for the frame buffer.
1194 * @fbi: framebuffer information pointer
1195 * @return: Error code indicating success or failure
1196 *
 1197 * This buffer is remapped into a non-cached, non-buffered memory region to
1198 * allow palette and pixel writes to occur without flushing the cache. Once this
1199 * area is remapped, all virtual memory access to the video memory should occur
1200 * at the new region.
1201 */
1202static int mx3fb_map_video_memory(struct fb_info *fbi)
1203{
1204 int retval = 0;
1205 dma_addr_t addr;
1206
1207 fbi->screen_base = dma_alloc_writecombine(fbi->device,
1208 fbi->fix.smem_len,
1209 &addr, GFP_DMA);
1210
1211 if (!fbi->screen_base) {
1212 dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n",
1213 fbi->fix.smem_len);
1214 retval = -EBUSY;
1215 goto err0;
1216 }
1217
1218 fbi->fix.smem_start = addr;
1219
1220 dev_dbg(fbi->device, "allocated fb @ p=0x%08x, v=0x%p, size=%d.\n",
1221 (uint32_t) fbi->fix.smem_start, fbi->screen_base, fbi->fix.smem_len);
1222
1223 fbi->screen_size = fbi->fix.smem_len;
1224
1225 /* Clear the screen */
1226 memset((char *)fbi->screen_base, 0, fbi->fix.smem_len);
1227
1228 return 0;
1229
1230err0:
1231 fbi->fix.smem_len = 0;
1232 fbi->fix.smem_start = 0;
1233 fbi->screen_base = NULL;
1234 return retval;
1235}
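
The function above allocates whatever size the caller has already placed in fix.smem_len. As an illustrative sketch only (the driver computes the real value elsewhere), the usual arithmetic for a double-buffered mode is:

/* Illustrative sketch, not part of the driver: a buffer covering the whole
 * virtual resolution, e.g. two pages when yres_virtual is twice yres.
 */
static u32 example_fb_size(const struct fb_var_screeninfo *var)
{
	u32 line_length = var->xres_virtual * var->bits_per_pixel / 8;

	return line_length * var->yres_virtual;
}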
1236
1237/**
1238 * mx3fb_unmap_video_memory() - de-allocate frame buffer memory.
1239 * @fbi: framebuffer information pointer
1240 * @return: error code indicating success or failure
1241 */
1242static int mx3fb_unmap_video_memory(struct fb_info *fbi)
1243{
1244 dma_free_writecombine(fbi->device, fbi->fix.smem_len,
1245 fbi->screen_base, fbi->fix.smem_start);
1246
1247 fbi->screen_base = 0;
1248 fbi->fix.smem_start = 0;
1249 fbi->fix.smem_len = 0;
1250 return 0;
1251}
1252
1253/**
1254 * mx3fb_init_fbinfo() - initialize framebuffer information object.
1255 * @return: initialized framebuffer structure.
1256 */
1257static struct fb_info *mx3fb_init_fbinfo(struct device *dev, struct fb_ops *ops)
1258{
1259 struct fb_info *fbi;
1260 struct mx3fb_info *mx3fbi;
1261 int ret;
1262
1263 /* Allocate sufficient memory for the fb structure */
1264 fbi = framebuffer_alloc(sizeof(struct mx3fb_info), dev);
1265 if (!fbi)
1266 return NULL;
1267
1268 mx3fbi = fbi->par;
1269 mx3fbi->cookie = -EINVAL;
1270 mx3fbi->cur_ipu_buf = 0;
1271
1272 fbi->var.activate = FB_ACTIVATE_NOW;
1273
1274 fbi->fbops = ops;
1275 fbi->flags = FBINFO_FLAG_DEFAULT;
1276 fbi->pseudo_palette = mx3fbi->pseudo_palette;
1277
1278 mutex_init(&mx3fbi->mutex);
1279
1280 /* Allocate colormap */
1281 ret = fb_alloc_cmap(&fbi->cmap, 16, 0);
1282 if (ret < 0) {
1283 framebuffer_release(fbi);
1284 return NULL;
1285 }
1286
1287 return fbi;
1288}
1289
1290static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
1291{
1292 struct device *dev = mx3fb->dev;
1293 struct mx3fb_platform_data *mx3fb_pdata = dev->platform_data;
1294 const char *name = mx3fb_pdata->name;
1295 unsigned int irq;
1296 struct fb_info *fbi;
1297 struct mx3fb_info *mx3fbi;
1298 const struct fb_videomode *mode;
1299 int ret, num_modes;
1300
1301 ichan->client = mx3fb;
1302 irq = ichan->eof_irq;
1303
1304 if (ichan->dma_chan.chan_id != IDMAC_SDC_0)
1305 return -EINVAL;
1306
1307 fbi = mx3fb_init_fbinfo(dev, &mx3fb_ops);
1308 if (!fbi)
1309 return -ENOMEM;
1310
1311 if (!fb_mode)
1312 fb_mode = name;
1313
1314 if (!fb_mode) {
1315 ret = -EINVAL;
1316 goto emode;
1317 }
1318
1319 if (mx3fb_pdata->mode && mx3fb_pdata->num_modes) {
1320 mode = mx3fb_pdata->mode;
1321 num_modes = mx3fb_pdata->num_modes;
1322 } else {
1323 mode = mx3fb_modedb;
1324 num_modes = ARRAY_SIZE(mx3fb_modedb);
1325 }
1326
1327 if (!fb_find_mode(&fbi->var, fbi, fb_mode, mode,
1328 num_modes, NULL, default_bpp)) {
1329 ret = -EBUSY;
1330 goto emode;
1331 }
1332
1333 fb_videomode_to_modelist(mode, num_modes, &fbi->modelist);
1334
1335 /* Default Y virtual size is 2x panel size */
1336 fbi->var.yres_virtual = fbi->var.yres * 2;
1337
1338 mx3fb->fbi = fbi;
1339
1340 /* set Display Interface clock period */
1341 mx3fb_write_reg(mx3fb, 0x00100010L, DI_HSP_CLK_PER);
1342 /* Might need to trigger HSP clock change - see 44.3.3.8.5 */
1343
1344 sdc_set_brightness(mx3fb, 255);
1345 sdc_set_global_alpha(mx3fb, true, 0xFF);
1346 sdc_set_color_key(mx3fb, IDMAC_SDC_0, false, 0);
1347
1348 mx3fbi = fbi->par;
1349 mx3fbi->idmac_channel = ichan;
1350 mx3fbi->ipu_ch = ichan->dma_chan.chan_id;
1351 mx3fbi->mx3fb = mx3fb;
1352 mx3fbi->blank = FB_BLANK_NORMAL;
1353
1354 init_completion(&mx3fbi->flip_cmpl);
1355 disable_irq(ichan->eof_irq);
1356 dev_dbg(mx3fb->dev, "disabling irq %d\n", ichan->eof_irq);
1357 ret = mx3fb_set_par(fbi);
1358 if (ret < 0)
1359 goto esetpar;
1360
1361 mx3fb_blank(FB_BLANK_UNBLANK, fbi);
1362
1363 dev_info(dev, "mx3fb: fb registered, using mode %s\n", fb_mode);
1364
1365 ret = register_framebuffer(fbi);
1366 if (ret < 0)
1367 goto erfb;
1368
1369 return 0;
1370
1371erfb:
1372esetpar:
1373emode:
1374 fb_dealloc_cmap(&fbi->cmap);
1375 framebuffer_release(fbi);
1376
1377 return ret;
1378}
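
init_fb_chan() takes its default mode name and mode list from the platform data. A hedged sketch of what board code might provide follows; the mode timings are illustrative values, not taken from this driver.

/* Illustrative board-side platform data for this probe path. */
static const struct fb_videomode board_fb_modes[] = {
	{
		.name         = "SVGA-60",	/* illustrative */
		.refresh      = 60,
		.xres         = 800,
		.yres         = 600,
		.pixclock     = 25000,		/* picoseconds, illustrative */
		.left_margin  = 88,
		.right_margin = 40,
		.upper_margin = 23,
		.lower_margin = 1,
		.hsync_len    = 128,
		.vsync_len    = 4,
	},
};

static struct mx3fb_platform_data board_mx3fb_pdata = {
	.name      = "SVGA-60",		/* used as the default fb_mode */
	.mode      = board_fb_modes,
	.num_modes = ARRAY_SIZE(board_fb_modes),
	/* .dma_dev is filled in by platform code with the IPU DMA device */
};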
1379
1380static bool chan_filter(struct dma_chan *chan, void *arg)
1381{
1382 struct dma_chan_request *rq = arg;
1383 struct device *dev;
1384 struct mx3fb_platform_data *mx3fb_pdata;
1385
1386 if (!rq)
1387 return false;
1388
1389 dev = rq->mx3fb->dev;
1390 mx3fb_pdata = dev->platform_data;
1391
1392 return rq->id == chan->chan_id &&
1393 mx3fb_pdata->dma_dev == chan->device->dev;
1394}
1395
1396static void release_fbi(struct fb_info *fbi)
1397{
1398 mx3fb_unmap_video_memory(fbi);
1399
1400 fb_dealloc_cmap(&fbi->cmap);
1401
1402 unregister_framebuffer(fbi);
1403 framebuffer_release(fbi);
1404}
1405
1406static int mx3fb_probe(struct platform_device *pdev)
1407{
1408 struct device *dev = &pdev->dev;
1409 int ret;
1410 struct resource *sdc_reg;
1411 struct mx3fb_data *mx3fb;
1412 dma_cap_mask_t mask;
1413 struct dma_chan *chan;
1414 struct dma_chan_request rq;
1415
1416 /*
1417 * Display Interface (DI) and Synchronous Display Controller (SDC)
1418 * registers
1419 */
1420 sdc_reg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1421 if (!sdc_reg)
1422 return -EINVAL;
1423
1424 mx3fb = kzalloc(sizeof(*mx3fb), GFP_KERNEL);
1425 if (!mx3fb)
1426 return -ENOMEM;
1427
1428 spin_lock_init(&mx3fb->lock);
1429
1430 mx3fb->reg_base = ioremap(sdc_reg->start, resource_size(sdc_reg));
1431 if (!mx3fb->reg_base) {
1432 ret = -ENOMEM;
1433 goto eremap;
1434 }
1435
1436 pr_debug("Remapped %x to %x at %p\n", sdc_reg->start, sdc_reg->end,
1437 mx3fb->reg_base);
1438
1439 /* IDMAC interface */
1440 dmaengine_get();
1441
1442 mx3fb->dev = dev;
1443 platform_set_drvdata(pdev, mx3fb);
1444
1445 rq.mx3fb = mx3fb;
1446
1447 dma_cap_zero(mask);
1448 dma_cap_set(DMA_SLAVE, mask);
1449 dma_cap_set(DMA_PRIVATE, mask);
1450 rq.id = IDMAC_SDC_0;
1451 chan = dma_request_channel(mask, chan_filter, &rq);
1452 if (!chan) {
1453 ret = -EBUSY;
1454 goto ersdc0;
1455 }
1456
1457 ret = init_fb_chan(mx3fb, to_idmac_chan(chan));
1458 if (ret < 0)
1459 goto eisdc0;
1460
1461 mx3fb->backlight_level = 255;
1462
1463 return 0;
1464
1465eisdc0:
1466 dma_release_channel(chan);
1467ersdc0:
1468 dmaengine_put();
1469 iounmap(mx3fb->reg_base);
1470eremap:
1471 kfree(mx3fb);
1472 dev_err(dev, "mx3fb: failed to register fb\n");
1473 return ret;
1474}
1475
1476static int mx3fb_remove(struct platform_device *dev)
1477{
1478 struct mx3fb_data *mx3fb = platform_get_drvdata(dev);
1479 struct fb_info *fbi = mx3fb->fbi;
1480 struct mx3fb_info *mx3_fbi = fbi->par;
1481 struct dma_chan *chan;
1482
1483 chan = &mx3_fbi->idmac_channel->dma_chan;
1484 release_fbi(fbi);
1485
1486 dma_release_channel(chan);
1487 dmaengine_put();
1488
1489 iounmap(mx3fb->reg_base);
1490 kfree(mx3fb);
1491 return 0;
1492}
1493
1494static struct platform_driver mx3fb_driver = {
1495 .driver = {
1496 .name = MX3FB_NAME,
1497 },
1498 .probe = mx3fb_probe,
1499 .remove = mx3fb_remove,
1500 .suspend = mx3fb_suspend,
1501 .resume = mx3fb_resume,
1502};
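
The driver binds by platform device name. A hedged sketch of the matching board-side registration follows; the register window is an illustrative address, and platform_data would point at a struct mx3fb_platform_data such as the one sketched after init_fb_chan().

/* Illustrative board code only; the base address is an assumption. */
static struct resource board_mx3fb_resources[] = {
	{
		.start = 0x53fc0000,		/* SDC/DI register base, illustrative */
		.end   = 0x53fc0000 + 0x1000 - 1,
		.flags = IORESOURCE_MEM,
	},
};

static struct platform_device board_mx3fb_device = {
	.name          = MX3FB_NAME,	/* must match mx3fb_driver.driver.name */
	.id            = -1,
	.num_resources = ARRAY_SIZE(board_mx3fb_resources),
	.resource      = board_mx3fb_resources,
	.dev           = {
		.platform_data = &board_mx3fb_pdata,	/* see sketch above */
	},
};

/* Registered from board init code: platform_device_register(&board_mx3fb_device); */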
1503
1504/*
1505 * Parse user specified options (`video=mx3fb:')
1506 * example:
1507 * video=mx3fb:bpp=16
1508 */
1509static int mx3fb_setup(void)
1510{
1511#ifndef MODULE
1512 char *opt, *options = NULL;
1513
1514 if (fb_get_options("mx3fb", &options))
1515 return -ENODEV;
1516
1517 if (!options || !*options)
1518 return 0;
1519
1520 while ((opt = strsep(&options, ",")) != NULL) {
1521 if (!*opt)
1522 continue;
1523 if (!strncmp(opt, "bpp=", 4))
1524 default_bpp = simple_strtoul(opt + 4, NULL, 0);
1525 else
1526 fb_mode = opt;
1527 }
1528#endif
1529
1530 return 0;
1531}
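
As a usage illustration of the parser above (the mode name is an assumption; any name resolvable by fb_find_mode() or the platform mode list works), a boot line of

	video=mx3fb:SVGA-60,bpp=16

selects the "SVGA-60" mode entry and 16 bits per pixel, while

	video=mx3fb:bpp=32

keeps the default mode and only overrides the depth.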
1532
1533static int __init mx3fb_init(void)
1534{
1535 int ret = mx3fb_setup();
1536
1537 if (ret < 0)
1538 return ret;
1539
1540 ret = platform_driver_register(&mx3fb_driver);
1541 return ret;
1542}
1543
1544static void __exit mx3fb_exit(void)
1545{
1546 platform_driver_unregister(&mx3fb_driver);
1547}
1548
1549module_init(mx3fb_init);
1550module_exit(mx3fb_exit);
1551
1552MODULE_AUTHOR("Freescale Semiconductor, Inc.");
1553MODULE_DESCRIPTION("MX3 framebuffer driver");
1554MODULE_ALIAS("platform:" MX3FB_NAME);
1555MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/nvidia/nv_i2c.c b/drivers/video/nvidia/nv_i2c.c
index 6fd7cb8f9b8e..6aaddb4f6788 100644
--- a/drivers/video/nvidia/nv_i2c.c
+++ b/drivers/video/nvidia/nv_i2c.c
@@ -87,7 +87,6 @@ static int nvidia_setup_i2c_bus(struct nvidia_i2c_chan *chan, const char *name,
 
 	strcpy(chan->adapter.name, name);
 	chan->adapter.owner = THIS_MODULE;
-	chan->adapter.id = I2C_HW_B_NVIDIA;
 	chan->adapter.class = i2c_class;
 	chan->adapter.algo_data = &chan->algo;
 	chan->adapter.dev.parent = &chan->par->pci_dev->dev;
diff --git a/drivers/video/omap/lcdc.c b/drivers/video/omap/lcdc.c
index 6e2ea7518761..ab3949256677 100644
--- a/drivers/video/omap/lcdc.c
+++ b/drivers/video/omap/lcdc.c
@@ -800,14 +800,14 @@ static int omap_lcdc_init(struct omapfb_device *fbdev, int ext_mode,
 	/* FIXME:
 	 * According to errata some platforms have a clock rate limitiation
 	 */
-	lcdc.lcd_ck = clk_get(NULL, "lcd_ck");
+	lcdc.lcd_ck = clk_get(fbdev->dev, "lcd_ck");
 	if (IS_ERR(lcdc.lcd_ck)) {
 		dev_err(fbdev->dev, "unable to access LCD clock\n");
 		r = PTR_ERR(lcdc.lcd_ck);
 		goto fail0;
 	}
 
-	tc_ck = clk_get(NULL, "tc_ck");
+	tc_ck = clk_get(fbdev->dev, "tc_ck");
 	if (IS_ERR(tc_ck)) {
 		dev_err(fbdev->dev, "unable to access TC clock\n");
 		r = PTR_ERR(tc_ck);
diff --git a/drivers/video/savage/savagefb-i2c.c b/drivers/video/savage/savagefb-i2c.c
index 783d4adffb93..574b29e9f8f2 100644
--- a/drivers/video/savage/savagefb-i2c.c
+++ b/drivers/video/savage/savagefb-i2c.c
@@ -137,7 +137,6 @@ static int savage_setup_i2c_bus(struct savagefb_i2c_chan *chan,
 	if (chan->par) {
 		strcpy(chan->adapter.name, name);
 		chan->adapter.owner = THIS_MODULE;
-		chan->adapter.id = I2C_HW_B_SAVAGE;
 		chan->adapter.algo_data = &chan->algo;
 		chan->adapter.dev.parent = &chan->par->pcidev->dev;
 		chan->algo.udelay = 10;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 3efa12f9ee50..09a3d5522b43 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -187,10 +187,10 @@ config EP93XX_WATCHDOG
 
 config OMAP_WATCHDOG
 	tristate "OMAP Watchdog"
-	depends on ARCH_OMAP16XX || ARCH_OMAP24XX
+	depends on ARCH_OMAP16XX || ARCH_OMAP24XX || ARCH_OMAP34XX
 	help
-	  Support for TI OMAP1610/OMAP1710/OMAP2420 watchdog. Say 'Y' here to
-	  enable the OMAP1610/OMAP1710 watchdog timer.
+	  Support for TI OMAP1610/OMAP1710/OMAP2420/OMAP3430 watchdog. Say 'Y'
+	  here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430 watchdog timer.
 
 config PNX4008_WATCHDOG
 	tristate "PNX4008 Watchdog"
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index 993e5f52afef..5531691f46ea 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -13,6 +13,7 @@
 #include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/init.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
 #include <linux/module.h>
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 8dc7109d61b7..2ba8f95516a0 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -298,6 +298,14 @@ static int decrease_reservation(unsigned long nr_pages)
 		frame_list[i] = pfn_to_mfn(pfn);
 
 		scrub_page(page);
+
+		if (!PageHighMem(page)) {
+			ret = HYPERVISOR_update_va_mapping(
+				(unsigned long)__va(pfn << PAGE_SHIFT),
+				__pte_ma(0), 0);
+			BUG_ON(ret);
+		}
+
 	}
 
 	/* Ensure that ballooned highmem pages don't have kmaps. */
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c
index 875a4c59c594..a9592d981b10 100644
--- a/drivers/xen/xenfs/xenbus.c
+++ b/drivers/xen/xenfs/xenbus.c
@@ -291,7 +291,7 @@ static void watch_fired(struct xenbus_watch *watch,
 static int xenbus_write_transaction(unsigned msg_type,
 				    struct xenbus_file_priv *u)
 {
-	int rc, ret;
+	int rc;
 	void *reply;
 	struct xenbus_transaction_holder *trans = NULL;
 	LIST_HEAD(staging_q);
@@ -326,15 +326,14 @@ static int xenbus_write_transaction(unsigned msg_type,
 	}
 
 	mutex_lock(&u->reply_mutex);
-	ret = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg));
-	if (!ret)
-		ret = queue_reply(&staging_q, reply, u->u.msg.len);
-	if (!ret) {
+	rc = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg));
+	if (!rc)
+		rc = queue_reply(&staging_q, reply, u->u.msg.len);
+	if (!rc) {
 		list_splice_tail(&staging_q, &u->read_buffers);
 		wake_up(&u->read_waitq);
 	} else {
 		queue_cleanup(&staging_q);
-		rc = ret;
 	}
 	mutex_unlock(&u->reply_mutex);
 