-rw-r--r--  MAINTAINERS | 14
-rw-r--r--  arch/arm/plat-omap/devices.c | 4
-rw-r--r--  arch/x86/pci/xen.c | 8
-rw-r--r--  arch/x86/xen/mmu.c | 2
-rw-r--r--  arch/x86/xen/setup.c | 18
-rw-r--r--  drivers/char/amiserial.c | 1
-rw-r--r--  drivers/char/nozomi.c | 1
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c | 1
-rw-r--r--  drivers/pci/xen-pcifront.c | 6
-rw-r--r--  drivers/serial/8250.c | 5
-rw-r--r--  drivers/serial/8250_pci.c | 5
-rw-r--r--  drivers/serial/bfin_5xx.c | 31
-rw-r--r--  drivers/staging/ath6kl/Kconfig | 2
-rw-r--r--  drivers/staging/ath6kl/os/linux/ar6000_drv.c | 5
-rw-r--r--  drivers/staging/ath6kl/os/linux/cfg80211.c | 7
-rw-r--r--  drivers/staging/batman-adv/hard-interface.c | 15
-rw-r--r--  drivers/staging/batman-adv/routing.c | 12
-rw-r--r--  drivers/staging/batman-adv/routing.h | 4
-rw-r--r--  drivers/staging/batman-adv/unicast.c | 2
-rw-r--r--  drivers/staging/bcm/Bcmchar.c | 49
-rw-r--r--  drivers/staging/brcm80211/README | 2
-rw-r--r--  drivers/staging/brcm80211/TODO | 2
-rw-r--r--  drivers/staging/brcm80211/brcmfmac/dhd_linux.c | 2
-rw-r--r--  drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c | 12
-rw-r--r--  drivers/staging/cpia/cpia.c | 6
-rw-r--r--  drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c | 1
-rw-r--r--  drivers/staging/hv/hv_utils.c | 3
-rw-r--r--  drivers/staging/intel_sst/intel_sst_app_interface.c | 284
-rw-r--r--  drivers/staging/intel_sst/intel_sst_common.h | 4
-rw-r--r--  drivers/staging/keucr/init.c | 18
-rw-r--r--  drivers/staging/keucr/ms.c | 14
-rw-r--r--  drivers/staging/keucr/msscsi.c | 6
-rw-r--r--  drivers/staging/keucr/sdscsi.c | 4
-rw-r--r--  drivers/staging/keucr/smilsub.c | 18
-rw-r--r--  drivers/staging/keucr/transport.c | 2
-rw-r--r--  drivers/staging/rt2860/common/cmm_aes.c | 2
-rw-r--r--  drivers/staging/rt2860/usb_main_dev.c | 1
-rw-r--r--  drivers/staging/rtl8192e/r8192E_core.c | 3
-rw-r--r--  drivers/staging/stradis/stradis.c | 11
-rw-r--r--  drivers/staging/tidspbridge/Kconfig | 1
-rw-r--r--  drivers/staging/tidspbridge/Makefile | 7
-rw-r--r--  drivers/staging/tidspbridge/core/_deh.h | 5
-rw-r--r--  drivers/staging/tidspbridge/core/_tiomap.h | 19
-rw-r--r--  drivers/staging/tidspbridge/core/dsp-mmu.c | 317
-rw-r--r--  drivers/staging/tidspbridge/core/io_sm.c | 180
-rw-r--r--  drivers/staging/tidspbridge/core/tiomap3430.c | 1083
-rw-r--r--  drivers/staging/tidspbridge/core/tiomap3430_pwr.c | 4
-rw-r--r--  drivers/staging/tidspbridge/core/tiomap_io.c | 17
-rw-r--r--  drivers/staging/tidspbridge/core/ue_deh.c | 115
-rw-r--r--  drivers/staging/tidspbridge/hw/EasiGlobal.h | 41
-rw-r--r--  drivers/staging/tidspbridge/hw/MMUAccInt.h | 76
-rw-r--r--  drivers/staging/tidspbridge/hw/MMURegAcM.h | 225
-rw-r--r--  drivers/staging/tidspbridge/hw/hw_defs.h | 58
-rw-r--r--  drivers/staging/tidspbridge/hw/hw_mmu.c | 562
-rw-r--r--  drivers/staging/tidspbridge/hw/hw_mmu.h | 163
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h | 1
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/dev.h | 24
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/dmm.h | 75
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/drv.h | 10
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h | 67
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/dspdefs.h | 44
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/dspioctl.h | 7
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/proc.h | 46
-rw-r--r--  drivers/staging/tidspbridge/pmgr/dev.c | 63
-rw-r--r--  drivers/staging/tidspbridge/pmgr/dmm.c | 533
-rw-r--r--  drivers/staging/tidspbridge/pmgr/dspapi.c | 34
-rw-r--r--  drivers/staging/tidspbridge/rmgr/drv.c | 15
-rw-r--r--  drivers/staging/tidspbridge/rmgr/drv_interface.c | 2
-rw-r--r--  drivers/staging/tidspbridge/rmgr/node.c | 48
-rw-r--r--  drivers/staging/tidspbridge/rmgr/proc.c | 197
-rw-r--r--  drivers/staging/udlfb/udlfb.c | 2
-rw-r--r--  drivers/staging/vt6656/main_usb.c | 3
-rw-r--r--  drivers/staging/westbridge/astoria/api/src/cyasusb.c | 1
-rw-r--r--  drivers/staging/wlan-ng/cfg80211.c | 6
-rw-r--r--  drivers/staging/wlan-ng/p80211netdev.c | 2
-rw-r--r--  drivers/tty/n_gsm.c | 5
-rw-r--r--  drivers/tty/tty_buffer.c | 14
-rw-r--r--  drivers/tty/tty_ldisc.c | 49
-rw-r--r--  drivers/tty/vt/vc_screen.c | 6
-rw-r--r--  drivers/usb/core/devio.c | 7
-rw-r--r--  drivers/usb/gadget/Kconfig | 2
-rw-r--r--  drivers/usb/gadget/goku_udc.h | 3
-rw-r--r--  drivers/usb/gadget/u_serial.c | 54
-rw-r--r--  drivers/usb/host/Kconfig | 2
-rw-r--r--  drivers/usb/host/ehci-mxc.c | 14
-rw-r--r--  drivers/usb/host/ohci-jz4740.c | 2
-rw-r--r--  drivers/usb/misc/iowarrior.c | 1
-rw-r--r--  drivers/usb/misc/sisusbvga/sisusb.c | 1
-rw-r--r--  drivers/usb/musb/blackfin.c | 80
-rw-r--r--  drivers/usb/musb/musb_core.c | 41
-rw-r--r--  drivers/usb/musb/musb_core.h | 2
-rw-r--r--  drivers/usb/musb/musb_gadget.c | 41
-rw-r--r--  drivers/usb/musb/musb_regs.h | 3
-rw-r--r--  drivers/usb/musb/musbhsdma.c | 14
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 2
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 7
-rw-r--r--  drivers/usb/serial/option.c | 2
-rw-r--r--  drivers/uwb/allocator.c | 3
-rw-r--r--  drivers/xen/events.c | 25
-rw-r--r--  include/linux/tty.h | 2
-rw-r--r--  include/linux/usb.h | 2
-rw-r--r--  include/linux/usb/musb.h | 2
102 files changed, 4078 insertions(+), 948 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 0094224ca79b..88b74a75d932 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -161,7 +161,7 @@ M: Greg Kroah-Hartman <gregkh@suse.de>
 L:	linux-serial@vger.kernel.org
 W:	http://serial.sourceforge.net
 S:	Maintained
-T:	quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
 F:	drivers/serial/8250*
 F:	include/linux/serial_8250.h
 
@@ -5676,7 +5676,7 @@ S: Maintained
 
 STAGING SUBSYSTEM
 M:	Greg Kroah-Hartman <gregkh@suse.de>
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-next-2.6.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6.git
 L:	devel@driverdev.osuosl.org
 S:	Maintained
 F:	drivers/staging/
@@ -5910,7 +5910,7 @@ S: Maintained
 TTY LAYER
 M:	Greg Kroah-Hartman <gregkh@suse.de>
 S:	Maintained
-T:	quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
 F:	drivers/char/tty_*
 F:	drivers/serial/serial_core.c
 F:	include/linux/serial_core.h
@@ -6233,7 +6233,7 @@ USB SUBSYSTEM
 M:	Greg Kroah-Hartman <gregkh@suse.de>
 L:	linux-usb@vger.kernel.org
 W:	http://www.linux-usb.org
-T:	quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6.git
 S:	Supported
 F:	Documentation/usb/
 F:	drivers/net/usb/
@@ -6598,14 +6598,14 @@ F: drivers/platform/x86
 
 XEN PCI SUBSYSTEM
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:	xen-devel@lists.xensource.com
+L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
 S:	Supported
 F:	arch/x86/pci/*xen*
 F:	drivers/pci/*xen*
 
 XEN SWIOTLB SUBSYSTEM
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:	xen-devel@lists.xensource.com
+L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
 S:	Supported
 F:	arch/x86/xen/*swiotlb*
 F:	drivers/xen/*swiotlb*
@@ -6613,7 +6613,7 @@ F: drivers/xen/*swiotlb*
 XEN HYPERVISOR INTERFACE
 M:	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:	xen-devel@lists.xen.org
+L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
 L:	virtualization@lists.osdl.org
 S:	Supported
 F:	arch/x86/xen/
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index 6f42a18b8aa4..fc819120978d 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -284,12 +284,14 @@ void __init omap_dsp_reserve_sdram_memblock(void)
 	if (!size)
 		return;
 
-	paddr = __memblock_alloc_base(size, SZ_1M, MEMBLOCK_REAL_LIMIT);
+	paddr = memblock_alloc(size, SZ_1M);
 	if (!paddr) {
 		pr_err("%s: failed to reserve %x bytes\n",
 				__func__, size);
 		return;
 	}
+	memblock_free(paddr, size);
+	memblock_remove(paddr, size);
 
 	omap_dsp_phys_mempool_base = paddr;
 }
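
The hunk above allocates the DSP pool with memblock_alloc(), then immediately memblock_free()s and memblock_remove()s the range: the free returns it to memblock's bookkeeping and the remove deletes it from the memory map entirely, so the region never reaches the page allocator and the DSP owns it outright. A minimal sketch of that carve-out idiom, assuming the 2.6.37-era memblock interface used above (names and the SZ_1M alignment are illustrative, not a definitive implementation):

    #include <linux/memblock.h>

    /* Sketch only: carve a physically contiguous region out of the
     * kernel's view of memory for a coprocessor. */
    static phys_addr_t __init carve_out_region(phys_addr_t size)
    {
            phys_addr_t paddr = memblock_alloc(size, SZ_1M);

            if (!paddr)
                    return 0;       /* allocation failed, nothing reserved */

            /* Return the range to memblock, then delete it outright so it
             * is never handed to the buddy allocator. */
            memblock_free(paddr, size);
            memblock_remove(paddr, size);

            return paddr;
    }
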
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 117f5b8daf75..d7b5109f7a9c 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -147,8 +147,10 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 		irq = xen_allocate_pirq(v[i], 0, /* not sharable */
 			(type == PCI_CAP_ID_MSIX) ?
 			"pcifront-msi-x" : "pcifront-msi");
-		if (irq < 0)
-			return -1;
+		if (irq < 0) {
+			ret = -1;
+			goto free;
+		}
 
 		ret = set_irq_msi(irq, msidesc);
 		if (ret)
@@ -164,7 +166,7 @@ error:
 	if (ret == -ENODEV)
 		dev_err(&dev->dev, "Xen PCI frontend has not registered" \
 			" MSI/MSI-X support!\n");
-
+free:
 	kfree(v);
 	return ret;
 }
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index c237b810b03f..21ed8d7f75a5 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2126,7 +2126,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 {
 	pmd_t *kernel_pmd;
 
-	level2_kernel_pgt = extend_brk(sizeof(pmd_t *) * PTRS_PER_PMD, PAGE_SIZE);
+	level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
 
 	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
 				  xen_start_info->nr_pt_frames * PAGE_SIZE +
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index b1dbdaa23ecc..769c4b01fa32 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -118,16 +118,18 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
 						 const struct e820map *e820)
 {
 	phys_addr_t max_addr = PFN_PHYS(max_pfn);
-	phys_addr_t last_end = 0;
+	phys_addr_t last_end = ISA_END_ADDRESS;
 	unsigned long released = 0;
 	int i;
 
+	/* Free any unused memory above the low 1Mbyte. */
 	for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
 		phys_addr_t end = e820->map[i].addr;
 		end = min(max_addr, end);
 
-		released += xen_release_chunk(last_end, end);
-		last_end = e820->map[i].addr + e820->map[i].size;
+		if (last_end < end)
+			released += xen_release_chunk(last_end, end);
+		last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
 	}
 
 	if (last_end < max_addr)
@@ -164,6 +166,7 @@ char * __init xen_memory_setup(void)
 		XENMEM_memory_map;
 	rc = HYPERVISOR_memory_op(op, &memmap);
 	if (rc == -ENOSYS) {
+		BUG_ON(xen_initial_domain());
 		memmap.nr_entries = 1;
 		map[0].addr = 0ULL;
 		map[0].size = mem_end;
@@ -201,12 +204,13 @@ char * __init xen_memory_setup(void)
 	}
 
 	/*
-	 * Even though this is normal, usable memory under Xen, reserve
-	 * ISA memory anyway because too many things think they can poke
-	 * about in there.
+	 * In domU, the ISA region is normal, usable memory, but we
+	 * reserve ISA memory anyway because too many things poke
+	 * about in there.
 	 *
-	 * In a dom0 kernel, this region is identity mapped with the
-	 * hardware ISA area, so it really is out of bounds.
+	 * In Dom0, the host E820 information can leave gaps in the
+	 * ISA range, which would cause us to release those pages. To
+	 * avoid this, we unconditionally reserve them here.
 	 */
 	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
 			E820_RESERVED);
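
The rewritten release loop above only returns a chunk to the hypervisor when last_end actually lies below the start of the next E820 entry, and advances last_end with max() so an overlapping or out-of-order entry can never pull the cursor backwards and release memory twice. A standalone sketch of that interval walk, with a stand-in region type and the release reduced to simple accounting (not kernel API, just the shape of the logic):

    /* Sketch of the fixed gap-release walk over an E820-style map. */
    struct region { unsigned long start, size; };

    static unsigned long release_gaps(const struct region *map, int nr,
                                      unsigned long start, unsigned long limit)
    {
            unsigned long last_end = start, released = 0;
            int i;

            for (i = 0; i < nr && last_end < limit; i++) {
                    unsigned long end = map[i].start;

                    if (end > limit)
                            end = limit;
                    if (last_end < end)             /* only genuine gaps */
                            released += end - last_end;
                    /* never move the cursor backwards over overlaps */
                    if (map[i].start + map[i].size > last_end)
                            last_end = map[i].start + map[i].size;
            }
            if (last_end < limit)
                    released += limit - last_end;
            return released;
    }
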
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index b0a70461a12c..c0bd6f472c52 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -1299,7 +1299,6 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
 {
 	struct async_struct * info = tty->driver_data;
 	struct async_icount cprev, cnow;	/* kernel counter temps */
-	struct serial_icounter_struct icount;
 	void __user *argp = (void __user *)arg;
 	unsigned long flags;
 
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index dd3f9b1f11b4..294d03e8c61a 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -1828,7 +1828,6 @@ static int ntty_ioctl(struct tty_struct *tty, struct file *file,
 		    unsigned int cmd, unsigned long arg)
 {
 	struct port *port = tty->driver_data;
-	void __user *argp = (void __user *)arg;
 	int rval = -ENOIOCTLCMD;
 
 	DBG1("******** IOCTL, cmd: %d", cmd);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index bfc10f89d951..eaa41992fbe2 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -2796,6 +2796,7 @@ static const struct tty_operations mgslpc_ops = {
 	.hangup = mgslpc_hangup,
 	.tiocmget = tiocmget,
 	.tiocmset = tiocmset,
+	.get_icount = mgslpc_get_icount,
 	.proc_fops = &mgslpc_proc_fops,
 };
 
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index a87c4985326e..3a5a6fcc0ead 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -13,7 +13,6 @@
 #include <linux/spinlock.h>
 #include <linux/pci.h>
 #include <linux/msi.h>
-#include <xen/xenbus.h>
 #include <xen/interface/io/pciif.h>
 #include <asm/xen/pci.h>
 #include <linux/interrupt.h>
@@ -576,8 +575,9 @@ static pci_ers_result_t pcifront_common_process(int cmd,
 
 	pcidev = pci_get_bus_and_slot(bus, devfn);
 	if (!pcidev || !pcidev->driver) {
-		dev_err(&pcidev->dev,
-			"device or driver is NULL\n");
+		dev_err(&pdev->xdev->dev, "device or AER driver is NULL\n");
+		if (pcidev)
+			pci_dev_put(pcidev);
 		return result;
 	}
 	pdrv = pcidev->driver;
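
Two bugs are fixed in the hunk above: the old error path dereferenced pcidev for the log message inside the very branch that had just established pcidev could be NULL, and it leaked the reference taken by pci_get_bus_and_slot() when only the driver was missing. A minimal sketch of the corrected lookup pattern, with an illustrative function name and error code (the logging device is whatever is known to be valid in the caller):

    #include <linux/pci.h>

    static int lookup_and_check(struct device *log_dev, unsigned int bus,
                                unsigned int devfn)
    {
            struct pci_dev *pcidev = pci_get_bus_and_slot(bus, devfn);

            if (!pcidev || !pcidev->driver) {
                    /* log via a device we know is valid, never via pcidev */
                    dev_err(log_dev, "device or driver is NULL\n");
                    if (pcidev)
                            pci_dev_put(pcidev);    /* balance the get */
                    return -ENODEV;
            }
            /* ... use pcidev ... */
            pci_dev_put(pcidev);
            return 0;
    }
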
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 4d8e14b7aa93..dd5e1ac22251 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2343,8 +2343,11 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
 
 	/*
 	 * CTS flow control flag and modem status interrupts
+	 * Only disable MSI if no threads are waiting in
+	 * serial_core::uart_wait_modem_status
 	 */
-	up->ier &= ~UART_IER_MSI;
+	if (!waitqueue_active(&up->port.state->port.delta_msr_wait))
+		up->ier &= ~UART_IER_MSI;
 	if (!(up->bugs & UART_BUG_NOMSR) &&
 	    UART_ENABLE_MS(&up->port, termios->c_cflag))
 		up->ier |= UART_IER_MSI;
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 53be4d35a0aa..842e3b2a02b1 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -2285,6 +2285,8 @@ static struct pciserial_board pci_boards[] __devinitdata = {
 
 static const struct pci_device_id softmodem_blacklist[] = {
 	{ PCI_VDEVICE(AL, 0x5457), }, /* ALi Corporation M5457 AC'97 Modem */
+	{ PCI_VDEVICE(MOTOROLA, 0x3052), }, /* Motorola Si3052-based modem */
+	{ PCI_DEVICE(0x1543, 0x3052), }, /* Si3052-based modem, default IDs */
 };
 
 /*
@@ -2863,6 +2865,9 @@ static struct pci_device_id serial_pci_tbl[] = {
 		PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL,
 		0, 0,
 		pbn_b0_4_1152000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0x9505,
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_b0_bt_2_921600 },
 
 	/*
 	 * The below card is a little controversial since it is the
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index a9eff2b18eab..19cac9f610fd 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -23,6 +23,7 @@
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
 #include <linux/serial_core.h>
+#include <linux/dma-mapping.h>
 
 #if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
 	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
@@ -33,12 +34,10 @@
 #include <asm/gpio.h>
 #include <mach/bfin_serial_5xx.h>
 
-#ifdef CONFIG_SERIAL_BFIN_DMA
-#include <linux/dma-mapping.h>
+#include <asm/dma.h>
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/cacheflush.h>
-#endif
 
 #ifdef CONFIG_SERIAL_BFIN_MODULE
 # undef CONFIG_EARLY_PRINTK
@@ -360,7 +359,6 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
 		UART_PUT_CHAR(uart, xmit->buf[xmit->tail]);
 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
 		uart->port.icount.tx++;
-		SSYNC();
 	}
 
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -688,6 +686,13 @@ static int bfin_serial_startup(struct uart_port *port)
 
 # ifdef CONFIG_BF54x
 	{
+		/*
+		 * UART2 and UART3 on BF548 share interrupt PINs and DMA
+		 * controllers with SPORT2 and SPORT3. UART rx and tx
+		 * interrupts are generated in PIO mode only when configure
+		 * their peripheral mapping registers properly, which means
+		 * request corresponding DMA channels in PIO mode as well.
+		 */
 		unsigned uart_dma_ch_rx, uart_dma_ch_tx;
 
 		switch (uart->port.irq) {
@@ -734,8 +739,7 @@ static int bfin_serial_startup(struct uart_port *port)
 			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
 			IRQF_DISABLED, "BFIN_UART_CTS", uart)) {
 			uart->cts_pin = -1;
-			pr_info("Unable to attach BlackFin UART CTS interrupt.\
-				 So, disable it.\n");
+			pr_info("Unable to attach BlackFin UART CTS interrupt. So, disable it.\n");
 		}
 	}
 	if (uart->rts_pin >= 0) {
@@ -747,8 +751,7 @@ static int bfin_serial_startup(struct uart_port *port)
 		if (request_irq(uart->status_irq,
 			bfin_serial_mctrl_cts_int,
 			IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
-			pr_info("Unable to attach BlackFin UART Modem \
-				Status interrupt.\n");
+			pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n");
 		}
 
 		/* CTS RTS PINs are negative assertive. */
@@ -846,6 +849,8 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
 	if (termios->c_cflag & CMSPAR)
 		lcr |= STP;
 
+	spin_lock_irqsave(&uart->port.lock, flags);
+
 	port->read_status_mask = OE;
 	if (termios->c_iflag & INPCK)
 		port->read_status_mask |= (FE | PE);
@@ -875,8 +880,6 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
 	if (termios->c_line != N_IRDA)
 		quot -= ANOMALY_05000230;
 
-	spin_lock_irqsave(&uart->port.lock, flags);
-
 	UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15);
 
 	/* Disable UART */
@@ -1321,6 +1324,14 @@ struct console __init *bfin_earlyserial_init(unsigned int port,
 	struct bfin_serial_port *uart;
 	struct ktermios t;
 
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+	/*
+	 * If we are using early serial, don't let the normal console rewind
+	 * log buffer, since that causes things to be printed multiple times
+	 */
+	bfin_serial_console.flags &= ~CON_PRINTBUFFER;
+#endif
+
 	if (port == -1 || port >= nr_active_ports)
 		port = 0;
 	bfin_serial_init_ports();
diff --git a/drivers/staging/ath6kl/Kconfig b/drivers/staging/ath6kl/Kconfig
index ae2cdf48b74c..8a5caa30b85f 100644
--- a/drivers/staging/ath6kl/Kconfig
+++ b/drivers/staging/ath6kl/Kconfig
@@ -102,7 +102,7 @@ config AR600x_BT_RESET_PIN
 
 config ATH6KL_CFG80211
 	bool "CFG80211 support"
-	depends on ATH6K_LEGACY
+	depends on ATH6K_LEGACY && CFG80211
 	help
 	Enables support for CFG80211 APIs. The default option is to use WEXT. Even with this option enabled, WEXT is not explicitly disabled and the onus of not exercising WEXT lies on the application(s) running in the user space.
 
diff --git a/drivers/staging/ath6kl/os/linux/ar6000_drv.c b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
index c5a6d6c16735..a659f7047373 100644
--- a/drivers/staging/ath6kl/os/linux/ar6000_drv.c
+++ b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
@@ -1126,7 +1126,7 @@ ar6000_transfer_bin_file(AR_SOFTC_T *ar, AR6K_BIN_FILE file, A_UINT32 address, A
 	if ((board_ext_address) && (fw_entry->size == (board_data_size + board_ext_data_size))) {
 		A_UINT32 param;
 
-		status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (A_UCHAR *)(((A_UINT32)fw_entry->data) + board_data_size), board_ext_data_size);
+		status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (A_UCHAR *)(fw_entry->data + board_data_size), board_ext_data_size);
 
 		if (status != A_OK) {
 			AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__));
@@ -3030,7 +3030,8 @@ ar6000_data_tx(struct sk_buff *skb, struct net_device *dev)
 	A_UINT8 csumDest=0;
 	A_UINT8 csum=skb->ip_summed;
 	if(csumOffload && (csum==CHECKSUM_PARTIAL)){
-		csumStart=skb->csum_start-(skb->network_header-skb->head)+sizeof(ATH_LLC_SNAP_HDR);
+		csumStart = (skb->head + skb->csum_start - skb_network_header(skb) +
+			      sizeof(ATH_LLC_SNAP_HDR));
 		csumDest=skb->csum_offset+csumStart;
 	}
 #endif
diff --git a/drivers/staging/ath6kl/os/linux/cfg80211.c b/drivers/staging/ath6kl/os/linux/cfg80211.c
index c94ad29eeb4d..7269d0a1d618 100644
--- a/drivers/staging/ath6kl/os/linux/cfg80211.c
+++ b/drivers/staging/ath6kl/os/linux/cfg80211.c
@@ -808,7 +808,7 @@ ar6k_cfg80211_scanComplete_event(AR_SOFTC_T *ar, A_STATUS status)
 
 static int
 ar6k_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
-                      A_UINT8 key_index, const A_UINT8 *mac_addr,
+                      A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr,
                       struct key_params *params)
 {
     AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
@@ -901,7 +901,7 @@ ar6k_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
 
 static int
 ar6k_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
-                      A_UINT8 key_index, const A_UINT8 *mac_addr)
+                      A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr)
 {
     AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
 
@@ -936,7 +936,8 @@ ar6k_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
 
 static int
 ar6k_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
-                      A_UINT8 key_index, const A_UINT8 *mac_addr, void *cookie,
+                      A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr,
+                      void *cookie,
                       void (*callback)(void *cookie, struct key_params*))
 {
     AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index 80cfa8669585..b68a7e5173be 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -165,7 +165,7 @@ static void update_mac_addresses(struct batman_if *batman_if)
 		       batman_if->net_dev->dev_addr, ETH_ALEN);
 }
 
-static void check_known_mac_addr(uint8_t *addr)
+static void check_known_mac_addr(struct net_device *net_dev)
 {
 	struct batman_if *batman_if;
 
@@ -175,11 +175,16 @@ static void check_known_mac_addr(uint8_t *addr)
 		    (batman_if->if_status != IF_TO_BE_ACTIVATED))
 			continue;
 
-		if (!compare_orig(batman_if->net_dev->dev_addr, addr))
+		if (batman_if->net_dev == net_dev)
+			continue;
+
+		if (!compare_orig(batman_if->net_dev->dev_addr,
+				  net_dev->dev_addr))
 			continue;
 
 		pr_warning("The newly added mac address (%pM) already exists "
-			   "on: %s\n", addr, batman_if->net_dev->name);
+			   "on: %s\n", net_dev->dev_addr,
+			   batman_if->net_dev->name);
 		pr_warning("It is strongly recommended to keep mac addresses "
 			   "unique to avoid problems!\n");
 	}
@@ -430,7 +435,7 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
 	atomic_set(&batman_if->refcnt, 0);
 	hardif_hold(batman_if);
 
-	check_known_mac_addr(batman_if->net_dev->dev_addr);
+	check_known_mac_addr(batman_if->net_dev);
 
 	spin_lock(&if_list_lock);
 	list_add_tail_rcu(&batman_if->list, &if_list);
@@ -515,7 +520,7 @@ static int hard_if_event(struct notifier_block *this,
 		goto out;
 	}
 
-	check_known_mac_addr(batman_if->net_dev->dev_addr);
+	check_known_mac_addr(batman_if->net_dev);
 	update_mac_addresses(batman_if);
 
 	bat_priv = netdev_priv(batman_if->soft_iface);
diff --git a/drivers/staging/batman-adv/routing.c b/drivers/staging/batman-adv/routing.c
index 90102631330b..657b69e6b957 100644
--- a/drivers/staging/batman-adv/routing.c
+++ b/drivers/staging/batman-adv/routing.c
@@ -1000,10 +1000,10 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
 
 /* find a suitable router for this originator, and use
  * bonding if possible. */
-struct neigh_node *find_router(struct orig_node *orig_node,
+struct neigh_node *find_router(struct bat_priv *bat_priv,
+			       struct orig_node *orig_node,
 			       struct batman_if *recv_if)
 {
-	struct bat_priv *bat_priv;
 	struct orig_node *primary_orig_node;
 	struct orig_node *router_orig;
 	struct neigh_node *router, *first_candidate, *best_router;
@@ -1019,13 +1019,9 @@ struct neigh_node *find_router(struct orig_node *orig_node,
 	/* without bonding, the first node should
 	 * always choose the default router. */
 
-	if (!recv_if)
-		return orig_node->router;
-
-	bat_priv = netdev_priv(recv_if->soft_iface);
 	bonding_enabled = atomic_read(&bat_priv->bonding_enabled);
 
-	if (!bonding_enabled)
+	if ((!recv_if) && (!bonding_enabled))
 		return orig_node->router;
 
 	router_orig = orig_node->router->orig_node;
@@ -1154,7 +1150,7 @@ static int route_unicast_packet(struct sk_buff *skb,
 	orig_node = ((struct orig_node *)
 		     hash_find(bat_priv->orig_hash, unicast_packet->dest));
 
-	router = find_router(orig_node, recv_if);
+	router = find_router(bat_priv, orig_node, recv_if);
 
 	if (!router) {
 		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
diff --git a/drivers/staging/batman-adv/routing.h b/drivers/staging/batman-adv/routing.h
index 06ea99df3706..92674c8d9c03 100644
--- a/drivers/staging/batman-adv/routing.h
+++ b/drivers/staging/batman-adv/routing.h
@@ -38,8 +38,8 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if);
 int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if);
 int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if);
 int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if);
-struct neigh_node *find_router(struct orig_node *orig_node,
-			       struct batman_if *recv_if);
+struct neigh_node *find_router(struct bat_priv *bat_priv,
+		struct orig_node *orig_node, struct batman_if *recv_if);
 void update_bonding_candidates(struct bat_priv *bat_priv,
 			       struct orig_node *orig_node);
 
diff --git a/drivers/staging/batman-adv/unicast.c b/drivers/staging/batman-adv/unicast.c
index 0dac50d69c03..0459413ff67f 100644
--- a/drivers/staging/batman-adv/unicast.c
+++ b/drivers/staging/batman-adv/unicast.c
@@ -224,7 +224,7 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
 	if (!orig_node)
 		orig_node = transtable_search(bat_priv, ethhdr->h_dest);
 
-	router = find_router(orig_node, NULL);
+	router = find_router(bat_priv, orig_node, NULL);
 
 	if (!router)
 		goto unlock;
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index 77fdfe24d999..fead9c56162e 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -1001,13 +1001,15 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
 		}
 #endif
 	case IOCTL_BE_BUCKET_SIZE:
-		Adapter->BEBucketSize = *(PULONG)arg;
-		Status = STATUS_SUCCESS;
+		Status = 0;
+		if (get_user(Adapter->BEBucketSize, (unsigned long __user *)arg))
+			Status = -EFAULT;
 		break;
 
 	case IOCTL_RTPS_BUCKET_SIZE:
-		Adapter->rtPSBucketSize = *(PULONG)arg;
-		Status = STATUS_SUCCESS;
+		Status = 0;
+		if (get_user(Adapter->rtPSBucketSize, (unsigned long __user *)arg))
+			Status = -EFAULT;
 		break;
 	case IOCTL_CHIP_RESET:
 	{
@@ -1028,11 +1030,15 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
 	case IOCTL_QOS_THRESHOLD:
 	{
 		USHORT uiLoopIndex;
-		for(uiLoopIndex = 0 ; uiLoopIndex < NO_OF_QUEUES ; uiLoopIndex++)
-		{
-			Adapter->PackInfo[uiLoopIndex].uiThreshold = *(PULONG)arg;
+
+		Status = 0;
+		for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES; uiLoopIndex++) {
+			if (get_user(Adapter->PackInfo[uiLoopIndex].uiThreshold,
+					(unsigned long __user *)arg)) {
+				Status = -EFAULT;
+				break;
+			}
 		}
-		Status = STATUS_SUCCESS;
 		break;
 	}
 
@@ -1093,7 +1099,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
 	}
 	case IOCTL_BCM_GET_CURRENT_STATUS:
 	{
-		LINK_STATE *plink_state = NULL;
+		LINK_STATE plink_state;
+
 		/* Copy Ioctl Buffer structure */
 		if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
 		{
@@ -1101,13 +1108,19 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
 			Status = -EFAULT;
 			break;
 		}
-		plink_state = (LINK_STATE*)arg;
-		plink_state->bIdleMode = (UCHAR)Adapter->IdleMode;
-		plink_state->bShutdownMode = Adapter->bShutStatus;
-		plink_state->ucLinkStatus = (UCHAR)Adapter->LinkStatus;
-		if(copy_to_user(IoBuffer.OutputBuffer,
-			(PUCHAR)plink_state, (UINT)IoBuffer.OutputLength))
-		{
+		if (IoBuffer.OutputLength != sizeof(plink_state)) {
+			Status = -EINVAL;
+			break;
+		}
+
+		if (copy_from_user(&plink_state, (void __user *)arg, sizeof(plink_state))) {
+			Status = -EFAULT;
+			break;
+		}
+		plink_state.bIdleMode = (UCHAR)Adapter->IdleMode;
+		plink_state.bShutdownMode = Adapter->bShutStatus;
+		plink_state.ucLinkStatus = (UCHAR)Adapter->LinkStatus;
+		if (copy_to_user(IoBuffer.OutputBuffer, &plink_state, IoBuffer.OutputLength)) {
 			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n");
 			Status = -EFAULT;
 			break;
@@ -1331,7 +1344,9 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
 			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Copy From User space failed. status :%d", Status);
 			return -EFAULT;
 		}
-		uiSectorSize = *((PUINT)(IoBuffer.InputBuffer)); /* FIXME: unchecked __user access */
+		if (get_user(uiSectorSize, (unsigned int __user *)IoBuffer.InputBuffer))
+			return -EFAULT;
+
 		if((uiSectorSize < MIN_SECTOR_SIZE) || (uiSectorSize > MAX_SECTOR_SIZE))
 		{
 
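
The Bcmchar changes above all follow one rule: an ioctl's arg is a user-space pointer and must never be dereferenced directly; scalars come in through get_user() and structures through copy_from_user() into a kernel copy. A minimal sketch of that pattern, using a hypothetical ioctl command set and argument structure (none of these names come from the driver above):

    #include <linux/fs.h>
    #include <linux/uaccess.h>

    /* Hypothetical ioctl argument structure, for illustration only. */
    struct demo_params {
            unsigned long bucket_size;
    };

    static long demo_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
    {
            struct demo_params params;
            unsigned long val;

            switch (cmd) {
            case 1: /* scalar: get_user() validates and copies in one step */
                    if (get_user(val, (unsigned long __user *)arg))
                            return -EFAULT;
                    break;
            case 2: /* struct: copy_from_user() into a kernel-stack copy */
                    if (copy_from_user(&params, (void __user *)arg,
                                       sizeof(params)))
                            return -EFAULT;
                    break;
            default:
                    return -ENOIOCTLCMD;
            }
            return 0;
    }
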
diff --git a/drivers/staging/brcm80211/README b/drivers/staging/brcm80211/README
index c3ba9bb9b116..c8f1cf1b4409 100644
--- a/drivers/staging/brcm80211/README
+++ b/drivers/staging/brcm80211/README
@@ -90,5 +90,5 @@ Contact Info:
 =============
 Brett Rudley brudley@broadcom.com
 Henry Ptasinski henryp@broadcom.com
-Nohee Ko noheek@broadcom.com
+Dowan Kim dowan@broadcom.com
 
diff --git a/drivers/staging/brcm80211/TODO b/drivers/staging/brcm80211/TODO
index 8803d300b531..dbf904184899 100644
--- a/drivers/staging/brcm80211/TODO
+++ b/drivers/staging/brcm80211/TODO
@@ -45,5 +45,5 @@ Contact
 =====
 Brett Rudley <brudley@broadcom.com>
 Henry Ptasinski <henryp@broadcom.com>
-Nohee Ko <noheek@broadcom.com>
+Dowan Kim <dowan@broadcom.com>
 
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
index bbbe7c5f7492..9335f02029aa 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
@@ -2222,8 +2222,6 @@ int dhd_net_attach(dhd_pub_t *dhdp, int ifidx)
 	ASSERT(net);
 
 	ASSERT(!net->netdev_ops);
-	net->netdev_ops = &dhd_ops_virt;
-
 	net->netdev_ops = &dhd_ops_pri;
 
 	/*
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
index 3f29488d9c72..ea0825238d53 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
@@ -95,12 +95,12 @@ static s32 wl_cfg80211_config_default_key(struct wiphy *wiphy,
 					  struct net_device *dev,
 					  u8 key_idx);
 static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
-			       u8 key_idx, const u8 *mac_addr,
+			       u8 key_idx, bool pairwise, const u8 *mac_addr,
 			       struct key_params *params);
 static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
-			       u8 key_idx, const u8 *mac_addr);
+			       u8 key_idx, bool pairwise, const u8 *mac_addr);
 static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
-			       u8 key_idx, const u8 *mac_addr,
+			       u8 key_idx, bool pairwise, const u8 *mac_addr,
 			       void *cookie, void (*callback) (void *cookie,
 							       struct
 							       key_params *
@@ -1615,7 +1615,7 @@ wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
 
 static s32
 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
-		    u8 key_idx, const u8 *mac_addr,
+		    u8 key_idx, bool pairwise, const u8 *mac_addr,
 		    struct key_params *params)
 {
 	struct wl_wsec_key key;
@@ -1700,7 +1700,7 @@ wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
 
 static s32
 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
-		    u8 key_idx, const u8 *mac_addr)
+		    u8 key_idx, bool pairwise, const u8 *mac_addr)
 {
 	struct wl_wsec_key key;
 	s32 err = 0;
@@ -1756,7 +1756,7 @@ wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
 
 static s32
 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
-		    u8 key_idx, const u8 *mac_addr, void *cookie,
+		    u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
 		    void (*callback) (void *cookie, struct key_params * params))
 {
 	struct key_params params;
diff --git a/drivers/staging/cpia/cpia.c b/drivers/staging/cpia/cpia.c
index 933ae4c8cb9a..0e740b8dafc3 100644
--- a/drivers/staging/cpia/cpia.c
+++ b/drivers/staging/cpia/cpia.c
@@ -3184,13 +3184,9 @@ static int cpia_open(struct file *file)
 		goto oops;
 	}
 
-	err = -EINTR;
-	if(signal_pending(current))
-		goto oops;
-
 	/* Set ownership of /proc/cpia/videoX to current user */
 	if(cam->proc_entry)
-		cam->proc_entry->uid = current_uid();
+		cam->proc_entry->uid = current_euid();
 
 	/* set mark for loading first frame uncompressed */
 	cam->first_frame = 1;
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c b/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c
index 87a6487531c2..20d509836d9e 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c
@@ -286,7 +286,6 @@ int ft1000_CreateDevice(struct ft1000_device *dev)
 	pid = kernel_thread (exec_mknod, (void *)info, 0);
 
 	// initialize application information
-	info->appcnt = 0;
 
 //	if (ft1000_flarion_cnt == 0) {
 //
diff --git a/drivers/staging/hv/hv_utils.c b/drivers/staging/hv/hv_utils.c
index 702a478d5542..a99e900ec4c9 100644
--- a/drivers/staging/hv/hv_utils.c
+++ b/drivers/staging/hv/hv_utils.c
@@ -212,9 +212,6 @@ static void heartbeat_onchannelcallback(void *context)
 			   recvlen, requestid);
 
 		icmsghdrp = (struct icmsg_hdr *)&buf[
-			sizeof(struct vmbuspipe_hdr)];
-
-		icmsghdrp = (struct icmsg_hdr *)&buf[
 			sizeof(struct vmbuspipe_hdr)];
 
 		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
diff --git a/drivers/staging/intel_sst/intel_sst_app_interface.c b/drivers/staging/intel_sst/intel_sst_app_interface.c
index 463e5cba8307..9618c7997461 100644
--- a/drivers/staging/intel_sst/intel_sst_app_interface.c
+++ b/drivers/staging/intel_sst/intel_sst_app_interface.c
@@ -244,12 +244,12 @@ static int intel_sst_mmap_play_capture(u32 str_id,
 	int retval, i;
 	struct stream_info *stream;
 	struct snd_sst_mmap_buff_entry *buf_entry;
+	struct snd_sst_mmap_buff_entry *tmp_buf;
 
 	pr_debug("sst:called for str_id %d\n", str_id);
 	retval = sst_validate_strid(str_id);
 	if (retval)
 		return -EINVAL;
-	BUG_ON(!mmap_buf);
 
 	stream = &sst_drv_ctx->streams[str_id];
 	if (stream->mmapped != true)
@@ -262,14 +262,24 @@ static int intel_sst_mmap_play_capture(u32 str_id,
 	stream->curr_bytes = 0;
 	stream->cumm_bytes = 0;
 
+	tmp_buf = kcalloc(mmap_buf->entries, sizeof(*tmp_buf), GFP_KERNEL);
+	if (!tmp_buf)
+		return -ENOMEM;
+	if (copy_from_user(tmp_buf, (void __user *)mmap_buf->buff,
+			mmap_buf->entries * sizeof(*tmp_buf))) {
+		retval = -EFAULT;
+		goto out_free;
+	}
+
 	pr_debug("sst:new buffers count %d status %d\n",
 			mmap_buf->entries, stream->status);
-	buf_entry = mmap_buf->buff;
+	buf_entry = tmp_buf;
 	for (i = 0; i < mmap_buf->entries; i++) {
-		BUG_ON(!buf_entry);
 		bufs = kzalloc(sizeof(*bufs), GFP_KERNEL);
-		if (!bufs)
-			return -ENOMEM;
+		if (!bufs) {
+			retval = -ENOMEM;
+			goto out_free;
+		}
 		bufs->size = buf_entry->size;
 		bufs->offset = buf_entry->offset;
 		bufs->addr = sst_drv_ctx->mmap_mem;
@@ -293,13 +303,15 @@ static int intel_sst_mmap_play_capture(u32 str_id,
 			if (sst_play_frame(str_id) < 0) {
 				pr_warn("sst: play frames fail\n");
 				mutex_unlock(&stream->lock);
-				return -EIO;
+				retval = -EIO;
+				goto out_free;
 			}
 		} else if (stream->ops == STREAM_OPS_CAPTURE) {
 			if (sst_capture_frame(str_id) < 0) {
 				pr_warn("sst: capture frame fail\n");
 				mutex_unlock(&stream->lock);
-				return -EIO;
+				retval = -EIO;
+				goto out_free;
 			}
 		}
 	}
@@ -314,6 +326,9 @@ static int intel_sst_mmap_play_capture(u32 str_id,
 	if (retval >= 0)
 		retval = stream->cumm_bytes;
 	pr_debug("sst:end of play/rec ioctl bytes = %d!!\n", retval);
+
+out_free:
+	kfree(tmp_buf);
 	return retval;
 }
 
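
The hunks above make intel_sst_mmap_play_capture() snapshot the user's whole buffer table into a kernel allocation before walking it, so each entry is read exactly once and user space cannot rewrite entries mid-iteration (a TOCTOU race under the old direct-pointer walk). The shape of that snapshot-then-iterate pattern, with a hypothetical entry type standing in for the driver's:

    #include <linux/slab.h>
    #include <linux/uaccess.h>

    /* Hypothetical entry type, for illustration only. */
    struct demo_entry { unsigned int size, offset; };

    static int snapshot_user_table(const struct demo_entry __user *ubuf,
                                   unsigned int entries)
    {
            struct demo_entry *tmp;
            unsigned int i;
            int ret = 0;

            tmp = kcalloc(entries, sizeof(*tmp), GFP_KERNEL);
            if (!tmp)
                    return -ENOMEM;
            if (copy_from_user(tmp, ubuf, entries * sizeof(*tmp))) {
                    ret = -EFAULT;
                    goto out_free;
            }
            for (i = 0; i < entries; i++) {
                    /* act on tmp[i]; user space can no longer change it */
            }
    out_free:
            kfree(tmp);
            return ret;
    }
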
@@ -377,7 +392,7 @@ static int snd_sst_fill_kernel_list(struct stream_info *stream,
 {
 	struct sst_stream_bufs *stream_bufs;
 	unsigned long index, mmap_len;
-	unsigned char *bufp;
+	unsigned char __user *bufp;
 	unsigned long size, copied_size;
 	int retval = 0, add_to_list = 0;
 	static int sent_offset;
@@ -512,9 +527,7 @@ static int snd_sst_copy_userbuf_capture(struct stream_info *stream,
 	/* copy to user */
 	list_for_each_entry_safe(entry, _entry,
 				copy_to_list, node) {
-		if (copy_to_user((void *)
-				iovec[entry->iov_index].iov_base +
-				entry->iov_offset,
+		if (copy_to_user(iovec[entry->iov_index].iov_base + entry->iov_offset,
 				kbufs->addr + entry->offset,
 				entry->size)) {
 			/* Clean up the list and return error */
@@ -590,7 +603,7 @@ static int intel_sst_read_write(unsigned int str_id, char __user *buf,
 			buf, (int) count, (int) stream->status);
 
 	stream->buf_type = SST_BUF_USER_STATIC;
-	iovec.iov_base = (void *)buf;
+	iovec.iov_base = buf;
 	iovec.iov_len = count;
 	nr_segs = 1;
 
@@ -838,7 +851,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
 		break;
 
 	case _IOC_NR(SNDRV_SST_STREAM_SET_PARAMS): {
-		struct snd_sst_params *str_param = (struct snd_sst_params *)arg;
+		struct snd_sst_params str_param;
 
 		pr_debug("sst: IOCTL_SET_PARAMS recieved!\n");
 		if (minor != STREAM_MODULE) {
@@ -846,17 +859,25 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
 			break;
 		}
 
+		if (copy_from_user(&str_param, (void __user *)arg,
+				sizeof(str_param))) {
+			retval = -EFAULT;
+			break;
+		}
+
 		if (!str_id) {
 
-			retval = sst_get_stream(str_param);
+			retval = sst_get_stream(&str_param);
 			if (retval > 0) {
 				struct stream_info *str_info;
+				char __user *dest;
+
 				sst_drv_ctx->stream_cnt++;
 				data->str_id = retval;
 				str_info = &sst_drv_ctx->streams[retval];
 				str_info->src = SST_DRV;
-				retval = copy_to_user(&str_param->stream_id,
-						&retval, sizeof(__u32));
+				dest = (char __user *)arg + offsetof(struct snd_sst_params, stream_id);
+				retval = copy_to_user(dest, &retval, sizeof(__u32));
 				if (retval)
 					retval = -EFAULT;
 			} else {
@@ -866,16 +887,14 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
 		} else {
 			pr_debug("sst: SET_STREAM_PARAMS recieved!\n");
 			/* allocated set params only */
-			retval = sst_set_stream_param(str_id, str_param);
+			retval = sst_set_stream_param(str_id, &str_param);
 			/* Block the call for reply */
 			if (!retval) {
 				int sfreq = 0, word_size = 0, num_channel = 0;
-				sfreq = str_param->sparams.uc.pcm_params.sfreq;
-				word_size = str_param->sparams.
-						uc.pcm_params.pcm_wd_sz;
-				num_channel = str_param->
-					sparams.uc.pcm_params.num_chan;
-				if (str_param->ops == STREAM_OPS_CAPTURE) {
+				sfreq = str_param.sparams.uc.pcm_params.sfreq;
+				word_size = str_param.sparams.uc.pcm_params.pcm_wd_sz;
+				num_channel = str_param.sparams.uc.pcm_params.num_chan;
+				if (str_param.ops == STREAM_OPS_CAPTURE) {
 					sst_drv_ctx->scard_ops->\
 					set_pcm_audio_params(sfreq,
 						word_size, num_channel);
@@ -885,41 +904,39 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
 		break;
 	}
 	case _IOC_NR(SNDRV_SST_SET_VOL): {
-		struct snd_sst_vol *set_vol;
-		struct snd_sst_vol *rec_vol = (struct snd_sst_vol *)arg;
+		struct snd_sst_vol set_vol;
+
+		if (copy_from_user(&set_vol, (void __user *)arg,
+				sizeof(set_vol))) {
+			pr_debug("sst: copy failed\n");
+			retval = -EFAULT;
+			break;
+		}
 		pr_debug("sst: SET_VOLUME recieved for %d!\n",
-				rec_vol->stream_id);
-		if (minor == STREAM_MODULE && rec_vol->stream_id == 0) {
+				set_vol.stream_id);
+		if (minor == STREAM_MODULE && set_vol.stream_id == 0) {
 			pr_debug("sst: invalid operation!\n");
 			retval = -EPERM;
 			break;
 		}
-		set_vol = kzalloc(sizeof(*set_vol), GFP_ATOMIC);
-		if (!set_vol) {
-			pr_debug("sst: mem allocation failed\n");
-			retval = -ENOMEM;
-			break;
-		}
-		if (copy_from_user(set_vol, rec_vol, sizeof(*set_vol))) {
-			pr_debug("sst: copy failed\n");
-			retval = -EFAULT;
-			break;
-		}
-		retval = sst_set_vol(set_vol);
-		kfree(set_vol);
+		retval = sst_set_vol(&set_vol);
 		break;
 	}
 	case _IOC_NR(SNDRV_SST_GET_VOL): {
-		struct snd_sst_vol *rec_vol = (struct snd_sst_vol *)arg;
 		struct snd_sst_vol get_vol;
+
+		if (copy_from_user(&get_vol, (void __user *)arg,
+				sizeof(get_vol))) {
+			retval = -EFAULT;
+			break;
+		}
 		pr_debug("sst: IOCTL_GET_VOLUME recieved for stream = %d!\n",
-				rec_vol->stream_id);
-		if (minor == STREAM_MODULE && rec_vol->stream_id == 0) {
+				get_vol.stream_id);
+		if (minor == STREAM_MODULE && get_vol.stream_id == 0) {
 			pr_debug("sst: invalid operation!\n");
 			retval = -EPERM;
 			break;
 		}
-		get_vol.stream_id = rec_vol->stream_id;
 		retval = sst_get_vol(&get_vol);
 		if (retval) {
 			retval = -EIO;
928 pr_debug("sst: id:%d\n, vol:%d, ramp_dur:%d, ramp_type:%d\n", 945 pr_debug("sst: id:%d\n, vol:%d, ramp_dur:%d, ramp_type:%d\n",
929 get_vol.stream_id, get_vol.volume, 946 get_vol.stream_id, get_vol.volume,
930 get_vol.ramp_duration, get_vol.ramp_type); 947 get_vol.ramp_duration, get_vol.ramp_type);
931 if (copy_to_user((struct snd_sst_vol *)arg, 948 if (copy_to_user((struct snd_sst_vol __user *)arg,
932 &get_vol, sizeof(get_vol))) { 949 &get_vol, sizeof(get_vol))) {
933 retval = -EFAULT; 950 retval = -EFAULT;
934 break; 951 break;
@@ -938,25 +955,20 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
 	}
 
 	case _IOC_NR(SNDRV_SST_MUTE): {
-		struct snd_sst_mute *set_mute;
-		struct snd_sst_vol *rec_mute = (struct snd_sst_vol *)arg;
-		pr_debug("sst: SNDRV_SST_SET_VOLUME recieved for %d!\n",
-				rec_mute->stream_id);
-		if (minor == STREAM_MODULE && rec_mute->stream_id == 0) {
-			retval = -EPERM;
-			break;
-		}
-		set_mute = kzalloc(sizeof(*set_mute), GFP_ATOMIC);
-		if (!set_mute) {
-			retval = -ENOMEM;
-			break;
-		}
-		if (copy_from_user(set_mute, rec_mute, sizeof(*set_mute))) {
-			retval = -EFAULT;
-			break;
-		}
-		retval = sst_set_mute(set_mute);
-		kfree(set_mute);
+		struct snd_sst_mute set_mute;
+
+		if (copy_from_user(&set_mute, (void __user *)arg,
+				sizeof(set_mute))) {
+			retval = -EFAULT;
+			break;
+		}
+		pr_debug("sst: SNDRV_SST_SET_VOLUME recieved for %d!\n",
+				set_mute.stream_id);
+		if (minor == STREAM_MODULE && set_mute.stream_id == 0) {
+			retval = -EPERM;
+			break;
+		}
+		retval = sst_set_mute(&set_mute);
 		break;
 	}
 	case _IOC_NR(SNDRV_SST_STREAM_GET_PARAMS): {
@@ -973,7 +985,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
973 retval = -EIO; 985 retval = -EIO;
974 break; 986 break;
975 } 987 }
976 if (copy_to_user((struct snd_sst_get_stream_params *)arg, 988 if (copy_to_user((struct snd_sst_get_stream_params __user *)arg,
977 &get_params, sizeof(get_params))) { 989 &get_params, sizeof(get_params))) {
978 retval = -EFAULT; 990 retval = -EFAULT;
979 break; 991 break;
@@ -983,16 +995,22 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
983 } 995 }
984 996
985 case _IOC_NR(SNDRV_SST_MMAP_PLAY): 997 case _IOC_NR(SNDRV_SST_MMAP_PLAY):
986 case _IOC_NR(SNDRV_SST_MMAP_CAPTURE): 998 case _IOC_NR(SNDRV_SST_MMAP_CAPTURE): {
999 struct snd_sst_mmap_buffs mmap_buf;
1000
987 pr_debug("sst: SNDRV_SST_MMAP_PLAY/CAPTURE recieved!\n"); 1001 pr_debug("sst: SNDRV_SST_MMAP_PLAY/CAPTURE recieved!\n");
988 if (minor != STREAM_MODULE) { 1002 if (minor != STREAM_MODULE) {
989 retval = -EBADRQC; 1003 retval = -EBADRQC;
990 break; 1004 break;
991 } 1005 }
992 retval = intel_sst_mmap_play_capture(str_id, 1006 if (copy_from_user(&mmap_buf, (void __user *)arg,
993 (struct snd_sst_mmap_buffs *)arg); 1007 sizeof(mmap_buf))) {
1008 retval = -EFAULT;
1009 break;
1010 }
1011 retval = intel_sst_mmap_play_capture(str_id, &mmap_buf);
994 break; 1012 break;
995 1013 }
996 case _IOC_NR(SNDRV_SST_STREAM_DROP): 1014 case _IOC_NR(SNDRV_SST_STREAM_DROP):
997 pr_debug("sst: SNDRV_SST_IOCTL_DROP recieved!\n"); 1015 pr_debug("sst: SNDRV_SST_IOCTL_DROP recieved!\n");
998 if (minor != STREAM_MODULE) { 1016 if (minor != STREAM_MODULE) {
@@ -1003,7 +1021,6 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
1003 break; 1021 break;
1004 1022
1005 case _IOC_NR(SNDRV_SST_STREAM_GET_TSTAMP): { 1023 case _IOC_NR(SNDRV_SST_STREAM_GET_TSTAMP): {
1006 unsigned long long *ms = (unsigned long long *)arg;
1007 struct snd_sst_tstamp tstamp = {0}; 1024 struct snd_sst_tstamp tstamp = {0};
1008 unsigned long long time, freq, mod; 1025 unsigned long long time, freq, mod;
1009 1026
@@ -1013,14 +1030,14 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
1013 break; 1030 break;
1014 } 1031 }
1015 memcpy_fromio(&tstamp, 1032 memcpy_fromio(&tstamp,
1016 ((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP) 1033 sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp),
1017 +(str_id * sizeof(tstamp))),
1018 sizeof(tstamp)); 1034 sizeof(tstamp));
1019 time = tstamp.samples_rendered; 1035 time = tstamp.samples_rendered;
1020 freq = (unsigned long long) tstamp.sampling_frequency; 1036 freq = (unsigned long long) tstamp.sampling_frequency;
1021 time = time * 1000; /* converting it to ms */ 1037 time = time * 1000; /* converting it to ms */
1022 mod = do_div(time, freq); 1038 mod = do_div(time, freq);
1023 if (copy_to_user(ms, &time, sizeof(*ms))) 1039 if (copy_to_user((void __user *)arg, &time,
1040 sizeof(unsigned long long)))
1024 retval = -EFAULT; 1041 retval = -EFAULT;
1025 break; 1042 break;
1026 } 1043 }
@@ -1065,92 +1082,118 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
1065 } 1082 }
1066 1083
1067 case _IOC_NR(SNDRV_SST_SET_TARGET_DEVICE): { 1084 case _IOC_NR(SNDRV_SST_SET_TARGET_DEVICE): {
1068 struct snd_sst_target_device *target_device; 1085 struct snd_sst_target_device target_device;
1069 1086
1070 pr_debug("sst: SET_TARGET_DEVICE recieved!\n"); 1087 pr_debug("sst: SET_TARGET_DEVICE recieved!\n");
1071 target_device = (struct snd_sst_target_device *)arg; 1088 if (copy_from_user(&target_device, (void __user *)arg,
1072 BUG_ON(!target_device); 1089 sizeof(target_device))) {
1090 retval = -EFAULT;
1091 break;
1092 }
1073 if (minor != AM_MODULE) { 1093 if (minor != AM_MODULE) {
1074 retval = -EBADRQC; 1094 retval = -EBADRQC;
1075 break; 1095 break;
1076 } 1096 }
1077 retval = sst_target_device_select(target_device); 1097 retval = sst_target_device_select(&target_device);
1078 break; 1098 break;
1079 } 1099 }
1080 1100
1081 case _IOC_NR(SNDRV_SST_DRIVER_INFO): { 1101 case _IOC_NR(SNDRV_SST_DRIVER_INFO): {
1082 struct snd_sst_driver_info *info = 1102 struct snd_sst_driver_info info;
1083 (struct snd_sst_driver_info *)arg;
1084 1103
1085 pr_debug("sst: SNDRV_SST_DRIVER_INFO recived\n"); 1104 pr_debug("sst: SNDRV_SST_DRIVER_INFO recived\n");
1086 info->version = SST_VERSION_NUM; 1105 info.version = SST_VERSION_NUM;
1087 /* hard coding, shud get sumhow later */ 1106 /* hard coding, shud get sumhow later */
1088 info->active_pcm_streams = sst_drv_ctx->stream_cnt - 1107 info.active_pcm_streams = sst_drv_ctx->stream_cnt -
1089 sst_drv_ctx->encoded_cnt; 1108 sst_drv_ctx->encoded_cnt;
1090 info->active_enc_streams = sst_drv_ctx->encoded_cnt; 1109 info.active_enc_streams = sst_drv_ctx->encoded_cnt;
1091 info->max_pcm_streams = MAX_ACTIVE_STREAM - MAX_ENC_STREAM; 1110 info.max_pcm_streams = MAX_ACTIVE_STREAM - MAX_ENC_STREAM;
1092 info->max_enc_streams = MAX_ENC_STREAM; 1111 info.max_enc_streams = MAX_ENC_STREAM;
1093 info->buf_per_stream = sst_drv_ctx->mmap_len; 1112 info.buf_per_stream = sst_drv_ctx->mmap_len;
1113 if (copy_to_user((void __user *)arg, &info,
1114 sizeof(info)))
1115 retval = -EFAULT;
1094 break; 1116 break;
1095 } 1117 }
1096 1118
1097 case _IOC_NR(SNDRV_SST_STREAM_DECODE): { 1119 case _IOC_NR(SNDRV_SST_STREAM_DECODE): {
1098 struct snd_sst_dbufs *param = 1120 struct snd_sst_dbufs param;
1099 (struct snd_sst_dbufs *)arg, dbufs_local; 1121 struct snd_sst_dbufs dbufs_local;
1100 int i;
1101 struct snd_sst_buffs ibufs, obufs; 1122 struct snd_sst_buffs ibufs, obufs;
1102 struct snd_sst_buff_entry ibuf_temp[param->ibufs->entries], 1123 struct snd_sst_buff_entry *ibuf_tmp, *obuf_tmp;
1103 obuf_temp[param->obufs->entries]; 1124 char __user *dest;
1104 1125
1105 pr_debug("sst: SNDRV_SST_STREAM_DECODE recived\n"); 1126 pr_debug("sst: SNDRV_SST_STREAM_DECODE recived\n");
1106 if (minor != STREAM_MODULE) { 1127 if (minor != STREAM_MODULE) {
1107 retval = -EBADRQC; 1128 retval = -EBADRQC;
1108 break; 1129 break;
1109 } 1130 }
1110 if (!param) { 1131 if (copy_from_user(&param, (void __user *)arg,
1111 retval = -EINVAL; 1132 sizeof(param))) {
1133 retval = -EFAULT;
1112 break; 1134 break;
1113 } 1135 }
1114 1136
1115 dbufs_local.input_bytes_consumed = param->input_bytes_consumed; 1137 dbufs_local.input_bytes_consumed = param.input_bytes_consumed;
1116 dbufs_local.output_bytes_produced = 1138 dbufs_local.output_bytes_produced =
1117 param->output_bytes_produced; 1139 param.output_bytes_produced;
1118 dbufs_local.ibufs = &ibufs; 1140
1119 dbufs_local.obufs = &obufs; 1141 if (copy_from_user(&ibufs, (void __user *)param.ibufs, sizeof(ibufs))) {
1120 dbufs_local.ibufs->entries = param->ibufs->entries; 1142 retval = -EFAULT;
1121 dbufs_local.ibufs->type = param->ibufs->type; 1143 break;
1122 dbufs_local.obufs->entries = param->obufs->entries; 1144 }
1123 dbufs_local.obufs->type = param->obufs->type; 1145 if (copy_from_user(&obufs, (void __user *)param.obufs, sizeof(obufs))) {
1124 1146 retval = -EFAULT;
1125 dbufs_local.ibufs->buff_entry = ibuf_temp; 1147 break;
1126 for (i = 0; i < dbufs_local.ibufs->entries; i++) {
1127 ibuf_temp[i].buffer =
1128 param->ibufs->buff_entry[i].buffer;
1129 ibuf_temp[i].size =
1130 param->ibufs->buff_entry[i].size;
1131 } 1148 }
1132 dbufs_local.obufs->buff_entry = obuf_temp; 1149
1133 for (i = 0; i < dbufs_local.obufs->entries; i++) { 1150 ibuf_tmp = kcalloc(ibufs.entries, sizeof(*ibuf_tmp), GFP_KERNEL);
1134 obuf_temp[i].buffer = 1151 obuf_tmp = kcalloc(obufs.entries, sizeof(*obuf_tmp), GFP_KERNEL);
1135 param->obufs->buff_entry[i].buffer; 1152 if (!ibuf_tmp || !obuf_tmp) {
1136 obuf_temp[i].size = 1153 retval = -ENOMEM;
1137 param->obufs->buff_entry[i].size; 1154 goto free_iobufs;
1155 }
1156
1157 if (copy_from_user(ibuf_tmp, (void __user *)ibufs.buff_entry,
1158 ibufs.entries * sizeof(*ibuf_tmp))) {
1159 retval = -EFAULT;
1160 goto free_iobufs;
1138 } 1161 }
1162 ibufs.buff_entry = ibuf_tmp;
1163 dbufs_local.ibufs = &ibufs;
1164
1165 if (copy_from_user(obuf_tmp, (void __user *)obufs.buff_entry,
1166 obufs.entries * sizeof(*obuf_tmp))) {
1167 retval = -EFAULT;
1168 goto free_iobufs;
1169 }
1170 obufs.buff_entry = obuf_tmp;
1171 dbufs_local.obufs = &obufs;
1172
1139 retval = sst_decode(str_id, &dbufs_local); 1173 retval = sst_decode(str_id, &dbufs_local);
1140 if (retval) 1174 if (retval) {
1141 retval = -EAGAIN; 1175 retval = -EAGAIN;
1142 if (copy_to_user(&param->input_bytes_consumed, 1176 goto free_iobufs;
1177 }
1178
1179 dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, input_bytes_consumed);
1180 if (copy_to_user(dest,
1143 &dbufs_local.input_bytes_consumed, 1181 &dbufs_local.input_bytes_consumed,
1144 sizeof(unsigned long long))) { 1182 sizeof(unsigned long long))) {
1145 retval = -EFAULT; 1183 retval = -EFAULT;
1146 break; 1184 goto free_iobufs;
1147 } 1185 }
1148 if (copy_to_user(&param->output_bytes_produced, 1186
 1187 dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, output_bytes_produced);
1188 if (copy_to_user(dest,
1149 &dbufs_local.output_bytes_produced, 1189 &dbufs_local.output_bytes_produced,
1150 sizeof(unsigned long long))) { 1190 sizeof(unsigned long long))) {
1151 retval = -EFAULT; 1191 retval = -EFAULT;
1152 break; 1192 goto free_iobufs;
1153 } 1193 }
1194free_iobufs:
1195 kfree(ibuf_tmp);
1196 kfree(obuf_tmp);
1154 break; 1197 break;
1155 } 1198 }
1156 1199
@@ -1164,7 +1207,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
1164 break; 1207 break;
1165 1208
1166 case _IOC_NR(SNDRV_SST_STREAM_BYTES_DECODED): { 1209 case _IOC_NR(SNDRV_SST_STREAM_BYTES_DECODED): {
1167 unsigned long long *bytes = (unsigned long long *)arg; 1210 unsigned long long __user *bytes = (unsigned long long __user *)arg;
1168 struct snd_sst_tstamp tstamp = {0}; 1211 struct snd_sst_tstamp tstamp = {0};
1169 1212
1170 pr_debug("sst: STREAM_BYTES_DECODED recieved!\n"); 1213 pr_debug("sst: STREAM_BYTES_DECODED recieved!\n");
@@ -1173,8 +1216,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
1173 break; 1216 break;
1174 } 1217 }
1175 memcpy_fromio(&tstamp, 1218 memcpy_fromio(&tstamp,
1176 ((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP) 1219 sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp),
1177 +(str_id * sizeof(tstamp))),
1178 sizeof(tstamp)); 1220 sizeof(tstamp));
1179 if (copy_to_user(bytes, &tstamp.bytes_processed, 1221 if (copy_to_user(bytes, &tstamp.bytes_processed,
1180 sizeof(*bytes))) 1222 sizeof(*bytes)))
@@ -1197,7 +1239,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
1197 kfree(fw_info); 1239 kfree(fw_info);
1198 break; 1240 break;
1199 } 1241 }
1200 if (copy_to_user((struct snd_sst_dbufs *)arg, 1242 if (copy_to_user((struct snd_sst_dbufs __user *)arg,
1201 fw_info, sizeof(*fw_info))) { 1243 fw_info, sizeof(*fw_info))) {
1202 kfree(fw_info); 1244 kfree(fw_info);
1203 retval = -EFAULT; 1245 retval = -EFAULT;
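The rewritten ioctl cases above all share one shape: copy the top-level argument into a kernel copy instead of dereferencing it, heap-allocate any count-sized arrays with kcalloc() rather than on-stack VLAs, and unwind allocations through a single error label. A minimal standalone sketch of that shape follows; the struct and helper names (my_bufs, my_entry, process_bufs) are hypothetical stand-ins, not part of the driver:

#include <linux/slab.h>
#include <linux/uaccess.h>

struct my_entry {
	void __user *buffer;	/* user buffer described by the entry */
	unsigned long size;
};

struct my_bufs {
	unsigned int entries;
	struct my_entry __user *buff_entry;	/* user pointer to the array */
};

long process_bufs(struct my_entry *e, unsigned int n);	/* hypothetical */

static long handle_bufs_ioctl(void __user *arg)
{
	struct my_bufs bufs;
	struct my_entry *tmp;
	long ret;

	/* Step 1: copy the top-level argument; never dereference arg. */
	if (copy_from_user(&bufs, arg, sizeof(bufs)))
		return -EFAULT;

	/* Step 2: heap-allocate the variable-length array; kcalloc()
	 * also guards the entries * sizeof(*tmp) multiplication. */
	tmp = kcalloc(bufs.entries, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* Step 3: the argument embeds another user pointer; copy its
	 * contents through copy_from_user() as well. */
	if (copy_from_user(tmp, bufs.buff_entry,
			   bufs.entries * sizeof(*tmp))) {
		ret = -EFAULT;
		goto free_bufs;
	}

	ret = process_bufs(tmp, bufs.entries);

free_bufs:	/* single unwind path, as in the STREAM_DECODE case above */
	kfree(tmp);
	return ret;
}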
diff --git a/drivers/staging/intel_sst/intel_sst_common.h b/drivers/staging/intel_sst/intel_sst_common.h
index 73a98c851e4a..bf0ead78bfae 100644
--- a/drivers/staging/intel_sst/intel_sst_common.h
+++ b/drivers/staging/intel_sst/intel_sst_common.h
@@ -231,8 +231,8 @@ struct stream_info {
231 spinlock_t pcm_lock; 231 spinlock_t pcm_lock;
232 bool mmapped; 232 bool mmapped;
233 unsigned int sg_index; /* current buf Index */ 233 unsigned int sg_index; /* current buf Index */
234 unsigned char *cur_ptr; /* Current static bufs */ 234 unsigned char __user *cur_ptr; /* Current static bufs */
235 struct snd_sst_buf_entry *buf_entry; 235 struct snd_sst_buf_entry __user *buf_entry;
236 struct sst_block data_blk; /* stream ops block */ 236 struct sst_block data_blk; /* stream ops block */
237 struct sst_block ctrl_blk; /* stream control cmd block */ 237 struct sst_block ctrl_blk; /* stream control cmd block */
238 enum snd_sst_buf_type buf_type; 238 enum snd_sst_buf_type buf_type;
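The two __user annotations added here change no generated code; they are sparse markers, so `make C=1` can warn when such a pointer is dereferenced directly instead of going through the checked accessors. A hypothetical illustration:

struct stream_cursor {
	unsigned char __user *cur_ptr;	/* points into user space */
};

static int peek_byte(struct stream_cursor *s, unsigned char *out)
{
	/* get_user() is the checked access; reading *s->cur_ptr here
	 * would draw a sparse address-space warning. */
	return get_user(*out, s->cur_ptr);
}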
diff --git a/drivers/staging/keucr/init.c b/drivers/staging/keucr/init.c
index 1934805844f2..978bf87ff13d 100644
--- a/drivers/staging/keucr/init.c
+++ b/drivers/staging/keucr/init.c
@@ -22,7 +22,7 @@ int ENE_InitMedia(struct us_data *us)
22 int result; 22 int result;
23 BYTE MiscReg03 = 0; 23 BYTE MiscReg03 = 0;
24 24
25 printk("--- Initial Nedia ---\n"); 25 printk("--- Init Media ---\n");
26 result = ENE_Read_BYTE(us, REG_CARD_STATUS, &MiscReg03); 26 result = ENE_Read_BYTE(us, REG_CARD_STATUS, &MiscReg03);
27 if (result != USB_STOR_XFER_GOOD) 27 if (result != USB_STOR_XFER_GOOD)
28 { 28 {
@@ -64,7 +64,7 @@ int ENE_Read_BYTE(struct us_data *us, WORD index, void *buf)
64 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; 64 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
65 int result; 65 int result;
66 66
67 memset(bcb, 0, sizeof(bcb)); 67 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
68 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 68 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
69 bcb->DataTransferLength = 0x01; 69 bcb->DataTransferLength = 0x01;
70 bcb->Flags = 0x80; 70 bcb->Flags = 0x80;
@@ -92,7 +92,7 @@ int ENE_SDInit(struct us_data *us)
92 return USB_STOR_TRANSPORT_ERROR; 92 return USB_STOR_TRANSPORT_ERROR;
93 } 93 }
94 94
95 memset(bcb, 0, sizeof(bcb)); 95 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
96 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 96 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
97 bcb->Flags = 0x80; 97 bcb->Flags = 0x80;
98 bcb->CDB[0] = 0xF2; 98 bcb->CDB[0] = 0xF2;
@@ -112,7 +112,7 @@ int ENE_SDInit(struct us_data *us)
112 return USB_STOR_TRANSPORT_ERROR; 112 return USB_STOR_TRANSPORT_ERROR;
113 } 113 }
114 114
115 memset(bcb, 0, sizeof(bcb)); 115 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
116 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 116 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
117 bcb->DataTransferLength = 0x200; 117 bcb->DataTransferLength = 0x200;
118 bcb->Flags = 0x80; 118 bcb->Flags = 0x80;
@@ -161,7 +161,7 @@ int ENE_MSInit(struct us_data *us)
161 return USB_STOR_TRANSPORT_ERROR; 161 return USB_STOR_TRANSPORT_ERROR;
162 } 162 }
163 163
164 memset(bcb, 0, sizeof(bcb)); 164 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
165 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 165 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
166 bcb->DataTransferLength = 0x200; 166 bcb->DataTransferLength = 0x200;
167 bcb->Flags = 0x80; 167 bcb->Flags = 0x80;
@@ -219,7 +219,7 @@ int ENE_SMInit(struct us_data *us)
219 return USB_STOR_TRANSPORT_ERROR; 219 return USB_STOR_TRANSPORT_ERROR;
220 } 220 }
221 221
222 memset(bcb, 0, sizeof(bcb)); 222 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
223 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 223 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
224 bcb->DataTransferLength = 0x200; 224 bcb->DataTransferLength = 0x200;
225 bcb->Flags = 0x80; 225 bcb->Flags = 0x80;
@@ -341,7 +341,7 @@ int ENE_LoadBinCode(struct us_data *us, BYTE flag)
341 break; 341 break;
342 } 342 }
343 343
344 memset(bcb, 0, sizeof(bcb)); 344 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
345 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 345 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
346 bcb->DataTransferLength = 0x800; 346 bcb->DataTransferLength = 0x800;
347 bcb->Flags =0x00; 347 bcb->Flags =0x00;
@@ -433,7 +433,7 @@ int ENE_Read_Data(struct us_data *us, void *buf, unsigned int length)
433 433
434 //printk("transport --- ENE_Read_Data\n"); 434 //printk("transport --- ENE_Read_Data\n");
435 // set up the command wrapper 435 // set up the command wrapper
436 memset(bcb, 0, sizeof(bcb)); 436 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
437 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 437 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
438 bcb->DataTransferLength = length; 438 bcb->DataTransferLength = length;
439 bcb->Flags =0x80; 439 bcb->Flags =0x80;
@@ -470,7 +470,7 @@ int ENE_Write_Data(struct us_data *us, void *buf, unsigned int length)
470 470
471 //printk("transport --- ENE_Write_Data\n"); 471 //printk("transport --- ENE_Write_Data\n");
472 // set up the command wrapper 472 // set up the command wrapper
473 memset(bcb, 0, sizeof(bcb)); 473 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
474 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 474 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
475 bcb->DataTransferLength = length; 475 bcb->DataTransferLength = length;
476 bcb->Flags =0x00; 476 bcb->Flags =0x00;
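The recurring one-line fix in this and the following keucr files corrects a classic C mistake: bcb is a pointer, so sizeof(bcb) is the size of the pointer (4 or 8 bytes), leaving most of the command wrapper uncleared. A self-contained sketch, using a stand-in struct rather than the real bulk_cb_wrap layout:

#include <string.h>

struct wrapper {
	unsigned int signature;
	unsigned int tag;
	unsigned int data_len;
	unsigned char flags;
	unsigned char cdb[16];
};

static void clear_wrapper(struct wrapper *bcb)
{
	memset(bcb, 0, sizeof(bcb));		/* BUG: clears only 4 or 8 bytes */
	memset(bcb, 0, sizeof(*bcb));		/* correct: clears the whole struct */
	memset(bcb, 0, sizeof(struct wrapper));	/* equivalent; the form the patch uses */
}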
diff --git a/drivers/staging/keucr/ms.c b/drivers/staging/keucr/ms.c
index d4340a9da87d..9a3fdb4e4fe4 100644
--- a/drivers/staging/keucr/ms.c
+++ b/drivers/staging/keucr/ms.c
@@ -15,7 +15,7 @@ int MS_ReaderCopyBlock(struct us_data *us, WORD oldphy, WORD newphy, WORD PhyBlo
15 if (result != USB_STOR_XFER_GOOD) 15 if (result != USB_STOR_XFER_GOOD)
16 return USB_STOR_TRANSPORT_ERROR; 16 return USB_STOR_TRANSPORT_ERROR;
17 17
18 memset(bcb, 0, sizeof(bcb)); 18 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
19 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 19 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
20 bcb->DataTransferLength = 0x200*len; 20 bcb->DataTransferLength = 0x200*len;
21 bcb->Flags = 0x00; 21 bcb->Flags = 0x00;
@@ -53,7 +53,7 @@ int MS_ReaderReadPage(struct us_data *us, DWORD PhyBlockAddr, BYTE PageNum, PDWO
53 return USB_STOR_TRANSPORT_ERROR; 53 return USB_STOR_TRANSPORT_ERROR;
54 54
55 // Read Page Data 55 // Read Page Data
56 memset(bcb, 0, sizeof(bcb)); 56 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
57 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 57 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
58 bcb->DataTransferLength = 0x200; 58 bcb->DataTransferLength = 0x200;
59 bcb->Flags = 0x80; 59 bcb->Flags = 0x80;
@@ -69,7 +69,7 @@ int MS_ReaderReadPage(struct us_data *us, DWORD PhyBlockAddr, BYTE PageNum, PDWO
69 return USB_STOR_TRANSPORT_ERROR; 69 return USB_STOR_TRANSPORT_ERROR;
70 70
71 // Read Extra Data 71 // Read Extra Data
72 memset(bcb, 0, sizeof(bcb)); 72 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
73 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 73 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
74 bcb->DataTransferLength = 0x4; 74 bcb->DataTransferLength = 0x4;
75 bcb->Flags = 0x80; 75 bcb->Flags = 0x80;
@@ -108,7 +108,7 @@ int MS_ReaderEraseBlock(struct us_data *us, DWORD PhyBlockAddr)
108 if (result != USB_STOR_XFER_GOOD) 108 if (result != USB_STOR_XFER_GOOD)
109 return USB_STOR_TRANSPORT_ERROR; 109 return USB_STOR_TRANSPORT_ERROR;
110 110
111 memset(bcb, 0, sizeof(bcb)); 111 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
112 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 112 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
113 bcb->DataTransferLength = 0x200; 113 bcb->DataTransferLength = 0x200;
114 bcb->Flags = 0x80; 114 bcb->Flags = 0x80;
@@ -673,7 +673,7 @@ int MS_LibReadExtraBlock(struct us_data *us, DWORD PhyBlock, BYTE PageNum, BYTE
673 //printk("MS_LibReadExtraBlock --- PhyBlock = %x, PageNum = %x, blen = %x\n", PhyBlock, PageNum, blen); 673 //printk("MS_LibReadExtraBlock --- PhyBlock = %x, PageNum = %x, blen = %x\n", PhyBlock, PageNum, blen);
674 674
675 // Read Extra Data 675 // Read Extra Data
676 memset(bcb, 0, sizeof(bcb)); 676 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
677 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 677 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
678 bcb->DataTransferLength = 0x4 * blen; 678 bcb->DataTransferLength = 0x4 * blen;
679 bcb->Flags = 0x80; 679 bcb->Flags = 0x80;
@@ -700,7 +700,7 @@ int MS_LibReadExtra(struct us_data *us, DWORD PhyBlock, BYTE PageNum, MS_LibType
700 BYTE ExtBuf[4]; 700 BYTE ExtBuf[4];
701 701
702 //printk("MS_LibReadExtra --- PhyBlock = %x, PageNum = %x\n", PhyBlock, PageNum); 702 //printk("MS_LibReadExtra --- PhyBlock = %x, PageNum = %x\n", PhyBlock, PageNum);
703 memset(bcb, 0, sizeof(bcb)); 703 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
704 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 704 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
705 bcb->DataTransferLength = 0x4; 705 bcb->DataTransferLength = 0x4;
706 bcb->Flags = 0x80; 706 bcb->Flags = 0x80;
@@ -807,7 +807,7 @@ int MS_LibOverwriteExtra(struct us_data *us, DWORD PhyBlockAddr, BYTE PageNum, B
807 if (result != USB_STOR_XFER_GOOD) 807 if (result != USB_STOR_XFER_GOOD)
808 return USB_STOR_TRANSPORT_ERROR; 808 return USB_STOR_TRANSPORT_ERROR;
809 809
810 memset(bcb, 0, sizeof(bcb)); 810 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
811 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 811 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
812 bcb->DataTransferLength = 0x4; 812 bcb->DataTransferLength = 0x4;
813 bcb->Flags = 0x80; 813 bcb->Flags = 0x80;
diff --git a/drivers/staging/keucr/msscsi.c b/drivers/staging/keucr/msscsi.c
index ad0c5c629935..cb92d25acee0 100644
--- a/drivers/staging/keucr/msscsi.c
+++ b/drivers/staging/keucr/msscsi.c
@@ -145,7 +145,7 @@ int MS_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
145 } 145 }
146 146
147 // set up the command wrapper 147 // set up the command wrapper
148 memset(bcb, 0, sizeof(bcb)); 148 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
149 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 149 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
150 bcb->DataTransferLength = blenByte; 150 bcb->DataTransferLength = blenByte;
151 bcb->Flags = 0x80; 151 bcb->Flags = 0x80;
@@ -193,7 +193,7 @@ int MS_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
193 blkno = phyblk * 0x20 + PageNum; 193 blkno = phyblk * 0x20 + PageNum;
194 194
195 // set up the command wrapper 195 // set up the command wrapper
196 memset(bcb, 0, sizeof(bcb)); 196 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
197 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 197 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
198 bcb->DataTransferLength = 0x200 * len; 198 bcb->DataTransferLength = 0x200 * len;
199 bcb->Flags = 0x80; 199 bcb->Flags = 0x80;
@@ -250,7 +250,7 @@ int MS_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb)
250 } 250 }
251 251
252 // set up the command wrapper 252 // set up the command wrapper
253 memset(bcb, 0, sizeof(bcb)); 253 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
254 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 254 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
255 bcb->DataTransferLength = blenByte; 255 bcb->DataTransferLength = blenByte;
256 bcb->Flags = 0x00; 256 bcb->Flags = 0x00;
diff --git a/drivers/staging/keucr/sdscsi.c b/drivers/staging/keucr/sdscsi.c
index 6c332f850ebe..d646507a3611 100644
--- a/drivers/staging/keucr/sdscsi.c
+++ b/drivers/staging/keucr/sdscsi.c
@@ -152,7 +152,7 @@ int SD_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
152 bnByte = bn; 152 bnByte = bn;
153 153
154 // set up the command wrapper 154 // set up the command wrapper
155 memset(bcb, 0, sizeof(bcb)); 155 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
156 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 156 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
157 bcb->DataTransferLength = blenByte; 157 bcb->DataTransferLength = blenByte;
158 bcb->Flags = 0x80; 158 bcb->Flags = 0x80;
@@ -192,7 +192,7 @@ int SD_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb)
192 bnByte = bn; 192 bnByte = bn;
193 193
194 // set up the command wrapper 194 // set up the command wrapper
195 memset(bcb, 0, sizeof(bcb)); 195 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
196 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 196 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
197 bcb->DataTransferLength = blenByte; 197 bcb->DataTransferLength = blenByte;
198 bcb->Flags = 0x00; 198 bcb->Flags = 0x00;
diff --git a/drivers/staging/keucr/smilsub.c b/drivers/staging/keucr/smilsub.c
index 844b65988636..1b52535a388f 100644
--- a/drivers/staging/keucr/smilsub.c
+++ b/drivers/staging/keucr/smilsub.c
@@ -266,7 +266,7 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf,BYTE *redundant)
266 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; 266 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
267 267
268 // Read sect data 268 // Read sect data
269 memset(bcb, 0, sizeof(bcb)); 269 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
270 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 270 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
271 bcb->DataTransferLength = 0x200; 271 bcb->DataTransferLength = 0x200;
272 bcb->Flags = 0x80; 272 bcb->Flags = 0x80;
@@ -281,7 +281,7 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf,BYTE *redundant)
281 return USB_STOR_TRANSPORT_ERROR; 281 return USB_STOR_TRANSPORT_ERROR;
282 282
283 // Read redundant 283 // Read redundant
284 memset(bcb, 0, sizeof(bcb)); 284 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
285 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 285 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
286 bcb->DataTransferLength = 0x10; 286 bcb->DataTransferLength = 0x10;
287 bcb->Flags = 0x80; 287 bcb->Flags = 0x80;
@@ -319,7 +319,7 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant)
319 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; 319 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
320 320
321 // Read sect data 321 // Read sect data
322 memset(bcb, 0, sizeof(bcb)); 322 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
323 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 323 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
324 bcb->DataTransferLength = 0x200*count; 324 bcb->DataTransferLength = 0x200*count;
325 bcb->Flags = 0x80; 325 bcb->Flags = 0x80;
@@ -334,7 +334,7 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant)
334 return USB_STOR_TRANSPORT_ERROR; 334 return USB_STOR_TRANSPORT_ERROR;
335 335
336 // Read redundant 336 // Read redundant
337 memset(bcb, 0, sizeof(bcb)); 337 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
338 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 338 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
339 bcb->DataTransferLength = 0x10; 339 bcb->DataTransferLength = 0x10;
340 bcb->Flags = 0x80; 340 bcb->Flags = 0x80;
@@ -536,7 +536,7 @@ int Ssfdc_D_CopyBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant)
536 WriteAddr = WriteAddr*(WORD)Ssfdc.MaxSectors; 536 WriteAddr = WriteAddr*(WORD)Ssfdc.MaxSectors;
537 537
538 // Write sect data 538 // Write sect data
539 memset(bcb, 0, sizeof(bcb)); 539 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
540 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 540 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
541 bcb->DataTransferLength = 0x200*count; 541 bcb->DataTransferLength = 0x200*count;
542 bcb->Flags = 0x00; 542 bcb->Flags = 0x00;
@@ -754,7 +754,7 @@ int Ssfdc_D_WriteSectForCopy(struct us_data *us, BYTE *buf, BYTE *redundant)
754 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; 754 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
755 755
756 // Write sect data 756 // Write sect data
757 memset(bcb, 0, sizeof(bcb)); 757 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
758 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 758 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
759 bcb->DataTransferLength = 0x200; 759 bcb->DataTransferLength = 0x200;
760 bcb->Flags = 0x00; 760 bcb->Flags = 0x00;
@@ -791,7 +791,7 @@ int Ssfdc_D_EraseBlock(struct us_data *us)
791 addr=(WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock; 791 addr=(WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
792 addr=addr*(WORD)Ssfdc.MaxSectors; 792 addr=addr*(WORD)Ssfdc.MaxSectors;
793 793
794 memset(bcb, 0, sizeof(bcb)); 794 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
795 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 795 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
796 bcb->DataTransferLength = 0x200; 796 bcb->DataTransferLength = 0x200;
797 bcb->Flags = 0x80; 797 bcb->Flags = 0x80;
@@ -827,7 +827,7 @@ int Ssfdc_D_ReadRedtData(struct us_data *us, BYTE *redundant)
827 addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock; 827 addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
828 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; 828 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
829 829
830 memset(bcb, 0, sizeof(bcb)); 830 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
831 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 831 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
832 bcb->DataTransferLength = 0x10; 832 bcb->DataTransferLength = 0x10;
833 bcb->Flags = 0x80; 833 bcb->Flags = 0x80;
@@ -870,7 +870,7 @@ int Ssfdc_D_WriteRedtData(struct us_data *us, BYTE *redundant)
870 addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock; 870 addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
871 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; 871 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
872 872
873 memset(bcb, 0, sizeof(bcb)); 873 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
874 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 874 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
875 bcb->DataTransferLength = 0x10; 875 bcb->DataTransferLength = 0x10;
876 bcb->Flags = 0x80; 876 bcb->Flags = 0x80;
diff --git a/drivers/staging/keucr/transport.c b/drivers/staging/keucr/transport.c
index fd98df643ab0..111160cce441 100644
--- a/drivers/staging/keucr/transport.c
+++ b/drivers/staging/keucr/transport.c
@@ -40,7 +40,7 @@ static int usb_stor_msg_common(struct us_data *us, int timeout)
40 us->current_urb->error_count = 0; 40 us->current_urb->error_count = 0;
41 us->current_urb->status = 0; 41 us->current_urb->status = 0;
42 42
43// us->current_urb->transfer_flags = URB_NO_SETUP_DMA_MAP; 43 us->current_urb->transfer_flags = 0;
44 if (us->current_urb->transfer_buffer == us->iobuf) 44 if (us->current_urb->transfer_buffer == us->iobuf)
45 us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 45 us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
46 us->current_urb->transfer_dma = us->iobuf_dma; 46 us->current_urb->transfer_dma = us->iobuf_dma;
diff --git a/drivers/staging/rt2860/common/cmm_aes.c b/drivers/staging/rt2860/common/cmm_aes.c
index 1d159ff82fd2..a99879bada42 100644
--- a/drivers/staging/rt2860/common/cmm_aes.c
+++ b/drivers/staging/rt2860/common/cmm_aes.c
@@ -330,8 +330,6 @@ void construct_mic_iv(unsigned char *mic_iv,
330 for (i = 8; i < 14; i++) 330 for (i = 8; i < 14; i++)
331 mic_iv[i] = pn_vector[13 - i]; /* mic_iv[8:13] = PN[5:0] */ 331 mic_iv[i] = pn_vector[13 - i]; /* mic_iv[8:13] = PN[5:0] */
332#endif 332#endif
333 i = (payload_length / 256);
334 i = (payload_length % 256);
335 mic_iv[14] = (unsigned char)(payload_length / 256); 333 mic_iv[14] = (unsigned char)(payload_length / 256);
336 mic_iv[15] = (unsigned char)(payload_length % 256); 334 mic_iv[15] = (unsigned char)(payload_length % 256);
337 335
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
index ebf9074a9083..ddacfc6c4861 100644
--- a/drivers/staging/rt2860/usb_main_dev.c
+++ b/drivers/staging/rt2860/usb_main_dev.c
@@ -65,6 +65,7 @@ struct usb_device_id rtusb_usb_id[] = {
65 {USB_DEVICE(0x14B2, 0x3C07)}, /* AL */ 65 {USB_DEVICE(0x14B2, 0x3C07)}, /* AL */
66 {USB_DEVICE(0x050D, 0x8053)}, /* Belkin */ 66 {USB_DEVICE(0x050D, 0x8053)}, /* Belkin */
67 {USB_DEVICE(0x050D, 0x825B)}, /* Belkin */ 67 {USB_DEVICE(0x050D, 0x825B)}, /* Belkin */
68 {USB_DEVICE(0x050D, 0x935A)}, /* Belkin F6D4050 v1 */
68 {USB_DEVICE(0x050D, 0x935B)}, /* Belkin F6D4050 v2 */ 69 {USB_DEVICE(0x050D, 0x935B)}, /* Belkin F6D4050 v2 */
69 {USB_DEVICE(0x14B2, 0x3C23)}, /* Airlink */ 70 {USB_DEVICE(0x14B2, 0x3C23)}, /* Airlink */
70 {USB_DEVICE(0x14B2, 0x3C27)}, /* Airlink */ 71 {USB_DEVICE(0x14B2, 0x3C27)}, /* Airlink */
diff --git a/drivers/staging/rtl8192e/r8192E_core.c b/drivers/staging/rtl8192e/r8192E_core.c
index a202194b5cbb..b1786dcac245 100644
--- a/drivers/staging/rtl8192e/r8192E_core.c
+++ b/drivers/staging/rtl8192e/r8192E_core.c
@@ -5829,6 +5829,9 @@ static void rtl8192_rx(struct net_device *dev)
5829 } 5829 }
5830 } 5830 }
5831 5831
5832 pci_unmap_single(priv->pdev, *((dma_addr_t *) skb->cb),
5833 priv->rxbuffersize, PCI_DMA_FROMDEVICE);
5834
5832 skb = new_skb; 5835 skb = new_skb;
5833 priv->rx_buf[priv->rx_idx] = skb; 5836 priv->rx_buf[priv->rx_idx] = skb;
5834 *((dma_addr_t *) skb->cb) = pci_map_single(priv->pdev, skb_tail_pointer(skb), priv->rxbuffersize, PCI_DMA_FROMDEVICE); 5837 *((dma_addr_t *) skb->cb) = pci_map_single(priv->pdev, skb_tail_pointer(skb), priv->rxbuffersize, PCI_DMA_FROMDEVICE);
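The added pci_unmap_single() restores the invariant that every pci_map_single() is balanced before its buffer is replaced; without it, the old skb's DMA mapping leaked on every receive refill. A sketch of the refill pattern, assuming the driver's convention of stashing the DMA handle in skb->cb:

#include <linux/pci.h>
#include <linux/skbuff.h>

static void refill_rx_slot(struct pci_dev *pdev, struct sk_buff **slot,
			   struct sk_buff *new_skb, unsigned int buf_len)
{
	/* Unmap the buffer we are about to stop using... */
	pci_unmap_single(pdev, *((dma_addr_t *)(*slot)->cb),
			 buf_len, PCI_DMA_FROMDEVICE);

	/* ...then install and map its replacement. */
	*slot = new_skb;
	*((dma_addr_t *)new_skb->cb) =
		pci_map_single(pdev, skb_tail_pointer(new_skb),
			       buf_len, PCI_DMA_FROMDEVICE);
}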
diff --git a/drivers/staging/stradis/stradis.c b/drivers/staging/stradis/stradis.c
index a057824e7ebc..807dd7eb748f 100644
--- a/drivers/staging/stradis/stradis.c
+++ b/drivers/staging/stradis/stradis.c
@@ -1286,6 +1286,7 @@ static long saa_ioctl(struct file *file,
1286 case VIDIOCGCAP: 1286 case VIDIOCGCAP:
1287 { 1287 {
1288 struct video_capability b; 1288 struct video_capability b;
1289 memset(&b, 0, sizeof(b));
1289 strcpy(b.name, saa->video_dev.name); 1290 strcpy(b.name, saa->video_dev.name);
1290 b.type = VID_TYPE_CAPTURE | VID_TYPE_OVERLAY | 1291 b.type = VID_TYPE_CAPTURE | VID_TYPE_OVERLAY |
1291 VID_TYPE_CLIPPING | VID_TYPE_FRAMERAM | 1292 VID_TYPE_CLIPPING | VID_TYPE_FRAMERAM |
@@ -1416,6 +1417,7 @@ static long saa_ioctl(struct file *file,
1416 case VIDIOCGWIN: 1417 case VIDIOCGWIN:
1417 { 1418 {
1418 struct video_window vw; 1419 struct video_window vw;
1420 memset(&vw, 0, sizeof(vw));
1419 vw.x = saa->win.x; 1421 vw.x = saa->win.x;
1420 vw.y = saa->win.y; 1422 vw.y = saa->win.y;
1421 vw.width = saa->win.width; 1423 vw.width = saa->win.width;
@@ -1448,6 +1450,7 @@ static long saa_ioctl(struct file *file,
1448 case VIDIOCGFBUF: 1450 case VIDIOCGFBUF:
1449 { 1451 {
1450 struct video_buffer v; 1452 struct video_buffer v;
1453 memset(&v, 0, sizeof(v));
1451 v.base = (void *)saa->win.vidadr; 1454 v.base = (void *)saa->win.vidadr;
1452 v.height = saa->win.sheight; 1455 v.height = saa->win.sheight;
1453 v.width = saa->win.swidth; 1456 v.width = saa->win.swidth;
@@ -1492,6 +1495,7 @@ static long saa_ioctl(struct file *file,
1492 case VIDIOCGAUDIO: 1495 case VIDIOCGAUDIO:
1493 { 1496 {
1494 struct video_audio v; 1497 struct video_audio v;
1498 memset(&v, 0, sizeof(v));
1495 v = saa->audio_dev; 1499 v = saa->audio_dev;
1496 v.flags &= ~(VIDEO_AUDIO_MUTE | VIDEO_AUDIO_MUTABLE); 1500 v.flags &= ~(VIDEO_AUDIO_MUTE | VIDEO_AUDIO_MUTABLE);
1497 v.flags |= VIDEO_AUDIO_MUTABLE | VIDEO_AUDIO_VOLUME; 1501 v.flags |= VIDEO_AUDIO_MUTABLE | VIDEO_AUDIO_VOLUME;
@@ -1534,6 +1538,7 @@ static long saa_ioctl(struct file *file,
1534 case VIDIOCGUNIT: 1538 case VIDIOCGUNIT:
1535 { 1539 {
1536 struct video_unit vu; 1540 struct video_unit vu;
1541 memset(&vu, 0, sizeof(vu));
1537 vu.video = saa->video_dev.minor; 1542 vu.video = saa->video_dev.minor;
1538 vu.vbi = VIDEO_NO_UNIT; 1543 vu.vbi = VIDEO_NO_UNIT;
1539 vu.radio = VIDEO_NO_UNIT; 1544 vu.radio = VIDEO_NO_UNIT;
@@ -1888,6 +1893,7 @@ static int saa_open(struct file *file)
1888 1893
1889 saa->user++; 1894 saa->user++;
1890 if (saa->user > 1) { 1895 if (saa->user > 1) {
1896 saa->user--;
1891 unlock_kernel(); 1897 unlock_kernel();
1892 return 0; /* device open already, don't reset */ 1898 return 0; /* device open already, don't reset */
1893 } 1899 }
@@ -2000,10 +2006,13 @@ static int __devinit configure_saa7146(struct pci_dev *pdev, int num)
2000 if (retval < 0) { 2006 if (retval < 0) {
2001 dev_err(&pdev->dev, "%d: error in registering video device!\n", 2007 dev_err(&pdev->dev, "%d: error in registering video device!\n",
2002 num); 2008 num);
2003 goto errio; 2009 goto errirq;
2004 } 2010 }
2005 2011
2006 return 0; 2012 return 0;
2013
2014errirq:
2015 free_irq(saa->irq, saa);
2007errio: 2016errio:
2008 iounmap(saa->saa7146_mem); 2017 iounmap(saa->saa7146_mem);
2009err: 2018err:
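The memsets added to these VIDIOCG* cases close an information leak: a struct on the kernel stack contains padding bytes and any fields the handler never assigns, and copy_to_user() copies those bytes to user space verbatim. A hedged sketch with a hypothetical reply struct:

#include <linux/string.h>
#include <linux/uaccess.h>

struct win_reply {
	int x, y;
	int width, height;
	int flags;		/* suppose the handler never sets this */
};

static long report_window(void __user *arg, int x, int y, int w, int h)
{
	struct win_reply vw;

	memset(&vw, 0, sizeof(vw));	/* zero padding and unset fields */
	vw.x = x;
	vw.y = y;
	vw.width = w;
	vw.height = h;
	if (copy_to_user(arg, &vw, sizeof(vw)))
		return -EFAULT;	/* without the memset, stack data leaks */
	return 0;
}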
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index ff64d464143c..93de4f2e8bf8 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -6,7 +6,6 @@ menuconfig TIDSPBRIDGE
6 tristate "DSP Bridge driver" 6 tristate "DSP Bridge driver"
7 depends on ARCH_OMAP3 7 depends on ARCH_OMAP3
8 select OMAP_MBOX_FWK 8 select OMAP_MBOX_FWK
9 select OMAP_IOMMU
10 help 9 help
11 DSP/BIOS Bridge is designed for platforms that contain a GPP and 10 DSP/BIOS Bridge is designed for platforms that contain a GPP and
12 one or more attached DSPs. The GPP is considered the master or 11 one or more attached DSPs. The GPP is considered the master or
diff --git a/drivers/staging/tidspbridge/Makefile b/drivers/staging/tidspbridge/Makefile
index 50decc2935c5..41c644c3318f 100644
--- a/drivers/staging/tidspbridge/Makefile
+++ b/drivers/staging/tidspbridge/Makefile
@@ -2,18 +2,19 @@ obj-$(CONFIG_TIDSPBRIDGE) += bridgedriver.o
2 2
3libgen = gen/gb.o gen/gs.o gen/gh.o gen/uuidutil.o 3libgen = gen/gb.o gen/gs.o gen/gh.o gen/uuidutil.o
4libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \ 4libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \
5 core/tiomap3430_pwr.o core/tiomap_io.o core/dsp-mmu.o \ 5 core/tiomap3430_pwr.o core/tiomap_io.o \
6 core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o 6 core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o
7libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/dspapi.o \ 7libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/dspapi.o \
8 pmgr/cmm.o pmgr/dbll.o 8 pmgr/dmm.o pmgr/cmm.o pmgr/dbll.o
9librmgr = rmgr/dbdcd.o rmgr/disp.o rmgr/drv.o rmgr/mgr.o rmgr/node.o \ 9librmgr = rmgr/dbdcd.o rmgr/disp.o rmgr/drv.o rmgr/mgr.o rmgr/node.o \
10 rmgr/proc.o rmgr/pwr.o rmgr/rmm.o rmgr/strm.o rmgr/dspdrv.o \ 10 rmgr/proc.o rmgr/pwr.o rmgr/rmm.o rmgr/strm.o rmgr/dspdrv.o \
11 rmgr/nldr.o rmgr/drv_interface.o 11 rmgr/nldr.o rmgr/drv_interface.o
12libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \ 12libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \
13 dynload/tramp.o 13 dynload/tramp.o
14libhw = hw/hw_mmu.o
14 15
15bridgedriver-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \ 16bridgedriver-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \
16 $(libdload) 17 $(libdload) $(libhw)
17 18
18#Machine dependent 19#Machine dependent
19ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \ 20ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \
diff --git a/drivers/staging/tidspbridge/core/_deh.h b/drivers/staging/tidspbridge/core/_deh.h
index 8ae263387a87..16723cd34831 100644
--- a/drivers/staging/tidspbridge/core/_deh.h
+++ b/drivers/staging/tidspbridge/core/_deh.h
@@ -27,8 +27,9 @@
27struct deh_mgr { 27struct deh_mgr {
28 struct bridge_dev_context *hbridge_context; /* Bridge context. */ 28 struct bridge_dev_context *hbridge_context; /* Bridge context. */
29 struct ntfy_object *ntfy_obj; /* NTFY object */ 29 struct ntfy_object *ntfy_obj; /* NTFY object */
30};
31 30
32int mmu_fault_isr(struct iommu *mmu); 31 /* MMU Fault DPC */
32 struct tasklet_struct dpc_tasklet;
33};
33 34
34#endif /* _DEH_ */ 35#endif /* _DEH_ */
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h
index e0a801c1cb98..1c1f157e167a 100644
--- a/drivers/staging/tidspbridge/core/_tiomap.h
+++ b/drivers/staging/tidspbridge/core/_tiomap.h
@@ -23,8 +23,8 @@
23#include <plat/clockdomain.h> 23#include <plat/clockdomain.h>
24#include <mach-omap2/prm-regbits-34xx.h> 24#include <mach-omap2/prm-regbits-34xx.h>
25#include <mach-omap2/cm-regbits-34xx.h> 25#include <mach-omap2/cm-regbits-34xx.h>
26#include <dspbridge/dsp-mmu.h>
27#include <dspbridge/devdefs.h> 26#include <dspbridge/devdefs.h>
27#include <hw_defs.h>
28#include <dspbridge/dspioctl.h> /* for bridge_ioctl_extproc defn */ 28#include <dspbridge/dspioctl.h> /* for bridge_ioctl_extproc defn */
29#include <dspbridge/sync.h> 29#include <dspbridge/sync.h>
30#include <dspbridge/clk.h> 30#include <dspbridge/clk.h>
@@ -306,18 +306,6 @@ static const struct bpwr_clk_t bpwr_clks[] = {
306 306
307#define CLEAR_BIT_INDEX(reg, index) (reg &= ~(1 << (index))) 307#define CLEAR_BIT_INDEX(reg, index) (reg &= ~(1 << (index)))
308 308
309struct shm_segs {
310 u32 seg0_da;
311 u32 seg0_pa;
312 u32 seg0_va;
313 u32 seg0_size;
314 u32 seg1_da;
315 u32 seg1_pa;
316 u32 seg1_va;
317 u32 seg1_size;
318};
319
320
321/* This Bridge driver's device context: */ 309/* This Bridge driver's device context: */
322struct bridge_dev_context { 310struct bridge_dev_context {
323 struct dev_object *hdev_obj; /* Handle to Bridge device object. */ 311 struct dev_object *hdev_obj; /* Handle to Bridge device object. */
@@ -328,6 +316,7 @@ struct bridge_dev_context {
328 */ 316 */
329 u32 dw_dsp_ext_base_addr; /* See the comment above */ 317 u32 dw_dsp_ext_base_addr; /* See the comment above */
330 u32 dw_api_reg_base; /* API mem map'd registers */ 318 u32 dw_api_reg_base; /* API mem map'd registers */
319 void __iomem *dw_dsp_mmu_base; /* DSP MMU Mapped registers */
331 u32 dw_api_clk_base; /* CLK Registers */ 320 u32 dw_api_clk_base; /* CLK Registers */
332 u32 dw_dsp_clk_m2_base; /* DSP Clock Module m2 */ 321 u32 dw_dsp_clk_m2_base; /* DSP Clock Module m2 */
333 u32 dw_public_rhea; /* Pub Rhea */ 322 u32 dw_public_rhea; /* Pub Rhea */
@@ -339,8 +328,7 @@ struct bridge_dev_context {
339 u32 dw_internal_size; /* Internal memory size */ 328 u32 dw_internal_size; /* Internal memory size */
340 329
341 struct omap_mbox *mbox; /* Mail box handle */ 330 struct omap_mbox *mbox; /* Mail box handle */
342 struct iommu *dsp_mmu; /* iommu for iva2 handler */ 331
343 struct shm_segs sh_s;
344 struct cfg_hostres *resources; /* Host Resources */ 332 struct cfg_hostres *resources; /* Host Resources */
345 333
346 /* 334 /*
@@ -353,6 +341,7 @@ struct bridge_dev_context {
353 341
354 /* TC Settings */ 342 /* TC Settings */
355 bool tc_word_swap_on; /* Traffic Controller Word Swap */ 343 bool tc_word_swap_on; /* Traffic Controller Word Swap */
344 struct pg_table_attrs *pt_attrs;
356 u32 dsp_per_clks; 345 u32 dsp_per_clks;
357}; 346};
358 347
diff --git a/drivers/staging/tidspbridge/core/dsp-mmu.c b/drivers/staging/tidspbridge/core/dsp-mmu.c
deleted file mode 100644
index 983c95adc8ff..000000000000
--- a/drivers/staging/tidspbridge/core/dsp-mmu.c
+++ /dev/null
@@ -1,317 +0,0 @@
1/*
2 * dsp-mmu.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * DSP iommu.
7 *
8 * Copyright (C) 2010 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#include <dspbridge/host_os.h>
20#include <plat/dmtimer.h>
21#include <dspbridge/dbdefs.h>
22#include <dspbridge/dev.h>
23#include <dspbridge/io_sm.h>
24#include <dspbridge/dspdeh.h>
25#include "_tiomap.h"
26
27#include <dspbridge/dsp-mmu.h>
28
29#define MMU_CNTL_TWL_EN (1 << 2)
30
31static struct tasklet_struct mmu_tasklet;
32
33#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
34static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
35{
36 void *dummy_addr;
37 u32 fa, tmp;
38 struct iotlb_entry e;
39 struct iommu *mmu = dev_context->dsp_mmu;
40 dummy_addr = (void *)__get_free_page(GFP_ATOMIC);
41
42 /*
43 * Before acking the MMU fault, let's make sure MMU can only
44 * access entry #0. Then add a new entry so that the DSP OS
45 * can continue in order to dump the stack.
46 */
47 tmp = iommu_read_reg(mmu, MMU_CNTL);
48 tmp &= ~MMU_CNTL_TWL_EN;
49 iommu_write_reg(mmu, tmp, MMU_CNTL);
50 fa = iommu_read_reg(mmu, MMU_FAULT_AD);
51 e.da = fa & PAGE_MASK;
52 e.pa = virt_to_phys(dummy_addr);
53 e.valid = 1;
54 e.prsvd = 1;
55 e.pgsz = IOVMF_PGSZ_4K & MMU_CAM_PGSZ_MASK;
56 e.endian = MMU_RAM_ENDIAN_LITTLE;
57 e.elsz = MMU_RAM_ELSZ_32;
58 e.mixed = 0;
59
60 load_iotlb_entry(mmu, &e);
61
62 dsp_clk_enable(DSP_CLK_GPT8);
63
64 dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
65
66 /* Clear MMU interrupt */
67 tmp = iommu_read_reg(mmu, MMU_IRQSTATUS);
68 iommu_write_reg(mmu, tmp, MMU_IRQSTATUS);
69
70 dump_dsp_stack(dev_context);
71 dsp_clk_disable(DSP_CLK_GPT8);
72
73 iopgtable_clear_entry(mmu, fa);
74 free_page((unsigned long)dummy_addr);
75}
76#endif
77
78
79static void fault_tasklet(unsigned long data)
80{
81 struct iommu *mmu = (struct iommu *)data;
82 struct bridge_dev_context *dev_ctx;
83 struct deh_mgr *dm;
84 u32 fa;
85 dev_get_deh_mgr(dev_get_first(), &dm);
86 dev_get_bridge_context(dev_get_first(), &dev_ctx);
87
88 if (!dm || !dev_ctx)
89 return;
90
91 fa = iommu_read_reg(mmu, MMU_FAULT_AD);
92
93#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
94 print_dsp_trace_buffer(dev_ctx);
95 dump_dl_modules(dev_ctx);
96 mmu_fault_print_stack(dev_ctx);
97#endif
98
99 bridge_deh_notify(dm, DSP_MMUFAULT, fa);
100}
101
102/*
103 * ======== mmu_fault_isr ========
104 * ISR to be triggered by a DSP MMU fault interrupt.
105 */
106static int mmu_fault_callback(struct iommu *mmu)
107{
108 if (!mmu)
109 return -EPERM;
110
111 iommu_write_reg(mmu, 0, MMU_IRQENABLE);
112 tasklet_schedule(&mmu_tasklet);
113 return 0;
114}
115
116/**
117 * dsp_mmu_init() - initialize dsp_mmu module and returns a handle
118 *
119 * This function initialize dsp mmu module and returns a struct iommu
120 * handle to use it for dsp maps.
121 *
122 */
123struct iommu *dsp_mmu_init()
124{
125 struct iommu *mmu;
126
127 mmu = iommu_get("iva2");
128
129 if (!IS_ERR(mmu)) {
130 tasklet_init(&mmu_tasklet, fault_tasklet, (unsigned long)mmu);
131 mmu->isr = mmu_fault_callback;
132 }
133
134 return mmu;
135}
136
137/**
138 * dsp_mmu_exit() - destroy dsp mmu module
139 * @mmu: Pointer to iommu handle.
140 *
141 * This function destroys dsp mmu module.
142 *
143 */
144void dsp_mmu_exit(struct iommu *mmu)
145{
146 if (mmu)
147 iommu_put(mmu);
148 tasklet_kill(&mmu_tasklet);
149}
150
151/**
152 * user_va2_pa() - get physical address from userspace address.
153 * @mm: mm_struct Pointer of the process.
154 * @address: Virtual user space address.
155 *
156 */
157static u32 user_va2_pa(struct mm_struct *mm, u32 address)
158{
159 pgd_t *pgd;
160 pmd_t *pmd;
161 pte_t *ptep, pte;
162
163 pgd = pgd_offset(mm, address);
164 if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
165 pmd = pmd_offset(pgd, address);
166 if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
167 ptep = pte_offset_map(pmd, address);
168 if (ptep) {
169 pte = *ptep;
170 if (pte_present(pte))
171 return pte & PAGE_MASK;
172 }
173 }
174 }
175
176 return 0;
177}
178
179/**
180 * get_io_pages() - pin and get pages of io user's buffer.
181 * @mm: mm_struct Pointer of the process.
182 * @uva: Virtual user space address.
183 * @pages Pages to be pined.
184 * @usr_pgs struct page array pointer where the user pages will be stored
185 *
186 */
187static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages,
188 struct page **usr_pgs)
189{
190 u32 pa;
191 int i;
192 struct page *pg;
193
194 for (i = 0; i < pages; i++) {
195 pa = user_va2_pa(mm, uva);
196
197 if (!pfn_valid(__phys_to_pfn(pa)))
198 break;
199
200 pg = phys_to_page(pa);
201 usr_pgs[i] = pg;
202 get_page(pg);
203 }
204 return i;
205}
206
207/**
208 * user_to_dsp_map() - maps user to dsp virtual address
209 * @mmu: Pointer to iommu handle.
210 * @uva: Virtual user space address.
211 * @da DSP address
212 * @size Buffer size to map.
213 * @usr_pgs struct page array pointer where the user pages will be stored
214 *
215 * This function maps a user space buffer into DSP virtual address.
216 *
217 */
218u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
219 struct page **usr_pgs)
220{
221 int res, w;
222 unsigned pages;
223 int i;
224 struct vm_area_struct *vma;
225 struct mm_struct *mm = current->mm;
226 struct sg_table *sgt;
227 struct scatterlist *sg;
228
229 if (!size || !usr_pgs)
230 return -EINVAL;
231
232 pages = size / PG_SIZE4K;
233
234 down_read(&mm->mmap_sem);
235 vma = find_vma(mm, uva);
236 while (vma && (uva + size > vma->vm_end))
237 vma = find_vma(mm, vma->vm_end + 1);
238
239 if (!vma) {
240 pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
241 __func__, uva, size);
242 up_read(&mm->mmap_sem);
243 return -EINVAL;
244 }
245 if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
246 w = 1;
247
248 if (vma->vm_flags & VM_IO)
249 i = get_io_pages(mm, uva, pages, usr_pgs);
250 else
251 i = get_user_pages(current, mm, uva, pages, w, 1,
252 usr_pgs, NULL);
253 up_read(&mm->mmap_sem);
254
255 if (i < 0)
256 return i;
257
258 if (i < pages) {
259 res = -EFAULT;
260 goto err_pages;
261 }
262
263 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
264 if (!sgt) {
265 res = -ENOMEM;
266 goto err_pages;
267 }
268
269 res = sg_alloc_table(sgt, pages, GFP_KERNEL);
270
271 if (res < 0)
272 goto err_sg;
273
274 for_each_sg(sgt->sgl, sg, sgt->nents, i)
275 sg_set_page(sg, usr_pgs[i], PAGE_SIZE, 0);
276
277 da = iommu_vmap(mmu, da, sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
278
279 if (!IS_ERR_VALUE(da))
280 return da;
281 res = (int)da;
282
283 sg_free_table(sgt);
284err_sg:
285 kfree(sgt);
286 i = pages;
287err_pages:
288 while (i--)
289 put_page(usr_pgs[i]);
290 return res;
291}
292
293/**
294 * user_to_dsp_unmap() - unmaps DSP virtual buffer.
295 * @mmu: Pointer to iommu handle.
296 * @da DSP address
297 *
298 * This function unmaps a user space buffer into DSP virtual address.
299 *
300 */
301int user_to_dsp_unmap(struct iommu *mmu, u32 da)
302{
303 unsigned i;
304 struct sg_table *sgt;
305 struct scatterlist *sg;
306
307 sgt = iommu_vunmap(mmu, da);
308 if (!sgt)
309 return -EFAULT;
310
311 for_each_sg(sgt->sgl, sg, sgt->nents, i)
312 put_page(sg_page(sg));
313 sg_free_table(sgt);
314 kfree(sgt);
315
316 return 0;
317}
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index 194badaba0ed..571864555ddd 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -39,6 +39,10 @@
39#include <dspbridge/ntfy.h> 39#include <dspbridge/ntfy.h>
40#include <dspbridge/sync.h> 40#include <dspbridge/sync.h>
41 41
42/* Hardware Abstraction Layer */
43#include <hw_defs.h>
44#include <hw_mmu.h>
45
42/* Bridge Driver */ 46/* Bridge Driver */
43#include <dspbridge/dspdeh.h> 47#include <dspbridge/dspdeh.h>
44#include <dspbridge/dspio.h> 48#include <dspbridge/dspio.h>
@@ -287,7 +291,6 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
287 struct cod_manager *cod_man; 291 struct cod_manager *cod_man;
288 struct chnl_mgr *hchnl_mgr; 292 struct chnl_mgr *hchnl_mgr;
289 struct msg_mgr *hmsg_mgr; 293 struct msg_mgr *hmsg_mgr;
290 struct shm_segs *sm_sg;
291 u32 ul_shm_base; 294 u32 ul_shm_base;
292 u32 ul_shm_base_offset; 295 u32 ul_shm_base_offset;
293 u32 ul_shm_limit; 296 u32 ul_shm_limit;
@@ -310,9 +313,18 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
310 struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB]; 313 struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
311 struct cfg_hostres *host_res; 314 struct cfg_hostres *host_res;
312 struct bridge_dev_context *pbridge_context; 315 struct bridge_dev_context *pbridge_context;
316 u32 map_attrs;
313 u32 shm0_end; 317 u32 shm0_end;
314 u32 ul_dyn_ext_base; 318 u32 ul_dyn_ext_base;
315 u32 ul_seg1_size = 0; 319 u32 ul_seg1_size = 0;
320 u32 pa_curr = 0;
321 u32 va_curr = 0;
322 u32 gpp_va_curr = 0;
323 u32 num_bytes = 0;
324 u32 all_bits = 0;
325 u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
326 HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
327 };
316 328
317 status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context); 329 status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context);
318 if (!pbridge_context) { 330 if (!pbridge_context) {
@@ -325,8 +337,6 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
325 status = -EFAULT; 337 status = -EFAULT;
326 goto func_end; 338 goto func_end;
327 } 339 }
328 sm_sg = &pbridge_context->sh_s;
329
330 status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man); 340 status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man);
331 if (!cod_man) { 341 if (!cod_man) {
332 status = -EFAULT; 342 status = -EFAULT;
@@ -461,14 +471,129 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
461 if (status) 471 if (status)
462 goto func_end; 472 goto func_end;
463 473
464 sm_sg->seg1_pa = ul_gpp_pa; 474 pa_curr = ul_gpp_pa;
465 sm_sg->seg1_da = ul_dyn_ext_base; 475 va_curr = ul_dyn_ext_base * hio_mgr->word_size;
466 sm_sg->seg1_va = ul_gpp_va; 476 gpp_va_curr = ul_gpp_va;
467 sm_sg->seg1_size = ul_seg1_size; 477 num_bytes = ul_seg1_size;
468 sm_sg->seg0_pa = ul_gpp_pa + ul_pad_size + ul_seg1_size; 478
469 sm_sg->seg0_da = ul_dsp_va; 479 /*
470 sm_sg->seg0_va = ul_gpp_va + ul_pad_size + ul_seg1_size; 480 * Try to fit into TLB entries. If not possible, push them to page
471 sm_sg->seg0_size = ul_seg_size; 481 * tables. It is quite possible that if sections are not on
482 * bigger page boundary, we may end up making several small pages.
483 * So, push them onto page tables, if that is the case.
484 */
485 map_attrs = 0x00000000;
486 map_attrs = DSP_MAPLITTLEENDIAN;
487 map_attrs |= DSP_MAPPHYSICALADDR;
488 map_attrs |= DSP_MAPELEMSIZE32;
489 map_attrs |= DSP_MAPDONOTLOCK;
490
491 while (num_bytes) {
492 /*
493 * To find the max. page size with which both PA & VA are
494 * aligned.
495 */
496 all_bits = pa_curr | va_curr;
497 dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, "
498 "num_bytes %x\n", all_bits, pa_curr, va_curr,
499 num_bytes);
500 for (i = 0; i < 4; i++) {
501 if ((num_bytes >= page_size[i]) && ((all_bits &
502 (page_size[i] -
503 1)) == 0)) {
504 status =
505 hio_mgr->intf_fxns->
506 pfn_brd_mem_map(hio_mgr->hbridge_context,
507 pa_curr, va_curr,
508 page_size[i], map_attrs,
509 NULL);
510 if (status)
511 goto func_end;
512 pa_curr += page_size[i];
513 va_curr += page_size[i];
514 gpp_va_curr += page_size[i];
515 num_bytes -= page_size[i];
516 /*
517 * Don't try smaller sizes. Hopefully we have
518 * reached an address aligned to a bigger page
519 * size.
520 */
521 break;
522 }
523 }
524 }
525 pa_curr += ul_pad_size;
526 va_curr += ul_pad_size;
527 gpp_va_curr += ul_pad_size;
528
529 /* Configure the TLB entries for the next cacheable segment */
530 num_bytes = ul_seg_size;
531 va_curr = ul_dsp_va * hio_mgr->word_size;
532 while (num_bytes) {
533 /*
534 * To find the max. page size with which both PA & VA are
535 * aligned.
536 */
537 all_bits = pa_curr | va_curr;
538 dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, "
539 "va_curr %x, num_bytes %x\n", all_bits, pa_curr,
540 va_curr, num_bytes);
541 for (i = 0; i < 4; i++) {
542 if (!(num_bytes >= page_size[i]) ||
543 !((all_bits & (page_size[i] - 1)) == 0))
544 continue;
545 if (ndx < MAX_LOCK_TLB_ENTRIES) {
546 /*
547 * This is the physical address written to
548 * DSP MMU.
549 */
550 ae_proc[ndx].ul_gpp_pa = pa_curr;
551 /*
552 * This is the virtual uncached ioremapped
553 * address!!!
554 */
555 ae_proc[ndx].ul_gpp_va = gpp_va_curr;
556 ae_proc[ndx].ul_dsp_va =
557 va_curr / hio_mgr->word_size;
558 ae_proc[ndx].ul_size = page_size[i];
559 ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
560 ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
561 ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
562 dev_dbg(bridge, "shm MMU TLB entry PA %x"
563 " VA %x DSP_VA %x Size %x\n",
564 ae_proc[ndx].ul_gpp_pa,
565 ae_proc[ndx].ul_gpp_va,
566 ae_proc[ndx].ul_dsp_va *
567 hio_mgr->word_size, page_size[i]);
568 ndx++;
569 } else {
570 status =
571 hio_mgr->intf_fxns->
572 pfn_brd_mem_map(hio_mgr->hbridge_context,
573 pa_curr, va_curr,
574 page_size[i], map_attrs,
575 NULL);
576 dev_dbg(bridge,
577 "shm MMU PTE entry PA %x"
578 " VA %x DSP_VA %x Size %x\n",
579 ae_proc[ndx].ul_gpp_pa,
580 ae_proc[ndx].ul_gpp_va,
581 ae_proc[ndx].ul_dsp_va *
582 hio_mgr->word_size, page_size[i]);
583 if (status)
584 goto func_end;
585 }
586 pa_curr += page_size[i];
587 va_curr += page_size[i];
588 gpp_va_curr += page_size[i];
589 num_bytes -= page_size[i];
590 /*
591 * Don't try smaller sizes. Hopefully we have reached
592 * an address aligned to a bigger page size.
593 */
594 break;
595 }
596 }
472 597
473 /* 598 /*
474 * Copy remaining entries from CDB. All entries are 1 MB and 599 * Copy remaining entries from CDB. All entries are 1 MB and
@@ -509,12 +634,38 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
509 "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa, 634 "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa,
510 ae_proc[ndx].ul_dsp_va); 635 ae_proc[ndx].ul_dsp_va);
511 ndx++; 636 ndx++;
637 } else {
638 status = hio_mgr->intf_fxns->pfn_brd_mem_map
639 (hio_mgr->hbridge_context,
640 hio_mgr->ext_proc_info.ty_tlb[i].
641 ul_gpp_phys,
642 hio_mgr->ext_proc_info.ty_tlb[i].
643 ul_dsp_virt, 0x100000, map_attrs,
644 NULL);
512 } 645 }
513 } 646 }
514 if (status) 647 if (status)
515 goto func_end; 648 goto func_end;
516 } 649 }
517 650
651 map_attrs = 0x00000000;
 652 map_attrs |= DSP_MAPLITTLEENDIAN;
653 map_attrs |= DSP_MAPPHYSICALADDR;
654 map_attrs |= DSP_MAPELEMSIZE32;
655 map_attrs |= DSP_MAPDONOTLOCK;
656
657 /* Map the L4 peripherals */
658 i = 0;
659 while (l4_peripheral_table[i].phys_addr) {
660 status = hio_mgr->intf_fxns->pfn_brd_mem_map
661 (hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr,
662 l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
663 map_attrs, NULL);
664 if (status)
665 goto func_end;
666 i++;
667 }
668
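/*
 * The loop above stops at the first l4_peripheral_table entry whose
 * phys_addr is zero - a sentinel-terminated table of roughly the shape
 * below (entries illustrative, not the real OMAP3 L4 map):
 *
 *	struct map_ent { u32 phys_addr; u32 dsp_virt_addr; };
 *	static const struct map_ent l4_map[] = {
 *		{ 0x48000000, 0x11000000 },	hypothetical window
 *		{ 0x49000000, 0x11100000 },	hypothetical window
 *		{ 0 },				sentinel ends the walk
 *	};
 */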
518 for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) { 669 for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
519 ae_proc[i].ul_dsp_va = 0; 670 ae_proc[i].ul_dsp_va = 0;
520 ae_proc[i].ul_gpp_pa = 0; 671 ae_proc[i].ul_gpp_pa = 0;
@@ -537,12 +688,12 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
537 status = -EFAULT; 688 status = -EFAULT;
538 goto func_end; 689 goto func_end;
539 } else { 690 } else {
540 if (sm_sg->seg0_da > ul_shm_base) { 691 if (ae_proc[0].ul_dsp_va > ul_shm_base) {
541 status = -EPERM; 692 status = -EPERM;
542 goto func_end; 693 goto func_end;
543 } 694 }
544 /* ul_shm_base may not be at ul_dsp_va address */ 695 /* ul_shm_base may not be at ul_dsp_va address */
545 ul_shm_base_offset = (ul_shm_base - sm_sg->seg0_da) * 696 ul_shm_base_offset = (ul_shm_base - ae_proc[0].ul_dsp_va) *
546 hio_mgr->word_size; 697 hio_mgr->word_size;
547 /* 698 /*
548 * bridge_dev_ctrl() will set dev context dsp-mmu info. In 699 * bridge_dev_ctrl() will set dev context dsp-mmu info. In
@@ -566,7 +717,8 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
566 goto func_end; 717 goto func_end;
567 } 718 }
568 /* Register SM */ 719 /* Register SM */
569 status = register_shm_segs(hio_mgr, cod_man, sm_sg->seg0_pa); 720 status =
721 register_shm_segs(hio_mgr, cod_man, ae_proc[0].ul_gpp_pa);
570 } 722 }
571 723
572 hio_mgr->shared_mem = (struct shm *)ul_shm_base; 724 hio_mgr->shared_mem = (struct shm *)ul_shm_base;
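Both mapping loops in the hunk above share one selection rule: at every step, pick the largest DSP-MMU page (16MB, 1MB, 64KB or 4KB) to which the current PA and VA are both aligned and which still fits in the remaining byte count. A standalone sketch of that rule, with illustrative addresses:

#include <stdio.h>

static const unsigned int page_size[4] = {
	0x1000000,	/* 16MB supersection */
	0x100000,	/* 1MB section */
	0x10000,	/* 64KB large page */
	0x1000,		/* 4KB small page */
};

static unsigned int pick_page_size(unsigned int pa, unsigned int va,
				   unsigned int num_bytes)
{
	unsigned int all_bits = pa | va;
	int i;

	for (i = 0; i < 4; i++)
		if (num_bytes >= page_size[i] &&
		    (all_bits & (page_size[i] - 1)) == 0)
			return page_size[i];
	return 0;	/* unreachable while inputs stay 4KB-aligned */
}

int main(void)
{
	unsigned int pa = 0x80000000, va = 0x20000000, n = 0x141000;

	while (n) {
		unsigned int sz = pick_page_size(pa, va, n);

		printf("map PA %#x -> VA %#x, size %#x\n", pa, va, sz);
		pa += sz;
		va += sz;
		n -= sz;
	}
	return 0;
}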
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index f22bc12bc0d3..1be081f917a7 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -23,7 +23,6 @@
23#include <dspbridge/host_os.h> 23#include <dspbridge/host_os.h>
24#include <linux/mm.h> 24#include <linux/mm.h>
25#include <linux/mmzone.h> 25#include <linux/mmzone.h>
26#include <plat/control.h>
27 26
28/* ----------------------------------- DSP/BIOS Bridge */ 27/* ----------------------------------- DSP/BIOS Bridge */
29#include <dspbridge/dbdefs.h> 28#include <dspbridge/dbdefs.h>
@@ -35,6 +34,10 @@
35#include <dspbridge/drv.h> 34#include <dspbridge/drv.h>
36#include <dspbridge/sync.h> 35#include <dspbridge/sync.h>
37 36
37/* ------------------------------------ Hardware Abstraction Layer */
38#include <hw_defs.h>
39#include <hw_mmu.h>
40
38/* ----------------------------------- Link Driver */ 41/* ----------------------------------- Link Driver */
39#include <dspbridge/dspdefs.h> 42#include <dspbridge/dspdefs.h>
40#include <dspbridge/dspchnl.h> 43#include <dspbridge/dspchnl.h>
@@ -47,6 +50,7 @@
47/* ----------------------------------- Platform Manager */ 50/* ----------------------------------- Platform Manager */
48#include <dspbridge/dev.h> 51#include <dspbridge/dev.h>
49#include <dspbridge/dspapi.h> 52#include <dspbridge/dspapi.h>
53#include <dspbridge/dmm.h>
50#include <dspbridge/wdt.h> 54#include <dspbridge/wdt.h>
51 55
52/* ----------------------------------- Local */ 56/* ----------------------------------- Local */
@@ -67,6 +71,20 @@
67#define MMU_SMALL_PAGE_MASK 0xFFFFF000 71#define MMU_SMALL_PAGE_MASK 0xFFFFF000
68#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00 72#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
69#define PAGES_II_LVL_TABLE 512 73#define PAGES_II_LVL_TABLE 512
74#define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> PAGE_SHIFT)
75
76/*
77 * This is a totally ugly layer violation, but needed until
78 * omap_ctrl_set_dsp_boot*() are provided.
79 */
80#define OMAP3_IVA2_BOOTMOD_IDLE 1
81#define OMAP2_CONTROL_GENERAL 0x270
82#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190)
83#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194)
84
85#define OMAP343X_CTRL_REGADDR(reg) \
86 OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg))
87
70 88
71/* Forward Declarations: */ 89/* Forward Declarations: */
72static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt); 90static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
@@ -91,6 +109,12 @@ static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
91static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt, 109static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
92 u8 *host_buff, u32 dsp_addr, 110 u8 *host_buff, u32 dsp_addr,
93 u32 ul_num_bytes, u32 mem_type); 111 u32 ul_num_bytes, u32 mem_type);
112static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
113 u32 ul_mpu_addr, u32 virt_addr,
114 u32 ul_num_bytes, u32 ul_map_attr,
115 struct page **mapped_pages);
116static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
117 u32 virt_addr, u32 ul_num_bytes);
94static int bridge_dev_create(struct bridge_dev_context 118static int bridge_dev_create(struct bridge_dev_context
95 **dev_cntxt, 119 **dev_cntxt,
96 struct dev_object *hdev_obj, 120 struct dev_object *hdev_obj,
@@ -98,8 +122,57 @@ static int bridge_dev_create(struct bridge_dev_context
98static int bridge_dev_ctrl(struct bridge_dev_context *dev_context, 122static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
99 u32 dw_cmd, void *pargs); 123 u32 dw_cmd, void *pargs);
100static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt); 124static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
125static u32 user_va2_pa(struct mm_struct *mm, u32 address);
126static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
127 u32 va, u32 size,
128 struct hw_mmu_map_attrs_t *map_attrs);
129static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
130 u32 size, struct hw_mmu_map_attrs_t *attrs);
131static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
132 u32 ul_mpu_addr, u32 virt_addr,
133 u32 ul_num_bytes,
134 struct hw_mmu_map_attrs_t *hw_attrs);
135
101bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr); 136bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);
102 137
138/* ----------------------------------- Globals */
139
140/* Attributes of L2 page tables for DSP MMU */
141struct page_info {
142 u32 num_entries; /* Number of valid PTEs in the L2 PT */
143};
144
145/* Attributes used to manage the DSP MMU page tables */
146struct pg_table_attrs {
147 spinlock_t pg_lock; /* Critical section object handle */
148
149 u32 l1_base_pa; /* Physical address of the L1 PT */
150 u32 l1_base_va; /* Virtual address of the L1 PT */
151 u32 l1_size; /* Size of the L1 PT */
152 u32 l1_tbl_alloc_pa;
153 /* Physical address of Allocated mem for L1 table. May not be aligned */
154 u32 l1_tbl_alloc_va;
155 /* Virtual address of Allocated mem for L1 table. May not be aligned */
156 u32 l1_tbl_alloc_sz;
157 /* Size of consistent memory allocated for L1 table.
158 * May not be aligned */
159
160 u32 l2_base_pa; /* Physical address of the L2 PT */
161 u32 l2_base_va; /* Virtual address of the L2 PT */
162 u32 l2_size; /* Size of the L2 PT */
163 u32 l2_tbl_alloc_pa;
164 /* Physical address of Allocated mem for L2 table. May not be aligned */
165 u32 l2_tbl_alloc_va;
166 /* Virtual address of Allocated mem for L2 table. May not be aligned */
167 u32 l2_tbl_alloc_sz;
168 /* Size of consistent memory allocated for L2 table.
169 * May not be aligned */
170
171 u32 l2_num_pages; /* Number of allocated L2 PT */
172 /* Array [l2_num_pages] of L2 PT info structs */
173 struct page_info *pg_info;
174};
175
103/* 176/*
104 * This Bridge driver's function interface table. 177 * This Bridge driver's function interface table.
105 */ 178 */
@@ -119,6 +192,8 @@ static struct bridge_drv_interface drv_interface_fxns = {
119 bridge_brd_set_state, 192 bridge_brd_set_state,
120 bridge_brd_mem_copy, 193 bridge_brd_mem_copy,
121 bridge_brd_mem_write, 194 bridge_brd_mem_write,
195 bridge_brd_mem_map,
196 bridge_brd_mem_un_map,
122 /* The following CHNL functions are provided by chnl_io.lib: */ 197 /* The following CHNL functions are provided by chnl_io.lib: */
123 bridge_chnl_create, 198 bridge_chnl_create,
124 bridge_chnl_destroy, 199 bridge_chnl_destroy,
@@ -148,6 +223,27 @@ static struct bridge_drv_interface drv_interface_fxns = {
148 bridge_msg_set_queue_id, 223 bridge_msg_set_queue_id,
149}; 224};
150 225
226static inline void flush_all(struct bridge_dev_context *dev_context)
227{
228 if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
229 dev_context->dw_brd_state == BRD_HIBERNATION)
230 wake_dsp(dev_context, NULL);
231
232 hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base);
233}
234
235static void bad_page_dump(u32 pa, struct page *pg)
236{
237 pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
238 pr_emerg("Bad page state in process '%s'\n"
239 "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
240 "Backtrace:\n",
241 current->comm, pg, (int)(2 * sizeof(unsigned long)),
242 (unsigned long)pg->flags, pg->mapping,
243 page_mapcount(pg), page_count(pg));
244 dump_stack();
245}
246
151/* 247/*
152 * ======== bridge_drv_entry ======== 248 * ======== bridge_drv_entry ========
153 * purpose: 249 * purpose:
@@ -203,7 +299,8 @@ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
203 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, 299 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
204 OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); 300 OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
205 } 301 }
206 302 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
303 OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
207 dsp_clk_enable(DSP_CLK_IVA2); 304 dsp_clk_enable(DSP_CLK_IVA2);
208 305
209 /* set the device state to IDLE */ 306 /* set the device state to IDLE */
@@ -274,17 +371,14 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
274{ 371{
275 int status = 0; 372 int status = 0;
276 struct bridge_dev_context *dev_context = dev_ctxt; 373 struct bridge_dev_context *dev_context = dev_ctxt;
277 struct iommu *mmu = NULL;
278 struct shm_segs *sm_sg;
279 int l4_i = 0, tlb_i = 0;
280 u32 sg0_da = 0, sg1_da = 0;
281 struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
282 u32 dw_sync_addr = 0; 374 u32 dw_sync_addr = 0;
283 u32 ul_shm_base; /* Gpp Phys SM base addr(byte) */ 375 u32 ul_shm_base; /* Gpp Phys SM base addr(byte) */
284 u32 ul_shm_base_virt; /* Dsp Virt SM base addr */ 376 u32 ul_shm_base_virt; /* Dsp Virt SM base addr */
285 u32 ul_tlb_base_virt; /* Base of MMU TLB entry */ 377 u32 ul_tlb_base_virt; /* Base of MMU TLB entry */
286 /* Offset of shm_base_virt from tlb_base_virt */ 378 /* Offset of shm_base_virt from tlb_base_virt */
287 u32 ul_shm_offset_virt; 379 u32 ul_shm_offset_virt;
380 s32 entry_ndx;
 381 s32 itmp_entry_ndx = 0; /* Index of the next free DSP-MMU TLB entry */
288 struct cfg_hostres *resources = NULL; 382 struct cfg_hostres *resources = NULL;
289 u32 temp; 383 u32 temp;
290 u32 ul_dsp_clk_rate; 384 u32 ul_dsp_clk_rate;
@@ -305,12 +399,12 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
305 ul_shm_base_virt *= DSPWORDSIZE; 399 ul_shm_base_virt *= DSPWORDSIZE;
306 DBC_ASSERT(ul_shm_base_virt != 0); 400 DBC_ASSERT(ul_shm_base_virt != 0);
307 /* DSP Virtual address */ 401 /* DSP Virtual address */
308 ul_tlb_base_virt = dev_context->sh_s.seg0_da; 402 ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va;
309 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); 403 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
310 ul_shm_offset_virt = 404 ul_shm_offset_virt =
311 ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE); 405 ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
312 /* Kernel logical address */ 406 /* Kernel logical address */
313 ul_shm_base = dev_context->sh_s.seg0_va + ul_shm_offset_virt; 407 ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt;
314 408
315 DBC_ASSERT(ul_shm_base != 0); 409 DBC_ASSERT(ul_shm_base != 0);
316 /* 2nd wd is used as sync field */ 410 /* 2nd wd is used as sync field */
@@ -345,83 +439,78 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
345 OMAP343X_CONTROL_IVA2_BOOTMOD)); 439 OMAP343X_CONTROL_IVA2_BOOTMOD));
346 } 440 }
347 } 441 }
348
349 if (!status) { 442 if (!status) {
443 /* Reset and Unreset the RST2, so that BOOTADDR is copied to
444 * IVA2 SYSC register */
445 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
446 OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
447 udelay(100);
350 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0, 448 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
351 OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); 449 OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
352 mmu = dev_context->dsp_mmu; 450 udelay(100);
353 if (mmu) 451
 354 dsp_mmu_exit(mmu); 452 /* Disable the DSP MMU */
355 mmu = dsp_mmu_init(); 453 hw_mmu_disable(resources->dw_dmmu_base);
356 if (IS_ERR(mmu)) { 454 /* Disable TWL */
357 dev_err(bridge, "dsp_mmu_init failed!\n"); 455 hw_mmu_twl_disable(resources->dw_dmmu_base);
358 dev_context->dsp_mmu = NULL; 456
359 status = (int)mmu; 457 /* Only make TLB entry if both addresses are non-zero */
360 } 458 for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
361 } 459 entry_ndx++) {
362 if (!status) { 460 struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx];
363 dev_context->dsp_mmu = mmu; 461 struct hw_mmu_map_attrs_t map_attrs = {
364 sm_sg = &dev_context->sh_s; 462 .endianism = e->endianism,
365 sg0_da = iommu_kmap(mmu, sm_sg->seg0_da, sm_sg->seg0_pa, 463 .element_size = e->elem_size,
366 sm_sg->seg0_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); 464 .mixed_size = e->mixed_mode,
367 if (IS_ERR_VALUE(sg0_da)) { 465 };
368 status = (int)sg0_da; 466
369 sg0_da = 0; 467 if (!e->ul_gpp_pa || !e->ul_dsp_va)
370 }
371 }
372 if (!status) {
373 sg1_da = iommu_kmap(mmu, sm_sg->seg1_da, sm_sg->seg1_pa,
374 sm_sg->seg1_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
375 if (IS_ERR_VALUE(sg1_da)) {
376 status = (int)sg1_da;
377 sg1_da = 0;
378 }
379 }
380 if (!status) {
381 u32 da;
382 for (tlb_i = 0; tlb_i < BRDIOCTL_NUMOFMMUTLB; tlb_i++) {
383 if (!tlb[tlb_i].ul_gpp_pa)
384 continue; 468 continue;
385 469
386 dev_dbg(bridge, "IOMMU %d GppPa: 0x%x DspVa 0x%x Size" 470 dev_dbg(bridge,
387 " 0x%x\n", tlb_i, tlb[tlb_i].ul_gpp_pa, 471 "MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
388 tlb[tlb_i].ul_dsp_va, tlb[tlb_i].ul_size); 472 itmp_entry_ndx,
389 473 e->ul_gpp_pa,
390 da = iommu_kmap(mmu, tlb[tlb_i].ul_dsp_va, 474 e->ul_dsp_va,
391 tlb[tlb_i].ul_gpp_pa, PAGE_SIZE, 475 e->ul_size);
392 IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); 476
393 if (IS_ERR_VALUE(da)) { 477 hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base,
394 status = (int)da; 478 e->ul_gpp_pa,
395 break; 479 e->ul_dsp_va,
396 } 480 e->ul_size,
397 } 481 itmp_entry_ndx,
398 } 482 &map_attrs, 1, 1);
399 if (!status) { 483
400 u32 da; 484 itmp_entry_ndx++;
401 l4_i = 0;
402 while (l4_peripheral_table[l4_i].phys_addr) {
403 da = iommu_kmap(mmu, l4_peripheral_table[l4_i].
404 dsp_virt_addr, l4_peripheral_table[l4_i].
405 phys_addr, PAGE_SIZE,
406 IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
407 if (IS_ERR_VALUE(da)) {
408 status = (int)da;
409 break;
410 }
411 l4_i++;
412 } 485 }
413 } 486 }
414 487
415 /* Lock the above TLB entries and get the BIOS and load monitor timer 488 /* Lock the above TLB entries and get the BIOS and load monitor timer
416 * information */ 489 * information */
417 if (!status) { 490 if (!status) {
491 hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx);
492 hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx);
493 hw_mmu_ttb_set(resources->dw_dmmu_base,
494 dev_context->pt_attrs->l1_base_pa);
495 hw_mmu_twl_enable(resources->dw_dmmu_base);
 496 /* Enable the SmartIdle and AutoIdle bits in MMU_SYSCONFIG */
497
498 temp = __raw_readl((resources->dw_dmmu_base) + 0x10);
499 temp = (temp & 0xFFFFFFEF) | 0x11;
500 __raw_writel(temp, (resources->dw_dmmu_base) + 0x10);
501
502 /* Let the DSP MMU run */
503 hw_mmu_enable(resources->dw_dmmu_base);
504
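/*
 * The read-modify-write above hides two settings in magic constants; the
 * same update with the bits named, assuming the usual OMAP3 SYSCONFIG
 * layout (AUTOIDLE at bit 0, smart-idle at bit 4) - a sketch to check
 * against the TRM, not driver API:
 *
 *	#define MMU_SYSCONFIG	0x10
 *	#define SYSC_AUTOIDLE	(1 << 0)
 *	#define SYSC_SMARTIDLE	(1 << 4)
 *
 *	temp = __raw_readl(resources->dw_dmmu_base + MMU_SYSCONFIG);
 *	temp = (temp & ~SYSC_SMARTIDLE) | SYSC_SMARTIDLE | SYSC_AUTOIDLE;
 *	__raw_writel(temp, resources->dw_dmmu_base + MMU_SYSCONFIG);
 */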
418 /* Enable the BIOS clock */ 505 /* Enable the BIOS clock */
419 (void)dev_get_symbol(dev_context->hdev_obj, 506 (void)dev_get_symbol(dev_context->hdev_obj,
420 BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer); 507 BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
421 (void)dev_get_symbol(dev_context->hdev_obj, 508 (void)dev_get_symbol(dev_context->hdev_obj,
422 BRIDGEINIT_LOADMON_GPTIMER, 509 BRIDGEINIT_LOADMON_GPTIMER,
423 &ul_load_monitor_timer); 510 &ul_load_monitor_timer);
511 }
424 512
513 if (!status) {
425 if (ul_load_monitor_timer != 0xFFFF) { 514 if (ul_load_monitor_timer != 0xFFFF) {
426 clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | 515 clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
427 ul_load_monitor_timer; 516 ul_load_monitor_timer;
@@ -430,7 +519,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
430 dev_dbg(bridge, "Not able to get the symbol for Load " 519 dev_dbg(bridge, "Not able to get the symbol for Load "
431 "Monitor Timer\n"); 520 "Monitor Timer\n");
432 } 521 }
522 }
433 523
524 if (!status) {
434 if (ul_bios_gp_timer != 0xFFFF) { 525 if (ul_bios_gp_timer != 0xFFFF) {
435 clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | 526 clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
436 ul_bios_gp_timer; 527 ul_bios_gp_timer;
@@ -439,7 +530,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
439 dev_dbg(bridge, 530 dev_dbg(bridge,
440 "Not able to get the symbol for BIOS Timer\n"); 531 "Not able to get the symbol for BIOS Timer\n");
441 } 532 }
533 }
442 534
535 if (!status) {
443 /* Set the DSP clock rate */ 536 /* Set the DSP clock rate */
444 (void)dev_get_symbol(dev_context->hdev_obj, 537 (void)dev_get_symbol(dev_context->hdev_obj,
445 "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr); 538 "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
@@ -492,6 +585,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
492 585
493 /* Let DSP go */ 586 /* Let DSP go */
494 dev_dbg(bridge, "%s Unreset\n", __func__); 587 dev_dbg(bridge, "%s Unreset\n", __func__);
588 /* Enable DSP MMU Interrupts */
589 hw_mmu_event_enable(resources->dw_dmmu_base,
590 HW_MMU_ALL_INTERRUPTS);
495 /* release the RST1, DSP starts executing now .. */ 591 /* release the RST1, DSP starts executing now .. */
496 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0, 592 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
497 OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); 593 OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
@@ -521,23 +617,11 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
521 617
522 /* update board state */ 618 /* update board state */
523 dev_context->dw_brd_state = BRD_RUNNING; 619 dev_context->dw_brd_state = BRD_RUNNING;
524 return 0; 620 /* (void)chnlsm_enable_interrupt(dev_context); */
525 } else { 621 } else {
526 dev_context->dw_brd_state = BRD_UNKNOWN; 622 dev_context->dw_brd_state = BRD_UNKNOWN;
527 } 623 }
528 } 624 }
529
530 while (tlb_i--) {
531 if (!tlb[tlb_i].ul_gpp_pa)
532 continue;
533 iommu_kunmap(mmu, tlb[tlb_i].ul_gpp_va);
534 }
535 while (l4_i--)
536 iommu_kunmap(mmu, l4_peripheral_table[l4_i].dsp_virt_addr);
537 if (sg0_da)
538 iommu_kunmap(mmu, sg0_da);
539 if (sg1_da)
540 iommu_kunmap(mmu, sg1_da);
541 return status; 625 return status;
542} 626}
543 627
@@ -553,9 +637,8 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
553{ 637{
554 int status = 0; 638 int status = 0;
555 struct bridge_dev_context *dev_context = dev_ctxt; 639 struct bridge_dev_context *dev_context = dev_ctxt;
640 struct pg_table_attrs *pt_attrs;
556 u32 dsp_pwr_state; 641 u32 dsp_pwr_state;
557 int i;
558 struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
559 struct omap_dsp_platform_data *pdata = 642 struct omap_dsp_platform_data *pdata =
560 omap_dspbridge_dev->dev.platform_data; 643 omap_dspbridge_dev->dev.platform_data;
561 644
@@ -591,37 +674,23 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
591 674
592 dsp_wdt_enable(false); 675 dsp_wdt_enable(false);
593 676
594 /* Reset DSP */ 677 /* This is a good place to clear the MMU page tables as well */
595 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 678 if (dev_context->pt_attrs) {
596 OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); 679 pt_attrs = dev_context->pt_attrs;
597 680 memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
681 memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
682 memset((u8 *) pt_attrs->pg_info, 0x00,
683 (pt_attrs->l2_num_pages * sizeof(struct page_info)));
684 }
598 /* Disable the mailbox interrupts */ 685 /* Disable the mailbox interrupts */
599 if (dev_context->mbox) { 686 if (dev_context->mbox) {
600 omap_mbox_disable_irq(dev_context->mbox, IRQ_RX); 687 omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
601 omap_mbox_put(dev_context->mbox); 688 omap_mbox_put(dev_context->mbox);
602 dev_context->mbox = NULL; 689 dev_context->mbox = NULL;
603 } 690 }
604 if (dev_context->dsp_mmu) { 691 /* Reset IVA2 clocks*/
605 pr_err("Proc stop mmu if statement\n"); 692 (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
606 for (i = 0; i < BRDIOCTL_NUMOFMMUTLB; i++) { 693 OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
607 if (!tlb[i].ul_gpp_pa)
608 continue;
609 iommu_kunmap(dev_context->dsp_mmu, tlb[i].ul_gpp_va);
610 }
611 i = 0;
612 while (l4_peripheral_table[i].phys_addr) {
613 iommu_kunmap(dev_context->dsp_mmu,
614 l4_peripheral_table[i].dsp_virt_addr);
615 i++;
616 }
617 iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg0_da);
618 iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg1_da);
619 dsp_mmu_exit(dev_context->dsp_mmu);
620 dev_context->dsp_mmu = NULL;
621 }
622 /* Reset IVA IOMMU*/
623 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
624 OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
625 694
626 dsp_clock_disable_all(dev_context->dsp_per_clks); 695 dsp_clock_disable_all(dev_context->dsp_per_clks);
627 dsp_clk_disable(DSP_CLK_IVA2); 696 dsp_clk_disable(DSP_CLK_IVA2);
@@ -681,6 +750,10 @@ static int bridge_dev_create(struct bridge_dev_context
681 struct bridge_dev_context *dev_context = NULL; 750 struct bridge_dev_context *dev_context = NULL;
682 s32 entry_ndx; 751 s32 entry_ndx;
683 struct cfg_hostres *resources = config_param; 752 struct cfg_hostres *resources = config_param;
753 struct pg_table_attrs *pt_attrs;
754 u32 pg_tbl_pa;
755 u32 pg_tbl_va;
756 u32 align_size;
684 struct drv_data *drv_datap = dev_get_drvdata(bridge); 757 struct drv_data *drv_datap = dev_get_drvdata(bridge);
685 758
686 /* Allocate and initialize a data structure to contain the bridge driver 759 /* Allocate and initialize a data structure to contain the bridge driver
@@ -711,8 +784,97 @@ static int bridge_dev_create(struct bridge_dev_context
711 if (!dev_context->dw_dsp_base_addr) 784 if (!dev_context->dw_dsp_base_addr)
712 status = -EPERM; 785 status = -EPERM;
713 786
787 pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
788 if (pt_attrs != NULL) {
 789 /* Assuming we only use the DSP's memory map up
 790 * to 0x4000:0000, we need only 1024 L1 entries,
 791 * i.e. an L1 size of 4K */
792 pt_attrs->l1_size = 0x1000;
793 align_size = pt_attrs->l1_size;
794 /* Align sizes are expected to be power of 2 */
795 /* we like to get aligned on L1 table size */
796 pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
797 align_size, &pg_tbl_pa);
798
799 /* Check if the PA is aligned for us */
800 if ((pg_tbl_pa) & (align_size - 1)) {
 801 /* PA not aligned to the page table size;
 802 * over-allocate and align manually */
803 mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
804 pt_attrs->l1_size);
805 /* we like to get aligned on L1 table size */
806 pg_tbl_va =
807 (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
808 align_size, &pg_tbl_pa);
809 /* We should be able to get aligned table now */
810 pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
811 pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
812 pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
813 /* Align the PA to the next 'align' boundary */
814 pt_attrs->l1_base_pa =
815 ((pg_tbl_pa) +
816 (align_size - 1)) & (~(align_size - 1));
817 pt_attrs->l1_base_va =
818 pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
819 } else {
820 /* We got aligned PA, cool */
821 pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
822 pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
823 pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
824 pt_attrs->l1_base_pa = pg_tbl_pa;
825 pt_attrs->l1_base_va = pg_tbl_va;
826 }
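/*
 * The fallback above is the classic over-allocate-and-align pattern: if
 * the allocator cannot guarantee an L1-aligned PA, allocate twice the
 * size so an aligned base is certain to fit, then round up:
 *
 *	base_pa = (raw_pa + (align - 1)) & ~(align - 1);
 *	e.g. raw 0x80001234, align 0x1000 -> base 0x80002000
 *	     (0xdcc bytes wasted, still inside the 2 * align block)
 *
 * Addresses here are illustrative.
 */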
827 if (pt_attrs->l1_base_va)
828 memset((u8 *) pt_attrs->l1_base_va, 0x00,
829 pt_attrs->l1_size);
830
831 /* number of L2 page tables = DMM pool used + SHMMEM +EXTMEM +
832 * L4 pages */
833 pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
834 pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
835 pt_attrs->l2_num_pages;
836 align_size = 4; /* Make it u32 aligned */
 837 /* u32 alignment is sufficient for the L2 tables */
838 pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
839 align_size, &pg_tbl_pa);
840 pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
841 pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
842 pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
843 pt_attrs->l2_base_pa = pg_tbl_pa;
844 pt_attrs->l2_base_va = pg_tbl_va;
845
846 if (pt_attrs->l2_base_va)
847 memset((u8 *) pt_attrs->l2_base_va, 0x00,
848 pt_attrs->l2_size);
849
850 pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
851 sizeof(struct page_info), GFP_KERNEL);
852 dev_dbg(bridge,
853 "L1 pa %x, va %x, size %x\n L2 pa %x, va "
854 "%x, size %x\n", pt_attrs->l1_base_pa,
855 pt_attrs->l1_base_va, pt_attrs->l1_size,
856 pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
857 pt_attrs->l2_size);
858 dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
859 pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
860 }
861 if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
862 (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
863 dev_context->pt_attrs = pt_attrs;
864 else
865 status = -ENOMEM;
866
714 if (!status) { 867 if (!status) {
868 spin_lock_init(&pt_attrs->pg_lock);
715 dev_context->tc_word_swap_on = drv_datap->tc_wordswapon; 869 dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
870
871 /* Set the Clock Divisor for the DSP module */
872 udelay(5);
873 /* MMU address is obtained from the host
874 * resources struct */
875 dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base;
876 }
877 if (!status) {
716 dev_context->hdev_obj = hdev_obj; 878 dev_context->hdev_obj = hdev_obj;
717 /* Store current board state. */ 879 /* Store current board state. */
718 dev_context->dw_brd_state = BRD_UNKNOWN; 880 dev_context->dw_brd_state = BRD_UNKNOWN;
@@ -722,6 +884,23 @@ static int bridge_dev_create(struct bridge_dev_context
722 /* Return ptr to our device state to the DSP API for storage */ 884 /* Return ptr to our device state to the DSP API for storage */
723 *dev_cntxt = dev_context; 885 *dev_cntxt = dev_context;
724 } else { 886 } else {
887 if (pt_attrs != NULL) {
888 kfree(pt_attrs->pg_info);
889
890 if (pt_attrs->l2_tbl_alloc_va) {
891 mem_free_phys_mem((void *)
892 pt_attrs->l2_tbl_alloc_va,
893 pt_attrs->l2_tbl_alloc_pa,
894 pt_attrs->l2_tbl_alloc_sz);
895 }
896 if (pt_attrs->l1_tbl_alloc_va) {
897 mem_free_phys_mem((void *)
898 pt_attrs->l1_tbl_alloc_va,
899 pt_attrs->l1_tbl_alloc_pa,
900 pt_attrs->l1_tbl_alloc_sz);
901 }
902 }
903 kfree(pt_attrs);
725 kfree(dev_context); 904 kfree(dev_context);
726 } 905 }
727func_end: 906func_end:
@@ -789,6 +968,7 @@ static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
789 */ 968 */
790static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt) 969static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
791{ 970{
971 struct pg_table_attrs *pt_attrs;
792 int status = 0; 972 int status = 0;
793 struct bridge_dev_context *dev_context = (struct bridge_dev_context *) 973 struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
794 dev_ctxt; 974 dev_ctxt;
@@ -802,6 +982,23 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
802 982
803 /* first put the device to stop state */ 983 /* first put the device to stop state */
804 bridge_brd_stop(dev_context); 984 bridge_brd_stop(dev_context);
985 if (dev_context->pt_attrs) {
986 pt_attrs = dev_context->pt_attrs;
987 kfree(pt_attrs->pg_info);
988
989 if (pt_attrs->l2_tbl_alloc_va) {
990 mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
991 pt_attrs->l2_tbl_alloc_pa,
992 pt_attrs->l2_tbl_alloc_sz);
993 }
994 if (pt_attrs->l1_tbl_alloc_va) {
995 mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
996 pt_attrs->l1_tbl_alloc_pa,
997 pt_attrs->l1_tbl_alloc_sz);
998 }
999 kfree(pt_attrs);
1000
1001 }
805 1002
806 if (dev_context->resources) { 1003 if (dev_context->resources) {
807 host_res = dev_context->resources; 1004 host_res = dev_context->resources;
@@ -832,6 +1029,8 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
832 iounmap((void *)host_res->dw_mem_base[3]); 1029 iounmap((void *)host_res->dw_mem_base[3]);
833 if (host_res->dw_mem_base[4]) 1030 if (host_res->dw_mem_base[4])
834 iounmap((void *)host_res->dw_mem_base[4]); 1031 iounmap((void *)host_res->dw_mem_base[4]);
1032 if (host_res->dw_dmmu_base)
1033 iounmap(host_res->dw_dmmu_base);
835 if (host_res->dw_per_base) 1034 if (host_res->dw_per_base)
836 iounmap(host_res->dw_per_base); 1035 iounmap(host_res->dw_per_base);
837 if (host_res->dw_per_pm_base) 1036 if (host_res->dw_per_pm_base)
@@ -845,6 +1044,7 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
845 host_res->dw_mem_base[2] = (u32) NULL; 1044 host_res->dw_mem_base[2] = (u32) NULL;
846 host_res->dw_mem_base[3] = (u32) NULL; 1045 host_res->dw_mem_base[3] = (u32) NULL;
847 host_res->dw_mem_base[4] = (u32) NULL; 1046 host_res->dw_mem_base[4] = (u32) NULL;
1047 host_res->dw_dmmu_base = NULL;
848 host_res->dw_sys_ctrl_base = NULL; 1048 host_res->dw_sys_ctrl_base = NULL;
849 1049
850 kfree(host_res); 1050 kfree(host_res);
@@ -928,6 +1128,673 @@ static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
928} 1128}
929 1129
930/* 1130/*
1131 * ======== bridge_brd_mem_map ========
1132 * This function maps MPU buffer to the DSP address space. It performs
1133 * linear to physical address translation if required. It translates each
1134 * page since linear addresses can be physically non-contiguous
1135 * All address & size arguments are assumed to be page aligned (in proc.c)
1136 *
1137 * TODO: Disable MMU while updating the page tables (but that'll stall DSP)
1138 */
1139static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
1140 u32 ul_mpu_addr, u32 virt_addr,
1141 u32 ul_num_bytes, u32 ul_map_attr,
1142 struct page **mapped_pages)
1143{
1144 u32 attrs;
1145 int status = 0;
1146 struct bridge_dev_context *dev_context = dev_ctxt;
1147 struct hw_mmu_map_attrs_t hw_attrs;
1148 struct vm_area_struct *vma;
1149 struct mm_struct *mm = current->mm;
1150 u32 write = 0;
1151 u32 num_usr_pgs = 0;
1152 struct page *mapped_page, *pg;
1153 s32 pg_num;
1154 u32 va = virt_addr;
1155 struct task_struct *curr_task = current;
1156 u32 pg_i = 0;
1157 u32 mpu_addr, pa;
1158
1159 dev_dbg(bridge,
1160 "%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
1161 __func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
1162 ul_map_attr);
1163 if (ul_num_bytes == 0)
1164 return -EINVAL;
1165
1166 if (ul_map_attr & DSP_MAP_DIR_MASK) {
1167 attrs = ul_map_attr;
1168 } else {
1169 /* Assign default attributes */
1170 attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
1171 }
1172 /* Take mapping properties */
1173 if (attrs & DSP_MAPBIGENDIAN)
1174 hw_attrs.endianism = HW_BIG_ENDIAN;
1175 else
1176 hw_attrs.endianism = HW_LITTLE_ENDIAN;
1177
1178 hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
1179 ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
1180 /* Ignore element_size if mixed_size is enabled */
1181 if (hw_attrs.mixed_size == 0) {
1182 if (attrs & DSP_MAPELEMSIZE8) {
1183 /* Size is 8 bit */
1184 hw_attrs.element_size = HW_ELEM_SIZE8BIT;
1185 } else if (attrs & DSP_MAPELEMSIZE16) {
1186 /* Size is 16 bit */
1187 hw_attrs.element_size = HW_ELEM_SIZE16BIT;
1188 } else if (attrs & DSP_MAPELEMSIZE32) {
1189 /* Size is 32 bit */
1190 hw_attrs.element_size = HW_ELEM_SIZE32BIT;
1191 } else if (attrs & DSP_MAPELEMSIZE64) {
1192 /* Size is 64 bit */
1193 hw_attrs.element_size = HW_ELEM_SIZE64BIT;
1194 } else {
1195 /*
1196 * Mixedsize isn't enabled, so size can't be
1197 * zero here
1198 */
1199 return -EINVAL;
1200 }
1201 }
1202 if (attrs & DSP_MAPDONOTLOCK)
1203 hw_attrs.donotlockmpupage = 1;
1204 else
1205 hw_attrs.donotlockmpupage = 0;
1206
1207 if (attrs & DSP_MAPVMALLOCADDR) {
1208 return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
1209 ul_num_bytes, &hw_attrs);
1210 }
1211 /*
1212 * Do OS-specific user-va to pa translation.
1213 * Combine physically contiguous regions to reduce TLBs.
1214 * Pass the translated pa to pte_update.
1215 */
1216 if ((attrs & DSP_MAPPHYSICALADDR)) {
1217 status = pte_update(dev_context, ul_mpu_addr, virt_addr,
1218 ul_num_bytes, &hw_attrs);
1219 goto func_cont;
1220 }
1221
1222 /*
1223 * Important Note: ul_mpu_addr is mapped from user application process
1224 * to current process - it must lie completely within the current
1225 * virtual memory address space in order to be of use to us here!
1226 */
1227 down_read(&mm->mmap_sem);
1228 vma = find_vma(mm, ul_mpu_addr);
1229 if (vma)
1230 dev_dbg(bridge,
1231 "VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
1232 "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
1233 ul_num_bytes, vma->vm_start, vma->vm_end,
1234 vma->vm_flags);
1235
1236 /*
1237 * It is observed that under some circumstances, the user buffer is
1238 * spread across several VMAs. So loop through and check if the entire
1239 * user buffer is covered
1240 */
1241 while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
1242 /* jump to the next VMA region */
1243 vma = find_vma(mm, vma->vm_end + 1);
1244 dev_dbg(bridge,
1245 "VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
1246 "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
1247 ul_num_bytes, vma->vm_start, vma->vm_end,
1248 vma->vm_flags);
1249 }
1250 if (!vma) {
1251 pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
1252 __func__, ul_mpu_addr, ul_num_bytes);
1253 status = -EINVAL;
1254 up_read(&mm->mmap_sem);
1255 goto func_cont;
1256 }
1257
1258 if (vma->vm_flags & VM_IO) {
1259 num_usr_pgs = ul_num_bytes / PG_SIZE4K;
1260 mpu_addr = ul_mpu_addr;
1261
1262 /* Get the physical addresses for user buffer */
1263 for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
1264 pa = user_va2_pa(mm, mpu_addr);
1265 if (!pa) {
1266 status = -EPERM;
1267 pr_err("DSPBRIDGE: VM_IO mapping physical"
1268 "address is invalid\n");
1269 break;
1270 }
1271 if (pfn_valid(__phys_to_pfn(pa))) {
1272 pg = PHYS_TO_PAGE(pa);
1273 get_page(pg);
1274 if (page_count(pg) < 1) {
1275 pr_err("Bad page in VM_IO buffer\n");
1276 bad_page_dump(pa, pg);
1277 }
1278 }
1279 status = pte_set(dev_context->pt_attrs, pa,
1280 va, HW_PAGE_SIZE4KB, &hw_attrs);
1281 if (status)
1282 break;
1283
1284 va += HW_PAGE_SIZE4KB;
1285 mpu_addr += HW_PAGE_SIZE4KB;
1286 pa += HW_PAGE_SIZE4KB;
1287 }
1288 } else {
1289 num_usr_pgs = ul_num_bytes / PG_SIZE4K;
1290 if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
1291 write = 1;
1292
1293 for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
1294 pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
1295 write, 1, &mapped_page, NULL);
1296 if (pg_num > 0) {
1297 if (page_count(mapped_page) < 1) {
1298 pr_err("Bad page count after doing"
1299 "get_user_pages on"
1300 "user buffer\n");
1301 bad_page_dump(page_to_phys(mapped_page),
1302 mapped_page);
1303 }
1304 status = pte_set(dev_context->pt_attrs,
1305 page_to_phys(mapped_page), va,
1306 HW_PAGE_SIZE4KB, &hw_attrs);
1307 if (status)
1308 break;
1309
1310 if (mapped_pages)
1311 mapped_pages[pg_i] = mapped_page;
1312
1313 va += HW_PAGE_SIZE4KB;
1314 ul_mpu_addr += HW_PAGE_SIZE4KB;
1315 } else {
1316 pr_err("DSPBRIDGE: get_user_pages FAILED,"
1317 "MPU addr = 0x%x,"
1318 "vma->vm_flags = 0x%lx,"
1319 "get_user_pages Err"
1320 "Value = %d, Buffer"
1321 "size=0x%x\n", ul_mpu_addr,
1322 vma->vm_flags, pg_num, ul_num_bytes);
1323 status = -EPERM;
1324 break;
1325 }
1326 }
1327 }
1328 up_read(&mm->mmap_sem);
1329func_cont:
1330 if (status) {
1331 /*
 1332 * Roll back the mapped pages in case mapping
 1333 * failed midway
1334 */
1335 if (pg_i) {
1336 bridge_brd_mem_un_map(dev_context, virt_addr,
1337 (pg_i * PG_SIZE4K));
1338 }
1339 status = -EPERM;
1340 }
1341 /*
 1342 * In any case, flush the TLB.
 1343 * This is done here instead of in pte_update to avoid unnecessary
 1344 * repetition while mapping non-contiguous physical regions of a
 1345 * virtual region
1346 */
1347 flush_all(dev_context);
1348 dev_dbg(bridge, "%s status %x\n", __func__, status);
1349 return status;
1350}
1351
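For orientation, a hypothetical call into bridge_brd_mem_map as defined above. The DSP_MAP* flags and PG_SIZE4K are the driver's real constants, but dev_ctxt, user_buf_va, the DSP address and the call site itself are illustrative only:

	struct page *pages[1];
	u32 attrs = DSP_MAPLITTLEENDIAN | DSP_MAPVIRTUALADDR |
		    DSP_MAPELEMSIZE32;
	int status;

	/* map one page-aligned user page to DSP VA 0x20000000 */
	status = bridge_brd_mem_map(dev_ctxt, user_buf_va, 0x20000000,
				    PG_SIZE4K, attrs, pages);
	/* on success, pages[0] holds the pinned page for a later unmap */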
1352/*
1353 * ======== bridge_brd_mem_un_map ========
1354 * Invalidate the PTEs for the DSP VA block to be unmapped.
1355 *
1356 * PTEs of a mapped memory block are contiguous in any page table
1357 * So, instead of looking up the PTE address for every 4K block,
1358 * we clear consecutive PTEs until we unmap all the bytes
1359 */
1360static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
1361 u32 virt_addr, u32 ul_num_bytes)
1362{
1363 u32 l1_base_va;
1364 u32 l2_base_va;
1365 u32 l2_base_pa;
1366 u32 l2_page_num;
1367 u32 pte_val;
1368 u32 pte_size;
1369 u32 pte_count;
1370 u32 pte_addr_l1;
1371 u32 pte_addr_l2 = 0;
1372 u32 rem_bytes;
1373 u32 rem_bytes_l2;
1374 u32 va_curr;
1375 struct page *pg = NULL;
1376 int status = 0;
1377 struct bridge_dev_context *dev_context = dev_ctxt;
1378 struct pg_table_attrs *pt = dev_context->pt_attrs;
1379 u32 temp;
1380 u32 paddr;
1381 u32 numof4k_pages = 0;
1382
1383 va_curr = virt_addr;
1384 rem_bytes = ul_num_bytes;
1385 rem_bytes_l2 = 0;
1386 l1_base_va = pt->l1_base_va;
1387 pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
1388 dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
1389 "pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
1390 ul_num_bytes, l1_base_va, pte_addr_l1);
1391
1392 while (rem_bytes && !status) {
1393 u32 va_curr_orig = va_curr;
1394 /* Find whether the L1 PTE points to a valid L2 PT */
1395 pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
1396 pte_val = *(u32 *) pte_addr_l1;
1397 pte_size = hw_mmu_pte_size_l1(pte_val);
1398
1399 if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
1400 goto skip_coarse_page;
1401
1402 /*
1403 * Get the L2 PA from the L1 PTE, and find
1404 * corresponding L2 VA
1405 */
1406 l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
1407 l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
1408 l2_page_num =
1409 (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
1410 /*
1411 * Find the L2 PTE address from which we will start
1412 * clearing, the number of PTEs to be cleared on this
1413 * page, and the size of VA space that needs to be
1414 * cleared on this L2 page
1415 */
1416 pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
1417 pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
1418 pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
1419 if (rem_bytes < (pte_count * PG_SIZE4K))
1420 pte_count = rem_bytes / PG_SIZE4K;
1421 rem_bytes_l2 = pte_count * PG_SIZE4K;
1422
1423 /*
1424 * Unmap the VA space on this L2 PT. A quicker way
1425 * would be to clear pte_count entries starting from
1426 * pte_addr_l2. However, below code checks that we don't
1427 * clear invalid entries or less than 64KB for a 64KB
1428 * entry. Similar checking is done for L1 PTEs too
1429 * below
1430 */
1431 while (rem_bytes_l2 && !status) {
1432 pte_val = *(u32 *) pte_addr_l2;
1433 pte_size = hw_mmu_pte_size_l2(pte_val);
1434 /* va_curr aligned to pte_size? */
1435 if (pte_size == 0 || rem_bytes_l2 < pte_size ||
1436 va_curr & (pte_size - 1)) {
1437 status = -EPERM;
1438 break;
1439 }
1440
1441 /* Collect Physical addresses from VA */
1442 paddr = (pte_val & ~(pte_size - 1));
1443 if (pte_size == HW_PAGE_SIZE64KB)
1444 numof4k_pages = 16;
1445 else
1446 numof4k_pages = 1;
1447 temp = 0;
1448 while (temp++ < numof4k_pages) {
1449 if (!pfn_valid(__phys_to_pfn(paddr))) {
1450 paddr += HW_PAGE_SIZE4KB;
1451 continue;
1452 }
1453 pg = PHYS_TO_PAGE(paddr);
1454 if (page_count(pg) < 1) {
1455 pr_info("DSPBRIDGE: UNMAP function: "
1456 "COUNT 0 FOR PA 0x%x, size = "
1457 "0x%x\n", paddr, ul_num_bytes);
1458 bad_page_dump(paddr, pg);
1459 } else {
1460 set_page_dirty(pg);
1461 page_cache_release(pg);
1462 }
1463 paddr += HW_PAGE_SIZE4KB;
1464 }
1465 if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
1466 status = -EPERM;
1467 goto EXIT_LOOP;
1468 }
1469
1470 status = 0;
1471 rem_bytes_l2 -= pte_size;
1472 va_curr += pte_size;
1473 pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
1474 }
1475 spin_lock(&pt->pg_lock);
1476 if (rem_bytes_l2 == 0) {
1477 pt->pg_info[l2_page_num].num_entries -= pte_count;
1478 if (pt->pg_info[l2_page_num].num_entries == 0) {
1479 /*
1480 * Clear the L1 PTE pointing to the L2 PT
1481 */
1482 if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
1483 HW_MMU_COARSE_PAGE_SIZE))
1484 status = 0;
1485 else {
1486 status = -EPERM;
1487 spin_unlock(&pt->pg_lock);
1488 goto EXIT_LOOP;
1489 }
1490 }
1491 rem_bytes -= pte_count * PG_SIZE4K;
1492 } else
1493 status = -EPERM;
1494
1495 spin_unlock(&pt->pg_lock);
1496 continue;
1497skip_coarse_page:
1498 /* va_curr aligned to pte_size? */
1499 /* pte_size = 1 MB or 16 MB */
1500 if (pte_size == 0 || rem_bytes < pte_size ||
1501 va_curr & (pte_size - 1)) {
1502 status = -EPERM;
1503 break;
1504 }
1505
1506 if (pte_size == HW_PAGE_SIZE1MB)
1507 numof4k_pages = 256;
1508 else
1509 numof4k_pages = 4096;
1510 temp = 0;
1511 /* Collect Physical addresses from VA */
1512 paddr = (pte_val & ~(pte_size - 1));
1513 while (temp++ < numof4k_pages) {
1514 if (pfn_valid(__phys_to_pfn(paddr))) {
1515 pg = PHYS_TO_PAGE(paddr);
1516 if (page_count(pg) < 1) {
1517 pr_info("DSPBRIDGE: UNMAP function: "
1518 "COUNT 0 FOR PA 0x%x, size = "
1519 "0x%x\n", paddr, ul_num_bytes);
1520 bad_page_dump(paddr, pg);
1521 } else {
1522 set_page_dirty(pg);
1523 page_cache_release(pg);
1524 }
1525 }
1526 paddr += HW_PAGE_SIZE4KB;
1527 }
1528 if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
1529 status = 0;
1530 rem_bytes -= pte_size;
1531 va_curr += pte_size;
1532 } else {
1533 status = -EPERM;
1534 goto EXIT_LOOP;
1535 }
1536 }
1537 /*
1538 * It is better to flush the TLB here, so that any stale old entries
1539 * get flushed
1540 */
1541EXIT_LOOP:
1542 flush_all(dev_context);
1543 dev_dbg(bridge,
1544 "%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
1545 " rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
1546 pte_addr_l2, rem_bytes, rem_bytes_l2, status);
1547 return status;
1548}
1549
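One invariant worth spelling out: every 4KB page pinned at map time must be released exactly once here. A sketch of the lifecycle as implemented above:

/*
 *   map:    get_user_pages() / get_page()   -> +1 per pinned 4KB page
 *   unmap:  set_page_dirty(pg);             -> DSP writes are not lost
 *           page_cache_release(pg);         -> -1 per 4KB page
 *
 * A 64KB PTE therefore releases 16 4KB pages, a 1MB section 256, and a
 * 16MB supersection 4096 - exactly how numof4k_pages is chosen above.
 */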
1550/*
1551 * ======== user_va2_pa ========
1552 * Purpose:
1553 * This function walks through the page tables to convert a userland
1554 * virtual address to physical address
1555 */
1556static u32 user_va2_pa(struct mm_struct *mm, u32 address)
1557{
1558 pgd_t *pgd;
1559 pmd_t *pmd;
1560 pte_t *ptep, pte;
1561
1562 pgd = pgd_offset(mm, address);
1563 if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
1564 pmd = pmd_offset(pgd, address);
1565 if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
1566 ptep = pte_offset_map(pmd, address);
1567 if (ptep) {
1568 pte = *ptep;
1569 if (pte_present(pte))
1570 return pte & PAGE_MASK;
1571 }
1572 }
1573 }
1574
1575 return 0;
1576}
1577
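The walk above leans on ARM folding the pud level into the pgd, and it returns without unmapping the pte it mapped. A more portable sketch of the same lookup for kernels of this era, with the pud level threaded through, pte_unmap() paired with pte_offset_map(), and pte_val() used instead of treating pte_t as an integer:

static u32 user_va2_pa_sketch(struct mm_struct *mm, u32 address)
{
	u32 pa = 0;
	pgd_t *pgd = pgd_offset(mm, address);

	if (!pgd_none(*pgd) && !pgd_bad(*pgd)) {
		pud_t *pud = pud_offset(pgd, address);

		if (!pud_none(*pud) && !pud_bad(*pud)) {
			pmd_t *pmd = pmd_offset(pud, address);

			if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
				pte_t *ptep = pte_offset_map(pmd, address);

				if (ptep) {
					pte_t pte = *ptep;

					if (pte_present(pte))
						pa = pte_val(pte) & PAGE_MASK;
					pte_unmap(ptep);
				}
			}
		}
	}
	return pa;
}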
1578/*
1579 * ======== pte_update ========
1580 * This function calculates the optimum page-aligned addresses and sizes
1581 * Caller must pass page-aligned values
1582 */
1583static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
1584 u32 va, u32 size,
1585 struct hw_mmu_map_attrs_t *map_attrs)
1586{
1587 u32 i;
1588 u32 all_bits;
1589 u32 pa_curr = pa;
1590 u32 va_curr = va;
1591 u32 num_bytes = size;
1592 struct bridge_dev_context *dev_context = dev_ctxt;
1593 int status = 0;
1594 u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
1595 HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
1596 };
1597
1598 while (num_bytes && !status) {
1599 /* To find the max. page size with which both PA & VA are
1600 * aligned */
1601 all_bits = pa_curr | va_curr;
1602
1603 for (i = 0; i < 4; i++) {
1604 if ((num_bytes >= page_size[i]) && ((all_bits &
1605 (page_size[i] -
1606 1)) == 0)) {
1607 status =
1608 pte_set(dev_context->pt_attrs, pa_curr,
1609 va_curr, page_size[i], map_attrs);
1610 pa_curr += page_size[i];
1611 va_curr += page_size[i];
1612 num_bytes -= page_size[i];
1613 /* Don't try smaller sizes. Hopefully we have
1614 * reached an address aligned to a bigger page
1615 * size */
1616 break;
1617 }
1618 }
1619 }
1620
1621 return status;
1622}
1623
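A worked example of the selection rule, for concreteness:

/*
 * pa = va = 0x0, size = 0x141000:
 *   1 x 1MB section  -> 0x41000 left
 *   4 x 64KB pages   -> 0x01000 left
 *   1 x 4KB page     -> done
 * Each step keeps PA and VA mutually aligned, so the loop never has to
 * fall back to a smaller size at the same offset.
 */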
1624/*
1625 * ======== pte_set ========
1626 * This function calculates PTE address (MPU virtual) to be updated
1627 * It also manages the L2 page tables
1628 */
1629static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
1630 u32 size, struct hw_mmu_map_attrs_t *attrs)
1631{
1632 u32 i;
1633 u32 pte_val;
1634 u32 pte_addr_l1;
1635 u32 pte_size;
1636 /* Base address of the PT that will be updated */
1637 u32 pg_tbl_va;
1638 u32 l1_base_va;
1639 /* Compiler warns that the next three variables might be used
 1640 * uninitialized in this function. That doesn't seem to be the case,
 1641 * but initialize them anyway to silence it. */
1642 u32 l2_base_va = 0;
1643 u32 l2_base_pa = 0;
1644 u32 l2_page_num = 0;
1645 int status = 0;
1646
1647 l1_base_va = pt->l1_base_va;
1648 pg_tbl_va = l1_base_va;
1649 if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
1650 /* Find whether the L1 PTE points to a valid L2 PT */
1651 pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
1652 if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
1653 pte_val = *(u32 *) pte_addr_l1;
1654 pte_size = hw_mmu_pte_size_l1(pte_val);
1655 } else {
1656 return -EPERM;
1657 }
1658 spin_lock(&pt->pg_lock);
1659 if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
1660 /* Get the L2 PA from the L1 PTE, and find
1661 * corresponding L2 VA */
1662 l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
1663 l2_base_va =
1664 l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
1665 l2_page_num =
1666 (l2_base_pa -
1667 pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
1668 } else if (pte_size == 0) {
1669 /* L1 PTE is invalid. Allocate a L2 PT and
1670 * point the L1 PTE to it */
1671 /* Find a free L2 PT. */
1672 for (i = 0; (i < pt->l2_num_pages) &&
1673 (pt->pg_info[i].num_entries != 0); i++)
 1674 ;
1675 if (i < pt->l2_num_pages) {
1676 l2_page_num = i;
1677 l2_base_pa = pt->l2_base_pa + (l2_page_num *
1678 HW_MMU_COARSE_PAGE_SIZE);
1679 l2_base_va = pt->l2_base_va + (l2_page_num *
1680 HW_MMU_COARSE_PAGE_SIZE);
1681 /* Endianness attributes are ignored for
1682 * HW_MMU_COARSE_PAGE_SIZE */
1683 status =
1684 hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
1685 HW_MMU_COARSE_PAGE_SIZE,
1686 attrs);
1687 } else {
1688 status = -ENOMEM;
1689 }
1690 } else {
1691 /* Found valid L1 PTE of another size.
1692 * Should not overwrite it. */
1693 status = -EPERM;
1694 }
1695 if (!status) {
1696 pg_tbl_va = l2_base_va;
1697 if (size == HW_PAGE_SIZE64KB)
1698 pt->pg_info[l2_page_num].num_entries += 16;
1699 else
1700 pt->pg_info[l2_page_num].num_entries++;
1701 dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
1702 "%x, num_entries %x\n", l2_base_va,
1703 l2_base_pa, l2_page_num,
1704 pt->pg_info[l2_page_num].num_entries);
1705 }
1706 spin_unlock(&pt->pg_lock);
1707 }
1708 if (!status) {
1709 dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
1710 pg_tbl_va, pa, va, size);
1711 dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
1712 "mixed_size %x\n", attrs->endianism,
1713 attrs->element_size, attrs->mixed_size);
1714 status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
1715 }
1716
1717 return status;
1718}
1719
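A standalone sketch of the table geometry pte_set manages, assuming the ARM coarse-table layout this driver uses (a 4096-entry L1 of 1MB sections or coarse-table pointers, a 256-entry L2 of 4KB slots); the VA is hypothetical:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t va = 0x2040a000;		/* hypothetical DSP VA */
	uint32_t l1_index = va >> 20;		/* which 1MB region */
	uint32_t l2_index = (va >> 12) & 0xff;	/* which 4KB slot in it */

	printf("VA %#x -> L1[%u] -> L2[%u]\n", va, l1_index, l2_index);
	/*
	 * A 64KB large page occupies 16 consecutive L2 slots, which is
	 * why pte_set adds 16 to num_entries for HW_PAGE_SIZE64KB and 1
	 * for a 4KB page; the count dropping to zero is what lets unmap
	 * recycle the whole coarse table.
	 */
	return 0;
}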
1720/* Memory map kernel VA -- memory allocated with vmalloc */
1721static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
1722 u32 ul_mpu_addr, u32 virt_addr,
1723 u32 ul_num_bytes,
1724 struct hw_mmu_map_attrs_t *hw_attrs)
1725{
1726 int status = 0;
1727 struct page *page[1];
1728 u32 i;
1729 u32 pa_curr;
1730 u32 pa_next;
1731 u32 va_curr;
1732 u32 size_curr;
1733 u32 num_pages;
1734 u32 pa;
1735 u32 num_of4k_pages;
1736 u32 temp = 0;
1737
1738 /*
1739 * Do Kernel va to pa translation.
1740 * Combine physically contiguous regions to reduce TLBs.
1741 * Pass the translated pa to pte_update.
1742 */
1743 num_pages = ul_num_bytes / PAGE_SIZE; /* PAGE_SIZE = OS page size */
1744 i = 0;
1745 va_curr = ul_mpu_addr;
1746 page[0] = vmalloc_to_page((void *)va_curr);
1747 pa_next = page_to_phys(page[0]);
1748 while (!status && (i < num_pages)) {
1749 /*
 1750 * Reuse pa_next from the previous iteration to avoid
1751 * an extra va2pa call
1752 */
1753 pa_curr = pa_next;
1754 size_curr = PAGE_SIZE;
1755 /*
1756 * If the next page is physically contiguous,
1757 * map it with the current one by increasing
1758 * the size of the region to be mapped
1759 */
1760 while (++i < num_pages) {
1761 page[0] =
1762 vmalloc_to_page((void *)(va_curr + size_curr));
1763 pa_next = page_to_phys(page[0]);
1764
1765 if (pa_next == (pa_curr + size_curr))
1766 size_curr += PAGE_SIZE;
1767 else
1768 break;
1769
1770 }
1771 if (pa_next == 0) {
1772 status = -ENOMEM;
1773 break;
1774 }
1775 pa = pa_curr;
1776 num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
1777 while (temp++ < num_of4k_pages) {
1778 get_page(PHYS_TO_PAGE(pa));
1779 pa += HW_PAGE_SIZE4KB;
1780 }
1781 status = pte_update(dev_context, pa_curr, virt_addr +
1782 (va_curr - ul_mpu_addr), size_curr,
1783 hw_attrs);
1784 va_curr += size_curr;
1785 }
1786 /*
 1787 * In any case, flush the TLB.
 1788 * This is done here instead of in pte_update to avoid unnecessary
 1789 * repetition while mapping non-contiguous physical regions of a
 1790 * virtual region
1791 */
1792 flush_all(dev_context);
1793 dev_dbg(bridge, "%s status %x\n", __func__, status);
1794 return status;
1795}
1796
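The coalescing above reduces DSP TLB pressure: vmalloc memory is only virtually contiguous, so each page is translated and physically adjacent translations are merged into a single pte_update() call. A standalone simulation of that merge over hypothetical page addresses:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SZ 0x1000u

int main(void)
{
	/* physical addresses of four vmalloc pages (hypothetical) */
	uint32_t pa[] = { 0x81000000, 0x81001000, 0x81002000, 0x87340000 };
	unsigned int n = sizeof(pa) / sizeof(pa[0]), i = 0;

	while (i < n) {
		uint32_t start = pa[i], len = PAGE_SZ;

		while (++i < n && pa[i] == start + len)
			len += PAGE_SZ;	/* extend the contiguous run */
		printf("pte_update(pa=%#x, len=%#x)\n", start, len);
	}
	return 0;	/* prints one 12KB run and one 4KB run */
}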
1797/*
931 * ======== wait_for_start ======== 1798 * ======== wait_for_start ========
 932 * Wait for the signal from DSP that it has started, or time out. 1799
933 */ 1800 */
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
index b57a9fd5e757..fb9026e1403c 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
@@ -31,6 +31,10 @@
31#include <dspbridge/dev.h> 31#include <dspbridge/dev.h>
32#include <dspbridge/iodefs.h> 32#include <dspbridge/iodefs.h>
33 33
34/* ------------------------------------ Hardware Abstraction Layer */
35#include <hw_defs.h>
36#include <hw_mmu.h>
37
34#include <dspbridge/pwr_sh.h> 38#include <dspbridge/pwr_sh.h>
35 39
36/* ----------------------------------- Bridge Driver */ 40/* ----------------------------------- Bridge Driver */
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c
index 66dbf02549e4..ba2961049dad 100644
--- a/drivers/staging/tidspbridge/core/tiomap_io.c
+++ b/drivers/staging/tidspbridge/core/tiomap_io.c
@@ -134,16 +134,17 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
134 134
135 if (!status) { 135 if (!status) {
136 ul_tlb_base_virt = 136 ul_tlb_base_virt =
137 dev_context->sh_s.seg0_da * DSPWORDSIZE; 137 dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
138 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); 138 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
139 dw_ext_prog_virt_mem = dev_context->sh_s.seg0_va; 139 dw_ext_prog_virt_mem =
140 dev_context->atlb_entry[0].ul_gpp_va;
140 141
141 if (!trace_read) { 142 if (!trace_read) {
142 ul_shm_offset_virt = 143 ul_shm_offset_virt =
143 ul_shm_base_virt - ul_tlb_base_virt; 144 ul_shm_base_virt - ul_tlb_base_virt;
144 ul_shm_offset_virt += 145 ul_shm_offset_virt +=
145 PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base + 146 PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base +
146 1, PAGE_SIZE * 16); 147 1, HW_PAGE_SIZE64KB);
147 dw_ext_prog_virt_mem -= ul_shm_offset_virt; 148 dw_ext_prog_virt_mem -= ul_shm_offset_virt;
148 dw_ext_prog_virt_mem += 149 dw_ext_prog_virt_mem +=
149 (ul_ext_base - ul_dyn_ext_base); 150 (ul_ext_base - ul_dyn_ext_base);
@@ -317,9 +318,8 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
317 ret = -EPERM; 318 ret = -EPERM;
318 319
319 if (!ret) { 320 if (!ret) {
320 ul_tlb_base_virt = dev_context->sh_s.seg0_da * 321 ul_tlb_base_virt =
321 DSPWORDSIZE; 322 dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
322
323 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); 323 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
324 324
325 if (symbols_reloaded) { 325 if (symbols_reloaded) {
@@ -337,7 +337,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
337 ul_shm_base_virt - ul_tlb_base_virt; 337 ul_shm_base_virt - ul_tlb_base_virt;
338 if (trace_load) { 338 if (trace_load) {
339 dw_ext_prog_virt_mem = 339 dw_ext_prog_virt_mem =
340 dev_context->sh_s.seg0_va; 340 dev_context->atlb_entry[0].ul_gpp_va;
341 } else { 341 } else {
342 dw_ext_prog_virt_mem = host_res->dw_mem_base[1]; 342 dw_ext_prog_virt_mem = host_res->dw_mem_base[1];
343 dw_ext_prog_virt_mem += 343 dw_ext_prog_virt_mem +=
@@ -393,6 +393,7 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val)
393 omap_dspbridge_dev->dev.platform_data; 393 omap_dspbridge_dev->dev.platform_data;
394 struct cfg_hostres *resources = dev_context->resources; 394 struct cfg_hostres *resources = dev_context->resources;
395 int status = 0; 395 int status = 0;
396 u32 temp;
396 397
397 if (!dev_context->mbox) 398 if (!dev_context->mbox)
398 return 0; 399 return 0;
@@ -436,7 +437,7 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val)
436 omap_mbox_restore_ctx(dev_context->mbox); 437 omap_mbox_restore_ctx(dev_context->mbox);
437 438
438 /* Access MMU SYS CONFIG register to generate a short wakeup */ 439 /* Access MMU SYS CONFIG register to generate a short wakeup */
439 iommu_read_reg(dev_context->dsp_mmu, MMU_SYSCONFIG); 440 temp = readl(resources->dw_dmmu_base + 0x10);
440 441
441 dev_context->dw_brd_state = BRD_RUNNING; 442 dev_context->dw_brd_state = BRD_RUNNING;
442 } else if (dev_context->dw_brd_state == BRD_RETENTION) { 443 } else if (dev_context->dw_brd_state == BRD_RETENTION) {
diff --git a/drivers/staging/tidspbridge/core/ue_deh.c b/drivers/staging/tidspbridge/core/ue_deh.c
index e24ea0c73914..3430418190da 100644
--- a/drivers/staging/tidspbridge/core/ue_deh.c
+++ b/drivers/staging/tidspbridge/core/ue_deh.c
@@ -31,6 +31,57 @@
31#include <dspbridge/drv.h> 31#include <dspbridge/drv.h>
32#include <dspbridge/wdt.h> 32#include <dspbridge/wdt.h>
33 33
34static u32 fault_addr;
35
36static void mmu_fault_dpc(unsigned long data)
37{
38 struct deh_mgr *deh = (void *)data;
39
40 if (!deh)
41 return;
42
43 bridge_deh_notify(deh, DSP_MMUFAULT, 0);
44}
45
46static irqreturn_t mmu_fault_isr(int irq, void *data)
47{
48 struct deh_mgr *deh = data;
49 struct cfg_hostres *resources;
50 u32 event;
51
52 if (!deh)
53 return IRQ_HANDLED;
54
55 resources = deh->hbridge_context->resources;
56 if (!resources) {
57 dev_dbg(bridge, "%s: Failed to get Host Resources\n",
58 __func__);
59 return IRQ_HANDLED;
60 }
61
62 hw_mmu_event_status(resources->dw_dmmu_base, &event);
63 if (event == HW_MMU_TRANSLATION_FAULT) {
64 hw_mmu_fault_addr_read(resources->dw_dmmu_base, &fault_addr);
65 dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__,
66 event, fault_addr);
67 /*
68 * Schedule a DPC directly. In the future, it may be
69 * necessary to check if DSP MMU fault is intended for
70 * Bridge.
71 */
72 tasklet_schedule(&deh->dpc_tasklet);
73
 74 /* Disable the MMU events; otherwise, once the fault is
 75 * cleared, they will start raising interrupts again */
76 hw_mmu_event_disable(resources->dw_dmmu_base,
77 HW_MMU_TRANSLATION_FAULT);
78 } else {
79 hw_mmu_event_disable(resources->dw_dmmu_base,
80 HW_MMU_ALL_INTERRUPTS);
81 }
82 return IRQ_HANDLED;
83}
84
34int bridge_deh_create(struct deh_mgr **ret_deh, 85int bridge_deh_create(struct deh_mgr **ret_deh,
35 struct dev_object *hdev_obj) 86 struct dev_object *hdev_obj)
36{ 87{
@@ -58,9 +109,18 @@ int bridge_deh_create(struct deh_mgr **ret_deh,
58 } 109 }
59 ntfy_init(deh->ntfy_obj); 110 ntfy_init(deh->ntfy_obj);
60 111
 112	/* Create an MMU fault DPC */
113 tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh);
114
61 /* Fill in context structure */ 115 /* Fill in context structure */
62 deh->hbridge_context = hbridge_context; 116 deh->hbridge_context = hbridge_context;
63 117
118 /* Install ISR function for DSP MMU fault */
119 status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0,
120 "DspBridge\tiommu fault", deh);
121 if (status < 0)
122 goto err;
123
64 *ret_deh = deh; 124 *ret_deh = deh;
65 return 0; 125 return 0;
66 126
@@ -80,6 +140,11 @@ int bridge_deh_destroy(struct deh_mgr *deh)
80 ntfy_delete(deh->ntfy_obj); 140 ntfy_delete(deh->ntfy_obj);
81 kfree(deh->ntfy_obj); 141 kfree(deh->ntfy_obj);
82 } 142 }
 143	/* Release the DSP MMU fault IRQ */
144 free_irq(INT_DSP_MMU_IRQ, deh);
145
146 /* Free DPC object */
147 tasklet_kill(&deh->dpc_tasklet);
83 148
84 /* Deallocate the DEH manager object */ 149 /* Deallocate the DEH manager object */
85 kfree(deh); 150 kfree(deh);
@@ -101,6 +166,48 @@ int bridge_deh_register_notify(struct deh_mgr *deh, u32 event_mask,
101 return ntfy_unregister(deh->ntfy_obj, hnotification); 166 return ntfy_unregister(deh->ntfy_obj, hnotification);
102} 167}
103 168
169#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
170static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
171{
172 struct cfg_hostres *resources;
173 struct hw_mmu_map_attrs_t map_attrs = {
174 .endianism = HW_LITTLE_ENDIAN,
175 .element_size = HW_ELEM_SIZE16BIT,
176 .mixed_size = HW_MMU_CPUES,
177 };
178 void *dummy_va_addr;
179
180 resources = dev_context->resources;
181 dummy_va_addr = (void*)__get_free_page(GFP_ATOMIC);
182
183 /*
184 * Before acking the MMU fault, let's make sure MMU can only
185 * access entry #0. Then add a new entry so that the DSP OS
186 * can continue in order to dump the stack.
187 */
188 hw_mmu_twl_disable(resources->dw_dmmu_base);
189 hw_mmu_tlb_flush_all(resources->dw_dmmu_base);
190
191 hw_mmu_tlb_add(resources->dw_dmmu_base,
192 virt_to_phys(dummy_va_addr), fault_addr,
193 HW_PAGE_SIZE4KB, 1,
194 &map_attrs, HW_SET, HW_SET);
195
196 dsp_clk_enable(DSP_CLK_GPT8);
197
198 dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
199
200 /* Clear MMU interrupt */
201 hw_mmu_event_ack(resources->dw_dmmu_base,
202 HW_MMU_TRANSLATION_FAULT);
203 dump_dsp_stack(dev_context);
204 dsp_clk_disable(DSP_CLK_GPT8);
205
206 hw_mmu_disable(resources->dw_dmmu_base);
207 free_page((unsigned long)dummy_va_addr);
208}
209#endif
210
104static inline const char *event_to_string(int event) 211static inline const char *event_to_string(int event)
105{ 212{
106 switch (event) { 213 switch (event) {
@@ -133,7 +240,13 @@ void bridge_deh_notify(struct deh_mgr *deh, int event, int info)
133#endif 240#endif
134 break; 241 break;
135 case DSP_MMUFAULT: 242 case DSP_MMUFAULT:
136 dev_err(bridge, "%s: %s, addr=0x%x", __func__, str, info); 243 dev_err(bridge, "%s: %s, addr=0x%x", __func__,
244 str, fault_addr);
245#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
246 print_dsp_trace_buffer(dev_context);
247 dump_dl_modules(dev_context);
248 mmu_fault_print_stack(dev_context);
249#endif
137 break; 250 break;
138 default: 251 default:
139 dev_err(bridge, "%s: %s", __func__, str); 252 dev_err(bridge, "%s: %s", __func__, str);
diff --git a/drivers/staging/tidspbridge/hw/EasiGlobal.h b/drivers/staging/tidspbridge/hw/EasiGlobal.h
new file mode 100644
index 000000000000..e48d7f67c60a
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/EasiGlobal.h
@@ -0,0 +1,41 @@
1/*
2 * EasiGlobal.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Copyright (C) 2007 Texas Instruments, Inc.
7 *
8 * This package is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 14 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
15 */
16
17#ifndef _EASIGLOBAL_H
18#define _EASIGLOBAL_H
19#include <linux/types.h>
20
21/*
22 * DEFINE: READ_ONLY, WRITE_ONLY & READ_WRITE
23 *
24 * DESCRIPTION: Defines used to describe register types for EASI-checker tests.
25 */
26
27#define READ_ONLY 1
28#define WRITE_ONLY 2
29#define READ_WRITE 3
30
31/*
32 * MACRO: _DEBUG_LEVEL1_EASI
33 *
 34 * DESCRIPTION: A MACRO which can be used to indicate that a particular
 35 * register access function was called.
36 *
 37 * NOTE: We currently don't use this functionality.
38 */
39#define _DEBUG_LEVEL1_EASI(easi_num) ((void)0)
40
41#endif /* _EASIGLOBAL_H */
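
The _DEBUG_LEVEL1_EASI() hook above compiles away, so the EASI function numbers defined in MMUAccInt.h are currently inert. A hedged sketch of how a debug build could put the hook to use (illustration only, not part of this patch; printk() is chosen because the read macros in MMURegAcM.h invoke the hook inside comma expressions, so it must expand to an expression rather than a statement):

/* Hypothetical debug variant of the hook; the in-tree macro stays ((void)0).
 * printk() is an expression, so this still works inside the comma
 * expressions used by the MMURegAcM.h read macros. */
#include <linux/printk.h>

#undef _DEBUG_LEVEL1_EASI
#define _DEBUG_LEVEL1_EASI(easi_num) \
	printk(KERN_DEBUG "EASI L1 access: fn %d\n", (int)(easi_num))
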
diff --git a/drivers/staging/tidspbridge/hw/MMUAccInt.h b/drivers/staging/tidspbridge/hw/MMUAccInt.h
new file mode 100644
index 000000000000..1cefca321d71
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/MMUAccInt.h
@@ -0,0 +1,76 @@
1/*
2 * MMUAccInt.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Copyright (C) 2007 Texas Instruments, Inc.
7 *
8 * This package is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 14 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
15 */
16
17#ifndef _MMU_ACC_INT_H
18#define _MMU_ACC_INT_H
19
20/* Mappings of level 1 EASI function numbers to function names */
21
22#define EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32 (MMU_BASE_EASIL1 + 3)
23#define EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32 (MMU_BASE_EASIL1 + 17)
24#define EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32 (MMU_BASE_EASIL1 + 39)
25#define EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 51)
26#define EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32 (MMU_BASE_EASIL1 + 102)
27#define EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 103)
28#define EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32 (MMU_BASE_EASIL1 + 156)
29#define EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32 (MMU_BASE_EASIL1 + 174)
30#define EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 180)
31#define EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 190)
32#define EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32 (MMU_BASE_EASIL1 + 194)
33#define EASIL1_MMUMMU_TTB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 198)
34#define EASIL1_MMUMMU_LOCK_READ_REGISTER32 (MMU_BASE_EASIL1 + 203)
35#define EASIL1_MMUMMU_LOCK_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 204)
36#define EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32 (MMU_BASE_EASIL1 + 205)
37#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32 (MMU_BASE_EASIL1 + 209)
38#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32 (MMU_BASE_EASIL1 + 211)
39#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32 (MMU_BASE_EASIL1 + 212)
40#define EASIL1_MMUMMU_LD_TLB_READ_REGISTER32 (MMU_BASE_EASIL1 + 213)
41#define EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 214)
42#define EASIL1_MMUMMU_CAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 226)
43#define EASIL1_MMUMMU_RAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 268)
44#define EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 322)
45
46/* Register offset address definitions */
47#define MMU_MMU_SYSCONFIG_OFFSET 0x10
48#define MMU_MMU_IRQSTATUS_OFFSET 0x18
49#define MMU_MMU_IRQENABLE_OFFSET 0x1c
50#define MMU_MMU_WALKING_ST_OFFSET 0x40
51#define MMU_MMU_CNTL_OFFSET 0x44
52#define MMU_MMU_FAULT_AD_OFFSET 0x48
53#define MMU_MMU_TTB_OFFSET 0x4c
54#define MMU_MMU_LOCK_OFFSET 0x50
55#define MMU_MMU_LD_TLB_OFFSET 0x54
56#define MMU_MMU_CAM_OFFSET 0x58
57#define MMU_MMU_RAM_OFFSET 0x5c
58#define MMU_MMU_GFLUSH_OFFSET 0x60
59#define MMU_MMU_FLUSH_ENTRY_OFFSET 0x64
60/* Bitfield mask and offset declarations */
61#define MMU_MMU_SYSCONFIG_IDLE_MODE_MASK 0x18
62#define MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET 3
63#define MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK 0x1
64#define MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET 0
65#define MMU_MMU_WALKING_ST_TWL_RUNNING_MASK 0x1
66#define MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET 0
67#define MMU_MMU_CNTL_TWL_ENABLE_MASK 0x4
68#define MMU_MMU_CNTL_TWL_ENABLE_OFFSET 2
69#define MMU_MMU_CNTL_MMU_ENABLE_MASK 0x2
70#define MMU_MMU_CNTL_MMU_ENABLE_OFFSET 1
71#define MMU_MMU_LOCK_BASE_VALUE_MASK 0xfc00
72#define MMU_MMU_LOCK_BASE_VALUE_OFFSET 10
73#define MMU_MMU_LOCK_CURRENT_VICTIM_MASK 0x3f0
74#define MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET 4
75
76#endif /* _MMU_ACC_INT_H */
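
Each bitfield above is described by a (mask, offset) pair, so decoding a register value is a mask-then-shift. A self-contained sketch against the MMU_LOCK definitions; the sample register value 0x2430 is made up:

#include <stdio.h>

#define MMU_MMU_LOCK_BASE_VALUE_MASK 0xfc00
#define MMU_MMU_LOCK_BASE_VALUE_OFFSET 10
#define MMU_MMU_LOCK_CURRENT_VICTIM_MASK 0x3f0
#define MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET 4

int main(void)
{
	unsigned int lock_reg = 0x2430; /* made-up MMU_LOCK value */
	unsigned int base = (lock_reg & MMU_MMU_LOCK_BASE_VALUE_MASK)
		>> MMU_MMU_LOCK_BASE_VALUE_OFFSET;
	unsigned int victim = (lock_reg & MMU_MMU_LOCK_CURRENT_VICTIM_MASK)
		>> MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET;

	printf("locked entries=%u, victim=%u\n", base, victim); /* 9, 3 */
	return 0;
}
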
diff --git a/drivers/staging/tidspbridge/hw/MMURegAcM.h b/drivers/staging/tidspbridge/hw/MMURegAcM.h
new file mode 100644
index 000000000000..ab1a16da731c
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/MMURegAcM.h
@@ -0,0 +1,225 @@
1/*
2 * MMURegAcM.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Copyright (C) 2007 Texas Instruments, Inc.
7 *
8 * This package is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 14 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
15 */
16
17#ifndef _MMU_REG_ACM_H
18#define _MMU_REG_ACM_H
19
20#include <linux/io.h>
21#include <EasiGlobal.h>
22
23#include "MMUAccInt.h"
24
25#if defined(USE_LEVEL_1_MACROS)
26
27#define MMUMMU_SYSCONFIG_READ_REGISTER32(base_address)\
28 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32),\
29 __raw_readl((base_address)+MMU_MMU_SYSCONFIG_OFFSET))
30
31#define MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32(base_address, value)\
32{\
33 const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
34 register u32 data = __raw_readl((base_address)+offset);\
35 register u32 new_value = (value);\
36 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32);\
37 data &= ~(MMU_MMU_SYSCONFIG_IDLE_MODE_MASK);\
38 new_value <<= MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET;\
39 new_value &= MMU_MMU_SYSCONFIG_IDLE_MODE_MASK;\
40 new_value |= data;\
41 __raw_writel(new_value, base_address+offset);\
42}
43
44#define MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32(base_address, value)\
45{\
46 const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
47 register u32 data = __raw_readl((base_address)+offset);\
48 register u32 new_value = (value);\
49 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32);\
50 data &= ~(MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK);\
51 new_value <<= MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET;\
52 new_value &= MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK;\
53 new_value |= data;\
54 __raw_writel(new_value, base_address+offset);\
55}
56
57#define MMUMMU_IRQSTATUS_READ_REGISTER32(base_address)\
 58	(_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUS_READ_REGISTER32),\
59 __raw_readl((base_address)+MMU_MMU_IRQSTATUS_OFFSET))
60
61#define MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, value)\
62{\
63 const u32 offset = MMU_MMU_IRQSTATUS_OFFSET;\
64 register u32 new_value = (value);\
65 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32);\
66 __raw_writel(new_value, (base_address)+offset);\
67}
68
69#define MMUMMU_IRQENABLE_READ_REGISTER32(base_address)\
70 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32),\
71 __raw_readl((base_address)+MMU_MMU_IRQENABLE_OFFSET))
72
73#define MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, value)\
74{\
75 const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\
76 register u32 new_value = (value);\
77 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32);\
78 __raw_writel(new_value, (base_address)+offset);\
79}
80
81#define MMUMMU_WALKING_STTWL_RUNNING_READ32(base_address)\
82 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32),\
83 (((__raw_readl(((base_address)+(MMU_MMU_WALKING_ST_OFFSET))))\
84 & MMU_MMU_WALKING_ST_TWL_RUNNING_MASK) >>\
85 MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET))
86
87#define MMUMMU_CNTLTWL_ENABLE_READ32(base_address)\
88 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32),\
89 (((__raw_readl(((base_address)+(MMU_MMU_CNTL_OFFSET)))) &\
90 MMU_MMU_CNTL_TWL_ENABLE_MASK) >>\
91 MMU_MMU_CNTL_TWL_ENABLE_OFFSET))
92
93#define MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, value)\
94{\
95 const u32 offset = MMU_MMU_CNTL_OFFSET;\
96 register u32 data = __raw_readl((base_address)+offset);\
97 register u32 new_value = (value);\
98 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32);\
99 data &= ~(MMU_MMU_CNTL_TWL_ENABLE_MASK);\
100 new_value <<= MMU_MMU_CNTL_TWL_ENABLE_OFFSET;\
101 new_value &= MMU_MMU_CNTL_TWL_ENABLE_MASK;\
102 new_value |= data;\
103 __raw_writel(new_value, base_address+offset);\
104}
105
106#define MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, value)\
107{\
108 const u32 offset = MMU_MMU_CNTL_OFFSET;\
109 register u32 data = __raw_readl((base_address)+offset);\
110 register u32 new_value = (value);\
111 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32);\
112 data &= ~(MMU_MMU_CNTL_MMU_ENABLE_MASK);\
113 new_value <<= MMU_MMU_CNTL_MMU_ENABLE_OFFSET;\
114 new_value &= MMU_MMU_CNTL_MMU_ENABLE_MASK;\
115 new_value |= data;\
116 __raw_writel(new_value, base_address+offset);\
117}
118
119#define MMUMMU_FAULT_AD_READ_REGISTER32(base_address)\
120 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32),\
121 __raw_readl((base_address)+MMU_MMU_FAULT_AD_OFFSET))
122
123#define MMUMMU_TTB_WRITE_REGISTER32(base_address, value)\
124{\
125 const u32 offset = MMU_MMU_TTB_OFFSET;\
126 register u32 new_value = (value);\
127 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_TTB_WRITE_REGISTER32);\
128 __raw_writel(new_value, (base_address)+offset);\
129}
130
131#define MMUMMU_LOCK_READ_REGISTER32(base_address)\
132 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_READ_REGISTER32),\
133 __raw_readl((base_address)+MMU_MMU_LOCK_OFFSET))
134
135#define MMUMMU_LOCK_WRITE_REGISTER32(base_address, value)\
136{\
137 const u32 offset = MMU_MMU_LOCK_OFFSET;\
138 register u32 new_value = (value);\
139 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_WRITE_REGISTER32);\
140 __raw_writel(new_value, (base_address)+offset);\
141}
142
143#define MMUMMU_LOCK_BASE_VALUE_READ32(base_address)\
144 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32),\
145 (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
146 MMU_MMU_LOCK_BASE_VALUE_MASK) >>\
147 MMU_MMU_LOCK_BASE_VALUE_OFFSET))
148
149#define MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, value)\
150{\
151 const u32 offset = MMU_MMU_LOCK_OFFSET;\
152 register u32 data = __raw_readl((base_address)+offset);\
153 register u32 new_value = (value);\
 154	_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_BASE_VALUE_WRITE32);\
155 data &= ~(MMU_MMU_LOCK_BASE_VALUE_MASK);\
156 new_value <<= MMU_MMU_LOCK_BASE_VALUE_OFFSET;\
157 new_value &= MMU_MMU_LOCK_BASE_VALUE_MASK;\
158 new_value |= data;\
159 __raw_writel(new_value, base_address+offset);\
160}
161
162#define MMUMMU_LOCK_CURRENT_VICTIM_READ32(base_address)\
163 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32),\
164 (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
165 MMU_MMU_LOCK_CURRENT_VICTIM_MASK) >>\
166 MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET))
167
168#define MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, value)\
169{\
170 const u32 offset = MMU_MMU_LOCK_OFFSET;\
171 register u32 data = __raw_readl((base_address)+offset);\
172 register u32 new_value = (value);\
173 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32);\
174 data &= ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK);\
175 new_value <<= MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET;\
176 new_value &= MMU_MMU_LOCK_CURRENT_VICTIM_MASK;\
177 new_value |= data;\
178 __raw_writel(new_value, base_address+offset);\
179}
180
181#define MMUMMU_LOCK_CURRENT_VICTIM_SET32(var, value)\
182 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32),\
183 (((var) & ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK)) |\
184 (((value) << MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET) &\
185 MMU_MMU_LOCK_CURRENT_VICTIM_MASK)))
186
187#define MMUMMU_LD_TLB_READ_REGISTER32(base_address)\
188 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_READ_REGISTER32),\
189 __raw_readl((base_address)+MMU_MMU_LD_TLB_OFFSET))
190
191#define MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, value)\
192{\
193 const u32 offset = MMU_MMU_LD_TLB_OFFSET;\
194 register u32 new_value = (value);\
195 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32);\
196 __raw_writel(new_value, (base_address)+offset);\
197}
198
199#define MMUMMU_CAM_WRITE_REGISTER32(base_address, value)\
200{\
201 const u32 offset = MMU_MMU_CAM_OFFSET;\
202 register u32 new_value = (value);\
203 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CAM_WRITE_REGISTER32);\
204 __raw_writel(new_value, (base_address)+offset);\
205}
206
207#define MMUMMU_RAM_WRITE_REGISTER32(base_address, value)\
208{\
209 const u32 offset = MMU_MMU_RAM_OFFSET;\
210 register u32 new_value = (value);\
211 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_RAM_WRITE_REGISTER32);\
212 __raw_writel(new_value, (base_address)+offset);\
213}
214
215#define MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, value)\
216{\
217 const u32 offset = MMU_MMU_FLUSH_ENTRY_OFFSET;\
218 register u32 new_value = (value);\
219 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32);\
220 __raw_writel(new_value, (base_address)+offset);\
221}
222
223#endif /* USE_LEVEL_1_MACROS */
224
225#endif /* _MMU_REG_ACM_H */
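
Taken together, the write macros implement a read-modify-write on the shared control register and plain stores elsewhere. A caller-side sketch, assuming `dmmu_base' is an ioremapped MMU register base obtained elsewhere (the function name is illustrative, not part of this patch):

#define USE_LEVEL_1_MACROS
#include "MMURegAcM.h"

/* Sketch: enable the table walker and the MMU, then clear a pending
 * translation-fault bit (0x2) in IRQSTATUS by writing it back. */
static void mmu_bring_up(void __iomem *dmmu_base)
{
	MMUMMU_CNTLTWL_ENABLE_WRITE32(dmmu_base, 1);	/* HW_SET */
	MMUMMU_CNTLMMU_ENABLE_WRITE32(dmmu_base, 1);	/* HW_SET */
	MMUMMU_IRQSTATUS_WRITE_REGISTER32(dmmu_base, 0x2);
}
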
diff --git a/drivers/staging/tidspbridge/hw/hw_defs.h b/drivers/staging/tidspbridge/hw/hw_defs.h
new file mode 100644
index 000000000000..d5266d4c163f
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/hw_defs.h
@@ -0,0 +1,58 @@
1/*
2 * hw_defs.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Global HW definitions
7 *
8 * Copyright (C) 2007 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 16 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#ifndef _HW_DEFS_H
20#define _HW_DEFS_H
21
22/* Page size */
23#define HW_PAGE_SIZE4KB 0x1000
24#define HW_PAGE_SIZE64KB 0x10000
25#define HW_PAGE_SIZE1MB 0x100000
26#define HW_PAGE_SIZE16MB 0x1000000
27
28/* hw_status: return type for HW API */
29typedef long hw_status;
30
31/* Macro used to set and clear any bit */
32#define HW_CLEAR 0
33#define HW_SET 1
34
35/* hw_endianism_t: Enumerated Type used to specify the endianism
36 * Do NOT change these values. They are used as bit fields. */
37enum hw_endianism_t {
38 HW_LITTLE_ENDIAN,
39 HW_BIG_ENDIAN
40};
41
42/* hw_element_size_t: Enumerated Type used to specify the element size
43 * Do NOT change these values. They are used as bit fields. */
44enum hw_element_size_t {
45 HW_ELEM_SIZE8BIT,
46 HW_ELEM_SIZE16BIT,
47 HW_ELEM_SIZE32BIT,
48 HW_ELEM_SIZE64BIT
49};
50
51/* hw_idle_mode_t: Enumerated Type used to specify Idle modes */
52enum hw_idle_mode_t {
53 HW_FORCE_IDLE,
54 HW_NO_IDLE,
55 HW_SMART_IDLE
56};
57
58#endif /* _HW_DEFS_H */
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.c b/drivers/staging/tidspbridge/hw/hw_mmu.c
new file mode 100644
index 000000000000..014f5d5293ae
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/hw_mmu.c
@@ -0,0 +1,562 @@
1/*
2 * hw_mmu.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * API definitions to setup MMU TLB and PTE
7 *
8 * Copyright (C) 2007 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 16 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#include <linux/io.h>
20#include "MMURegAcM.h"
21#include <hw_defs.h>
22#include <hw_mmu.h>
23#include <linux/types.h>
24#include <linux/err.h>
25
26#define MMU_BASE_VAL_MASK 0xFC00
27#define MMU_PAGE_MAX 3
28#define MMU_ELEMENTSIZE_MAX 3
29#define MMU_ADDR_MASK 0xFFFFF000
30#define MMU_TTB_MASK 0xFFFFC000
31#define MMU_SECTION_ADDR_MASK 0xFFF00000
32#define MMU_SSECTION_ADDR_MASK 0xFF000000
33#define MMU_PAGE_TABLE_MASK 0xFFFFFC00
34#define MMU_LARGE_PAGE_MASK 0xFFFF0000
35#define MMU_SMALL_PAGE_MASK 0xFFFFF000
36
37#define MMU_LOAD_TLB 0x00000001
38#define MMU_GFLUSH 0x60
39
40/*
 41 * hw_mmu_page_size_t: Enumerated Type used to specify the MMU Page Size (SLSS)
42 */
43enum hw_mmu_page_size_t {
44 HW_MMU_SECTION,
45 HW_MMU_LARGE_PAGE,
46 HW_MMU_SMALL_PAGE,
47 HW_MMU_SUPERSECTION
48};
49
50/*
51 * FUNCTION : mmu_flush_entry
52 *
53 * INPUTS:
54 *
55 * Identifier : base_address
56 * Type : const u32
57 * Description : Base Address of instance of MMU module
58 *
59 * RETURNS:
60 *
61 * Type : hw_status
 62 *       Description     : 0		 -- No errors occurred
63 * RET_BAD_NULL_PARAM -- A Pointer
 64 *						 Parameter was set to NULL
65 *
 66 * PURPOSE:	      : Flush the TLB entry pointed to by the
67 * lock counter register
68 * even if this entry is set protected
69 *
70 * METHOD: : Check the Input parameter and Flush a
71 * single entry in the TLB.
72 */
73static hw_status mmu_flush_entry(const void __iomem *base_address);
74
75/*
76 * FUNCTION : mmu_set_cam_entry
77 *
78 * INPUTS:
79 *
80 * Identifier : base_address
 81 *       Type	    : const u32
82 * Description : Base Address of instance of MMU module
83 *
84 * Identifier : page_sz
 85 *       Type	    : const u32
86 * Description : It indicates the page size
87 *
88 * Identifier : preserved_bit
89 * Type : const u32
 90 *       Description     : It indicates whether the TLB entry is a
 91 *			      preserved entry or not
92 *
93 * Identifier : valid_bit
94 * Type : const u32
 95 *       Description     : It indicates whether the TLB entry is valid or not
96 *
97 *
98 * Identifier : virtual_addr_tag
99 * Type : const u32
100 * Description : virtual Address
101 *
102 * RETURNS:
103 *
104 * Type : hw_status
 105 *       Description     : 0		 -- No errors occurred
 106 *			      RET_BAD_NULL_PARAM     -- A Pointer Parameter
107 * was set to NULL
108 * RET_PARAM_OUT_OF_RANGE -- Input Parameter out
109 * of Range
110 *
111 * PURPOSE: : Set MMU_CAM reg
112 *
113 * METHOD: : Check the Input parameters and set the CAM entry.
114 */
115static hw_status mmu_set_cam_entry(const void __iomem *base_address,
116 const u32 page_sz,
117 const u32 preserved_bit,
118 const u32 valid_bit,
119 const u32 virtual_addr_tag);
120
121/*
122 * FUNCTION : mmu_set_ram_entry
123 *
124 * INPUTS:
125 *
126 * Identifier : base_address
127 * Type : const u32
128 * Description : Base Address of instance of MMU module
129 *
130 * Identifier : physical_addr
131 * Type : const u32
132 * Description : Physical Address to which the corresponding
133 * virtual Address shouldpoint
134 *
135 * Identifier : endianism
136 * Type : hw_endianism_t
137 * Description : endianism for the given page
138 *
139 * Identifier : element_size
140 * Type : hw_element_size_t
 141 *       Description     : The element size (8, 16, 32 or 64 bit)
142 *
143 * Identifier : mixed_size
144 * Type : hw_mmu_mixed_size_t
145 * Description : Element Size to follow CPU or TLB
146 *
147 * RETURNS:
148 *
149 * Type : hw_status
 150 *       Description     : 0		 -- No errors occurred
 151 *			      RET_BAD_NULL_PARAM     -- A Pointer Parameter
152 * was set to NULL
153 * RET_PARAM_OUT_OF_RANGE -- Input Parameter
154 * out of Range
155 *
 156 * PURPOSE:	      : Set MMU_RAM reg
157 *
158 * METHOD: : Check the Input parameters and set the RAM entry.
159 */
160static hw_status mmu_set_ram_entry(const void __iomem *base_address,
161 const u32 physical_addr,
162 enum hw_endianism_t endianism,
163 enum hw_element_size_t element_size,
164 enum hw_mmu_mixed_size_t mixed_size);
165
166/* HW FUNCTIONS */
167
168hw_status hw_mmu_enable(const void __iomem *base_address)
169{
170 hw_status status = 0;
171
172 MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_SET);
173
174 return status;
175}
176
177hw_status hw_mmu_disable(const void __iomem *base_address)
178{
179 hw_status status = 0;
180
181 MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_CLEAR);
182
183 return status;
184}
185
186hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
187 u32 num_locked_entries)
188{
189 hw_status status = 0;
190
191 MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, num_locked_entries);
192
193 return status;
194}
195
196hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
197 u32 victim_entry_num)
198{
199 hw_status status = 0;
200
201 MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victim_entry_num);
202
203 return status;
204}
205
206hw_status hw_mmu_event_ack(const void __iomem *base_address, u32 irq_mask)
207{
208 hw_status status = 0;
209
210 MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, irq_mask);
211
212 return status;
213}
214
215hw_status hw_mmu_event_disable(const void __iomem *base_address, u32 irq_mask)
216{
217 hw_status status = 0;
218 u32 irq_reg;
219
220 irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
221
222 MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg & ~irq_mask);
223
224 return status;
225}
226
227hw_status hw_mmu_event_enable(const void __iomem *base_address, u32 irq_mask)
228{
229 hw_status status = 0;
230 u32 irq_reg;
231
232 irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
233
234 MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg | irq_mask);
235
236 return status;
237}
238
239hw_status hw_mmu_event_status(const void __iomem *base_address, u32 *irq_mask)
240{
241 hw_status status = 0;
242
243 *irq_mask = MMUMMU_IRQSTATUS_READ_REGISTER32(base_address);
244
245 return status;
246}
247
248hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, u32 *addr)
249{
250 hw_status status = 0;
251
252 /* read values from register */
253 *addr = MMUMMU_FAULT_AD_READ_REGISTER32(base_address);
254
255 return status;
256}
257
258hw_status hw_mmu_ttb_set(const void __iomem *base_address, u32 ttb_phys_addr)
259{
260 hw_status status = 0;
261 u32 load_ttb;
262
263 load_ttb = ttb_phys_addr & ~0x7FUL;
264 /* write values to register */
265 MMUMMU_TTB_WRITE_REGISTER32(base_address, load_ttb);
266
267 return status;
268}
269
270hw_status hw_mmu_twl_enable(const void __iomem *base_address)
271{
272 hw_status status = 0;
273
274 MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_SET);
275
276 return status;
277}
278
279hw_status hw_mmu_twl_disable(const void __iomem *base_address)
280{
281 hw_status status = 0;
282
283 MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_CLEAR);
284
285 return status;
286}
287
288hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtual_addr,
289 u32 page_sz)
290{
291 hw_status status = 0;
292 u32 virtual_addr_tag;
293 enum hw_mmu_page_size_t pg_size_bits;
294
295 switch (page_sz) {
296 case HW_PAGE_SIZE4KB:
297 pg_size_bits = HW_MMU_SMALL_PAGE;
298 break;
299
300 case HW_PAGE_SIZE64KB:
301 pg_size_bits = HW_MMU_LARGE_PAGE;
302 break;
303
304 case HW_PAGE_SIZE1MB:
305 pg_size_bits = HW_MMU_SECTION;
306 break;
307
308 case HW_PAGE_SIZE16MB:
309 pg_size_bits = HW_MMU_SUPERSECTION;
310 break;
311
312 default:
313 return -EINVAL;
314 }
315
316 /* Generate the 20-bit tag from virtual address */
317 virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
318
319 mmu_set_cam_entry(base_address, pg_size_bits, 0, 0, virtual_addr_tag);
320
321 mmu_flush_entry(base_address);
322
323 return status;
324}
325
326hw_status hw_mmu_tlb_add(const void __iomem *base_address,
327 u32 physical_addr,
328 u32 virtual_addr,
329 u32 page_sz,
330 u32 entry_num,
331 struct hw_mmu_map_attrs_t *map_attrs,
332 s8 preserved_bit, s8 valid_bit)
333{
334 hw_status status = 0;
335 u32 lock_reg;
336 u32 virtual_addr_tag;
337 enum hw_mmu_page_size_t mmu_pg_size;
338
 339	/* Check the input parameters */
340 switch (page_sz) {
341 case HW_PAGE_SIZE4KB:
342 mmu_pg_size = HW_MMU_SMALL_PAGE;
343 break;
344
345 case HW_PAGE_SIZE64KB:
346 mmu_pg_size = HW_MMU_LARGE_PAGE;
347 break;
348
349 case HW_PAGE_SIZE1MB:
350 mmu_pg_size = HW_MMU_SECTION;
351 break;
352
353 case HW_PAGE_SIZE16MB:
354 mmu_pg_size = HW_MMU_SUPERSECTION;
355 break;
356
357 default:
358 return -EINVAL;
359 }
360
361 lock_reg = MMUMMU_LOCK_READ_REGISTER32(base_address);
362
363 /* Generate the 20-bit tag from virtual address */
364 virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
365
366 /* Write the fields in the CAM Entry Register */
367 mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, valid_bit,
368 virtual_addr_tag);
369
370 /* Write the different fields of the RAM Entry Register */
 371	/* endianism of the page, element size of the page (8, 16, 32, 64 bit) */
372 mmu_set_ram_entry(base_address, physical_addr, map_attrs->endianism,
373 map_attrs->element_size, map_attrs->mixed_size);
374
375 /* Update the MMU Lock Register */
376 /* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */
377 MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, entry_num);
378
 379	/* Enable loading of an entry in the TLB by writing 1
 380	   into the LD_TLB register */
381 MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, MMU_LOAD_TLB);
382
383 MMUMMU_LOCK_WRITE_REGISTER32(base_address, lock_reg);
384
385 return status;
386}
387
388hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
389 u32 physical_addr,
390 u32 virtual_addr,
391 u32 page_sz, struct hw_mmu_map_attrs_t *map_attrs)
392{
393 hw_status status = 0;
394 u32 pte_addr, pte_val;
395 s32 num_entries = 1;
396
397 switch (page_sz) {
398 case HW_PAGE_SIZE4KB:
399 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
400 virtual_addr &
401 MMU_SMALL_PAGE_MASK);
402 pte_val =
403 ((physical_addr & MMU_SMALL_PAGE_MASK) |
404 (map_attrs->endianism << 9) | (map_attrs->
405 element_size << 4) |
406 (map_attrs->mixed_size << 11) | 2);
407 break;
408
409 case HW_PAGE_SIZE64KB:
410 num_entries = 16;
411 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
412 virtual_addr &
413 MMU_LARGE_PAGE_MASK);
414 pte_val =
415 ((physical_addr & MMU_LARGE_PAGE_MASK) |
416 (map_attrs->endianism << 9) | (map_attrs->
417 element_size << 4) |
418 (map_attrs->mixed_size << 11) | 1);
419 break;
420
421 case HW_PAGE_SIZE1MB:
422 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
423 virtual_addr &
424 MMU_SECTION_ADDR_MASK);
425 pte_val =
426 ((((physical_addr & MMU_SECTION_ADDR_MASK) |
427 (map_attrs->endianism << 15) | (map_attrs->
428 element_size << 10) |
429 (map_attrs->mixed_size << 17)) & ~0x40000) | 0x2);
430 break;
431
432 case HW_PAGE_SIZE16MB:
433 num_entries = 16;
434 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
435 virtual_addr &
436 MMU_SSECTION_ADDR_MASK);
437 pte_val =
438 (((physical_addr & MMU_SSECTION_ADDR_MASK) |
439 (map_attrs->endianism << 15) | (map_attrs->
440 element_size << 10) |
441 (map_attrs->mixed_size << 17)
442 ) | 0x40000 | 0x2);
443 break;
444
445 case HW_MMU_COARSE_PAGE_SIZE:
446 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
447 virtual_addr &
448 MMU_SECTION_ADDR_MASK);
449 pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1;
450 break;
451
452 default:
453 return -EINVAL;
454 }
455
456 while (--num_entries >= 0)
457 ((u32 *) pte_addr)[num_entries] = pte_val;
458
459 return status;
460}
461
462hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtual_addr, u32 page_size)
463{
464 hw_status status = 0;
465 u32 pte_addr;
466 s32 num_entries = 1;
467
468 switch (page_size) {
469 case HW_PAGE_SIZE4KB:
470 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
471 virtual_addr &
472 MMU_SMALL_PAGE_MASK);
473 break;
474
475 case HW_PAGE_SIZE64KB:
476 num_entries = 16;
477 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
478 virtual_addr &
479 MMU_LARGE_PAGE_MASK);
480 break;
481
482 case HW_PAGE_SIZE1MB:
483 case HW_MMU_COARSE_PAGE_SIZE:
484 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
485 virtual_addr &
486 MMU_SECTION_ADDR_MASK);
487 break;
488
489 case HW_PAGE_SIZE16MB:
490 num_entries = 16;
491 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
492 virtual_addr &
493 MMU_SSECTION_ADDR_MASK);
494 break;
495
496 default:
497 return -EINVAL;
498 }
499
500 while (--num_entries >= 0)
501 ((u32 *) pte_addr)[num_entries] = 0;
502
503 return status;
504}
505
506/* mmu_flush_entry */
507static hw_status mmu_flush_entry(const void __iomem *base_address)
508{
509 hw_status status = 0;
510 u32 flush_entry_data = 0x1;
511
512 /* write values to register */
513 MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, flush_entry_data);
514
515 return status;
516}
517
518/* mmu_set_cam_entry */
519static hw_status mmu_set_cam_entry(const void __iomem *base_address,
520 const u32 page_sz,
521 const u32 preserved_bit,
522 const u32 valid_bit,
523 const u32 virtual_addr_tag)
524{
525 hw_status status = 0;
526 u32 mmu_cam_reg;
527
528 mmu_cam_reg = (virtual_addr_tag << 12);
529 mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (valid_bit << 2) |
530 (preserved_bit << 3);
531
532 /* write values to register */
533 MMUMMU_CAM_WRITE_REGISTER32(base_address, mmu_cam_reg);
534
535 return status;
536}
537
538/* mmu_set_ram_entry */
539static hw_status mmu_set_ram_entry(const void __iomem *base_address,
540 const u32 physical_addr,
541 enum hw_endianism_t endianism,
542 enum hw_element_size_t element_size,
543 enum hw_mmu_mixed_size_t mixed_size)
544{
545 hw_status status = 0;
546 u32 mmu_ram_reg;
547
548 mmu_ram_reg = (physical_addr & MMU_ADDR_MASK);
549 mmu_ram_reg = (mmu_ram_reg) | ((endianism << 9) | (element_size << 7) |
550 (mixed_size << 6));
551
552 /* write values to register */
553 MMUMMU_RAM_WRITE_REGISTER32(base_address, mmu_ram_reg);
554
555 return status;
556
557}
558
559void hw_mmu_tlb_flush_all(const void __iomem *base)
560{
561 __raw_writeb(1, base + MMU_GFLUSH);
562}
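
hw_mmu_tlb_add() hides the CAM/RAM/LOCK/LD_TLB sequence behind one call; a caller only picks the entry number, page size, and attributes. A hedged sketch of the caller side, mirroring mmu_fault_print_stack() in ue_deh.c above (`dmmu_base', `pa' and `da' are assumed to come from the caller; <hw_defs.h> and <hw_mmu.h> assumed included):

/* Sketch: pin one valid, preserved 4 KB entry and turn on translation-
 * fault reporting before enabling the MMU. */
static void pin_one_page(void __iomem *dmmu_base, u32 pa, u32 da)
{
	struct hw_mmu_map_attrs_t attrs = {
		.endianism = HW_LITTLE_ENDIAN,
		.element_size = HW_ELEM_SIZE16BIT,
		.mixed_size = HW_MMU_CPUES,
	};

	hw_mmu_tlb_add(dmmu_base, pa, da, HW_PAGE_SIZE4KB,
		       0 /* entry number */, &attrs, HW_SET, HW_SET);
	hw_mmu_event_enable(dmmu_base, HW_MMU_TRANSLATION_FAULT);
	hw_mmu_enable(dmmu_base);
}
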
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.h b/drivers/staging/tidspbridge/hw/hw_mmu.h
new file mode 100644
index 000000000000..1458a2c6027b
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/hw_mmu.h
@@ -0,0 +1,163 @@
1/*
2 * hw_mmu.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * MMU types and API declarations
7 *
8 * Copyright (C) 2007 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 16 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#ifndef _HW_MMU_H
20#define _HW_MMU_H
21
22#include <linux/types.h>
23
24/* Bitmasks for interrupt sources */
25#define HW_MMU_TRANSLATION_FAULT 0x2
26#define HW_MMU_ALL_INTERRUPTS 0x1F
27
28#define HW_MMU_COARSE_PAGE_SIZE 0x400
29
30/* hw_mmu_mixed_size_t: Enumerated Type used to specify whether to follow
31 CPU/TLB Element size */
32enum hw_mmu_mixed_size_t {
33 HW_MMU_TLBES,
34 HW_MMU_CPUES
35};
36
37/* hw_mmu_map_attrs_t: Struct containing MMU mapping attributes */
38struct hw_mmu_map_attrs_t {
39 enum hw_endianism_t endianism;
40 enum hw_element_size_t element_size;
41 enum hw_mmu_mixed_size_t mixed_size;
42 bool donotlockmpupage;
43};
44
45extern hw_status hw_mmu_enable(const void __iomem *base_address);
46
47extern hw_status hw_mmu_disable(const void __iomem *base_address);
48
49extern hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
50 u32 num_locked_entries);
51
52extern hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
53 u32 victim_entry_num);
54
55/* For MMU faults */
56extern hw_status hw_mmu_event_ack(const void __iomem *base_address,
57 u32 irq_mask);
58
59extern hw_status hw_mmu_event_disable(const void __iomem *base_address,
60 u32 irq_mask);
61
62extern hw_status hw_mmu_event_enable(const void __iomem *base_address,
63 u32 irq_mask);
64
65extern hw_status hw_mmu_event_status(const void __iomem *base_address,
66 u32 *irq_mask);
67
68extern hw_status hw_mmu_fault_addr_read(const void __iomem *base_address,
69 u32 *addr);
70
71/* Set the TT base address */
72extern hw_status hw_mmu_ttb_set(const void __iomem *base_address,
73 u32 ttb_phys_addr);
74
75extern hw_status hw_mmu_twl_enable(const void __iomem *base_address);
76
77extern hw_status hw_mmu_twl_disable(const void __iomem *base_address);
78
79extern hw_status hw_mmu_tlb_flush(const void __iomem *base_address,
80 u32 virtual_addr, u32 page_sz);
81
82extern hw_status hw_mmu_tlb_add(const void __iomem *base_address,
83 u32 physical_addr,
84 u32 virtual_addr,
85 u32 page_sz,
86 u32 entry_num,
87 struct hw_mmu_map_attrs_t *map_attrs,
88 s8 preserved_bit, s8 valid_bit);
89
90/* For PTEs */
91extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
92 u32 physical_addr,
93 u32 virtual_addr,
94 u32 page_sz,
95 struct hw_mmu_map_attrs_t *map_attrs);
96
97extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
98 u32 virtual_addr, u32 page_size);
99
100void hw_mmu_tlb_flush_all(const void __iomem *base);
101
102static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va)
103{
104 u32 pte_addr;
105 u32 va31_to20;
106
 107	va31_to20 = va >> (20 - 2); /* right shift by 20, left by 2, in one step */
108 va31_to20 &= 0xFFFFFFFCUL;
109 pte_addr = l1_base + va31_to20;
110
111 return pte_addr;
112}
113
114static inline u32 hw_mmu_pte_addr_l2(u32 l2_base, u32 va)
115{
116 u32 pte_addr;
117
118 pte_addr = (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC);
119
120 return pte_addr;
121}
122
123static inline u32 hw_mmu_pte_coarse_l1(u32 pte_val)
124{
125 u32 pte_coarse;
126
127 pte_coarse = pte_val & 0xFFFFFC00;
128
129 return pte_coarse;
130}
131
132static inline u32 hw_mmu_pte_size_l1(u32 pte_val)
133{
134 u32 pte_size = 0;
135
136 if ((pte_val & 0x3) == 0x1) {
137 /* Points to L2 PT */
138 pte_size = HW_MMU_COARSE_PAGE_SIZE;
139 }
140
141 if ((pte_val & 0x3) == 0x2) {
142 if (pte_val & (1 << 18))
143 pte_size = HW_PAGE_SIZE16MB;
144 else
145 pte_size = HW_PAGE_SIZE1MB;
146 }
147
148 return pte_size;
149}
150
151static inline u32 hw_mmu_pte_size_l2(u32 pte_val)
152{
153 u32 pte_size = 0;
154
155 if (pte_val & 0x2)
156 pte_size = HW_PAGE_SIZE4KB;
157 else if (pte_val & 0x1)
158 pte_size = HW_PAGE_SIZE64KB;
159
160 return pte_size;
161}
162
163#endif /* _HW_MMU_H */
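
The two PTE-address helpers encode the ARM-style two-level walk: the L1 index is VA[31:20] scaled by 4 (done as one shift by 18 plus masking), the L2 index is VA[19:12] scaled by 4. A self-contained check with made-up table bases:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t l1_base = 0x9fff0000, l2_base = 0x9ffe0000;
	uint32_t va = 0x12345678;

	/* hw_mmu_pte_addr_l1: (va >> 18) & ~3 equals (va >> 20) * 4 */
	uint32_t l1 = l1_base + ((va >> (20 - 2)) & 0xFFFFFFFCUL);
	/* hw_mmu_pte_addr_l2: VA bits [19:12] scaled by 4 */
	uint32_t l2 = (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC);

	/* VA[31:20] = 0x123 -> L1 offset 0x48c; VA[19:12] = 0x45 -> 0x114 */
	printf("L1 PTE at 0x%08x, L2 PTE at 0x%08x\n", l1, l2);
	return 0;
}
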
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
index dfb55cca34c7..38122dbf877a 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
@@ -68,6 +68,7 @@ struct cfg_hostres {
68 void __iomem *dw_per_base; 68 void __iomem *dw_per_base;
69 u32 dw_per_pm_base; 69 u32 dw_per_pm_base;
70 u32 dw_core_pm_base; 70 u32 dw_core_pm_base;
71 void __iomem *dw_dmmu_base;
71 void __iomem *dw_sys_ctrl_base; 72 void __iomem *dw_sys_ctrl_base;
72}; 73};
73 74
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dev.h b/drivers/staging/tidspbridge/include/dspbridge/dev.h
index 9bdd48f57429..357458fadd2a 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dev.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dev.h
@@ -27,6 +27,7 @@
27#include <dspbridge/nodedefs.h> 27#include <dspbridge/nodedefs.h>
28#include <dspbridge/dispdefs.h> 28#include <dspbridge/dispdefs.h>
29#include <dspbridge/dspdefs.h> 29#include <dspbridge/dspdefs.h>
30#include <dspbridge/dmm.h>
30#include <dspbridge/host_os.h> 31#include <dspbridge/host_os.h>
31 32
32/* ----------------------------------- This */ 33/* ----------------------------------- This */
@@ -233,6 +234,29 @@ extern int dev_get_cmm_mgr(struct dev_object *hdev_obj,
233 struct cmm_object **mgr); 234 struct cmm_object **mgr);
234 235
235/* 236/*
237 * ======== dev_get_dmm_mgr ========
238 * Purpose:
239 * Retrieve the handle to the dynamic memory manager created for this
240 * device.
241 * Parameters:
242 * hdev_obj: Handle to device object created with
243 * dev_create_device().
244 * *mgr: Ptr to location to store handle.
245 * Returns:
246 * 0: Success.
247 * -EFAULT: Invalid hdev_obj.
248 * Requires:
249 * mgr != NULL.
250 * DEV Initialized.
251 * Ensures:
 252 *      0:		*mgr contains a handle to a dynamic memory manager object,
253 * or NULL.
254 * else: *mgr is NULL.
255 */
256extern int dev_get_dmm_mgr(struct dev_object *hdev_obj,
257 struct dmm_object **mgr);
258
259/*
236 * ======== dev_get_cod_mgr ======== 260 * ======== dev_get_cod_mgr ========
237 * Purpose: 261 * Purpose:
 238 * Retrieve the COD manager created for this device.	 262 * Retrieve the COD manager created for this device.
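
A hedged usage sketch for the new accessor: fetch the per-device DMM manager, then reserve DSP virtual space from it (the wrapper function name is hypothetical; dmm_reserve_memory() is declared in dmm.h, which dev.h now includes):

/* Sketch: look up the device's DMM manager and reserve `size' bytes of
 * DSP virtual address space, returning the reserved address in *da. */
static int reserve_from_dev(struct dev_object *hdev_obj, u32 size, u32 *da)
{
	struct dmm_object *dmm_mgr;
	int status = dev_get_dmm_mgr(hdev_obj, &dmm_mgr);

	if (!status && dmm_mgr)
		status = dmm_reserve_memory(dmm_mgr, size, da);

	return status;
}
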
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dmm.h b/drivers/staging/tidspbridge/include/dspbridge/dmm.h
new file mode 100644
index 000000000000..6c58335c5f60
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dmm.h
@@ -0,0 +1,75 @@
1/*
2 * dmm.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
 6 * The Dynamic Memory Mapping (DMM) module manages the DSP Virtual address
7 * space that can be directly mapped to any MPU buffer or memory region.
8 *
9 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 *
11 * This package is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
18 */
19
20#ifndef DMM_
21#define DMM_
22
23#include <dspbridge/dbdefs.h>
24
25struct dmm_object;
26
27/* DMM attributes used in dmm_create() */
28struct dmm_mgrattrs {
29 u32 reserved;
30};
31
32#define DMMPOOLSIZE 0x4000000
33
34/*
35 * ======== dmm_get_handle ========
36 * Purpose:
37 * Return the dynamic memory manager object for this device.
38 * This is typically called from the client process.
39 */
40
41extern int dmm_get_handle(void *hprocessor,
42 struct dmm_object **dmm_manager);
43
44extern int dmm_reserve_memory(struct dmm_object *dmm_mgr,
45 u32 size, u32 *prsv_addr);
46
47extern int dmm_un_reserve_memory(struct dmm_object *dmm_mgr,
48 u32 rsv_addr);
49
50extern int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr,
51 u32 size);
52
53extern int dmm_un_map_memory(struct dmm_object *dmm_mgr,
54 u32 addr, u32 *psize);
55
56extern int dmm_destroy(struct dmm_object *dmm_mgr);
57
58extern int dmm_delete_tables(struct dmm_object *dmm_mgr);
59
60extern int dmm_create(struct dmm_object **dmm_manager,
61 struct dev_object *hdev_obj,
62 const struct dmm_mgrattrs *mgr_attrts);
63
64extern bool dmm_init(void);
65
66extern void dmm_exit(void);
67
68extern int dmm_create_tables(struct dmm_object *dmm_mgr,
69 u32 addr, u32 size);
70
71#ifdef DSP_DMM_DEBUG
72u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr);
73#endif
74
75#endif /* DMM_ */
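
The API splits reservation of DSP virtual space from marking it mapped, mirroring the proc_reserve_memory()/proc_map() pair in proc.h. A hedged caller-side sketch (function name hypothetical, error handling trimmed):

/* Sketch: reserve a 1 MB window, mark 16 KB of it mapped, then undo both. */
static int dmm_demo(void *hprocessor)
{
	struct dmm_object *dmm_mgr;
	u32 rsv_addr, unmapped_size;
	int status;

	status = dmm_get_handle(hprocessor, &dmm_mgr);
	if (status)
		return status;

	status = dmm_reserve_memory(dmm_mgr, 0x100000, &rsv_addr);
	if (status)
		return status;

	status = dmm_map_memory(dmm_mgr, rsv_addr, 0x4000);
	if (!status)
		dmm_un_map_memory(dmm_mgr, rsv_addr, &unmapped_size);

	return dmm_un_reserve_memory(dmm_mgr, rsv_addr);
}
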
diff --git a/drivers/staging/tidspbridge/include/dspbridge/drv.h b/drivers/staging/tidspbridge/include/dspbridge/drv.h
index 75a2c9b5c6f2..c1f363ec9afa 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/drv.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/drv.h
@@ -108,6 +108,12 @@ struct dmm_map_object {
108 struct bridge_dma_map_info dma_info; 108 struct bridge_dma_map_info dma_info;
109}; 109};
110 110
111/* Used for DMM reserved memory accounting */
112struct dmm_rsv_object {
113 struct list_head link;
114 u32 dsp_reserved_addr;
115};
116
111/* New structure (member of process context) abstracts DMM resource info */ 117/* New structure (member of process context) abstracts DMM resource info */
112struct dspheap_res_object { 118struct dspheap_res_object {
113 s32 heap_allocated; /* DMM status */ 119 s32 heap_allocated; /* DMM status */
@@ -159,6 +165,10 @@ struct process_context {
159 struct list_head dmm_map_list; 165 struct list_head dmm_map_list;
160 spinlock_t dmm_map_lock; 166 spinlock_t dmm_map_lock;
161 167
168 /* DMM reserved memory resources */
169 struct list_head dmm_rsv_list;
170 spinlock_t dmm_rsv_lock;
171
162 /* DSP Heap resources */ 172 /* DSP Heap resources */
163 struct dspheap_res_object *pdspheap_list; 173 struct dspheap_res_object *pdspheap_list;
164 174
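
Each successful reservation is meant to leave a dmm_rsv_object on the owning process's dmm_rsv_list, guarded by dmm_rsv_lock, so the region can be released on process cleanup. A hedged sketch of that bookkeeping (helper name hypothetical; kmalloc()/list_add() come from <linux/slab.h> and <linux/list.h>):

/* Sketch: record a reservation in the per-process context so it can be
 * un-reserved automatically if the process exits without cleaning up. */
static int track_reservation(struct process_context *pr_ctxt, u32 dsp_addr)
{
	struct dmm_rsv_object *rsv = kmalloc(sizeof(*rsv), GFP_KERNEL);

	if (!rsv)
		return -ENOMEM;

	rsv->dsp_reserved_addr = dsp_addr;
	spin_lock(&pr_ctxt->dmm_rsv_lock);
	list_add(&rsv->link, &pr_ctxt->dmm_rsv_list);
	spin_unlock(&pr_ctxt->dmm_rsv_lock);
	return 0;
}
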
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h b/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h
deleted file mode 100644
index cb38d4cc0734..000000000000
--- a/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/*
2 * dsp-mmu.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * DSP iommu.
7 *
8 * Copyright (C) 2005-2010 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 16 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#ifndef _DSP_MMU_
20#define _DSP_MMU_
21
22#include <plat/iommu.h>
23#include <plat/iovmm.h>
24
25/**
 26 * dsp_mmu_init() - initialize the dsp_mmu module and return a handle
27 *
 28 * This function initializes the dsp mmu module and returns a struct iommu
 29 * handle to use for dsp maps.
30 *
31 */
32struct iommu *dsp_mmu_init(void);
33
34/**
35 * dsp_mmu_exit() - destroy dsp mmu module
36 * @mmu: Pointer to iommu handle.
37 *
38 * This function destroys dsp mmu module.
39 *
40 */
41void dsp_mmu_exit(struct iommu *mmu);
42
43/**
44 * user_to_dsp_map() - maps user to dsp virtual address
45 * @mmu: Pointer to iommu handle.
46 * @uva: Virtual user space address.
 47 * @da:	DSP address
 48 * @size:	Buffer size to map.
 49 * @usr_pgs:	struct page array pointer where the user pages will be stored
50 *
51 * This function maps a user space buffer into DSP virtual address.
52 *
53 */
54u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
55 struct page **usr_pgs);
56
57/**
58 * user_to_dsp_unmap() - unmaps DSP virtual buffer.
59 * @mmu: Pointer to iommu handle.
 60 * @da:	DSP address
61 *
 62 * This function unmaps a user space buffer from DSP virtual address space.
63 *
64 */
65int user_to_dsp_unmap(struct iommu *mmu, u32 da);
66
67#endif
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
index 615363474810..0ae7d1646a1b 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
@@ -162,6 +162,48 @@ typedef int(*fxn_brd_memwrite) (struct bridge_dev_context
162 u32 mem_type); 162 u32 mem_type);
163 163
164/* 164/*
165 * ======== bridge_brd_mem_map ========
166 * Purpose:
 167 *      Map an MPU memory region to a DSP/IVA memory space
168 * Parameters:
169 * dev_ctxt: Handle to Bridge driver defined device info.
170 * ul_mpu_addr: MPU memory region start address.
171 * virt_addr: DSP/IVA memory region u8 address.
172 * ul_num_bytes: Number of bytes to map.
173 * map_attrs: Mapping attributes (e.g. endianness).
174 * Returns:
175 * 0: Success.
176 * -EPERM: Other, unspecified error.
177 * Requires:
178 * dev_ctxt != NULL;
179 * Ensures:
180 */
181typedef int(*fxn_brd_memmap) (struct bridge_dev_context
182 * dev_ctxt, u32 ul_mpu_addr,
183 u32 virt_addr, u32 ul_num_bytes,
184 u32 map_attr,
185 struct page **mapped_pages);
186
187/*
188 * ======== bridge_brd_mem_un_map ========
189 * Purpose:
 190 *      Unmap an MPU memory region from DSP/IVA memory space
191 * Parameters:
192 * dev_ctxt: Handle to Bridge driver defined device info.
193 * virt_addr: DSP/IVA memory region u8 address.
194 * ul_num_bytes: Number of bytes to unmap.
195 * Returns:
196 * 0: Success.
197 * -EPERM: Other, unspecified error.
198 * Requires:
199 * dev_ctxt != NULL;
200 * Ensures:
201 */
202typedef int(*fxn_brd_memunmap) (struct bridge_dev_context
203 * dev_ctxt,
204 u32 virt_addr, u32 ul_num_bytes);
205
206/*
165 * ======== bridge_brd_stop ======== 207 * ======== bridge_brd_stop ========
166 * Purpose: 208 * Purpose:
167 * Bring board to the BRD_STOPPED state. 209 * Bring board to the BRD_STOPPED state.
@@ -951,6 +993,8 @@ struct bridge_drv_interface {
951 fxn_brd_setstate pfn_brd_set_state; /* Sets the Board State */ 993 fxn_brd_setstate pfn_brd_set_state; /* Sets the Board State */
952 fxn_brd_memcopy pfn_brd_mem_copy; /* Copies DSP Memory */ 994 fxn_brd_memcopy pfn_brd_mem_copy; /* Copies DSP Memory */
953 fxn_brd_memwrite pfn_brd_mem_write; /* Write DSP Memory w/o halt */ 995 fxn_brd_memwrite pfn_brd_mem_write; /* Write DSP Memory w/o halt */
996 fxn_brd_memmap pfn_brd_mem_map; /* Maps MPU mem to DSP mem */
 997	fxn_brd_memunmap pfn_brd_mem_un_map;	/* Unmaps MPU mem from DSP mem */
954 fxn_chnl_create pfn_chnl_create; /* Create channel manager. */ 998 fxn_chnl_create pfn_chnl_create; /* Create channel manager. */
955 fxn_chnl_destroy pfn_chnl_destroy; /* Destroy channel manager. */ 999 fxn_chnl_destroy pfn_chnl_destroy; /* Destroy channel manager. */
956 fxn_chnl_open pfn_chnl_open; /* Create a new channel. */ 1000 fxn_chnl_open pfn_chnl_open; /* Create a new channel. */
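
Bridge drivers surface these entry points through bridge_drv_interface, so the two new members slot in next to the existing memory functions. A hedged sketch of the driver-side table (partial initializer; bridge_brd_mem_map/bridge_brd_mem_un_map stand for the driver's own implementations in its tiomap code):

static struct bridge_drv_interface drv_interface_fxns = {
	/* ... other entry points elided ... */
	.pfn_brd_mem_map = bridge_brd_mem_map,
	.pfn_brd_mem_un_map = bridge_brd_mem_un_map,
	/* ... */
};
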
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
index bad180108ada..41e0594dff34 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
@@ -19,6 +19,10 @@
19#ifndef DSPIOCTL_ 19#ifndef DSPIOCTL_
20#define DSPIOCTL_ 20#define DSPIOCTL_
21 21
22/* ------------------------------------ Hardware Abstraction Layer */
23#include <hw_defs.h>
24#include <hw_mmu.h>
25
22/* 26/*
23 * Any IOCTLS at or above this value are reserved for standard Bridge driver 27 * Any IOCTLS at or above this value are reserved for standard Bridge driver
24 * interfaces. 28 * interfaces.
@@ -61,6 +65,9 @@ struct bridge_ioctl_extproc {
61 /* GPP virtual address. __va does not work for ioremapped addresses */ 65 /* GPP virtual address. __va does not work for ioremapped addresses */
62 u32 ul_gpp_va; 66 u32 ul_gpp_va;
63 u32 ul_size; /* Size of the mapped memory in bytes */ 67 u32 ul_size; /* Size of the mapped memory in bytes */
68 enum hw_endianism_t endianism;
69 enum hw_mmu_mixed_size_t mixed_mode;
70 enum hw_element_size_t elem_size;
64}; 71};
65 72
66#endif /* DSPIOCTL_ */ 73#endif /* DSPIOCTL_ */
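
With the HAL types included, a TLB-entry descriptor can carry its endianness and element-size attributes directly. A hedged sketch of filling one bridge_ioctl_extproc entry (values illustrative; only the members visible in this hunk are shown):

struct bridge_ioctl_extproc entry = {
	.ul_gpp_va = 0,			/* set once the region is ioremapped */
	.ul_size = 0x100000,		/* 1 MB mapping */
	.endianism = HW_LITTLE_ENDIAN,
	.mixed_mode = HW_MMU_CPUES,
	.elem_size = HW_ELEM_SIZE16BIT,
};
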
diff --git a/drivers/staging/tidspbridge/include/dspbridge/proc.h b/drivers/staging/tidspbridge/include/dspbridge/proc.h
index 2d12aab6b5bf..5e09fd165d9d 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/proc.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/proc.h
@@ -551,6 +551,29 @@ extern int proc_map(void *hprocessor,
551 struct process_context *pr_ctxt); 551 struct process_context *pr_ctxt);
552 552
553/* 553/*
554 * ======== proc_reserve_memory ========
555 * Purpose:
556 * Reserve a virtually contiguous region of DSP address space.
557 * Parameters:
558 * hprocessor : The processor handle.
559 * ul_size : Size of the address space to reserve.
560 * pp_rsv_addr : Ptr to DSP side reserved u8 address.
561 * Returns:
562 * 0 : Success.
563 * -EFAULT : Invalid processor handle.
564 * -EPERM : General failure.
565 * -ENOMEM : Cannot reserve chunk of this size.
566 * Requires:
567 * pp_rsv_addr is not NULL
568 * PROC Initialized.
569 * Ensures:
570 * Details:
571 */
572extern int proc_reserve_memory(void *hprocessor,
573 u32 ul_size, void **pp_rsv_addr,
574 struct process_context *pr_ctxt);
575
576/*
554 * ======== proc_un_map ======== 577 * ======== proc_un_map ========
555 * Purpose: 578 * Purpose:
556 * Removes a MPU buffer mapping from the DSP address space. 579 * Removes a MPU buffer mapping from the DSP address space.
@@ -572,4 +595,27 @@ extern int proc_map(void *hprocessor,
572extern int proc_un_map(void *hprocessor, void *map_addr, 595extern int proc_un_map(void *hprocessor, void *map_addr,
573 struct process_context *pr_ctxt); 596 struct process_context *pr_ctxt);
574 597
598/*
599 * ======== proc_un_reserve_memory ========
600 * Purpose:
601 * Frees a previously reserved region of DSP address space.
602 * Parameters:
603 * hprocessor : The processor handle.
 604 *      prsv_addr	    : Ptr to DSP side reserved u8 address.
605 * Returns:
606 * 0 : Success.
607 * -EFAULT : Invalid processor handle.
608 * -EPERM : General failure.
609 * -ENOENT : Cannot find a reserved region starting with this
610 * : address.
611 * Requires:
612 * prsv_addr is not NULL
613 * PROC Initialized.
614 * Ensures:
615 * Details:
616 */
617extern int proc_un_reserve_memory(void *hprocessor,
618 void *prsv_addr,
619 struct process_context *pr_ctxt);
620
575#endif /* PROC_ */ 621#endif /* PROC_ */
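
Reservation and mapping are separate steps on the processor API as well; a hedged sketch of the full lifecycle (function name hypothetical):

/* Sketch: reserve a DSP-side window, use it for proc_map()/proc_un_map(),
 * then release the reservation. */
static int proc_dmm_demo(void *hprocessor, struct process_context *pr_ctxt)
{
	void *rsv_addr;
	int status;

	status = proc_reserve_memory(hprocessor, 0x100000, &rsv_addr,
				     pr_ctxt);
	if (status)
		return status;

	/* ... proc_map()/proc_un_map() against the reserved window ... */

	return proc_un_reserve_memory(hprocessor, rsv_addr, pr_ctxt);
}
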
diff --git a/drivers/staging/tidspbridge/pmgr/dev.c b/drivers/staging/tidspbridge/pmgr/dev.c
index 7b30267ef0e2..132e960967b9 100644
--- a/drivers/staging/tidspbridge/pmgr/dev.c
+++ b/drivers/staging/tidspbridge/pmgr/dev.c
@@ -34,6 +34,7 @@
34#include <dspbridge/cod.h> 34#include <dspbridge/cod.h>
35#include <dspbridge/drv.h> 35#include <dspbridge/drv.h>
36#include <dspbridge/proc.h> 36#include <dspbridge/proc.h>
37#include <dspbridge/dmm.h>
37 38
38/* ----------------------------------- Resource Manager */ 39/* ----------------------------------- Resource Manager */
39#include <dspbridge/mgr.h> 40#include <dspbridge/mgr.h>
@@ -74,6 +75,7 @@ struct dev_object {
74 struct msg_mgr *hmsg_mgr; /* Message manager. */ 75 struct msg_mgr *hmsg_mgr; /* Message manager. */
75 struct io_mgr *hio_mgr; /* IO manager (CHNL, msg_ctrl) */ 76 struct io_mgr *hio_mgr; /* IO manager (CHNL, msg_ctrl) */
76 struct cmm_object *hcmm_mgr; /* SM memory manager. */ 77 struct cmm_object *hcmm_mgr; /* SM memory manager. */
78 struct dmm_object *dmm_mgr; /* Dynamic memory manager. */
77 struct ldr_module *module_obj; /* Bridge Module handle. */ 79 struct ldr_module *module_obj; /* Bridge Module handle. */
78 u32 word_size; /* DSP word size: quick access. */ 80 u32 word_size; /* DSP word size: quick access. */
79 struct drv_object *hdrv_obj; /* Driver Object */ 81 struct drv_object *hdrv_obj; /* Driver Object */
@@ -248,6 +250,9 @@ int dev_create_device(struct dev_object **device_obj,
248 /* Instantiate the DEH module */ 250 /* Instantiate the DEH module */
249 status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj); 251 status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj);
250 } 252 }
 253		/* Create DMM mgr. */
254 status = dmm_create(&dev_obj->dmm_mgr,
255 (struct dev_object *)dev_obj, NULL);
251 } 256 }
252 /* Add the new DEV_Object to the global list: */ 257 /* Add the new DEV_Object to the global list: */
253 if (!status) { 258 if (!status) {
@@ -273,6 +278,8 @@ leave:
273 kfree(dev_obj->proc_list); 278 kfree(dev_obj->proc_list);
274 if (dev_obj->cod_mgr) 279 if (dev_obj->cod_mgr)
275 cod_delete(dev_obj->cod_mgr); 280 cod_delete(dev_obj->cod_mgr);
281 if (dev_obj->dmm_mgr)
282 dmm_destroy(dev_obj->dmm_mgr);
276 kfree(dev_obj); 283 kfree(dev_obj);
277 } 284 }
278 285
@@ -382,6 +389,11 @@ int dev_destroy_device(struct dev_object *hdev_obj)
382 dev_obj->hcmm_mgr = NULL; 389 dev_obj->hcmm_mgr = NULL;
383 } 390 }
384 391
392 if (dev_obj->dmm_mgr) {
393 dmm_destroy(dev_obj->dmm_mgr);
394 dev_obj->dmm_mgr = NULL;
395 }
396
385 /* Call the driver's bridge_dev_destroy() function: */ 397 /* Call the driver's bridge_dev_destroy() function: */
386 /* Require of DevDestroy */ 398 /* Require of DevDestroy */
387 if (dev_obj->hbridge_context) { 399 if (dev_obj->hbridge_context) {
@@ -462,6 +474,32 @@ int dev_get_cmm_mgr(struct dev_object *hdev_obj,
462} 474}
463 475
464/* 476/*
477 * ======== dev_get_dmm_mgr ========
478 * Purpose:
479 * Retrieve the handle to the dynamic memory manager created for this
480 * device.
481 */
482int dev_get_dmm_mgr(struct dev_object *hdev_obj,
483 struct dmm_object **mgr)
484{
485 int status = 0;
486 struct dev_object *dev_obj = hdev_obj;
487
488 DBC_REQUIRE(refs > 0);
489 DBC_REQUIRE(mgr != NULL);
490
491 if (hdev_obj) {
492 *mgr = dev_obj->dmm_mgr;
493 } else {
494 *mgr = NULL;
495 status = -EFAULT;
496 }
497
498 DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
499 return status;
500}
501
502/*
465 * ======== dev_get_cod_mgr ======== 503 * ======== dev_get_cod_mgr ========
466 * Purpose: 504 * Purpose:
 467 * Retrieve the COD manager created for this device.	 505 * Retrieve the COD manager created for this device.
@@ -713,8 +751,10 @@ void dev_exit(void)
713 751
714 refs--; 752 refs--;
715 753
716 if (refs == 0) 754 if (refs == 0) {
717 cmm_exit(); 755 cmm_exit();
756 dmm_exit();
757 }
718 758
719 DBC_ENSURE(refs >= 0); 759 DBC_ENSURE(refs >= 0);
720} 760}
@@ -726,12 +766,25 @@ void dev_exit(void)
726 */ 766 */
727bool dev_init(void) 767bool dev_init(void)
728{ 768{
729 bool ret = true; 769 bool cmm_ret, dmm_ret, ret = true;
730 770
731 DBC_REQUIRE(refs >= 0); 771 DBC_REQUIRE(refs >= 0);
732 772
733 if (refs == 0) 773 if (refs == 0) {
734 ret = cmm_init(); 774 cmm_ret = cmm_init();
775 dmm_ret = dmm_init();
776
777 ret = cmm_ret && dmm_ret;
778
779 if (!ret) {
780 if (cmm_ret)
781 cmm_exit();
782
783 if (dmm_ret)
784 dmm_exit();
785
786 }
787 }
735 788
736 if (ret) 789 if (ret)
737 refs++; 790 refs++;
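The reworked dev_init() above initializes two subsystems and, when either fails, unwinds only the one that actually came up before reporting failure. A minimal standalone sketch of that pairing, with hypothetical a_init()/a_exit() and b_init()/b_exit() standing in for cmm_init()/cmm_exit() and dmm_init()/dmm_exit():

#include <stdbool.h>

/* Hypothetical init/exit pairs standing in for cmm_init()/cmm_exit()
 * and dmm_init()/dmm_exit(). */
static bool a_init(void) { return true; }
static void a_exit(void) { }
static bool b_init(void) { return true; }
static void b_exit(void) { }

static bool both_init(void)
{
	bool a_ok = a_init();
	bool b_ok = b_init();

	if (a_ok && b_ok)
		return true;

	/* Unwind only the subsystem that actually came up. */
	if (a_ok)
		a_exit();
	if (b_ok)
		b_exit();
	return false;
}

int main(void)
{
	return both_init() ? 0 : 1;
}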
@@ -1065,6 +1118,8 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
1065 STORE_FXN(fxn_brd_setstate, pfn_brd_set_state); 1118 STORE_FXN(fxn_brd_setstate, pfn_brd_set_state);
1066 STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy); 1119 STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy);
1067 STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write); 1120 STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write);
1121 STORE_FXN(fxn_brd_memmap, pfn_brd_mem_map);
1122 STORE_FXN(fxn_brd_memunmap, pfn_brd_mem_un_map);
1068 STORE_FXN(fxn_chnl_create, pfn_chnl_create); 1123 STORE_FXN(fxn_chnl_create, pfn_chnl_create);
1069 STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy); 1124 STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy);
1070 STORE_FXN(fxn_chnl_open, pfn_chnl_open); 1125 STORE_FXN(fxn_chnl_open, pfn_chnl_open);
diff --git a/drivers/staging/tidspbridge/pmgr/dmm.c b/drivers/staging/tidspbridge/pmgr/dmm.c
new file mode 100644
index 000000000000..8685233d7627
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/dmm.c
@@ -0,0 +1,533 @@
1/*
2 * dmm.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * The Dynamic Memory Manager (DMM) module manages the DSP Virtual address
7 * space that can be directly mapped to any MPU buffer or memory region
8 *
9 * Notes:
 10 * Region: Generic memory entity having a start address and a size
11 * Chunk: Reserved region
12 *
13 * Copyright (C) 2005-2006 Texas Instruments, Inc.
14 *
15 * This package is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License version 2 as
17 * published by the Free Software Foundation.
18 *
19 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 */
23#include <linux/types.h>
24
25/* ----------------------------------- Host OS */
26#include <dspbridge/host_os.h>
27
28/* ----------------------------------- DSP/BIOS Bridge */
29#include <dspbridge/dbdefs.h>
30
31/* ----------------------------------- Trace & Debug */
32#include <dspbridge/dbc.h>
33
34/* ----------------------------------- OS Adaptation Layer */
35#include <dspbridge/sync.h>
36
37/* ----------------------------------- Platform Manager */
38#include <dspbridge/dev.h>
39#include <dspbridge/proc.h>
40
41/* ----------------------------------- This */
42#include <dspbridge/dmm.h>
43
44/* ----------------------------------- Defines, Data Structures, Typedefs */
45#define DMM_ADDR_VIRTUAL(a) \
46 (((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\
47 dyn_mem_map_beg)
48#define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K)
49
50/* DMM Mgr */
51struct dmm_object {
 52 /* The DMM lock serializes access to the memory manager
 53 * across threads. */
54 spinlock_t dmm_lock; /* Lock to access dmm mgr */
55};
56
57/* ----------------------------------- Globals */
58static u32 refs; /* module reference count */
59struct map_page {
60 u32 region_size:15;
61 u32 mapped_size:15;
62 u32 reserved:1;
63 u32 mapped:1;
64};
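The DMM_ADDR_VIRTUAL() and DMM_ADDR_TO_INDEX() macros above convert between an index into the mapping table and a DSP virtual address at 4 KiB page granularity, and struct map_page packs each region's bookkeeping into a single 32-bit word. A compile-and-run sketch of the same arithmetic; map_beg is a hypothetical base address (the real one is installed by dmm_create_tables()), and the exact bitfield layout is compiler-dependent:

#include <assert.h>
#include <stdint.h>

#define PG_SIZE4K 4096u

/* Stand-ins for virtual_mapping_table and dyn_mem_map_beg. */
static struct {
	uint32_t region_size:15;	/* pages in this region */
	uint32_t mapped_size:15;	/* pages actually mapped */
	uint32_t reserved:1;
	uint32_t mapped:1;
} table[64];
static uint32_t map_beg = 0x20000000u;	/* hypothetical DSP VA base */

static uint32_t index_to_addr(uint32_t i)	/* DMM_ADDR_VIRTUAL */
{
	return map_beg + i * PG_SIZE4K;
}

static uint32_t addr_to_index(uint32_t a)	/* DMM_ADDR_TO_INDEX */
{
	return (a - map_beg) / PG_SIZE4K;
}

int main(void)
{
	/* Entry 5 describes the 4 KiB page at map_beg + 5 pages. */
	assert(addr_to_index(index_to_addr(5)) == 5);
	/* region_size:15 caps a region at 32767 pages (~128 MiB). */
	table[0].region_size = 64;	/* a 256 KiB region */
	return 0;
}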
65
66/* Create the free list */
67static struct map_page *virtual_mapping_table;
68static u32 free_region; /* The index of free region */
69static u32 free_size;
70static u32 dyn_mem_map_beg; /* The Beginning of dynamic memory mapping */
71static u32 table_size; /* The size of virt and phys pages tables */
72
73/* ----------------------------------- Function Prototypes */
74static struct map_page *get_region(u32 addr);
75static struct map_page *get_free_region(u32 len);
76static struct map_page *get_mapped_region(u32 addrs);
77
78/* ======== dmm_create_tables ========
79 * Purpose:
 80 * Create the table that records the physical addresses of
 81 * the buffer pages that are passed in by the user, and the
 82 * table that tracks the virtual memory that is reserved for
 83 * the DSP.
84 */
85int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size)
86{
87 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
88 int status = 0;
89
90 status = dmm_delete_tables(dmm_obj);
91 if (!status) {
92 dyn_mem_map_beg = addr;
93 table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K;
94 /* Create the free list */
95 virtual_mapping_table = __vmalloc(table_size *
96 sizeof(struct map_page), GFP_KERNEL |
97 __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
98 if (virtual_mapping_table == NULL)
99 status = -ENOMEM;
100 else {
101 /* On successful allocation,
102 * all entries are zero ('free') */
103 free_region = 0;
104 free_size = table_size * PG_SIZE4K;
105 virtual_mapping_table[0].region_size = table_size;
106 }
107 }
108
109 if (status)
110 pr_err("%s: failure, status 0x%x\n", __func__, status);
111
112 return status;
113}
114
115/*
116 * ======== dmm_create ========
117 * Purpose:
118 * Create a dynamic memory manager object.
119 */
120int dmm_create(struct dmm_object **dmm_manager,
121 struct dev_object *hdev_obj,
122 const struct dmm_mgrattrs *mgr_attrts)
123{
124 struct dmm_object *dmm_obj = NULL;
125 int status = 0;
126 DBC_REQUIRE(refs > 0);
127 DBC_REQUIRE(dmm_manager != NULL);
128
129 *dmm_manager = NULL;
 130 /* create, zero, and tag a dmm mgr object */
131 dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL);
132 if (dmm_obj != NULL) {
133 spin_lock_init(&dmm_obj->dmm_lock);
134 *dmm_manager = dmm_obj;
135 } else {
136 status = -ENOMEM;
137 }
138
139 return status;
140}
141
142/*
143 * ======== dmm_destroy ========
144 * Purpose:
 145 * Release the dynamic memory manager resources.
146 */
147int dmm_destroy(struct dmm_object *dmm_mgr)
148{
149 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
150 int status = 0;
151
152 DBC_REQUIRE(refs > 0);
153 if (dmm_mgr) {
154 status = dmm_delete_tables(dmm_obj);
155 if (!status)
156 kfree(dmm_obj);
157 } else
158 status = -EFAULT;
159
160 return status;
161}
162
163/*
164 * ======== dmm_delete_tables ========
165 * Purpose:
166 * Delete DMM Tables.
167 */
168int dmm_delete_tables(struct dmm_object *dmm_mgr)
169{
170 int status = 0;
171
172 DBC_REQUIRE(refs > 0);
173 /* Delete all DMM tables */
174 if (dmm_mgr)
175 vfree(virtual_mapping_table);
176 else
177 status = -EFAULT;
178 return status;
179}
180
181/*
182 * ======== dmm_exit ========
183 * Purpose:
184 * Discontinue usage of module; free resources when reference count
185 * reaches 0.
186 */
187void dmm_exit(void)
188{
189 DBC_REQUIRE(refs > 0);
190
191 refs--;
192}
193
194/*
195 * ======== dmm_get_handle ========
196 * Purpose:
197 * Return the dynamic memory manager object for this device.
198 * This is typically called from the client process.
199 */
200int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
201{
202 int status = 0;
203 struct dev_object *hdev_obj;
204
205 DBC_REQUIRE(refs > 0);
206 DBC_REQUIRE(dmm_manager != NULL);
207 if (hprocessor != NULL)
208 status = proc_get_dev_object(hprocessor, &hdev_obj);
209 else
210 hdev_obj = dev_get_first(); /* default */
211
212 if (!status)
213 status = dev_get_dmm_mgr(hdev_obj, dmm_manager);
214
215 return status;
216}
217
218/*
219 * ======== dmm_init ========
220 * Purpose:
221 * Initializes private state of DMM module.
222 */
223bool dmm_init(void)
224{
225 bool ret = true;
226
227 DBC_REQUIRE(refs >= 0);
228
229 if (ret)
230 refs++;
231
232 DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
233
234 virtual_mapping_table = NULL;
235 table_size = 0;
236
237 return ret;
238}
239
240/*
241 * ======== dmm_map_memory ========
242 * Purpose:
243 * Add a mapping block to the reserved chunk. DMM assumes that this block
244 * will be mapped in the DSP/IVA's address space. DMM returns an error if a
245 * mapping overlaps another one. This function stores the info that will be
246 * required later while unmapping the block.
247 */
248int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size)
249{
250 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
251 struct map_page *chunk;
252 int status = 0;
253
254 spin_lock(&dmm_obj->dmm_lock);
255 /* Find the Reserved memory chunk containing the DSP block to
256 * be mapped */
257 chunk = (struct map_page *)get_region(addr);
258 if (chunk != NULL) {
259 /* Mark the region 'mapped', leave the 'reserved' info as-is */
260 chunk->mapped = true;
261 chunk->mapped_size = (size / PG_SIZE4K);
262 } else
263 status = -ENOENT;
264 spin_unlock(&dmm_obj->dmm_lock);
265
266 dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, "
267 "chunk %p", __func__, dmm_mgr, addr, size, status, chunk);
268
269 return status;
270}
271
272/*
273 * ======== dmm_reserve_memory ========
274 * Purpose:
275 * Reserve a chunk of virtually contiguous DSP/IVA address space.
276 */
277int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size,
278 u32 *prsv_addr)
279{
280 int status = 0;
281 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
282 struct map_page *node;
283 u32 rsv_addr = 0;
284 u32 rsv_size = 0;
285
286 spin_lock(&dmm_obj->dmm_lock);
287
288 /* Try to get a DSP chunk from the free list */
289 node = get_free_region(size);
290 if (node != NULL) {
291 /* DSP chunk of given size is available. */
292 rsv_addr = DMM_ADDR_VIRTUAL(node);
 293 /* Calculate the number of entries to use */
294 rsv_size = size / PG_SIZE4K;
295 if (rsv_size < node->region_size) {
296 /* Mark remainder of free region */
297 node[rsv_size].mapped = false;
298 node[rsv_size].reserved = false;
299 node[rsv_size].region_size =
300 node->region_size - rsv_size;
301 node[rsv_size].mapped_size = 0;
302 }
 303 /* get_free_region() returns a first-fit chunk, but we only
 304 * use what was requested. */
305 node->mapped = false;
306 node->reserved = true;
307 node->region_size = rsv_size;
308 node->mapped_size = 0;
309 /* Return the chunk's starting address */
310 *prsv_addr = rsv_addr;
311 } else
 312 /* DSP chunk of given size is not available */
313 status = -ENOMEM;
314
315 spin_unlock(&dmm_obj->dmm_lock);
316
317 dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, "
318 "rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size,
319 prsv_addr, status, rsv_addr, rsv_size);
320
321 return status;
322}
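dmm_reserve_memory() above is a first-fit allocator with a split step: the chosen free region is trimmed to the requested page count and its tail is re-labelled as a smaller free region. A standalone sketch of just that split, using a simplified entry type (sizes in 4 KiB pages, field names mirroring map_page):

#include <stdbool.h>
#include <stdio.h>

struct page_ent {
	unsigned region_size;	/* pages */
	bool reserved;
};

/* Split ent[0..region_size) so the first 'want' pages become a
 * reserved chunk and the tail stays free, as dmm_reserve_memory()
 * does when rsv_size < node->region_size. */
static void split_region(struct page_ent *ent, unsigned want)
{
	if (want < ent->region_size) {
		ent[want].reserved = false;
		ent[want].region_size = ent->region_size - want;
	}
	ent->reserved = true;
	ent->region_size = want;
}

int main(void)
{
	struct page_ent tbl[16] = { { .region_size = 16 } };

	split_region(tbl, 4);
	printf("reserved %u pages, %u pages still free at index 4\n",
	       tbl[0].region_size, tbl[4].region_size);
	return 0;
}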
323
324/*
325 * ======== dmm_un_map_memory ========
326 * Purpose:
327 * Remove the mapped block from the reserved chunk.
328 */
329int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize)
330{
331 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
332 struct map_page *chunk;
333 int status = 0;
334
335 spin_lock(&dmm_obj->dmm_lock);
336 chunk = get_mapped_region(addr);
337 if (chunk == NULL)
338 status = -ENOENT;
339
340 if (!status) {
341 /* Unmap the region */
342 *psize = chunk->mapped_size * PG_SIZE4K;
343 chunk->mapped = false;
344 chunk->mapped_size = 0;
345 }
346 spin_unlock(&dmm_obj->dmm_lock);
347
348 dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, "
349 "chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk);
350
351 return status;
352}
353
354/*
355 * ======== dmm_un_reserve_memory ========
356 * Purpose:
357 * Free a chunk of reserved DSP/IVA address space.
358 */
359int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr)
360{
361 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
362 struct map_page *chunk;
363 u32 i;
364 int status = 0;
365 u32 chunk_size;
366
367 spin_lock(&dmm_obj->dmm_lock);
368
369 /* Find the chunk containing the reserved address */
370 chunk = get_mapped_region(rsv_addr);
371 if (chunk == NULL)
372 status = -ENOENT;
373
374 if (!status) {
375 /* Free all the mapped pages for this reserved region */
376 i = 0;
377 while (i < chunk->region_size) {
378 if (chunk[i].mapped) {
379 /* Remove mapping from the page tables. */
380 chunk_size = chunk[i].mapped_size;
381 /* Clear the mapping flags */
382 chunk[i].mapped = false;
383 chunk[i].mapped_size = 0;
384 i += chunk_size;
385 } else
386 i++;
387 }
388 /* Clear the flags (mark the region 'free') */
389 chunk->reserved = false;
390 /* NOTE: We do NOT coalesce free regions here.
 391 * Free regions are coalesced in get_free_region(), as it
 392 * traverses the whole mapping table.
393 */
394 }
395 spin_unlock(&dmm_obj->dmm_lock);
396
 397 dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x chunk %p\n",
398 __func__, dmm_mgr, rsv_addr, status, chunk);
399
400 return status;
401}
402
403/*
404 * ======== get_region ========
405 * Purpose:
 406 * Returns the region containing the specified address
407 */
408static struct map_page *get_region(u32 addr)
409{
410 struct map_page *curr_region = NULL;
411 u32 i = 0;
412
413 if (virtual_mapping_table != NULL) {
414 /* find page mapped by this address */
415 i = DMM_ADDR_TO_INDEX(addr);
416 if (i < table_size)
417 curr_region = virtual_mapping_table + i;
418 }
419
420 dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n",
421 __func__, curr_region, free_region, free_size);
422 return curr_region;
423}
424
425/*
426 * ======== get_free_region ========
427 * Purpose:
428 * Returns the requested free region
429 */
430static struct map_page *get_free_region(u32 len)
431{
432 struct map_page *curr_region = NULL;
433 u32 i = 0;
434 u32 region_size = 0;
435 u32 next_i = 0;
436
437 if (virtual_mapping_table == NULL)
438 return curr_region;
439 if (len > free_size) {
440 /* Find the largest free region
441 * (coalesce during the traversal) */
442 while (i < table_size) {
443 region_size = virtual_mapping_table[i].region_size;
444 next_i = i + region_size;
445 if (virtual_mapping_table[i].reserved == false) {
446 /* Coalesce, if possible */
447 if (next_i < table_size &&
448 virtual_mapping_table[next_i].reserved
449 == false) {
450 virtual_mapping_table[i].region_size +=
451 virtual_mapping_table
452 [next_i].region_size;
453 continue;
454 }
455 region_size *= PG_SIZE4K;
456 if (region_size > free_size) {
457 free_region = i;
458 free_size = region_size;
459 }
460 }
461 i = next_i;
462 }
463 }
464 if (len <= free_size) {
465 curr_region = virtual_mapping_table + free_region;
466 free_region += (len / PG_SIZE4K);
467 free_size -= len;
468 }
469 return curr_region;
470}
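Note that coalescing is lazy: get_free_region() merges adjacent free regions only while it is already scanning for a large enough chunk. A simplified sketch of that merge-during-scan, assuming every visited index is the start of a region with region_size >= 1:

struct page_ent {
	unsigned region_size;	/* pages; >= 1 at every region start */
	int reserved;
};

/* One scan; merge each free region with the free region that starts
 * where it ends, the way get_free_region() coalesces on the fly. */
static void coalesce(struct page_ent *tbl, unsigned n)
{
	unsigned i = 0;

	while (i < n) {
		unsigned next = i + tbl[i].region_size;

		if (!tbl[i].reserved && next < n && !tbl[next].reserved) {
			tbl[i].region_size += tbl[next].region_size;
			continue;	/* try to absorb the next one too */
		}
		i = next;
	}
}

int main(void)
{
	struct page_ent tbl[8] = {
		[0] = { .region_size = 2 },		/* free */
		[2] = { .region_size = 2 },		/* free */
		[4] = { .region_size = 4, .reserved = 1 },
	};

	coalesce(tbl, 8);
	return tbl[0].region_size == 4 ? 0 : 1;	/* merged into one */
}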
471
472/*
473 * ======== get_mapped_region ========
474 * Purpose:
 475 * Returns the requested mapped region
476 */
477static struct map_page *get_mapped_region(u32 addrs)
478{
479 u32 i = 0;
480 struct map_page *curr_region = NULL;
481
482 if (virtual_mapping_table == NULL)
483 return curr_region;
484
485 i = DMM_ADDR_TO_INDEX(addrs);
486 if (i < table_size && (virtual_mapping_table[i].mapped ||
487 virtual_mapping_table[i].reserved))
488 curr_region = virtual_mapping_table + i;
489 return curr_region;
490}
491
492#ifdef DSP_DMM_DEBUG
493u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr)
494{
495 struct map_page *curr_node = NULL;
496 u32 i;
497 u32 freemem = 0;
498 u32 bigsize = 0;
499
500 spin_lock(&dmm_mgr->dmm_lock);
501
502 if (virtual_mapping_table != NULL) {
503 for (i = 0; i < table_size; i +=
504 virtual_mapping_table[i].region_size) {
505 curr_node = virtual_mapping_table + i;
506 if (curr_node->reserved) {
507 /*printk("RESERVED size = 0x%x, "
508 "Map size = 0x%x\n",
509 (curr_node->region_size * PG_SIZE4K),
510 (curr_node->mapped == false) ? 0 :
511 (curr_node->mapped_size * PG_SIZE4K));
512 */
513 } else {
514/* printk("UNRESERVED size = 0x%x\n",
515 (curr_node->region_size * PG_SIZE4K));
516 */
517 freemem += (curr_node->region_size * PG_SIZE4K);
518 if (curr_node->region_size > bigsize)
519 bigsize = curr_node->region_size;
520 }
521 }
522 }
523 spin_unlock(&dmm_mgr->dmm_lock);
524 printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
525 freemem / (1024 * 1024));
 526 printk(KERN_INFO "Total DSP VA USED memory = %d Mbytes\n",
527 (((table_size * PG_SIZE4K) - freemem)) / (1024 * 1024));
 528 printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes\n\n",
529 (bigsize * PG_SIZE4K / (1024 * 1024)));
530
531 return 0;
532}
533#endif
diff --git a/drivers/staging/tidspbridge/pmgr/dspapi.c b/drivers/staging/tidspbridge/pmgr/dspapi.c
index 981551ce4d78..86ca785f1913 100644
--- a/drivers/staging/tidspbridge/pmgr/dspapi.c
+++ b/drivers/staging/tidspbridge/pmgr/dspapi.c
@@ -993,10 +993,27 @@ u32 procwrap_register_notify(union trapped_args *args, void *pr_ctxt)
993/* 993/*
994 * ======== procwrap_reserve_memory ======== 994 * ======== procwrap_reserve_memory ========
995 */ 995 */
996u32 __deprecated procwrap_reserve_memory(union trapped_args *args, 996u32 procwrap_reserve_memory(union trapped_args *args, void *pr_ctxt)
997 void *pr_ctxt)
998{ 997{
999 return 0; 998 int status;
999 void *prsv_addr;
1000 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
1001
1002 if ((args->args_proc_rsvmem.ul_size <= 0) ||
1003 (args->args_proc_rsvmem.ul_size & (PG_SIZE4K - 1)) != 0)
1004 return -EINVAL;
1005
1006 status = proc_reserve_memory(hprocessor,
1007 args->args_proc_rsvmem.ul_size, &prsv_addr,
1008 pr_ctxt);
1009 if (!status) {
1010 if (put_user(prsv_addr, args->args_proc_rsvmem.pp_rsv_addr)) {
1011 status = -EINVAL;
1012 proc_un_reserve_memory(args->args_proc_rsvmem.
1013 hprocessor, prsv_addr, pr_ctxt);
1014 }
1015 }
1016 return status;
1000} 1017}
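The new procwrap_reserve_memory() above rejects sizes that are zero or not 4 KiB aligned before calling into the DMM; the mask test relies on PG_SIZE4K being a power of two. A small sketch with assumed sizes:

#include <assert.h>

#define PG_SIZE4K 4096u

/* Mirrors the ioctl-layer check: non-zero and 4 KiB aligned. */
static int size_is_valid(unsigned size)
{
	return size != 0 && (size & (PG_SIZE4K - 1)) == 0;
}

int main(void)
{
	assert(size_is_valid(4096));	/* exactly one page */
	assert(size_is_valid(8192));	/* two pages */
	assert(!size_is_valid(0));	/* rejected: empty */
	assert(!size_is_valid(4095));	/* rejected: unaligned */
	return 0;
}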
1001 1018
1002/* 1019/*
@@ -1025,10 +1042,15 @@ u32 procwrap_un_map(union trapped_args *args, void *pr_ctxt)
1025/* 1042/*
1026 * ======== procwrap_un_reserve_memory ======== 1043 * ======== procwrap_un_reserve_memory ========
1027 */ 1044 */
1028u32 __deprecated procwrap_un_reserve_memory(union trapped_args *args, 1045u32 procwrap_un_reserve_memory(union trapped_args *args, void *pr_ctxt)
1029 void *pr_ctxt)
1030{ 1046{
1031 return 0; 1047 int status;
1048 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
1049
1050 status = proc_un_reserve_memory(hprocessor,
1051 args->args_proc_unrsvmem.prsv_addr,
1052 pr_ctxt);
1053 return status;
1032} 1054}
1033 1055
1034/* 1056/*
diff --git a/drivers/staging/tidspbridge/rmgr/drv.c b/drivers/staging/tidspbridge/rmgr/drv.c
index 91cc168516e5..81b1b9013550 100644
--- a/drivers/staging/tidspbridge/rmgr/drv.c
+++ b/drivers/staging/tidspbridge/rmgr/drv.c
@@ -146,6 +146,7 @@ int drv_remove_all_dmm_res_elements(void *process_ctxt)
146 struct process_context *ctxt = (struct process_context *)process_ctxt; 146 struct process_context *ctxt = (struct process_context *)process_ctxt;
147 int status = 0; 147 int status = 0;
148 struct dmm_map_object *temp_map, *map_obj; 148 struct dmm_map_object *temp_map, *map_obj;
149 struct dmm_rsv_object *temp_rsv, *rsv_obj;
149 150
150 /* Free DMM mapped memory resources */ 151 /* Free DMM mapped memory resources */
151 list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) { 152 list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
@@ -155,6 +156,16 @@ int drv_remove_all_dmm_res_elements(void *process_ctxt)
155 pr_err("%s: proc_un_map failed!" 156 pr_err("%s: proc_un_map failed!"
156 " status = 0x%xn", __func__, status); 157 " status = 0x%xn", __func__, status);
157 } 158 }
159
160 /* Free DMM reserved memory resources */
161 list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) {
162 status = proc_un_reserve_memory(ctxt->hprocessor, (void *)
163 rsv_obj->dsp_reserved_addr,
164 ctxt);
165 if (status)
166 pr_err("%s: proc_un_reserve_memory failed!"
167 " status = 0x%xn", __func__, status);
168 }
158 return status; 169 return status;
159} 170}
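Both cleanup loops above use list_for_each_entry_safe(), which keeps a lookahead pointer so the node being processed can be freed or unlinked mid-walk. The same idiom over a hand-rolled singly linked list (all names hypothetical, allocation failure handling omitted):

#include <stdlib.h>

struct node {
	struct node *next;
};

/* 'tmp' carries the lookahead so freeing 'cur' mid-walk is safe;
 * this is what list_for_each_entry_safe() provides. */
static void free_all(struct node **head)
{
	struct node *cur = *head, *tmp;

	while (cur) {
		tmp = cur->next;
		free(cur);
		cur = tmp;
	}
	*head = NULL;
}

int main(void)
{
	struct node *head = malloc(sizeof(*head));

	head->next = malloc(sizeof(*head));
	head->next->next = NULL;
	free_all(&head);
	return head == NULL ? 0 : 1;
}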
160 171
@@ -732,6 +743,7 @@ static int request_bridge_resources(struct cfg_hostres *res)
732 host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE); 743 host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE);
733 dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]); 744 dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]);
734 dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]); 745 dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]);
746 dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
735 747
 736 /* for 24xx base port is not mapping the memory for DSP 748 /* for 24xx base port is not mapping the memory for DSP
737 * internal memory TODO Do a ioremap here */ 749 * internal memory TODO Do a ioremap here */
@@ -785,6 +797,8 @@ int drv_request_bridge_res_dsp(void **phost_resources)
785 OMAP_PER_PRM_SIZE); 797 OMAP_PER_PRM_SIZE);
786 host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE, 798 host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
787 OMAP_CORE_PRM_SIZE); 799 OMAP_CORE_PRM_SIZE);
800 host_res->dw_dmmu_base = ioremap(OMAP_DMMU_BASE,
801 OMAP_DMMU_SIZE);
788 802
789 dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", 803 dev_dbg(bridge, "dw_mem_base[0] 0x%x\n",
790 host_res->dw_mem_base[0]); 804 host_res->dw_mem_base[0]);
@@ -796,6 +810,7 @@ int drv_request_bridge_res_dsp(void **phost_resources)
796 host_res->dw_mem_base[3]); 810 host_res->dw_mem_base[3]);
797 dev_dbg(bridge, "dw_mem_base[4] 0x%x\n", 811 dev_dbg(bridge, "dw_mem_base[4] 0x%x\n",
798 host_res->dw_mem_base[4]); 812 host_res->dw_mem_base[4]);
813 dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
799 814
800 shm_size = drv_datap->shm_size; 815 shm_size = drv_datap->shm_size;
801 if (shm_size >= 0x10000) { 816 if (shm_size >= 0x10000) {
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index 34be43fec044..324fcdffb3b3 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -509,6 +509,8 @@ static int bridge_open(struct inode *ip, struct file *filp)
509 pr_ctxt->res_state = PROC_RES_ALLOCATED; 509 pr_ctxt->res_state = PROC_RES_ALLOCATED;
510 spin_lock_init(&pr_ctxt->dmm_map_lock); 510 spin_lock_init(&pr_ctxt->dmm_map_lock);
511 INIT_LIST_HEAD(&pr_ctxt->dmm_map_list); 511 INIT_LIST_HEAD(&pr_ctxt->dmm_map_list);
512 spin_lock_init(&pr_ctxt->dmm_rsv_lock);
513 INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list);
512 514
513 pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL); 515 pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
514 if (pr_ctxt->node_id) { 516 if (pr_ctxt->node_id) {
diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c
index a660247f527a..1562f3c1281c 100644
--- a/drivers/staging/tidspbridge/rmgr/node.c
+++ b/drivers/staging/tidspbridge/rmgr/node.c
@@ -56,6 +56,7 @@
56/* ----------------------------------- This */ 56/* ----------------------------------- This */
57#include <dspbridge/nodepriv.h> 57#include <dspbridge/nodepriv.h>
58#include <dspbridge/node.h> 58#include <dspbridge/node.h>
59#include <dspbridge/dmm.h>
59 60
60/* Static/Dynamic Loader includes */ 61/* Static/Dynamic Loader includes */
61#include <dspbridge/dbll.h> 62#include <dspbridge/dbll.h>
@@ -316,6 +317,10 @@ int node_allocate(struct proc_object *hprocessor,
316 u32 mapped_addr = 0; 317 u32 mapped_addr = 0;
317 u32 map_attrs = 0x0; 318 u32 map_attrs = 0x0;
318 struct dsp_processorstate proc_state; 319 struct dsp_processorstate proc_state;
320#ifdef DSP_DMM_DEBUG
321 struct dmm_object *dmm_mgr;
322 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
323#endif
319 324
320 void *node_res; 325 void *node_res;
321 326
@@ -425,12 +430,34 @@ int node_allocate(struct proc_object *hprocessor,
425 if (status) 430 if (status)
426 goto func_cont; 431 goto func_cont;
427 432
433 status = proc_reserve_memory(hprocessor,
434 pnode->create_args.asa.task_arg_obj.
435 heap_size + PAGE_SIZE,
436 (void **)&(pnode->create_args.asa.
437 task_arg_obj.udsp_heap_res_addr),
438 pr_ctxt);
439 if (status) {
440 pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
441 __func__, status);
442 goto func_cont;
443 }
444#ifdef DSP_DMM_DEBUG
445 status = dmm_get_handle(p_proc_object, &dmm_mgr);
446 if (!dmm_mgr) {
447 status = DSP_EHANDLE;
448 goto func_cont;
449 }
450
451 dmm_mem_map_dump(dmm_mgr);
452#endif
453
428 map_attrs |= DSP_MAPLITTLEENDIAN; 454 map_attrs |= DSP_MAPLITTLEENDIAN;
429 map_attrs |= DSP_MAPELEMSIZE32; 455 map_attrs |= DSP_MAPELEMSIZE32;
430 map_attrs |= DSP_MAPVIRTUALADDR; 456 map_attrs |= DSP_MAPVIRTUALADDR;
431 status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr, 457 status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
432 pnode->create_args.asa.task_arg_obj.heap_size, 458 pnode->create_args.asa.task_arg_obj.heap_size,
433 NULL, (void **)&mapped_addr, map_attrs, 459 (void *)pnode->create_args.asa.task_arg_obj.
460 udsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
434 pr_ctxt); 461 pr_ctxt);
435 if (status) 462 if (status)
436 pr_err("%s: Failed to map memory for Heap: 0x%x\n", 463 pr_err("%s: Failed to map memory for Heap: 0x%x\n",
@@ -2484,7 +2511,11 @@ static void delete_node(struct node_object *hnode,
2484 struct stream_chnl stream; 2511 struct stream_chnl stream;
2485 struct node_msgargs node_msg_args; 2512 struct node_msgargs node_msg_args;
2486 struct node_taskargs task_arg_obj; 2513 struct node_taskargs task_arg_obj;
2487 2514#ifdef DSP_DMM_DEBUG
2515 struct dmm_object *dmm_mgr;
2516 struct proc_object *p_proc_object =
2517 (struct proc_object *)hnode->hprocessor;
2518#endif
2488 int status; 2519 int status;
2489 if (!hnode) 2520 if (!hnode)
2490 goto func_end; 2521 goto func_end;
@@ -2545,6 +2576,19 @@ static void delete_node(struct node_object *hnode,
2545 status = proc_un_map(hnode->hprocessor, (void *) 2576 status = proc_un_map(hnode->hprocessor, (void *)
2546 task_arg_obj.udsp_heap_addr, 2577 task_arg_obj.udsp_heap_addr,
2547 pr_ctxt); 2578 pr_ctxt);
2579
2580 status = proc_un_reserve_memory(hnode->hprocessor,
2581 (void *)
2582 task_arg_obj.
2583 udsp_heap_res_addr,
2584 pr_ctxt);
2585#ifdef DSP_DMM_DEBUG
2586 status = dmm_get_handle(p_proc_object, &dmm_mgr);
2587 if (dmm_mgr)
2588 dmm_mem_map_dump(dmm_mgr);
2589 else
2590 status = DSP_EHANDLE;
2591#endif
2548 } 2592 }
2549 } 2593 }
2550 if (node_type != NODE_MESSAGE) { 2594 if (node_type != NODE_MESSAGE) {
diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c
index 7a15a02efedf..b47d7aa747b1 100644
--- a/drivers/staging/tidspbridge/rmgr/proc.c
+++ b/drivers/staging/tidspbridge/rmgr/proc.c
@@ -39,6 +39,7 @@
39#include <dspbridge/cod.h> 39#include <dspbridge/cod.h>
40#include <dspbridge/dev.h> 40#include <dspbridge/dev.h>
41#include <dspbridge/procpriv.h> 41#include <dspbridge/procpriv.h>
42#include <dspbridge/dmm.h>
42 43
43/* ----------------------------------- Resource Manager */ 44/* ----------------------------------- Resource Manager */
44#include <dspbridge/mgr.h> 45#include <dspbridge/mgr.h>
@@ -51,7 +52,6 @@
51#include <dspbridge/msg.h> 52#include <dspbridge/msg.h>
52#include <dspbridge/dspioctl.h> 53#include <dspbridge/dspioctl.h>
53#include <dspbridge/drv.h> 54#include <dspbridge/drv.h>
54#include <_tiomap.h>
55 55
56/* ----------------------------------- This */ 56/* ----------------------------------- This */
57#include <dspbridge/proc.h> 57#include <dspbridge/proc.h>
@@ -151,21 +151,34 @@ static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt,
151 return map_obj; 151 return map_obj;
152} 152}
153 153
154static int match_exact_map_obj(struct dmm_map_object *map_obj,
155 u32 dsp_addr, u32 size)
156{
157 if (map_obj->dsp_addr == dsp_addr && map_obj->size != size)
 158 pr_err("%s: addr match (0x%x), but size doesn't (0x%x != 0x%x)\n",
159 __func__, dsp_addr, map_obj->size, size);
160
161 return map_obj->dsp_addr == dsp_addr &&
162 map_obj->size == size;
163}
164
154static void remove_mapping_information(struct process_context *pr_ctxt, 165static void remove_mapping_information(struct process_context *pr_ctxt,
155 u32 dsp_addr) 166 u32 dsp_addr, u32 size)
156{ 167{
157 struct dmm_map_object *map_obj; 168 struct dmm_map_object *map_obj;
158 169
159 pr_debug("%s: looking for virt 0x%x\n", __func__, dsp_addr); 170 pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__,
171 dsp_addr, size);
160 172
161 spin_lock(&pr_ctxt->dmm_map_lock); 173 spin_lock(&pr_ctxt->dmm_map_lock);
162 list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) { 174 list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
163 pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x\n", 175 pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
164 __func__, 176 __func__,
165 map_obj->mpu_addr, 177 map_obj->mpu_addr,
166 map_obj->dsp_addr); 178 map_obj->dsp_addr,
179 map_obj->size);
167 180
168 if (map_obj->dsp_addr == dsp_addr) { 181 if (match_exact_map_obj(map_obj, dsp_addr, size)) {
169 pr_debug("%s: match, deleting map info\n", __func__); 182 pr_debug("%s: match, deleting map info\n", __func__);
170 list_del(&map_obj->link); 183 list_del(&map_obj->link);
171 kfree(map_obj->dma_info.sg); 184 kfree(map_obj->dma_info.sg);
@@ -1077,6 +1090,7 @@ int proc_load(void *hprocessor, const s32 argc_index,
1077 s32 cnew_envp; /* " " in new_envp[] */ 1090 s32 cnew_envp; /* " " in new_envp[] */
1078 s32 nproc_id = 0; /* Anticipate MP version. */ 1091 s32 nproc_id = 0; /* Anticipate MP version. */
1079 struct dcd_manager *hdcd_handle; 1092 struct dcd_manager *hdcd_handle;
1093 struct dmm_object *dmm_mgr;
1080 u32 dw_ext_end; 1094 u32 dw_ext_end;
1081 u32 proc_id; 1095 u32 proc_id;
1082 int brd_state; 1096 int brd_state;
@@ -1267,6 +1281,25 @@ int proc_load(void *hprocessor, const s32 argc_index,
1267 if (!status) 1281 if (!status)
1268 status = cod_get_sym_value(cod_mgr, EXTEND, 1282 status = cod_get_sym_value(cod_mgr, EXTEND,
1269 &dw_ext_end); 1283 &dw_ext_end);
1284
1285 /* Reset DMM structs and add an initial free chunk */
1286 if (!status) {
1287 status =
1288 dev_get_dmm_mgr(p_proc_object->hdev_obj,
1289 &dmm_mgr);
1290 if (dmm_mgr) {
 1291 /* Set dw_ext_end to the DMM start byte
 1292 * address */
1293 dw_ext_end =
1294 (dw_ext_end + 1) * DSPWORDSIZE;
1295 /* DMM memory is from EXT_END */
1296 status = dmm_create_tables(dmm_mgr,
1297 dw_ext_end,
1298 DMMPOOLSIZE);
1299 } else {
1300 status = -EFAULT;
1301 }
1302 }
1270 } 1303 }
1271 } 1304 }
1272 /* Restore the original argv[0] */ 1305 /* Restore the original argv[0] */
@@ -1319,10 +1352,12 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
1319{ 1352{
1320 u32 va_align; 1353 u32 va_align;
1321 u32 pa_align; 1354 u32 pa_align;
1355 struct dmm_object *dmm_mgr;
1322 u32 size_align; 1356 u32 size_align;
1323 int status = 0; 1357 int status = 0;
1324 struct proc_object *p_proc_object = (struct proc_object *)hprocessor; 1358 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1325 struct dmm_map_object *map_obj; 1359 struct dmm_map_object *map_obj;
1360 u32 tmp_addr = 0;
1326 1361
1327#ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK 1362#ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK
1328 if ((ul_map_attr & BUFMODE_MASK) != RBUF) { 1363 if ((ul_map_attr & BUFMODE_MASK) != RBUF) {
@@ -1347,30 +1382,33 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
1347 } 1382 }
1348 /* Critical section */ 1383 /* Critical section */
1349 mutex_lock(&proc_lock); 1384 mutex_lock(&proc_lock);
1385 dmm_get_handle(p_proc_object, &dmm_mgr);
1386 if (dmm_mgr)
1387 status = dmm_map_memory(dmm_mgr, va_align, size_align);
1388 else
1389 status = -EFAULT;
1350 1390
1351 /* Add mapping to the page tables. */ 1391 /* Add mapping to the page tables. */
1352 if (!status) { 1392 if (!status) {
1393
1394 /* Mapped address = MSB of VA | LSB of PA */
1395 tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1)));
1353 /* mapped memory resource tracking */ 1396 /* mapped memory resource tracking */
1354 map_obj = add_mapping_info(pr_ctxt, pa_align, va_align, 1397 map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr,
1355 size_align); 1398 size_align);
1356 if (!map_obj) { 1399 if (!map_obj)
1357 status = -ENOMEM; 1400 status = -ENOMEM;
1358 } else { 1401 else
1359 va_align = user_to_dsp_map( 1402 status = (*p_proc_object->intf_fxns->pfn_brd_mem_map)
1360 p_proc_object->hbridge_context->dsp_mmu, 1403 (p_proc_object->hbridge_context, pa_align, va_align,
1361 pa_align, va_align, size_align, 1404 size_align, ul_map_attr, map_obj->pages);
1362 map_obj->pages);
1363 if (IS_ERR_VALUE(va_align))
1364 status = (int)va_align;
1365 }
1366 } 1405 }
1367 if (!status) { 1406 if (!status) {
1368 /* Mapped address = MSB of VA | LSB of PA */ 1407 /* Mapped address = MSB of VA | LSB of PA */
1369 map_obj->dsp_addr = (va_align | 1408 *pp_map_addr = (void *) tmp_addr;
1370 ((u32)pmpu_addr & (PG_SIZE4K - 1)));
1371 *pp_map_addr = (void *)map_obj->dsp_addr;
1372 } else { 1409 } else {
1373 remove_mapping_information(pr_ctxt, va_align); 1410 remove_mapping_information(pr_ctxt, tmp_addr, size_align);
1411 dmm_un_map_memory(dmm_mgr, va_align, &size_align);
1374 } 1412 }
1375 mutex_unlock(&proc_lock); 1413 mutex_unlock(&proc_lock);
1376 1414
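The tmp_addr computed above is the "MSB of VA | LSB of PA" combination: the page-aligned DSP virtual address supplies the high bits and the user pointer's offset within its page supplies the low bits. A standalone sketch with hypothetical addresses:

#include <assert.h>
#include <stdint.h>

#define PG_SIZE4K 4096u

/* Page-aligned DSP VA in the high bits, in-page offset of the MPU
 * buffer in the low bits, as proc_map() computes tmp_addr. */
static uint32_t dsp_map_addr(uint32_t va_align, uint32_t mpu_addr)
{
	return va_align | (mpu_addr & (PG_SIZE4K - 1));
}

int main(void)
{
	/* Hypothetical addresses: DSP page 0x20001000, MPU buffer
	 * starting 0x234 bytes into its own page. */
	assert(dsp_map_addr(0x20001000u, 0x40000234u) == 0x20001234u);
	return 0;
}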
@@ -1463,6 +1501,55 @@ func_end:
1463} 1501}
1464 1502
1465/* 1503/*
1504 * ======== proc_reserve_memory ========
1505 * Purpose:
1506 * Reserve a virtually contiguous region of DSP address space.
1507 */
1508int proc_reserve_memory(void *hprocessor, u32 ul_size,
1509 void **pp_rsv_addr,
1510 struct process_context *pr_ctxt)
1511{
1512 struct dmm_object *dmm_mgr;
1513 int status = 0;
1514 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1515 struct dmm_rsv_object *rsv_obj;
1516
1517 if (!p_proc_object) {
1518 status = -EFAULT;
1519 goto func_end;
1520 }
1521
1522 status = dmm_get_handle(p_proc_object, &dmm_mgr);
1523 if (!dmm_mgr) {
1524 status = -EFAULT;
1525 goto func_end;
1526 }
1527
1528 status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr);
1529 if (status != 0)
1530 goto func_end;
1531
1532 /*
1533 * A successful reserve should be followed by insertion of rsv_obj
1534 * into dmm_rsv_list, so that reserved memory resource tracking
 1535 * remains up to date
1536 */
1537 rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL);
1538 if (rsv_obj) {
1539 rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr;
1540 spin_lock(&pr_ctxt->dmm_rsv_lock);
1541 list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list);
1542 spin_unlock(&pr_ctxt->dmm_rsv_lock);
1543 }
1544
1545func_end:
1546 dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p "
1547 "status 0x%x\n", __func__, hprocessor,
1548 ul_size, pp_rsv_addr, status);
1549 return status;
1550}
1551
1552/*
1466 * ======== proc_start ======== 1553 * ======== proc_start ========
1467 * Purpose: 1554 * Purpose:
1468 * Start a processor running. 1555 * Start a processor running.
@@ -1610,7 +1697,9 @@ int proc_un_map(void *hprocessor, void *map_addr,
1610{ 1697{
1611 int status = 0; 1698 int status = 0;
1612 struct proc_object *p_proc_object = (struct proc_object *)hprocessor; 1699 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1700 struct dmm_object *dmm_mgr;
1613 u32 va_align; 1701 u32 va_align;
1702 u32 size_align;
1614 1703
1615 va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K); 1704 va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K);
1616 if (!p_proc_object) { 1705 if (!p_proc_object) {
@@ -1618,11 +1707,24 @@ int proc_un_map(void *hprocessor, void *map_addr,
1618 goto func_end; 1707 goto func_end;
1619 } 1708 }
1620 1709
1710 status = dmm_get_handle(hprocessor, &dmm_mgr);
1711 if (!dmm_mgr) {
1712 status = -EFAULT;
1713 goto func_end;
1714 }
1715
1621 /* Critical section */ 1716 /* Critical section */
1622 mutex_lock(&proc_lock); 1717 mutex_lock(&proc_lock);
1718 /*
1719 * Update DMM structures. Get the size to unmap.
1720 * This function returns error if the VA is not mapped
1721 */
1722 status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align);
1623 /* Remove mapping from the page tables. */ 1723 /* Remove mapping from the page tables. */
1624 status = user_to_dsp_unmap(p_proc_object->hbridge_context->dsp_mmu, 1724 if (!status) {
1625 va_align); 1725 status = (*p_proc_object->intf_fxns->pfn_brd_mem_un_map)
1726 (p_proc_object->hbridge_context, va_align, size_align);
1727 }
1626 1728
1627 mutex_unlock(&proc_lock); 1729 mutex_unlock(&proc_lock);
1628 if (status) 1730 if (status)
@@ -1633,7 +1735,7 @@ int proc_un_map(void *hprocessor, void *map_addr,
1633 * from dmm_map_list, so that mapped memory resource tracking 1735 * from dmm_map_list, so that mapped memory resource tracking
 1634 * remains up to date 1736 * remains up to date
1635 */ 1737 */
1636 remove_mapping_information(pr_ctxt, (u32) map_addr); 1738 remove_mapping_information(pr_ctxt, (u32) map_addr, size_align);
1637 1739
1638func_end: 1740func_end:
1639 dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n", 1741 dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
@@ -1642,6 +1744,55 @@ func_end:
1642} 1744}
1643 1745
1644/* 1746/*
1747 * ======== proc_un_reserve_memory ========
1748 * Purpose:
1749 * Frees a previously reserved region of DSP address space.
1750 */
1751int proc_un_reserve_memory(void *hprocessor, void *prsv_addr,
1752 struct process_context *pr_ctxt)
1753{
1754 struct dmm_object *dmm_mgr;
1755 int status = 0;
1756 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1757 struct dmm_rsv_object *rsv_obj;
1758
1759 if (!p_proc_object) {
1760 status = -EFAULT;
1761 goto func_end;
1762 }
1763
1764 status = dmm_get_handle(p_proc_object, &dmm_mgr);
1765 if (!dmm_mgr) {
1766 status = -EFAULT;
1767 goto func_end;
1768 }
1769
1770 status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr);
1771 if (status != 0)
1772 goto func_end;
1773
1774 /*
1775 * A successful unreserve should be followed by removal of rsv_obj
1776 * from dmm_rsv_list, so that reserved memory resource tracking
 1777 * remains up to date
1778 */
1779 spin_lock(&pr_ctxt->dmm_rsv_lock);
1780 list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) {
1781 if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) {
1782 list_del(&rsv_obj->link);
1783 kfree(rsv_obj);
1784 break;
1785 }
1786 }
1787 spin_unlock(&pr_ctxt->dmm_rsv_lock);
1788
1789func_end:
1790 dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n",
1791 __func__, hprocessor, prsv_addr, status);
1792 return status;
1793}
1794
1795/*
 1645 * ======== proc_monitor ======== 1796 * ======== proc_monitor ========
1646 * Purpose: 1797 * Purpose:
1647 * Place the Processor in Monitor State. This is an internal 1798 * Place the Processor in Monitor State. This is an internal
diff --git a/drivers/staging/udlfb/udlfb.c b/drivers/staging/udlfb/udlfb.c
index 5969e848d297..fed25105970a 100644
--- a/drivers/staging/udlfb/udlfb.c
+++ b/drivers/staging/udlfb/udlfb.c
@@ -887,7 +887,7 @@ static int dlfb_ops_open(struct fb_info *info, int user)
887 887
888 struct fb_deferred_io *fbdefio; 888 struct fb_deferred_io *fbdefio;
889 889
890 fbdefio = kmalloc(GFP_KERNEL, sizeof(struct fb_deferred_io)); 890 fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
891 891
892 if (fbdefio) { 892 if (fbdefio) {
893 fbdefio->delay = DL_DEFIO_WRITE_DELAY; 893 fbdefio->delay = DL_DEFIO_WRITE_DELAY;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index e992d5d9e15b..7cc3d2407d1b 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1675,13 +1675,14 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
1675 1675
1676 { 1676 {
1677 char essid[IW_ESSID_MAX_SIZE+1]; 1677 char essid[IW_ESSID_MAX_SIZE+1];
1678 if (wrq->u.essid.pointer) 1678 if (wrq->u.essid.pointer) {
1679 rc = iwctl_giwessid(dev, NULL, 1679 rc = iwctl_giwessid(dev, NULL,
1680 &(wrq->u.essid), essid); 1680 &(wrq->u.essid), essid);
1681 if (copy_to_user(wrq->u.essid.pointer, 1681 if (copy_to_user(wrq->u.essid.pointer,
1682 essid, 1682 essid,
1683 wrq->u.essid.length) ) 1683 wrq->u.essid.length) )
1684 rc = -EFAULT; 1684 rc = -EFAULT;
1685 }
1685 } 1686 }
1686 break; 1687 break;
1687 1688
diff --git a/drivers/staging/westbridge/astoria/api/src/cyasusb.c b/drivers/staging/westbridge/astoria/api/src/cyasusb.c
index 5a2197012065..7777d9a60a52 100644
--- a/drivers/staging/westbridge/astoria/api/src/cyasusb.c
+++ b/drivers/staging/westbridge/astoria/api/src/cyasusb.c
@@ -1417,7 +1417,6 @@ cy_as_usb_set_enum_config(cy_as_device_handle handle,
1417 */ 1417 */
1418 bus_mask = 0; 1418 bus_mask = 0;
1419 media_mask = 0; 1419 media_mask = 0;
1420 media_mask = 0;
1421 for (bus = 0; bus < CY_AS_MAX_BUSES; bus++) { 1420 for (bus = 0; bus < CY_AS_MAX_BUSES; bus++) {
1422 for (device = 0; device < CY_AS_MAX_STORAGE_DEVICES; device++) { 1421 for (device = 0; device < CY_AS_MAX_STORAGE_DEVICES; device++) {
1423 if (config_p->devices_to_enumerate[bus][device] == 1422 if (config_p->devices_to_enumerate[bus][device] ==
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 4af83d5318f2..6a71f52c59b1 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -139,7 +139,7 @@ exit:
139} 139}
140 140
141int prism2_add_key(struct wiphy *wiphy, struct net_device *dev, 141int prism2_add_key(struct wiphy *wiphy, struct net_device *dev,
142 u8 key_index, const u8 *mac_addr, 142 u8 key_index, bool pairwise, const u8 *mac_addr,
143 struct key_params *params) 143 struct key_params *params)
144{ 144{
145 wlandevice_t *wlandev = dev->ml_priv; 145 wlandevice_t *wlandev = dev->ml_priv;
@@ -198,7 +198,7 @@ exit:
198} 198}
199 199
200int prism2_get_key(struct wiphy *wiphy, struct net_device *dev, 200int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
201 u8 key_index, const u8 *mac_addr, void *cookie, 201 u8 key_index, bool pairwise, const u8 *mac_addr, void *cookie,
202 void (*callback)(void *cookie, struct key_params*)) 202 void (*callback)(void *cookie, struct key_params*))
203{ 203{
204 wlandevice_t *wlandev = dev->ml_priv; 204 wlandevice_t *wlandev = dev->ml_priv;
@@ -227,7 +227,7 @@ int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
227} 227}
228 228
229int prism2_del_key(struct wiphy *wiphy, struct net_device *dev, 229int prism2_del_key(struct wiphy *wiphy, struct net_device *dev,
230 u8 key_index, const u8 *mac_addr) 230 u8 key_index, bool pairwise, const u8 *mac_addr)
231{ 231{
232 wlandevice_t *wlandev = dev->ml_priv; 232 wlandevice_t *wlandev = dev->ml_priv;
233 u32 did; 233 u32 did;
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index aa1792c8429e..b7b4a733b467 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -522,8 +522,8 @@ static int p80211netdev_ethtool(wlandevice_t *wlandev, void __user *useraddr)
522 if (copy_to_user(useraddr, &edata, sizeof(edata))) 522 if (copy_to_user(useraddr, &edata, sizeof(edata)))
523 return -EFAULT; 523 return -EFAULT;
524 return 0; 524 return 0;
525 }
526#endif 525#endif
526 }
527 527
528 return -EOPNOTSUPP; 528 return -EOPNOTSUPP;
529} 529}
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 04ef3ef0a422..81b46585edf7 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -716,8 +716,8 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
716 if (msg->len < 128) 716 if (msg->len < 128)
717 *--dp = (msg->len << 1) | EA; 717 *--dp = (msg->len << 1) | EA;
718 else { 718 else {
719 *--dp = (msg->len >> 6) | EA; 719 *--dp = ((msg->len & 127) << 1) | EA;
720 *--dp = (msg->len & 127) << 1; 720 *--dp = (msg->len >> 6) & 0xfe;
721 } 721 }
722 } 722 }
723 723
@@ -2375,6 +2375,7 @@ static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm,
2375 gsm->mru = c->mru; 2375 gsm->mru = c->mru;
2376 gsm->encoding = c->encapsulation; 2376 gsm->encoding = c->encapsulation;
2377 gsm->adaption = c->adaption; 2377 gsm->adaption = c->adaption;
2378 gsm->n2 = c->n2;
2378 2379
2379 if (c->i == 1) 2380 if (c->i == 1)
2380 gsm->ftype = UIH; 2381 gsm->ftype = UIH;
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index cc1e9850d655..d8210ca00720 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -413,7 +413,8 @@ static void flush_to_ldisc(struct work_struct *work)
413 spin_lock_irqsave(&tty->buf.lock, flags); 413 spin_lock_irqsave(&tty->buf.lock, flags);
414 414
415 if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) { 415 if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
416 struct tty_buffer *head; 416 struct tty_buffer *head, *tail = tty->buf.tail;
417 int seen_tail = 0;
417 while ((head = tty->buf.head) != NULL) { 418 while ((head = tty->buf.head) != NULL) {
418 int count; 419 int count;
419 char *char_buf; 420 char *char_buf;
@@ -423,6 +424,15 @@ static void flush_to_ldisc(struct work_struct *work)
423 if (!count) { 424 if (!count) {
424 if (head->next == NULL) 425 if (head->next == NULL)
425 break; 426 break;
427 /*
 428 * The tty might get a new buffer added during the
 429 * unlock window below, and we could end up spinning
 430 * in here forever, hogging the CPU completely. To
 431 * avoid this, take a rest each time we have
 432 * processed the tail buffer.
433 */
434 if (tail == head)
435 seen_tail = 1;
426 tty->buf.head = head->next; 436 tty->buf.head = head->next;
427 tty_buffer_free(tty, head); 437 tty_buffer_free(tty, head);
428 continue; 438 continue;
@@ -432,7 +442,7 @@ static void flush_to_ldisc(struct work_struct *work)
432 line discipline as we want to empty the queue */ 442 line discipline as we want to empty the queue */
433 if (test_bit(TTY_FLUSHPENDING, &tty->flags)) 443 if (test_bit(TTY_FLUSHPENDING, &tty->flags))
434 break; 444 break;
435 if (!tty->receive_room) { 445 if (!tty->receive_room || seen_tail) {
436 schedule_delayed_work(&tty->buf.work, 1); 446 schedule_delayed_work(&tty->buf.work, 1);
437 break; 447 break;
438 } 448 }
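The seen_tail logic above snapshots the tail buffer before draining and reschedules once that snapshot has been consumed, so a producer refilling the queue during the unlock window cannot pin the worker forever. A simplified sketch of the pattern over a bare linked list (types hypothetical):

#include <stddef.h>

struct buf {
	struct buf *next;
};

/* Process at most the buffers present when we started; once the
 * remembered tail is consumed, stop and let the caller reschedule. */
static struct buf *drain_once(struct buf *head, struct buf *tail)
{
	int seen_tail = 0;

	while (head && !seen_tail) {
		if (head == tail)
			seen_tail = 1;	/* rest after the old tail */
		head = head->next;	/* "consume" the buffer */
	}
	return head;	/* whatever arrived after the snapshot */
}

int main(void)
{
	struct buf c = { NULL }, b = { &c }, a = { &b };

	/* Snapshot tail = b; c arrived later and is left for next run. */
	return drain_once(&a, &b) == &c ? 0 : 1;
}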
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 412f9775d19c..d8e96b005023 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -47,6 +47,7 @@
47 47
48static DEFINE_SPINLOCK(tty_ldisc_lock); 48static DEFINE_SPINLOCK(tty_ldisc_lock);
49static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait); 49static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
50static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_idle);
50/* Line disc dispatch table */ 51/* Line disc dispatch table */
51static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS]; 52static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS];
52 53
@@ -83,6 +84,7 @@ static void put_ldisc(struct tty_ldisc *ld)
83 return; 84 return;
84 } 85 }
85 local_irq_restore(flags); 86 local_irq_restore(flags);
87 wake_up(&tty_ldisc_idle);
86} 88}
87 89
88/** 90/**
@@ -531,6 +533,23 @@ static int tty_ldisc_halt(struct tty_struct *tty)
531} 533}
532 534
533/** 535/**
536 * tty_ldisc_wait_idle - wait for the ldisc to become idle
537 * @tty: tty to wait for
538 *
539 * Wait for the line discipline to become idle. The discipline must
540 * have been halted for this to guarantee it remains idle.
541 */
542static int tty_ldisc_wait_idle(struct tty_struct *tty)
543{
544 int ret;
545 ret = wait_event_interruptible_timeout(tty_ldisc_idle,
546 atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
547 if (ret < 0)
548 return ret;
549 return ret > 0 ? 0 : -EBUSY;
550}
551
552/**
534 * tty_set_ldisc - set line discipline 553 * tty_set_ldisc - set line discipline
535 * @tty: the terminal to set 554 * @tty: the terminal to set
536 * @ldisc: the line discipline 555 * @ldisc: the line discipline
@@ -634,8 +653,17 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
634 653
635 flush_scheduled_work(); 654 flush_scheduled_work();
636 655
656 retval = tty_ldisc_wait_idle(tty);
657
637 tty_lock(); 658 tty_lock();
638 mutex_lock(&tty->ldisc_mutex); 659 mutex_lock(&tty->ldisc_mutex);
660
661 /* handle wait idle failure locked */
662 if (retval) {
663 tty_ldisc_put(new_ldisc);
664 goto enable;
665 }
666
639 if (test_bit(TTY_HUPPED, &tty->flags)) { 667 if (test_bit(TTY_HUPPED, &tty->flags)) {
640 /* We were raced by the hangup method. It will have stomped 668 /* We were raced by the hangup method. It will have stomped
641 the ldisc data and closed the ldisc down */ 669 the ldisc data and closed the ldisc down */
@@ -669,6 +697,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
669 697
670 tty_ldisc_put(o_ldisc); 698 tty_ldisc_put(o_ldisc);
671 699
700enable:
672 /* 701 /*
673 * Allow ldisc referencing to occur again 702 * Allow ldisc referencing to occur again
674 */ 703 */
@@ -714,9 +743,12 @@ static void tty_reset_termios(struct tty_struct *tty)
714 * state closed 743 * state closed
715 */ 744 */
716 745
717static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc) 746static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
718{ 747{
719 struct tty_ldisc *ld; 748 struct tty_ldisc *ld = tty_ldisc_get(ldisc);
749
750 if (IS_ERR(ld))
751 return -1;
720 752
721 tty_ldisc_close(tty, tty->ldisc); 753 tty_ldisc_close(tty, tty->ldisc);
722 tty_ldisc_put(tty->ldisc); 754 tty_ldisc_put(tty->ldisc);
@@ -724,10 +756,10 @@ static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
724 /* 756 /*
725 * Switch the line discipline back 757 * Switch the line discipline back
726 */ 758 */
727 ld = tty_ldisc_get(ldisc);
728 BUG_ON(IS_ERR(ld));
729 tty_ldisc_assign(tty, ld); 759 tty_ldisc_assign(tty, ld);
730 tty_set_termios_ldisc(tty, ldisc); 760 tty_set_termios_ldisc(tty, ldisc);
761
762 return 0;
731} 763}
732 764
733/** 765/**
@@ -802,13 +834,16 @@ void tty_ldisc_hangup(struct tty_struct *tty)
802 a FIXME */ 834 a FIXME */
803 if (tty->ldisc) { /* Not yet closed */ 835 if (tty->ldisc) { /* Not yet closed */
804 if (reset == 0) { 836 if (reset == 0) {
805 tty_ldisc_reinit(tty, tty->termios->c_line); 837
806 err = tty_ldisc_open(tty, tty->ldisc); 838 if (!tty_ldisc_reinit(tty, tty->termios->c_line))
839 err = tty_ldisc_open(tty, tty->ldisc);
840 else
841 err = 1;
807 } 842 }
808 /* If the re-open fails or we reset then go to N_TTY. The 843 /* If the re-open fails or we reset then go to N_TTY. The
809 N_TTY open cannot fail */ 844 N_TTY open cannot fail */
810 if (reset || err) { 845 if (reset || err) {
811 tty_ldisc_reinit(tty, N_TTY); 846 BUG_ON(tty_ldisc_reinit(tty, N_TTY));
812 WARN_ON(tty_ldisc_open(tty, tty->ldisc)); 847 WARN_ON(tty_ldisc_open(tty, tty->ldisc));
813 } 848 }
814 tty_ldisc_enable(tty); 849 tty_ldisc_enable(tty);
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 273ab44cc91d..eab3a1ff99e4 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -553,12 +553,12 @@ static unsigned int
553vcs_poll(struct file *file, poll_table *wait) 553vcs_poll(struct file *file, poll_table *wait)
554{ 554{
555 struct vcs_poll_data *poll = vcs_poll_data_get(file); 555 struct vcs_poll_data *poll = vcs_poll_data_get(file);
556 int ret = 0; 556 int ret = DEFAULT_POLLMASK|POLLERR|POLLPRI;
557 557
558 if (poll) { 558 if (poll) {
559 poll_wait(file, &poll->waitq, wait); 559 poll_wait(file, &poll->waitq, wait);
560 if (!poll->seen_last_update) 560 if (poll->seen_last_update)
561 ret = POLLIN | POLLRDNORM; 561 ret = DEFAULT_POLLMASK;
562 } 562 }
563 return ret; 563 return ret;
564} 564}
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index f1aaff6202a5..045bb4b823e1 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -965,10 +965,11 @@ static int proc_getdriver(struct dev_state *ps, void __user *arg)
965 965
966static int proc_connectinfo(struct dev_state *ps, void __user *arg) 966static int proc_connectinfo(struct dev_state *ps, void __user *arg)
967{ 967{
968 struct usbdevfs_connectinfo ci; 968 struct usbdevfs_connectinfo ci = {
969 .devnum = ps->dev->devnum,
970 .slow = ps->dev->speed == USB_SPEED_LOW
971 };
969 972
970 ci.devnum = ps->dev->devnum;
971 ci.slow = ps->dev->speed == USB_SPEED_LOW;
972 if (copy_to_user(arg, &ci, sizeof(ci))) 973 if (copy_to_user(arg, &ci, sizeof(ci)))
973 return -EFAULT; 974 return -EFAULT;
974 return 0; 975 return 0;
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index b739ca814651..607d0db4a988 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -158,7 +158,7 @@ config USB_GADGET_FSL_USB2
158 boolean "Freescale Highspeed USB DR Peripheral Controller" 158 boolean "Freescale Highspeed USB DR Peripheral Controller"
159 depends on FSL_SOC || ARCH_MXC 159 depends on FSL_SOC || ARCH_MXC
160 select USB_GADGET_DUALSPEED 160 select USB_GADGET_DUALSPEED
161 select USB_FSL_MPH_DR_OF 161 select USB_FSL_MPH_DR_OF if OF
162 help 162 help
163 Some of Freescale PowerPC processors have a High Speed 163 Some of Freescale PowerPC processors have a High Speed
164 Dual-Role(DR) USB controller, which supports device mode. 164 Dual-Role(DR) USB controller, which supports device mode.
diff --git a/drivers/usb/gadget/goku_udc.h b/drivers/usb/gadget/goku_udc.h
index 566cb2319056..e7e0c69d3b1f 100644
--- a/drivers/usb/gadget/goku_udc.h
+++ b/drivers/usb/gadget/goku_udc.h
@@ -251,7 +251,8 @@ struct goku_udc {
251 got_region:1, 251 got_region:1,
252 req_config:1, 252 req_config:1,
253 configured:1, 253 configured:1,
254 enabled:1; 254 enabled:1,
255 registered:1;
255 256
256 /* pci state used to access those endpoints */ 257 /* pci state used to access those endpoints */
257 struct pci_dev *pdev; 258 struct pci_dev *pdev;
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index 01e5354a4c20..40f7716b31fc 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -105,11 +105,15 @@ struct gs_port {
105 wait_queue_head_t close_wait; /* wait for last close */ 105 wait_queue_head_t close_wait; /* wait for last close */
106 106
107 struct list_head read_pool; 107 struct list_head read_pool;
108 int read_started;
109 int read_allocated;
108 struct list_head read_queue; 110 struct list_head read_queue;
109 unsigned n_read; 111 unsigned n_read;
110 struct tasklet_struct push; 112 struct tasklet_struct push;
111 113
112 struct list_head write_pool; 114 struct list_head write_pool;
115 int write_started;
116 int write_allocated;
113 struct gs_buf port_write_buf; 117 struct gs_buf port_write_buf;
114 wait_queue_head_t drain_wait; /* wait while writes drain */ 118 wait_queue_head_t drain_wait; /* wait while writes drain */
115 119
@@ -363,6 +367,9 @@ __acquires(&port->port_lock)
363 struct usb_request *req; 367 struct usb_request *req;
364 int len; 368 int len;
365 369
370 if (port->write_started >= QUEUE_SIZE)
371 break;
372
366 req = list_entry(pool->next, struct usb_request, list); 373 req = list_entry(pool->next, struct usb_request, list);
367 len = gs_send_packet(port, req->buf, in->maxpacket); 374 len = gs_send_packet(port, req->buf, in->maxpacket);
368 if (len == 0) { 375 if (len == 0) {
@@ -397,6 +404,8 @@ __acquires(&port->port_lock)
397 break; 404 break;
398 } 405 }
399 406
407 port->write_started++;
408
400 /* abort immediately after disconnect */ 409 /* abort immediately after disconnect */
401 if (!port->port_usb) 410 if (!port->port_usb)
402 break; 411 break;
@@ -418,7 +427,6 @@ __acquires(&port->port_lock)
418{ 427{
419 struct list_head *pool = &port->read_pool; 428 struct list_head *pool = &port->read_pool;
420 struct usb_ep *out = port->port_usb->out; 429 struct usb_ep *out = port->port_usb->out;
421 unsigned started = 0;
422 430
423 while (!list_empty(pool)) { 431 while (!list_empty(pool)) {
424 struct usb_request *req; 432 struct usb_request *req;
@@ -430,6 +438,9 @@ __acquires(&port->port_lock)
430 if (!tty) 438 if (!tty)
431 break; 439 break;
432 440
441 if (port->read_started >= QUEUE_SIZE)
442 break;
443
433 req = list_entry(pool->next, struct usb_request, list); 444 req = list_entry(pool->next, struct usb_request, list);
434 list_del(&req->list); 445 list_del(&req->list);
435 req->length = out->maxpacket; 446 req->length = out->maxpacket;
@@ -447,13 +458,13 @@ __acquires(&port->port_lock)
447 list_add(&req->list, pool); 458 list_add(&req->list, pool);
448 break; 459 break;
449 } 460 }
450 started++; 461 port->read_started++;
451 462
452 /* abort immediately after disconnect */ 463 /* abort immediately after disconnect */
453 if (!port->port_usb) 464 if (!port->port_usb)
454 break; 465 break;
455 } 466 }
456 return started; 467 return port->read_started;
457} 468}
458 469
459/* 470/*
@@ -535,6 +546,7 @@ static void gs_rx_push(unsigned long _port)
535 } 546 }
536recycle: 547recycle:
537 list_move(&req->list, &port->read_pool); 548 list_move(&req->list, &port->read_pool);
549 port->read_started--;
538 } 550 }
539 551
540 /* Push from tty to ldisc; without low_latency set this is handled by 552 /* Push from tty to ldisc; without low_latency set this is handled by
@@ -587,6 +599,7 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
587 599
588 spin_lock(&port->port_lock); 600 spin_lock(&port->port_lock);
589 list_add(&req->list, &port->write_pool); 601 list_add(&req->list, &port->write_pool);
602 port->write_started--;
590 603
591 switch (req->status) { 604 switch (req->status) {
592 default: 605 default:
@@ -608,7 +621,8 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
608 spin_unlock(&port->port_lock); 621 spin_unlock(&port->port_lock);
609} 622}
610 623
611static void gs_free_requests(struct usb_ep *ep, struct list_head *head) 624static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
625 int *allocated)
612{ 626{
613 struct usb_request *req; 627 struct usb_request *req;
614 628
@@ -616,25 +630,31 @@ static void gs_free_requests(struct usb_ep *ep, struct list_head *head)
616 req = list_entry(head->next, struct usb_request, list); 630 req = list_entry(head->next, struct usb_request, list);
617 list_del(&req->list); 631 list_del(&req->list);
618 gs_free_req(ep, req); 632 gs_free_req(ep, req);
633 if (allocated)
634 (*allocated)--;
619 } 635 }
620} 636}
621 637
622static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head, 638static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
623 void (*fn)(struct usb_ep *, struct usb_request *)) 639 void (*fn)(struct usb_ep *, struct usb_request *),
640 int *allocated)
624{ 641{
625 int i; 642 int i;
626 struct usb_request *req; 643 struct usb_request *req;
644 int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;
627 645
628 /* Pre-allocate up to QUEUE_SIZE transfers, but if we can't 646 /* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
629 * do quite that many this time, don't fail ... we just won't 647 * do quite that many this time, don't fail ... we just won't
630 * be as speedy as we might otherwise be. 648 * be as speedy as we might otherwise be.
631 */ 649 */
632 for (i = 0; i < QUEUE_SIZE; i++) { 650 for (i = 0; i < n; i++) {
633 req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC); 651 req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
634 if (!req) 652 if (!req)
635 return list_empty(head) ? -ENOMEM : 0; 653 return list_empty(head) ? -ENOMEM : 0;
636 req->complete = fn; 654 req->complete = fn;
637 list_add_tail(&req->list, head); 655 list_add_tail(&req->list, head);
656 if (allocated)
657 (*allocated)++;
638 } 658 }
639 return 0; 659 return 0;
640} 660}
@@ -661,14 +681,15 @@ static int gs_start_io(struct gs_port *port)
661 * configurations may use different endpoints with a given port; 681 * configurations may use different endpoints with a given port;
662 * and high speed vs full speed changes packet sizes too. 682 * and high speed vs full speed changes packet sizes too.
663 */ 683 */
664 status = gs_alloc_requests(ep, head, gs_read_complete); 684 status = gs_alloc_requests(ep, head, gs_read_complete,
685 &port->read_allocated);
665 if (status) 686 if (status)
666 return status; 687 return status;
667 688
668 status = gs_alloc_requests(port->port_usb->in, &port->write_pool, 689 status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
669 gs_write_complete); 690 gs_write_complete, &port->write_allocated);
670 if (status) { 691 if (status) {
671 gs_free_requests(ep, head); 692 gs_free_requests(ep, head, &port->read_allocated);
672 return status; 693 return status;
673 } 694 }
674 695
@@ -680,8 +701,9 @@ static int gs_start_io(struct gs_port *port)
680 if (started) { 701 if (started) {
681 tty_wakeup(port->port_tty); 702 tty_wakeup(port->port_tty);
682 } else { 703 } else {
683 gs_free_requests(ep, head); 704 gs_free_requests(ep, head, &port->read_allocated);
684 gs_free_requests(port->port_usb->in, &port->write_pool); 705 gs_free_requests(port->port_usb->in, &port->write_pool,
706 &port->write_allocated);
685 status = -EIO; 707 status = -EIO;
686 } 708 }
687 709
@@ -1315,8 +1337,12 @@ void gserial_disconnect(struct gserial *gser)
1315 spin_lock_irqsave(&port->port_lock, flags); 1337 spin_lock_irqsave(&port->port_lock, flags);
1316 if (port->open_count == 0 && !port->openclose) 1338 if (port->open_count == 0 && !port->openclose)
1317 gs_buf_free(&port->port_write_buf); 1339 gs_buf_free(&port->port_write_buf);
1318 gs_free_requests(gser->out, &port->read_pool); 1340 gs_free_requests(gser->out, &port->read_pool, NULL);
1319 gs_free_requests(gser->out, &port->read_queue); 1341 gs_free_requests(gser->out, &port->read_queue, NULL);
1320 gs_free_requests(gser->in, &port->write_pool); 1342 gs_free_requests(gser->in, &port->write_pool, NULL);
1343
1344 port->read_allocated = port->read_started =
1345 port->write_allocated = port->write_started = 0;
1346
1321 spin_unlock_irqrestore(&port->port_lock, flags); 1347 spin_unlock_irqrestore(&port->port_lock, flags);
1322} 1348}
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 2391c396ca32..6f4f8e6a40c7 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -122,7 +122,7 @@ config USB_EHCI_FSL
122 bool "Support for Freescale on-chip EHCI USB controller" 122 bool "Support for Freescale on-chip EHCI USB controller"
123 depends on USB_EHCI_HCD && FSL_SOC 123 depends on USB_EHCI_HCD && FSL_SOC
124 select USB_EHCI_ROOT_HUB_TT 124 select USB_EHCI_ROOT_HUB_TT
125 select USB_FSL_MPH_DR_OF 125 select USB_FSL_MPH_DR_OF if OF
126 ---help--- 126 ---help---
127 Variation of ARC USB block used in some Freescale chips. 127 Variation of ARC USB block used in some Freescale chips.
128 128
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index ac9c4d7c44af..bce85055019a 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -36,6 +36,8 @@ struct ehci_mxc_priv {
36static int ehci_mxc_setup(struct usb_hcd *hcd) 36static int ehci_mxc_setup(struct usb_hcd *hcd)
37{ 37{
38 struct ehci_hcd *ehci = hcd_to_ehci(hcd); 38 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
39 struct device *dev = hcd->self.controller;
40 struct mxc_usbh_platform_data *pdata = dev_get_platdata(dev);
39 int retval; 41 int retval;
40 42
41 /* EHCI registers start at offset 0x100 */ 43 /* EHCI registers start at offset 0x100 */
@@ -63,6 +65,12 @@ static int ehci_mxc_setup(struct usb_hcd *hcd)
63 65
64 ehci_reset(ehci); 66 ehci_reset(ehci);
65 67
68 /* set up the PORTSCx register */
69 ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
70
71 /* is this really needed? */
72 msleep(10);
73
66 ehci_port_power(ehci, 0); 74 ehci_port_power(ehci, 0);
67 return 0; 75 return 0;
68} 76}
@@ -114,7 +122,7 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
114 struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data; 122 struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
115 struct usb_hcd *hcd; 123 struct usb_hcd *hcd;
116 struct resource *res; 124 struct resource *res;
117 int irq, ret, temp; 125 int irq, ret;
118 struct ehci_mxc_priv *priv; 126 struct ehci_mxc_priv *priv;
119 struct device *dev = &pdev->dev; 127 struct device *dev = &pdev->dev;
120 128
@@ -188,10 +196,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
188 clk_enable(priv->ahbclk); 196 clk_enable(priv->ahbclk);
189 } 197 }
190 198
191 /* set up the PORTSCx register */
192 ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
193 mdelay(10);
194
195 /* setup specific usb hw */ 199 /* setup specific usb hw */
196 ret = mxc_initialize_usb_hw(pdev->id, pdata->flags); 200 ret = mxc_initialize_usb_hw(pdev->id, pdata->flags);
197 if (ret < 0) 201 if (ret < 0)
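
On the ehci-mxc.c hunks above: the PORTSC write moves from probe time into ehci_mxc_setup(), after ehci_reset(), presumably because resetting the controller returns PORTSC to its default value and would discard a value programmed earlier in probe; msleep(10) also replaces the old busy-wait mdelay(10). A trimmed sketch of the resulting ordering (it only restates the calls from the hunk):

    /* Trimmed sketch of ehci_mxc_setup() after this change. */
    static int ehci_mxc_setup_sketch(struct usb_hcd *hcd)
    {
            struct ehci_hcd *ehci = hcd_to_ehci(hcd);
            struct mxc_usbh_platform_data *pdata =
                            dev_get_platdata(hcd->self.controller);

            ehci_reset(ehci);       /* resets PORTSC to its default... */

            /* ...so PORTSC is programmed after the reset, not in probe */
            ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
            msleep(10);             /* sleep instead of busy-waiting */

            ehci_port_power(ehci, 0);
            return 0;
    }
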
diff --git a/drivers/usb/host/ohci-jz4740.c b/drivers/usb/host/ohci-jz4740.c
index 10e1872f3ab9..931d588c3fb5 100644
--- a/drivers/usb/host/ohci-jz4740.c
+++ b/drivers/usb/host/ohci-jz4740.c
@@ -273,4 +273,4 @@ static struct platform_driver ohci_hcd_jz4740_driver = {
273 }, 273 },
274}; 274};
275 275
276MODULE_ALIAS("platfrom:jz4740-ohci"); 276MODULE_ALIAS("platform:jz4740-ohci");
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 375664198776..c9078e4e1f4d 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -553,6 +553,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
553 /* needed for power consumption */ 553 /* needed for power consumption */
554 struct usb_config_descriptor *cfg_descriptor = &dev->udev->actconfig->desc; 554 struct usb_config_descriptor *cfg_descriptor = &dev->udev->actconfig->desc;
555 555
556 memset(&info, 0, sizeof(info));
556 /* directly from the descriptor */ 557 /* directly from the descriptor */
557 info.vendor = le16_to_cpu(dev->udev->descriptor.idVendor); 558 info.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
558 info.product = dev->product_id; 559 info.product = dev->product_id;
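
The memset() added above is an information-leak fix: info lives on the kernel stack, so any fields or padding that the ioctl does not explicitly fill would otherwise reach userspace as stale kernel memory. The sisusb hunk below applies the same idea to a reserved array. The general pattern, as a generic sketch with a hypothetical struct:

    /* Generic zero-before-copy_to_user sketch; the struct and the
     * values are hypothetical, not iowarrior's. */
    #include <linux/string.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct my_info {
            u32 vendor;
            u32 product;
            u8  reserved[8];        /* reserved/padding bytes must not leak */
    };

    static long fill_info(void __user *arg)
    {
            struct my_info info;

            memset(&info, 0, sizeof(info)); /* clear stack garbage first */
            info.vendor  = 0x1234;          /* then fill the known fields */
            info.product = 0x5678;

            if (copy_to_user(arg, &info, sizeof(info)))
                    return -EFAULT;
            return 0;
    }
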
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 70d00e99a4b4..dd573abd2d1e 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3008,6 +3008,7 @@ sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3008#else 3008#else
3009 x.sisusb_conactive = 0; 3009 x.sisusb_conactive = 0;
3010#endif 3010#endif
3011 memset(x.sisusb_reserved, 0, sizeof(x.sisusb_reserved));
3011 3012
3012 if (copy_to_user((void __user *)arg, &x, sizeof(x))) 3013 if (copy_to_user((void __user *)arg, &x, sizeof(x)))
3013 retval = -EFAULT; 3014 retval = -EFAULT;
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 611a9d274363..fcb5206a65bd 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -171,8 +171,9 @@ static irqreturn_t blackfin_interrupt(int irq, void *__hci)
171 } 171 }
172 172
173 /* Start sampling ID pin, when plug is removed from MUSB */ 173 /* Start sampling ID pin, when plug is removed from MUSB */
174 if (is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE 174 if ((is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE
175 || musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) { 175 || musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) ||
176 (musb->int_usb & MUSB_INTR_DISCONNECT && is_host_active(musb))) {
176 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); 177 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
177 musb->a_wait_bcon = TIMER_DELAY; 178 musb->a_wait_bcon = TIMER_DELAY;
178 } 179 }
@@ -323,30 +324,8 @@ int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
323 return -EIO; 324 return -EIO;
324} 325}
325 326
326int __init musb_platform_init(struct musb *musb, void *board_data) 327static void musb_platform_reg_init(struct musb *musb)
327{ 328{
328
329 /*
330 * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE
331 * and OTG HOST modes, while rev 1.1 and greater require PE7 to
332 * be low for DEVICE mode and high for HOST mode. We set it high
333 * here because we are in host mode
334 */
335
336 if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) {
337 printk(KERN_ERR "Failed ro request USB_VRSEL GPIO_%d \n",
338 musb->config->gpio_vrsel);
339 return -ENODEV;
340 }
341 gpio_direction_output(musb->config->gpio_vrsel, 0);
342
343 usb_nop_xceiv_register();
344 musb->xceiv = otg_get_transceiver();
345 if (!musb->xceiv) {
346 gpio_free(musb->config->gpio_vrsel);
347 return -ENODEV;
348 }
349
350 if (ANOMALY_05000346) { 329 if (ANOMALY_05000346) {
351 bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value); 330 bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value);
352 SSYNC(); 331 SSYNC();
@@ -358,7 +337,8 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
358 } 337 }
359 338
360 /* Configure PLL oscillator register */ 339 /* Configure PLL oscillator register */
361 bfin_write_USB_PLLOSC_CTRL(0x30a8); 340 bfin_write_USB_PLLOSC_CTRL(0x3080 |
341 ((480/musb->config->clkin) << 1));
362 SSYNC(); 342 SSYNC();
363 343
364 bfin_write_USB_SRP_CLKDIV((get_sclk()/1000) / 32 - 1); 344 bfin_write_USB_SRP_CLKDIV((get_sclk()/1000) / 32 - 1);
@@ -380,6 +360,33 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
380 EP2_RX_ENA | EP3_RX_ENA | EP4_RX_ENA | 360 EP2_RX_ENA | EP3_RX_ENA | EP4_RX_ENA |
381 EP5_RX_ENA | EP6_RX_ENA | EP7_RX_ENA); 361 EP5_RX_ENA | EP6_RX_ENA | EP7_RX_ENA);
382 SSYNC(); 362 SSYNC();
363}
364
365int __init musb_platform_init(struct musb *musb, void *board_data)
366{
367
368 /*
369 * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE
370 * and OTG HOST modes, while rev 1.1 and greater require PE7 to
371 * be low for DEVICE mode and high for HOST mode. We set it high
372 * here because we are in host mode
373 */
374
375 if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) {
376 printk(KERN_ERR "Failed ro request USB_VRSEL GPIO_%d\n",
377 musb->config->gpio_vrsel);
378 return -ENODEV;
379 }
380 gpio_direction_output(musb->config->gpio_vrsel, 0);
381
382 usb_nop_xceiv_register();
383 musb->xceiv = otg_get_transceiver();
384 if (!musb->xceiv) {
385 gpio_free(musb->config->gpio_vrsel);
386 return -ENODEV;
387 }
388
389 musb_platform_reg_init(musb);
383 390
384 if (is_host_enabled(musb)) { 391 if (is_host_enabled(musb)) {
385 musb->board_set_vbus = bfin_set_vbus; 392 musb->board_set_vbus = bfin_set_vbus;
@@ -394,6 +401,27 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
394 return 0; 401 return 0;
395} 402}
396 403
404#ifdef CONFIG_PM
405void musb_platform_save_context(struct musb *musb,
406 struct musb_context_registers *musb_context)
407{
408 if (is_host_active(musb))
409 /*
410 * During hibernate gpio_vrsel will change from high to
411 * low, which will generate a wakeup event and resume the
412 * system immediately. Set it to 0 before hibernate to
413 * avoid this wakeup event.
414 */
415 gpio_set_value(musb->config->gpio_vrsel, 0);
416}
417
418void musb_platform_restore_context(struct musb *musb,
419 struct musb_context_registers *musb_context)
420{
421 musb_platform_reg_init(musb);
422}
423#endif
424
397int musb_platform_exit(struct musb *musb) 425int musb_platform_exit(struct musb *musb)
398{ 426{
399 gpio_free(musb->config->gpio_vrsel); 427 gpio_free(musb->config->gpio_vrsel);
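
The PLL change above stops hard-coding USB_PLLOSC_CTRL to 0x30a8 and instead derives the multiplier from the new clkin field added to struct musb_hdrc_config at the end of this patch: the USB PLL must run at 480 MHz, so the multiplier is 480/clkin, shifted into the field at bit 1. For the 24 MHz CLKIN the old constant assumed, the formula reproduces it exactly; a standalone check:

    /* Standalone check of the new formula against the old constant:
     * 480/24 = 20 = 0x14; 0x14 << 1 = 0x28; 0x3080 | 0x28 = 0x30a8. */
    #include <stdio.h>

    int main(void)
    {
            unsigned clkin = 24;    /* MHz, now taken from board config */
            unsigned val = 0x3080 | ((480 / clkin) << 1);

            printf("USB_PLLOSC_CTRL = 0x%04x\n", val);      /* 0x30a8 */
            return 0;
    }
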
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c9f9024c5515..e6669fc3b804 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -552,7 +552,8 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
552 if (int_usb & MUSB_INTR_SESSREQ) { 552 if (int_usb & MUSB_INTR_SESSREQ) {
553 void __iomem *mbase = musb->mregs; 553 void __iomem *mbase = musb->mregs;
554 554
555 if (devctl & MUSB_DEVCTL_BDEVICE) { 555 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
556 && (devctl & MUSB_DEVCTL_BDEVICE)) {
556 DBG(3, "SessReq while on B state\n"); 557 DBG(3, "SessReq while on B state\n");
557 return IRQ_HANDLED; 558 return IRQ_HANDLED;
558 } 559 }
@@ -1052,6 +1053,11 @@ static void musb_shutdown(struct platform_device *pdev)
1052 clk_put(musb->clock); 1053 clk_put(musb->clock);
1053 spin_unlock_irqrestore(&musb->lock, flags); 1054 spin_unlock_irqrestore(&musb->lock, flags);
1054 1055
1056 if (!is_otg_enabled(musb) && is_host_enabled(musb))
1057 usb_remove_hcd(musb_to_hcd(musb));
1058 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
1059 musb_platform_exit(musb);
1060
1055 /* FIXME power down */ 1061 /* FIXME power down */
1056} 1062}
1057 1063
@@ -2244,13 +2250,6 @@ static int __exit musb_remove(struct platform_device *pdev)
2244 */ 2250 */
2245 musb_exit_debugfs(musb); 2251 musb_exit_debugfs(musb);
2246 musb_shutdown(pdev); 2252 musb_shutdown(pdev);
2247#ifdef CONFIG_USB_MUSB_HDRC_HCD
2248 if (musb->board_mode == MUSB_HOST)
2249 usb_remove_hcd(musb_to_hcd(musb));
2250#endif
2251 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2252 musb_platform_exit(musb);
2253 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2254 2253
2255 musb_free(musb); 2254 musb_free(musb);
2256 iounmap(ctrl_base); 2255 iounmap(ctrl_base);
@@ -2411,9 +2410,6 @@ static int musb_suspend(struct device *dev)
2411 unsigned long flags; 2410 unsigned long flags;
2412 struct musb *musb = dev_to_musb(&pdev->dev); 2411 struct musb *musb = dev_to_musb(&pdev->dev);
2413 2412
2414 if (!musb->clock)
2415 return 0;
2416
2417 spin_lock_irqsave(&musb->lock, flags); 2413 spin_lock_irqsave(&musb->lock, flags);
2418 2414
2419 if (is_peripheral_active(musb)) { 2415 if (is_peripheral_active(musb)) {
@@ -2428,10 +2424,12 @@ static int musb_suspend(struct device *dev)
2428 2424
2429 musb_save_context(musb); 2425 musb_save_context(musb);
2430 2426
2431 if (musb->set_clock) 2427 if (musb->clock) {
2432 musb->set_clock(musb->clock, 0); 2428 if (musb->set_clock)
2433 else 2429 musb->set_clock(musb->clock, 0);
2434 clk_disable(musb->clock); 2430 else
2431 clk_disable(musb->clock);
2432 }
2435 spin_unlock_irqrestore(&musb->lock, flags); 2433 spin_unlock_irqrestore(&musb->lock, flags);
2436 return 0; 2434 return 0;
2437} 2435}
@@ -2441,13 +2439,12 @@ static int musb_resume_noirq(struct device *dev)
2441 struct platform_device *pdev = to_platform_device(dev); 2439 struct platform_device *pdev = to_platform_device(dev);
2442 struct musb *musb = dev_to_musb(&pdev->dev); 2440 struct musb *musb = dev_to_musb(&pdev->dev);
2443 2441
2444 if (!musb->clock) 2442 if (musb->clock) {
2445 return 0; 2443 if (musb->set_clock)
2446 2444 musb->set_clock(musb->clock, 1);
2447 if (musb->set_clock) 2445 else
2448 musb->set_clock(musb->clock, 1); 2446 clk_enable(musb->clock);
2449 else 2447 }
2450 clk_enable(musb->clock);
2451 2448
2452 musb_restore_context(musb); 2449 musb_restore_context(musb);
2453 2450
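
The suspend/resume rework above drops the early "no clock, nothing to do" returns so that musb_save_context()/musb_restore_context() always run (presumably for glue layers without a clock, such as the Blackfin support extended in this same patch); only the clock operations remain conditional. The resulting guard, as a sketch:

    /* Sketch: context handling is unconditional, clock ops guarded. */
    static int musb_suspend_sketch(struct musb *musb)
    {
            musb_save_context(musb);        /* always, clock or no clock */

            if (musb->clock) {
                    if (musb->set_clock)
                            musb->set_clock(musb->clock, 0);
                    else
                            clk_disable(musb->clock);
            }
            return 0;
    }
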
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 69797e5b46a7..febaabcc2b35 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -487,7 +487,7 @@ struct musb_context_registers {
487}; 487};
488 488
489#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \ 489#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
490 defined(CONFIG_ARCH_OMAP4) 490 defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_BLACKFIN)
491extern void musb_platform_save_context(struct musb *musb, 491extern void musb_platform_save_context(struct musb *musb,
492 struct musb_context_registers *musb_context); 492 struct musb_context_registers *musb_context);
493extern void musb_platform_restore_context(struct musb *musb, 493extern void musb_platform_restore_context(struct musb *musb,
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 5d815049cbaa..36cfd060dbe5 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -644,10 +644,8 @@ static void rxstate(struct musb *musb, struct musb_request *req)
644 */ 644 */
645 645
646 csr |= MUSB_RXCSR_DMAENAB; 646 csr |= MUSB_RXCSR_DMAENAB;
647 if (!musb_ep->hb_mult &&
648 musb_ep->hw_ep->rx_double_buffered)
649 csr |= MUSB_RXCSR_AUTOCLEAR;
650#ifdef USE_MODE1 647#ifdef USE_MODE1
648 csr |= MUSB_RXCSR_AUTOCLEAR;
651 /* csr |= MUSB_RXCSR_DMAMODE; */ 649 /* csr |= MUSB_RXCSR_DMAMODE; */
652 650
653 /* this special sequence (enabling and then 651 /* this special sequence (enabling and then
@@ -656,6 +654,10 @@ static void rxstate(struct musb *musb, struct musb_request *req)
656 */ 654 */
657 musb_writew(epio, MUSB_RXCSR, 655 musb_writew(epio, MUSB_RXCSR,
658 csr | MUSB_RXCSR_DMAMODE); 656 csr | MUSB_RXCSR_DMAMODE);
657#else
658 if (!musb_ep->hb_mult &&
659 musb_ep->hw_ep->rx_double_buffered)
660 csr |= MUSB_RXCSR_AUTOCLEAR;
659#endif 661#endif
660 musb_writew(epio, MUSB_RXCSR, csr); 662 musb_writew(epio, MUSB_RXCSR, csr);
661 663
@@ -807,7 +809,7 @@ void musb_g_rx(struct musb *musb, u8 epnum)
807 809
808#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) 810#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
809 /* Autoclear doesn't clear RxPktRdy for short packets */ 811 /* Autoclear doesn't clear RxPktRdy for short packets */
810 if ((dma->desired_mode == 0) 812 if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
811 || (dma->actual_len 813 || (dma->actual_len
812 & (musb_ep->packet_sz - 1))) { 814 & (musb_ep->packet_sz - 1))) {
813 /* ack the read! */ 815 /* ack the read! */
@@ -818,8 +820,16 @@ void musb_g_rx(struct musb *musb, u8 epnum)
818 /* incomplete, and not short? wait for next IN packet */ 820 /* incomplete, and not short? wait for next IN packet */
819 if ((request->actual < request->length) 821 if ((request->actual < request->length)
820 && (musb_ep->dma->actual_len 822 && (musb_ep->dma->actual_len
821 == musb_ep->packet_sz)) 823 == musb_ep->packet_sz)) {
824 /* In the double-buffered case, continue to unload the
825 * FIFO if there is an Rx packet in it.
826 */
827 csr = musb_readw(epio, MUSB_RXCSR);
828 if ((csr & MUSB_RXCSR_RXPKTRDY) &&
829 hw_ep->rx_double_buffered)
830 goto exit;
822 return; 831 return;
832 }
823#endif 833#endif
824 musb_g_giveback(musb_ep, request, 0); 834 musb_g_giveback(musb_ep, request, 0);
825 835
@@ -827,7 +837,7 @@ void musb_g_rx(struct musb *musb, u8 epnum)
827 if (!request) 837 if (!request)
828 return; 838 return;
829 } 839 }
830 840exit:
831 /* Analyze request */ 841 /* Analyze request */
832 rxstate(musb, to_musb_request(request)); 842 rxstate(musb, to_musb_request(request));
833} 843}
@@ -916,13 +926,9 @@ static int musb_gadget_enable(struct usb_ep *ep,
916 * likewise high bandwidth periodic tx 926 * likewise high bandwidth periodic tx
917 */ 927 */
918 /* Set TXMAXP with the FIFO size of the endpoint 928 /* Set TXMAXP with the FIFO size of the endpoint
919 * to disable double buffering mode. Currently, It seems that double 929 * to disable double buffering mode.
920 * buffering has problem if musb RTL revision number < 2.0.
921 */ 930 */
922 if (musb->hwvers < MUSB_HWVERS_2000) 931 musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
923 musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
924 else
925 musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
926 932
927 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; 933 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
928 if (musb_readw(regs, MUSB_TXCSR) 934 if (musb_readw(regs, MUSB_TXCSR)
@@ -958,10 +964,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
958 /* Set RXMAXP with the FIFO size of the endpoint 964 /* Set RXMAXP with the FIFO size of the endpoint
959 * to disable double buffering mode. 965 * to disable double buffering mode.
960 */ 966 */
961 if (musb->hwvers < MUSB_HWVERS_2000) 967 musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
962 musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx);
963 else
964 musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
965 968
966 /* force shared fifo to OUT-only mode */ 969 /* force shared fifo to OUT-only mode */
967 if (hw_ep->is_shared_fifo) { 970 if (hw_ep->is_shared_fifo) {
@@ -1166,8 +1169,6 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1166 : DMA_FROM_DEVICE); 1169 : DMA_FROM_DEVICE);
1167 request->mapped = 0; 1170 request->mapped = 0;
1168 } 1171 }
1169 } else if (!req->buf) {
1170 return -ENODATA;
1171 } else 1172 } else
1172 request->mapped = 0; 1173 request->mapped = 0;
1173 1174
@@ -1695,8 +1696,10 @@ int __init musb_gadget_setup(struct musb *musb)
1695 musb_platform_try_idle(musb, 0); 1696 musb_platform_try_idle(musb, 0);
1696 1697
1697 status = device_register(&musb->g.dev); 1698 status = device_register(&musb->g.dev);
1698 if (status != 0) 1699 if (status != 0) {
1700 put_device(&musb->g.dev);
1699 the_gadget = NULL; 1701 the_gadget = NULL;
1702 }
1700 return status; 1703 return status;
1701} 1704}
1702 1705
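
The error-path fix in musb_gadget_setup() above follows the documented device_register() contract: once device_register() has been called, the device's reference count is live even when registration fails, so the caller must drop it with put_device(), which runs the release path, and must never free the structure directly. As a generic pattern:

    /* Generic sketch of the device_register() error contract. */
    static int register_example(struct device *dev)
    {
            int status = device_register(dev);

            if (status != 0) {
                    put_device(dev);    /* drop the ref; never kfree() here */
                    return status;
            }
            return 0;
    }
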
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 244267527a60..5a727c5b8676 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -633,8 +633,9 @@ static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum)
633 return 0; 633 return 0;
634} 634}
635 635
636static inline void musb_read_txhubport(void __iomem *mbase, u8 epnum) 636static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum)
637{ 637{
638 return 0;
638} 639}
639 640
640#endif /* CONFIG_BLACKFIN */ 641#endif /* CONFIG_BLACKFIN */
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 6f771af5cbdb..563114d613d6 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -158,6 +158,8 @@ static int dma_channel_program(struct dma_channel *channel,
158 dma_addr_t dma_addr, u32 len) 158 dma_addr_t dma_addr, u32 len)
159{ 159{
160 struct musb_dma_channel *musb_channel = channel->private_data; 160 struct musb_dma_channel *musb_channel = channel->private_data;
161 struct musb_dma_controller *controller = musb_channel->controller;
162 struct musb *musb = controller->private_data;
161 163
162 DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n", 164 DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n",
163 musb_channel->epnum, 165 musb_channel->epnum,
@@ -167,6 +169,18 @@ static int dma_channel_program(struct dma_channel *channel,
167 BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || 169 BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
168 channel->status == MUSB_DMA_STATUS_BUSY); 170 channel->status == MUSB_DMA_STATUS_BUSY);
169 171
172 /*
173 * The DMA engine in RTL1.8 and above cannot handle
174 * DMA addresses that are not aligned to a 4-byte boundary.
175 * It ends up masking the last two bits of the address
176 * programmed in DMA_ADDR.
177 *
178 * Fail such DMA transfers, so that the backup PIO mode
179 * can carry out the transfer.
180 */
181 if ((musb->hwvers >= MUSB_HWVERS_1800) && (dma_addr % 4))
182 return false;
183
170 channel->actual_len = 0; 184 channel->actual_len = 0;
171 musb_channel->start_addr = dma_addr; 185 musb_channel->start_addr = dma_addr;
172 musb_channel->len = len; 186 musb_channel->len = len;
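
The new check above makes dma_channel_program() refuse buffers that are not 4-byte aligned on RTL 1.8 and newer, where the controller masks the two low bits of DMA_ADDR; returning false lets the caller fall back to PIO instead of corrupting the transfer. The caller-side contract looks roughly like this (use_pio() is hypothetical; the hwvers test mirrors the hunk):

    /* Sketch of the fall-back-to-PIO contract around the new check. */
    static bool program_dma_sketch(struct musb *musb, dma_addr_t addr)
    {
            /* RTL >= 1.8 masks the low two address bits, so refuse
             * unaligned buffers rather than silently corrupt them. */
            if (musb->hwvers >= MUSB_HWVERS_1800 && (addr & 3))
                    return false;

            /* ...program the channel as before... */
            return true;
    }

    static void start_transfer(struct musb *musb, dma_addr_t addr)
    {
            if (!program_dma_sketch(musb, addr))
                    use_pio(musb, addr);    /* hypothetical PIO fallback */
    }
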
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 89a9a5847803..76f8b3556672 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -794,6 +794,8 @@ static struct usb_device_id id_table_combined [] = {
794 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) }, 794 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) },
795 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) }, 795 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
796 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) }, 796 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
797 { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
798 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
797 { }, /* Optional parameter entry */ 799 { }, /* Optional parameter entry */
798 { } /* Terminating entry */ 800 { } /* Terminating entry */
799}; 801};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 7dfe02f1fb6a..263f62551197 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1100,3 +1100,10 @@
1100#define FTDI_SCIENCESCOPE_LOGBOOKML_PID 0xFF18 1100#define FTDI_SCIENCESCOPE_LOGBOOKML_PID 0xFF18
1101#define FTDI_SCIENCESCOPE_LS_LOGBOOK_PID 0xFF1C 1101#define FTDI_SCIENCESCOPE_LS_LOGBOOK_PID 0xFF1C
1102#define FTDI_SCIENCESCOPE_HS_LOGBOOK_PID 0xFF1D 1102#define FTDI_SCIENCESCOPE_HS_LOGBOOK_PID 0xFF1D
1103
1104/*
1105 * Milkymist One JTAG/Serial
1106 */
1107#define QIHARDWARE_VID 0x20B7
1108#define MILKYMISTONE_JTAGSERIAL_PID 0x0713
1109
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2297fb1bcf65..ef2977d3a613 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -518,7 +518,7 @@ static const struct usb_device_id option_ids[] = {
518 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) }, 518 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) },
519 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, 519 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
520 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, 520 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
521 { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) }, 521 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
522 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, 522 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
523 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, 523 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
524 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, 524 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
diff --git a/drivers/uwb/allocator.c b/drivers/uwb/allocator.c
index 436e4f7110cb..e45e673b8770 100644
--- a/drivers/uwb/allocator.c
+++ b/drivers/uwb/allocator.c
@@ -326,7 +326,8 @@ int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *availab
326 int bit_index; 326 int bit_index;
327 327
328 ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL); 328 ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL);
329 329 if (!ai)
330 return UWB_RSV_ALLOC_NOT_FOUND;
330 ai->min_mas = rsv->min_mas; 331 ai->min_mas = rsv->min_mas;
331 ai->max_mas = rsv->max_mas; 332 ai->max_mas = rsv->max_mas;
332 ai->max_interval = rsv->max_interval; 333 ai->max_interval = rsv->max_interval;
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 97612f548a8e..321a0c8346e5 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -1299,9 +1299,6 @@ static void restore_cpu_virqs(unsigned int cpu)
1299 evtchn_to_irq[evtchn] = irq; 1299 evtchn_to_irq[evtchn] = irq;
1300 irq_info[irq] = mk_virq_info(evtchn, virq); 1300 irq_info[irq] = mk_virq_info(evtchn, virq);
1301 bind_evtchn_to_cpu(evtchn, cpu); 1301 bind_evtchn_to_cpu(evtchn, cpu);
1302
1303 /* Ready for use. */
1304 unmask_evtchn(evtchn);
1305 } 1302 }
1306} 1303}
1307 1304
@@ -1327,10 +1324,6 @@ static void restore_cpu_ipis(unsigned int cpu)
1327 evtchn_to_irq[evtchn] = irq; 1324 evtchn_to_irq[evtchn] = irq;
1328 irq_info[irq] = mk_ipi_info(evtchn, ipi); 1325 irq_info[irq] = mk_ipi_info(evtchn, ipi);
1329 bind_evtchn_to_cpu(evtchn, cpu); 1326 bind_evtchn_to_cpu(evtchn, cpu);
1330
1331 /* Ready for use. */
1332 unmask_evtchn(evtchn);
1333
1334 } 1327 }
1335} 1328}
1336 1329
@@ -1390,6 +1383,7 @@ void xen_poll_irq(int irq)
1390void xen_irq_resume(void) 1383void xen_irq_resume(void)
1391{ 1384{
1392 unsigned int cpu, irq, evtchn; 1385 unsigned int cpu, irq, evtchn;
1386 struct irq_desc *desc;
1393 1387
1394 init_evtchn_cpu_bindings(); 1388 init_evtchn_cpu_bindings();
1395 1389
@@ -1408,6 +1402,23 @@ void xen_irq_resume(void)
1408 restore_cpu_virqs(cpu); 1402 restore_cpu_virqs(cpu);
1409 restore_cpu_ipis(cpu); 1403 restore_cpu_ipis(cpu);
1410 } 1404 }
1405
1406 /*
1407 * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These
1408 * are not handled by the IRQ core.
1409 */
1410 for_each_irq_desc(irq, desc) {
1411 if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND))
1412 continue;
1413 if (desc->status & IRQ_DISABLED)
1414 continue;
1415
1416 evtchn = evtchn_from_irq(irq);
1417 if (evtchn == -1)
1418 continue;
1419
1420 unmask_evtchn(evtchn);
1421 }
1411} 1422}
1412 1423
1413static struct irq_chip xen_dynamic_chip __read_mostly = { 1424static struct irq_chip xen_dynamic_chip __read_mostly = {
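
The xen_irq_resume() change above stops unmasking every restored VIRQ and IPI unconditionally; instead it walks the IRQ descriptors and unmasks only event channels backing IRQF_NO_SUSPEND handlers that are still enabled, i.e. exactly the IRQs the IRQ core does not re-enable by itself. For reference, such an IRQ is set up at request time like this (generic sketch, names hypothetical):

    /* Generic sketch: an IRQ that must stay armed across suspend is
     * requested with IRQF_NO_SUSPEND, which is why xen_irq_resume()
     * has to unmask its event channel by hand. */
    #include <linux/interrupt.h>

    static irqreturn_t wake_handler(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    static int setup_wake_irq(int irq, void *dev_id)
    {
            return request_irq(irq, wake_handler, IRQF_NO_SUSPEND,
                               "example-wake", dev_id);
    }
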
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 2a754748dd5f..c7ea9bc8897c 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -50,7 +50,7 @@
50#define N_V253 19 /* Codec control over voice modem */ 50#define N_V253 19 /* Codec control over voice modem */
51#define N_CAIF 20 /* CAIF protocol for talking to modems */ 51#define N_CAIF 20 /* CAIF protocol for talking to modems */
52#define N_GSM0710 21 /* GSM 0710 Mux */ 52#define N_GSM0710 21 /* GSM 0710 Mux */
53#define N_TI_WL 22 /* for TI's WL BT, FM, GPS combo chips */ 53#define N_TI_WL 22 /* for TI's WL BT, FM, GPS combo chips */
54 54
55/* 55/*
56 * This character is the same as _POSIX_VDISABLE: it cannot be used as 56 * This character is the same as _POSIX_VDISABLE: it cannot be used as
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 35fe6ab222bb..24300d8a1bc1 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -797,7 +797,7 @@ struct usbdrv_wrap {
797 * @disconnect: Called when the interface is no longer accessible, usually 797 * @disconnect: Called when the interface is no longer accessible, usually
798 * because its device has been (or is being) disconnected or the 798 * because its device has been (or is being) disconnected or the
799 * driver module is being unloaded. 799 * driver module is being unloaded.
800 * @ioctl: Used for drivers that want to talk to userspace through 800 * @unlocked_ioctl: Used for drivers that want to talk to userspace through
801 * the "usbfs" filesystem. This lets devices provide ways to 801 * the "usbfs" filesystem. This lets devices provide ways to
802 * expose information to user space regardless of where they 802 * expose information to user space regardless of where they
803 * do (or don't) show up otherwise in the filesystem. 803 * do (or don't) show up otherwise in the filesystem.
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index ee2dd1d506ed..2387f9fc8138 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -89,6 +89,8 @@ struct musb_hdrc_config {
89 /* A GPIO controlling VRSEL in Blackfin */ 89 /* A GPIO controlling VRSEL in Blackfin */
90 unsigned int gpio_vrsel; 90 unsigned int gpio_vrsel;
91 unsigned int gpio_vrsel_active; 91 unsigned int gpio_vrsel_active;
92 /* musb CLKIN in Blackfin in MHz */
93 unsigned char clkin;
92#endif 94#endif
93 95
94}; 96};