commit fd74ea65883c7e6903e9b652795f72b723a2be69 (patch)
tree 0792ad598080eae201d2836ac3c5a8fc46d0d03e /drivers
parent c8f517c444e4f9f55b5b5ca202b8404691a35805 (diff)
parent 8c6db1bbf80123839ec87bdd6cb364aea384623d (diff)
author Dan Williams <dan.j.williams@intel.com>	2009-04-08 17:28:13 -0400
committer Dan Williams <dan.j.williams@intel.com>	2009-04-08 17:28:13 -0400

Merge branch 'dmaengine' into async-tx-raid6
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/node.c                  |   2
-rw-r--r--  drivers/char/agp/amd64-agp.c         |  13
-rw-r--r--  drivers/char/agp/intel-agp.c         |   8
-rw-r--r--  drivers/char/hvcs.c                  |   9
-rw-r--r--  drivers/char/hvsi.c                  |   1
-rw-r--r--  drivers/dma/Kconfig                  |  11
-rw-r--r--  drivers/dma/dmaengine.c              |  60
-rw-r--r--  drivers/dma/dmatest.c                | 307
-rw-r--r--  drivers/dma/dw_dmac.c                | 333
-rw-r--r--  drivers/dma/dw_dmac_regs.h           |   7
-rw-r--r--  drivers/dma/fsldma.c                 |   1
-rw-r--r--  drivers/dma/ioat_dma.c               |   1
-rw-r--r--  drivers/dma/iop-adma.c               |   1
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c          | 371
-rw-r--r--  drivers/dma/ipu/ipu_irq.c            |   2
-rw-r--r--  drivers/dma/mv_xor.c                 |   1
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c      |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h      |   7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c      | 115
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h      |   1
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c  |  11
-rw-r--r--  drivers/hwmon/Kconfig                |   4
-rw-r--r--  drivers/hwmon/abituguru3.c           |   7
-rw-r--r--  drivers/hwmon/f75375s.c              |   2
-rw-r--r--  drivers/hwmon/it87.c                 |   8
-rw-r--r--  drivers/hwmon/lm85.c                 |   8
-rw-r--r--  drivers/hwmon/lm90.c                 |   8
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c   |  39
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c |  2
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.h |  1
-rw-r--r--  drivers/md/md.c                      |  30
-rw-r--r--  drivers/mfd/wm8350-core.c            |   5
-rw-r--r--  drivers/mmc/host/s3cmci.c            |   3
-rw-r--r--  drivers/mtd/devices/mtd_dataflash.c  |   3
-rw-r--r--  drivers/mtd/maps/physmap.c           |  19
-rw-r--r--  drivers/net/sunhme.c                 |   2
-rw-r--r--  drivers/pci/hotplug/Kconfig          |   2
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c   |   3
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c       |   1
-rw-r--r--  drivers/pci/quirks.c                 |  31
-rw-r--r--  drivers/platform/x86/acer-wmi.c      |   2
-rw-r--r--  drivers/power/ds2760_battery.c       |  11
-rw-r--r--  drivers/sbus/char/bbc_i2c.c          |   2
-rw-r--r--  drivers/sbus/char/jsflash.c          |   3
-rw-r--r--  drivers/video/aty/aty128fb.c         |  10
-rw-r--r--  drivers/video/aty/radeon_pm.c        |  10
-rw-r--r--  drivers/video/i810/i810_main.c       |   5
-rw-r--r--  drivers/video/sh_mobile_lcdcfb.c     |   6
-rw-r--r--  drivers/w1/masters/w1-gpio.c         |   2
49 files changed, 1143 insertions, 350 deletions
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 43fa90b837ee..f8f578a71b25 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -303,7 +303,7 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk)
 		sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index);
 		sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
 		for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
-			unsigned int nid;
+			int nid;
 
 			nid = get_nid_for_pfn(pfn);
 			if (nid < 0)
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 52f4361eb6e4..d765afda9c2a 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -271,15 +271,15 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
 	nb_order = (nb_order >> 1) & 7;
 	pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
 	nb_aper = nb_base << 25;
-	if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order)) {
-		return 0;
-	}
 
 	/* Northbridge seems to contain crap. Try the AGP bridge. */
 
 	pci_read_config_word(agp, cap+0x14, &apsize);
-	if (apsize == 0xffff)
+	if (apsize == 0xffff) {
+		if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
+			return 0;
 		return -1;
+	}
 
 	apsize &= 0xfff;
 	/* Some BIOS use weird encodings not in the AGPv3 table. */
@@ -301,6 +301,11 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
 		order = nb_order;
 	}
 
+	if (nb_order >= order) {
+		if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
+			return 0;
+	}
+
 	dev_info(&agp->dev, "aperture from AGP @ %Lx size %u MB\n",
 			aper, 32 << order);
 	if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index c7714185f831..4373adb2119a 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -633,13 +633,15 @@ static void intel_i830_init_gtt_entries(void)
 			break;
 		}
 	}
-	if (gtt_entries > 0)
+	if (gtt_entries > 0) {
 		dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
 		       gtt_entries / KB(1), local ? "local" : "stolen");
-	else
+		gtt_entries /= KB(4);
+	} else {
 		dev_info(&agp_bridge->dev->dev,
 		       "no pre-allocated video memory detected\n");
-	gtt_entries /= KB(4);
+		gtt_entries = 0;
+	}
 
 	intel_private.gtt_entries = gtt_entries;
 }
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index 6e6eb445d374..c76bccf5354d 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -1139,15 +1139,6 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
 	hvcsd->tty = tty;
 	tty->driver_data = hvcsd;
 
-	/*
-	 * Set this driver to low latency so that we actually have a chance at
-	 * catching a throttled TTY after we flip_buffer_push.  Otherwise the
-	 * flush_to_async may not execute until after the kernel_thread has
-	 * yielded and resumed the next flip_buffer_push resulting in data
-	 * loss.
-	 */
-	tty->low_latency = 1;
-
 	memset(&hvcsd->buffer[0], 0x00, HVCS_BUFF_LEN);
 
 	/*
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index 406f8742a260..2989056a9e39 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -810,7 +810,6 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
 	hp = &hvsi_ports[line];
 
 	tty->driver_data = hp;
-	tty->low_latency = 1; /* avoid throttle/tty_flip_buffer_push race */
 
 	mb();
 	if (hp->state == HVSI_FSP_DIED)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 48ea59e79672..3b3c01b6f1ee 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -98,6 +98,17 @@ config NET_DMA
 	  Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
 	  say N.
 
+config ASYNC_TX_DMA
+	bool "Async_tx: Offload support for the async_tx api"
+	depends on DMA_ENGINE
+	help
+	  This allows the async_tx api to take advantage of offload engines for
+	  memcpy, memset, xor, and raid6 p+q operations. If your platform has
+	  a dma engine that can perform raid operations and you have enabled
+	  MD_RAID456 say Y.
+
+	  If unsure, say N.
+
 config DMATEST
 	tristate "DMA Test client"
 	depends on DMA_ENGINE
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 280a9d263eb3..92438e9dacc3 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -507,6 +507,7 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 		 * published in the general-purpose allocator
 		 */
 		dma_cap_set(DMA_PRIVATE, device->cap_mask);
+		device->privatecnt++;
 		err = dma_chan_get(chan);
 
 		if (err == -ENODEV) {
@@ -518,6 +519,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 					dma_chan_name(chan), err);
 			else
 				break;
+			if (--device->privatecnt == 0)
+				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 			chan->private = NULL;
 			chan = NULL;
 		}
@@ -537,6 +540,9 @@ void dma_release_channel(struct dma_chan *chan)
 	WARN_ONCE(chan->client_count != 1,
 		  "chan reference count %d != 1\n", chan->client_count);
 	dma_chan_put(chan);
+	/* drop PRIVATE cap enabled by __dma_request_channel() */
+	if (--chan->device->privatecnt == 0)
+		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
 	chan->private = NULL;
 	mutex_unlock(&dma_list_mutex);
 }
@@ -602,6 +608,24 @@ void dmaengine_put(void)
 }
 EXPORT_SYMBOL(dmaengine_put);
 
+static int get_dma_id(struct dma_device *device)
+{
+	int rc;
+
+ idr_retry:
+	if (!idr_pre_get(&dma_idr, GFP_KERNEL))
+		return -ENOMEM;
+	mutex_lock(&dma_list_mutex);
+	rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
+	mutex_unlock(&dma_list_mutex);
+	if (rc == -EAGAIN)
+		goto idr_retry;
+	else if (rc != 0)
+		return rc;
+
+	return 0;
+}
+
 /**
  * dma_async_device_register - registers DMA devices found
  * @device: &dma_device
@@ -640,27 +664,25 @@ int dma_async_device_register(struct dma_device *device)
 	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
 	if (!idr_ref)
 		return -ENOMEM;
-	atomic_set(idr_ref, 0);
- idr_retry:
-	if (!idr_pre_get(&dma_idr, GFP_KERNEL))
-		return -ENOMEM;
-	mutex_lock(&dma_list_mutex);
-	rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
-	mutex_unlock(&dma_list_mutex);
-	if (rc == -EAGAIN)
-		goto idr_retry;
-	else if (rc != 0)
+	rc = get_dma_id(device);
+	if (rc != 0) {
+		kfree(idr_ref);
 		return rc;
+	}
+
+	atomic_set(idr_ref, 0);
 
 	/* represent channels in sysfs. Probably want devs too */
 	list_for_each_entry(chan, &device->channels, device_node) {
+		rc = -ENOMEM;
 		chan->local = alloc_percpu(typeof(*chan->local));
 		if (chan->local == NULL)
-			continue;
+			goto err_out;
 		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
 		if (chan->dev == NULL) {
 			free_percpu(chan->local);
-			continue;
+			chan->local = NULL;
+			goto err_out;
 		}
 
 		chan->chan_id = chancnt++;
@@ -677,6 +699,8 @@ int dma_async_device_register(struct dma_device *device)
 		if (rc) {
 			free_percpu(chan->local);
 			chan->local = NULL;
+			kfree(chan->dev);
+			atomic_dec(idr_ref);
 			goto err_out;
 		}
 		chan->client_count = 0;
@@ -701,12 +725,23 @@ int dma_async_device_register(struct dma_device *device)
 		}
 	}
 	list_add_tail_rcu(&device->global_node, &dma_device_list);
+	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+		device->privatecnt++;	/* Always private */
 	dma_channel_rebalance();
 	mutex_unlock(&dma_list_mutex);
 
 	return 0;
 
 err_out:
+	/* if we never registered a channel just release the idr */
+	if (atomic_read(idr_ref) == 0) {
+		mutex_lock(&dma_list_mutex);
+		idr_remove(&dma_idr, device->dev_id);
+		mutex_unlock(&dma_list_mutex);
+		kfree(idr_ref);
+		return rc;
+	}
+
 	list_for_each_entry(chan, &device->channels, device_node) {
 		if (chan->local == NULL)
 			continue;
@@ -893,6 +928,7 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 {
 	tx->chan = chan;
 	spin_lock_init(&tx->lock);
+	INIT_LIST_HEAD(&tx->tx_list);
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 
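For context, the DMA_PRIVATE reference counting added above is exercised by channel consumers roughly as follows. This is a minimal sketch against this tree's dmaengine API; my_filter and my_use_private_channel are hypothetical names, not code from this merge:

/* hypothetical consumer of dma_request_channel()/dma_release_channel() */
#include <linux/dmaengine.h>

static bool my_filter(struct dma_chan *chan, void *param)
{
	/* accept any channel; a real filter would match platform data */
	return true;
}

static int my_use_private_channel(void)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* sets DMA_PRIVATE and, with the fix above, bumps privatecnt */
	chan = dma_request_channel(mask, my_filter, NULL);
	if (!chan)
		return -ENODEV;

	/* ... prepare and submit descriptors on chan ... */

	/* drops privatecnt; DMA_PRIVATE is cleared when it reaches zero */
	dma_release_channel(chan);
	return 0;
}

dma_request_channel() is the macro front end to the __dma_request_channel() path patched above.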
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index e190d8b30700..a27c0fb1bc11 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -38,6 +38,11 @@ module_param(max_channels, uint, S_IRUGO);
 MODULE_PARM_DESC(max_channels,
 		"Maximum number of channels to use (default: all)");
 
+static unsigned int xor_sources = 3;
+module_param(xor_sources, uint, S_IRUGO);
+MODULE_PARM_DESC(xor_sources,
+		"Number of xor source buffers (default: 3)");
+
 /*
  * Initialization patterns. All bytes in the source buffer has bit 7
  * set, all bytes in the destination buffer has bit 7 cleared.
@@ -59,8 +64,9 @@ struct dmatest_thread {
 	struct list_head	node;
 	struct task_struct	*task;
 	struct dma_chan		*chan;
-	u8			*srcbuf;
-	u8			*dstbuf;
+	u8			**srcs;
+	u8			**dsts;
+	enum dma_transaction_type type;
 };
 
 struct dmatest_chan {
@@ -98,30 +104,37 @@ static unsigned long dmatest_random(void)
 	return buf;
 }
 
-static void dmatest_init_srcbuf(u8 *buf, unsigned int start, unsigned int len)
+static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
 {
 	unsigned int i;
-
-	for (i = 0; i < start; i++)
-		buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
-	for ( ; i < start + len; i++)
-		buf[i] = PATTERN_SRC | PATTERN_COPY
-			| (~i & PATTERN_COUNT_MASK);;
-	for ( ; i < test_buf_size; i++)
-		buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+	u8 *buf;
+
+	for (; (buf = *bufs); bufs++) {
+		for (i = 0; i < start; i++)
+			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+		for ( ; i < start + len; i++)
+			buf[i] = PATTERN_SRC | PATTERN_COPY
+				| (~i & PATTERN_COUNT_MASK);;
+		for ( ; i < test_buf_size; i++)
+			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+		buf++;
+	}
 }
 
-static void dmatest_init_dstbuf(u8 *buf, unsigned int start, unsigned int len)
+static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len)
 {
 	unsigned int i;
-
-	for (i = 0; i < start; i++)
-		buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
-	for ( ; i < start + len; i++)
-		buf[i] = PATTERN_DST | PATTERN_OVERWRITE
-			| (~i & PATTERN_COUNT_MASK);
-	for ( ; i < test_buf_size; i++)
-		buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+	u8 *buf;
+
+	for (; (buf = *bufs); bufs++) {
+		for (i = 0; i < start; i++)
+			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+		for ( ; i < start + len; i++)
+			buf[i] = PATTERN_DST | PATTERN_OVERWRITE
+				| (~i & PATTERN_COUNT_MASK);
+		for ( ; i < test_buf_size; i++)
+			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+	}
 }
 
 static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
@@ -150,23 +163,30 @@ static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
 			thread_name, index, expected, actual);
 }
 
-static unsigned int dmatest_verify(u8 *buf, unsigned int start,
+static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
 		unsigned int end, unsigned int counter, u8 pattern,
 		bool is_srcbuf)
 {
 	unsigned int i;
 	unsigned int error_count = 0;
 	u8 actual;
-
-	for (i = start; i < end; i++) {
-		actual = buf[i];
-		if (actual != (pattern | (~counter & PATTERN_COUNT_MASK))) {
-			if (error_count < 32)
-				dmatest_mismatch(actual, pattern, i, counter,
-						is_srcbuf);
-			error_count++;
+	u8 expected;
+	u8 *buf;
+	unsigned int counter_orig = counter;
+
+	for (; (buf = *bufs); bufs++) {
+		counter = counter_orig;
+		for (i = start; i < end; i++) {
+			actual = buf[i];
+			expected = pattern | (~counter & PATTERN_COUNT_MASK);
+			if (actual != expected) {
+				if (error_count < 32)
+					dmatest_mismatch(actual, pattern, i,
+							counter, is_srcbuf);
+				error_count++;
+			}
+			counter++;
 		}
-		counter++;
 	}
 
 	if (error_count > 32)
@@ -176,12 +196,17 @@ static unsigned int dmatest_verify(u8 *buf, unsigned int start,
 	return error_count;
 }
 
+static void dmatest_callback(void *completion)
+{
+	complete(completion);
+}
+
 /*
  * This function repeatedly tests DMA transfers of various lengths and
- * offsets until it is told to exit by kthread_stop(). There may be
- * multiple threads running this function in parallel for a single
- * channel, and there may be multiple channels being tested in
- * parallel.
+ * offsets for a given operation type until it is told to exit by
+ * kthread_stop(). There may be multiple threads running this function
+ * in parallel for a single channel, and there may be multiple channels
+ * being tested in parallel.
  *
  * Before each test, the source and destination buffer is initialized
  * with a known pattern. This pattern is different depending on
@@ -201,25 +226,57 @@ static int dmatest_func(void *data)
 	unsigned int total_tests = 0;
 	dma_cookie_t cookie;
 	enum dma_status status;
+	enum dma_ctrl_flags flags;
 	int ret;
+	int src_cnt;
+	int dst_cnt;
+	int i;
 
 	thread_name = current->comm;
 
 	ret = -ENOMEM;
-	thread->srcbuf = kmalloc(test_buf_size, GFP_KERNEL);
-	if (!thread->srcbuf)
-		goto err_srcbuf;
-	thread->dstbuf = kmalloc(test_buf_size, GFP_KERNEL);
-	if (!thread->dstbuf)
-		goto err_dstbuf;
 
 	smp_rmb();
 	chan = thread->chan;
+	if (thread->type == DMA_MEMCPY)
+		src_cnt = dst_cnt = 1;
+	else if (thread->type == DMA_XOR) {
+		src_cnt = xor_sources | 1; /* force odd to ensure dst = src */
+		dst_cnt = 1;
+	} else
+		goto err_srcs;
+
+	thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
+	if (!thread->srcs)
+		goto err_srcs;
+	for (i = 0; i < src_cnt; i++) {
+		thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
+		if (!thread->srcs[i])
+			goto err_srcbuf;
+	}
+	thread->srcs[i] = NULL;
+
+	thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
+	if (!thread->dsts)
+		goto err_dsts;
+	for (i = 0; i < dst_cnt; i++) {
+		thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
+		if (!thread->dsts[i])
+			goto err_dstbuf;
+	}
+	thread->dsts[i] = NULL;
+
+	set_user_nice(current, 10);
+
+	flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT;
 
 	while (!kthread_should_stop()) {
 		struct dma_device *dev = chan->device;
-		struct dma_async_tx_descriptor *tx;
-		dma_addr_t dma_src, dma_dest;
+		struct dma_async_tx_descriptor *tx = NULL;
+		dma_addr_t dma_srcs[src_cnt];
+		dma_addr_t dma_dsts[dst_cnt];
+		struct completion cmp;
+		unsigned long tmo = msecs_to_jiffies(3000);
 
 		total_tests++;
 
@@ -227,22 +284,41 @@ static int dmatest_func(void *data)
 		src_off = dmatest_random() % (test_buf_size - len + 1);
 		dst_off = dmatest_random() % (test_buf_size - len + 1);
 
-		dmatest_init_srcbuf(thread->srcbuf, src_off, len);
-		dmatest_init_dstbuf(thread->dstbuf, dst_off, len);
+		dmatest_init_srcs(thread->srcs, src_off, len);
+		dmatest_init_dsts(thread->dsts, dst_off, len);
 
-		dma_src = dma_map_single(dev->dev, thread->srcbuf + src_off,
-					 len, DMA_TO_DEVICE);
+		for (i = 0; i < src_cnt; i++) {
+			u8 *buf = thread->srcs[i] + src_off;
+
+			dma_srcs[i] = dma_map_single(dev->dev, buf, len,
+						     DMA_TO_DEVICE);
+		}
 		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
-		dma_dest = dma_map_single(dev->dev, thread->dstbuf,
-					  test_buf_size, DMA_BIDIRECTIONAL);
+		for (i = 0; i < dst_cnt; i++) {
+			dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
+						     test_buf_size,
+						     DMA_BIDIRECTIONAL);
+		}
+
+		if (thread->type == DMA_MEMCPY)
+			tx = dev->device_prep_dma_memcpy(chan,
+							 dma_dsts[0] + dst_off,
+							 dma_srcs[0], len,
+							 flags);
+		else if (thread->type == DMA_XOR)
+			tx = dev->device_prep_dma_xor(chan,
+						      dma_dsts[0] + dst_off,
+						      dma_srcs, xor_sources,
+						      len, flags);
 
-		tx = dev->device_prep_dma_memcpy(chan, dma_dest + dst_off,
-						 dma_src, len,
-						 DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP);
 		if (!tx) {
-			dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
-			dma_unmap_single(dev->dev, dma_dest,
-					 test_buf_size, DMA_BIDIRECTIONAL);
+			for (i = 0; i < src_cnt; i++)
+				dma_unmap_single(dev->dev, dma_srcs[i], len,
+						 DMA_TO_DEVICE);
+			for (i = 0; i < dst_cnt; i++)
+				dma_unmap_single(dev->dev, dma_dsts[i],
+						 test_buf_size,
+						 DMA_BIDIRECTIONAL);
 			pr_warning("%s: #%u: prep error with src_off=0x%x "
 					"dst_off=0x%x len=0x%x\n",
 					thread_name, total_tests - 1,
@@ -251,7 +327,10 @@ static int dmatest_func(void *data)
 			failed_tests++;
 			continue;
 		}
-		tx->callback = NULL;
+
+		init_completion(&cmp);
+		tx->callback = dmatest_callback;
+		tx->callback_param = &cmp;
 		cookie = tx->tx_submit(tx);
 
 		if (dma_submit_error(cookie)) {
@@ -263,44 +342,50 @@
 			failed_tests++;
 			continue;
 		}
-		dma_async_memcpy_issue_pending(chan);
+		dma_async_issue_pending(chan);
 
-		do {
-			msleep(1);
-			status = dma_async_memcpy_complete(
-					chan, cookie, NULL, NULL);
-		} while (status == DMA_IN_PROGRESS);
+		tmo = wait_for_completion_timeout(&cmp, tmo);
+		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
-		if (status == DMA_ERROR) {
-			pr_warning("%s: #%u: error during copy\n",
+		if (tmo == 0) {
+			pr_warning("%s: #%u: test timed out\n",
 					thread_name, total_tests - 1);
+			failed_tests++;
+			continue;
+		} else if (status != DMA_SUCCESS) {
+			pr_warning("%s: #%u: got completion callback,"
+					" but status is \'%s\'\n",
+					thread_name, total_tests - 1,
+					status == DMA_ERROR ? "error" : "in progress");
 			failed_tests++;
 			continue;
 		}
+
 		/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
-		dma_unmap_single(dev->dev, dma_dest,
-				 test_buf_size, DMA_BIDIRECTIONAL);
+		for (i = 0; i < dst_cnt; i++)
+			dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size,
+					 DMA_BIDIRECTIONAL);
 
 		error_count = 0;
 
 		pr_debug("%s: verifying source buffer...\n", thread_name);
-		error_count += dmatest_verify(thread->srcbuf, 0, src_off,
+		error_count += dmatest_verify(thread->srcs, 0, src_off,
 				0, PATTERN_SRC, true);
-		error_count += dmatest_verify(thread->srcbuf, src_off,
+		error_count += dmatest_verify(thread->srcs, src_off,
 				src_off + len, src_off,
 				PATTERN_SRC | PATTERN_COPY, true);
-		error_count += dmatest_verify(thread->srcbuf, src_off + len,
+		error_count += dmatest_verify(thread->srcs, src_off + len,
 				test_buf_size, src_off + len,
 				PATTERN_SRC, true);
 
 		pr_debug("%s: verifying dest buffer...\n",
 				thread->task->comm);
-		error_count += dmatest_verify(thread->dstbuf, 0, dst_off,
+		error_count += dmatest_verify(thread->dsts, 0, dst_off,
 				0, PATTERN_DST, false);
-		error_count += dmatest_verify(thread->dstbuf, dst_off,
+		error_count += dmatest_verify(thread->dsts, dst_off,
 				dst_off + len, src_off,
 				PATTERN_SRC | PATTERN_COPY, false);
-		error_count += dmatest_verify(thread->dstbuf, dst_off + len,
+		error_count += dmatest_verify(thread->dsts, dst_off + len,
 				test_buf_size, dst_off + len,
 				PATTERN_DST, false);
 
@@ -319,10 +404,16 @@ static int dmatest_func(void *data)
 	}
 
 	ret = 0;
-	kfree(thread->dstbuf);
+	for (i = 0; thread->dsts[i]; i++)
+		kfree(thread->dsts[i]);
 err_dstbuf:
-	kfree(thread->srcbuf);
+	kfree(thread->dsts);
+err_dsts:
+	for (i = 0; thread->srcs[i]; i++)
+		kfree(thread->srcs[i]);
 err_srcbuf:
+	kfree(thread->srcs);
+err_srcs:
 	pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
 			thread_name, total_tests, failed_tests, ret);
 	return ret;
@@ -344,35 +435,36 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
 	kfree(dtc);
 }
 
-static int dmatest_add_channel(struct dma_chan *chan)
+static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type)
 {
-	struct dmatest_chan *dtc;
 	struct dmatest_thread *thread;
+	struct dma_chan *chan = dtc->chan;
+	char *op;
 	unsigned int i;
 
-	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
-	if (!dtc) {
-		pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
-		return -ENOMEM;
-	}
-
-	dtc->chan = chan;
-	INIT_LIST_HEAD(&dtc->threads);
+	if (type == DMA_MEMCPY)
+		op = "copy";
+	else if (type == DMA_XOR)
+		op = "xor";
+	else
+		return -EINVAL;
 
 	for (i = 0; i < threads_per_chan; i++) {
 		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
 		if (!thread) {
-			pr_warning("dmatest: No memory for %s-test%u\n",
-					dma_chan_name(chan), i);
+			pr_warning("dmatest: No memory for %s-%s%u\n",
+					dma_chan_name(chan), op, i);
+
 			break;
 		}
 		thread->chan = dtc->chan;
+		thread->type = type;
 		smp_wmb();
-		thread->task = kthread_run(dmatest_func, thread, "%s-test%u",
-				dma_chan_name(chan), i);
+		thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
+				dma_chan_name(chan), op, i);
 		if (IS_ERR(thread->task)) {
-			pr_warning("dmatest: Failed to run thread %s-test%u\n",
-					dma_chan_name(chan), i);
+			pr_warning("dmatest: Failed to run thread %s-%s%u\n",
+					dma_chan_name(chan), op, i);
 			kfree(thread);
 			break;
 		}
@@ -382,7 +474,36 @@ static int dmatest_add_channel(struct dma_chan *chan)
 		list_add_tail(&thread->node, &dtc->threads);
 	}
 
-	pr_info("dmatest: Started %u threads using %s\n", i, dma_chan_name(chan));
+	return i;
+}
+
+static int dmatest_add_channel(struct dma_chan *chan)
+{
+	struct dmatest_chan *dtc;
+	struct dma_device *dma_dev = chan->device;
+	unsigned int thread_count = 0;
+	unsigned int cnt;
+
+	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
+	if (!dtc) {
+		pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
+		return -ENOMEM;
+	}
+
+	dtc->chan = chan;
+	INIT_LIST_HEAD(&dtc->threads);
+
+	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
+		cnt = dmatest_add_threads(dtc, DMA_MEMCPY);
+		thread_count += cnt > 0 ?: 0;
+	}
+	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
+		cnt = dmatest_add_threads(dtc, DMA_XOR);
+		thread_count += cnt > 0 ?: 0;
+	}
+
+	pr_info("dmatest: Started %u threads using %s\n",
+		thread_count, dma_chan_name(chan));
 
 	list_add_tail(&dtc->node, &dmatest_channels);
 	nr_channels++;
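The dmatest rework above also replaces the msleep() polling loop with a completion signalled from the descriptor callback. Isolated from the test harness, that wait pattern looks roughly like this (a minimal sketch; my_wait_for_dma, my_dma_callback, and the 3-second timeout are illustrative, not part of the driver):

/* hypothetical helper showing the submit-then-wait pattern used above */
#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/jiffies.h>

static void my_dma_callback(void *arg)
{
	complete(arg);	/* invoked from the driver's completion tasklet */
}

static int my_wait_for_dma(struct dma_chan *chan,
			   struct dma_async_tx_descriptor *tx)
{
	struct completion cmp;
	dma_cookie_t cookie;

	init_completion(&cmp);
	tx->callback = my_dma_callback;
	tx->callback_param = &cmp;

	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;
	dma_async_issue_pending(chan);

	if (!wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)))
		return -ETIMEDOUT;

	return dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
		DMA_SUCCESS ? 0 : -EIO;
}

Note the descriptor must be prepared with DMA_PREP_INTERRUPT (as dmatest now does) so the callback actually fires.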
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index a97c07eef7ec..0b8aada08aa8 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -363,6 +363,82 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		dwc_descriptor_complete(dwc, bad_desc);
 }
 
+/* --------------------- Cyclic DMA API extensions -------------------- */
+
+inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
+{
+	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+	return channel_readl(dwc, SAR);
+}
+EXPORT_SYMBOL(dw_dma_get_src_addr);
+
+inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
+{
+	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+	return channel_readl(dwc, DAR);
+}
+EXPORT_SYMBOL(dw_dma_get_dst_addr);
+
+/* called with dwc->lock held and all DMAC interrupts disabled */
+static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+		u32 status_block, u32 status_err, u32 status_xfer)
+{
+	if (status_block & dwc->mask) {
+		void (*callback)(void *param);
+		void *callback_param;
+
+		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
+				channel_readl(dwc, LLP));
+		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+
+		callback = dwc->cdesc->period_callback;
+		callback_param = dwc->cdesc->period_callback_param;
+		if (callback) {
+			spin_unlock(&dwc->lock);
+			callback(callback_param);
+			spin_lock(&dwc->lock);
+		}
+	}
+
+	/*
+	 * Error and transfer complete are highly unlikely, and will most
+	 * likely be due to a configuration error by the user.
+	 */
+	if (unlikely(status_err & dwc->mask) ||
+			unlikely(status_xfer & dwc->mask)) {
+		int i;
+
+		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
+				"interrupt, stopping DMA transfer\n",
+				status_xfer ? "xfer" : "error");
+		dev_err(chan2dev(&dwc->chan),
+			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+			channel_readl(dwc, SAR),
+			channel_readl(dwc, DAR),
+			channel_readl(dwc, LLP),
+			channel_readl(dwc, CTL_HI),
+			channel_readl(dwc, CTL_LO));
+
+		channel_clear_bit(dw, CH_EN, dwc->mask);
+		while (dma_readl(dw, CH_EN) & dwc->mask)
+			cpu_relax();
+
+		/* make sure DMA does not restart by loading a new list */
+		channel_writel(dwc, LLP, 0);
+		channel_writel(dwc, CTL_LO, 0);
+		channel_writel(dwc, CTL_HI, 0);
+
+		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+		dma_writel(dw, CLEAR.ERROR, dwc->mask);
+		dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+		for (i = 0; i < dwc->cdesc->periods; i++)
+			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
+	}
+}
+
+/* ------------------------------------------------------------------------- */
+
 static void dw_dma_tasklet(unsigned long data)
 {
 	struct dw_dma *dw = (struct dw_dma *)data;
@@ -382,7 +458,10 @@ static void dw_dma_tasklet(unsigned long data)
 	for (i = 0; i < dw->dma.chancnt; i++) {
 		dwc = &dw->chan[i];
 		spin_lock(&dwc->lock);
-		if (status_err & (1 << i))
+		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
+			dwc_handle_cyclic(dw, dwc, status_block, status_err,
+					status_xfer);
+		else if (status_err & (1 << i))
 			dwc_handle_error(dw, dwc);
 		else if ((status_block | status_xfer) & (1 << i))
 			dwc_scan_descriptors(dw, dwc);
@@ -826,7 +905,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 		dma_async_tx_descriptor_init(&desc->txd, chan);
 		desc->txd.tx_submit = dwc_tx_submit;
 		desc->txd.flags = DMA_CTRL_ACK;
-		INIT_LIST_HEAD(&desc->txd.tx_list);
 		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
 				sizeof(desc->lli), DMA_TO_DEVICE);
 		dwc_desc_put(dwc, desc);
@@ -884,6 +962,257 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
 }
 
+/* --------------------- Cyclic DMA API extensions -------------------- */
+
+/**
+ * dw_dma_cyclic_start - start the cyclic DMA transfer
+ * @chan: the DMA channel to start
+ *
+ * Must be called with soft interrupts disabled. Returns zero on success or
+ * -errno on failure.
+ */
+int dw_dma_cyclic_start(struct dma_chan *chan)
+{
+	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+
+	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
+		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
+		return -ENODEV;
+	}
+
+	spin_lock(&dwc->lock);
+
+	/* assert channel is idle */
+	if (dma_readl(dw, CH_EN) & dwc->mask) {
+		dev_err(chan2dev(&dwc->chan),
+			"BUG: Attempted to start non-idle channel\n");
+		dev_err(chan2dev(&dwc->chan),
+			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+			channel_readl(dwc, SAR),
+			channel_readl(dwc, DAR),
+			channel_readl(dwc, LLP),
+			channel_readl(dwc, CTL_HI),
+			channel_readl(dwc, CTL_LO));
+		spin_unlock(&dwc->lock);
+		return -EBUSY;
+	}
+
+	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+	dma_writel(dw, CLEAR.ERROR, dwc->mask);
+	dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+	/* setup DMAC channel registers */
+	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
+	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+	channel_writel(dwc, CTL_HI, 0);
+
+	channel_set_bit(dw, CH_EN, dwc->mask);
+
+	spin_unlock(&dwc->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(dw_dma_cyclic_start);
+
+/**
+ * dw_dma_cyclic_stop - stop the cyclic DMA transfer
+ * @chan: the DMA channel to stop
+ *
+ * Must be called with soft interrupts disabled.
+ */
+void dw_dma_cyclic_stop(struct dma_chan *chan)
+{
+	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+
+	spin_lock(&dwc->lock);
+
+	channel_clear_bit(dw, CH_EN, dwc->mask);
+	while (dma_readl(dw, CH_EN) & dwc->mask)
+		cpu_relax();
+
+	spin_unlock(&dwc->lock);
+}
+EXPORT_SYMBOL(dw_dma_cyclic_stop);
+
+/**
+ * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
+ * @chan: the DMA channel to prepare
+ * @buf_addr: physical DMA address where the buffer starts
+ * @buf_len: total number of bytes for the entire buffer
+ * @period_len: number of bytes for each period
+ * @direction: transfer direction, to or from device
+ *
+ * Must be called before trying to start the transfer. Returns a valid struct
+ * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
+ */
+struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+		enum dma_data_direction direction)
+{
+	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+	struct dw_cyclic_desc *cdesc;
+	struct dw_cyclic_desc *retval = NULL;
+	struct dw_desc *desc;
+	struct dw_desc *last = NULL;
+	struct dw_dma_slave *dws = chan->private;
+	unsigned long was_cyclic;
+	unsigned int reg_width;
+	unsigned int periods;
+	unsigned int i;
+
+	spin_lock_bh(&dwc->lock);
+	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
+		spin_unlock_bh(&dwc->lock);
+		dev_dbg(chan2dev(&dwc->chan),
+				"queue and/or active list are not empty\n");
+		return ERR_PTR(-EBUSY);
+	}
+
+	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+	spin_unlock_bh(&dwc->lock);
+	if (was_cyclic) {
+		dev_dbg(chan2dev(&dwc->chan),
+				"channel already prepared for cyclic DMA\n");
+		return ERR_PTR(-EBUSY);
+	}
+
+	retval = ERR_PTR(-EINVAL);
+	reg_width = dws->reg_width;
+	periods = buf_len / period_len;
+
+	/* Check for too big/unaligned periods and unaligned DMA buffer. */
+	if (period_len > (DWC_MAX_COUNT << reg_width))
+		goto out_err;
+	if (unlikely(period_len & ((1 << reg_width) - 1)))
+		goto out_err;
+	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
+		goto out_err;
+	if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+		goto out_err;
+
+	retval = ERR_PTR(-ENOMEM);
+
+	if (periods > NR_DESCS_PER_CHANNEL)
+		goto out_err;
+
+	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
+	if (!cdesc)
+		goto out_err;
+
+	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
+	if (!cdesc->desc)
+		goto out_err_alloc;
+
+	for (i = 0; i < periods; i++) {
+		desc = dwc_desc_get(dwc);
+		if (!desc)
+			goto out_err_desc_get;
+
+		switch (direction) {
+		case DMA_TO_DEVICE:
+			desc->lli.dar = dws->tx_reg;
+			desc->lli.sar = buf_addr + (period_len * i);
+			desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+					| DWC_CTLL_DST_WIDTH(reg_width)
+					| DWC_CTLL_SRC_WIDTH(reg_width)
+					| DWC_CTLL_DST_FIX
+					| DWC_CTLL_SRC_INC
+					| DWC_CTLL_FC_M2P
+					| DWC_CTLL_INT_EN);
+			break;
+		case DMA_FROM_DEVICE:
+			desc->lli.dar = buf_addr + (period_len * i);
+			desc->lli.sar = dws->rx_reg;
+			desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+					| DWC_CTLL_SRC_WIDTH(reg_width)
+					| DWC_CTLL_DST_WIDTH(reg_width)
+					| DWC_CTLL_DST_INC
+					| DWC_CTLL_SRC_FIX
+					| DWC_CTLL_FC_P2M
+					| DWC_CTLL_INT_EN);
+			break;
+		default:
+			break;
+		}
+
+		desc->lli.ctlhi = (period_len >> reg_width);
+		cdesc->desc[i] = desc;
+
+		if (last) {
+			last->lli.llp = desc->txd.phys;
+			dma_sync_single_for_device(chan2parent(chan),
+					last->txd.phys, sizeof(last->lli),
+					DMA_TO_DEVICE);
+		}
+
+		last = desc;
+	}
+
+	/* lets make a cyclic list */
+	last->lli.llp = cdesc->desc[0]->txd.phys;
+	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
+			sizeof(last->lli), DMA_TO_DEVICE);
+
+	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
+			"period %zu periods %d\n", buf_addr, buf_len,
+			period_len, periods);
+
+	cdesc->periods = periods;
+	dwc->cdesc = cdesc;
+
+	return cdesc;
+
+out_err_desc_get:
+	while (i--)
+		dwc_desc_put(dwc, cdesc->desc[i]);
+out_err_alloc:
+	kfree(cdesc);
+out_err:
+	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+	return (struct dw_cyclic_desc *)retval;
+}
+EXPORT_SYMBOL(dw_dma_cyclic_prep);
+
+/**
+ * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
+ * @chan: the DMA channel to free
+ */
+void dw_dma_cyclic_free(struct dma_chan *chan)
+{
+	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+	struct dw_cyclic_desc *cdesc = dwc->cdesc;
+	int i;
+
+	dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
+
+	if (!cdesc)
+		return;
+
+	spin_lock_bh(&dwc->lock);
+
+	channel_clear_bit(dw, CH_EN, dwc->mask);
+	while (dma_readl(dw, CH_EN) & dwc->mask)
+		cpu_relax();
+
+	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+	dma_writel(dw, CLEAR.ERROR, dwc->mask);
+	dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+	spin_unlock_bh(&dwc->lock);
+
+	for (i = 0; i < cdesc->periods; i++)
+		dwc_desc_put(dwc, cdesc->desc[i]);
+
+	kfree(cdesc->desc);
+	kfree(cdesc);
+
+	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+}
+EXPORT_SYMBOL(dw_dma_cyclic_free);
+
 /*----------------------------------------------------------------------*/
 
 static void dw_dma_off(struct dw_dma *dw)
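The cyclic extensions exported above are meant to be driven by a peripheral driver (e.g. audio or serial) roughly as follows. This is a minimal sketch, not code from this merge: the function names are made up, the channel is assumed to be a dw_dmac channel whose chan->private points at a struct dw_dma_slave (dw_dma_cyclic_prep() dereferences it for reg_width and the FIFO addresses), and linux/dw_dmac.h is assumed to carry the new prototypes:

/* hypothetical user of the dw_dmac cyclic API */
#include <linux/dmaengine.h>
#include <linux/dw_dmac.h>	/* assumed home of the dw_dma_cyclic_* prototypes */
#include <linux/err.h>

static void my_period_done(void *arg)
{
	/* e.g. advance a ring-buffer pointer, wake a reader */
}

static int my_start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
			      size_t buf_len, size_t period_len)
{
	struct dw_cyclic_desc *cdesc;

	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
				   DMA_FROM_DEVICE);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	/* optional notification at each period boundary */
	cdesc->period_callback = my_period_done;
	cdesc->period_callback_param = NULL;

	return dw_dma_cyclic_start(chan);	/* softirqs must be disabled */
}

static void my_stop_cyclic_rx(struct dma_chan *chan)
{
	dw_dma_cyclic_stop(chan);
	dw_dma_cyclic_free(chan);
}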
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index b252b202c5cf..13a580767031 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -126,6 +126,10 @@ struct dw_dma_regs {
 
 #define DW_REGLEN		0x400
 
+enum dw_dmac_flags {
+	DW_DMA_IS_CYCLIC = 0,
+};
+
 struct dw_dma_chan {
 	struct dma_chan		chan;
 	void __iomem		*ch_regs;
@@ -134,10 +138,12 @@ struct dw_dma_chan {
 	spinlock_t		lock;
 
 	/* these other elements are all protected by lock */
+	unsigned long		flags;
 	dma_cookie_t		completed;
 	struct list_head	active_list;
 	struct list_head	queue;
 	struct list_head	free_list;
+	struct dw_cyclic_desc	*cdesc;
 
 	unsigned int		descs_allocated;
 };
@@ -158,7 +164,6 @@ static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
 	return container_of(chan, struct dw_dma_chan, chan);
 }
 
-
 struct dw_dma {
 	struct dma_device	dma;
 	void __iomem		*regs;
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 86d6da47f558..da8a8ed9e411 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -354,7 +354,6 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
 		dma_async_tx_descriptor_init(&desc_sw->async_tx,
 						&fsl_chan->common);
 		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
-		INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
 		desc_sw->async_tx.phys = pdesc;
 	}
 
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 5905cd36bcd2..e4fc33c1c32f 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -693,7 +693,6 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
 		desc_sw->async_tx.tx_submit = ioat2_tx_submit;
 		break;
 	}
-	INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
 
 	desc_sw->hw = desc;
 	desc_sw->async_tx.phys = phys;
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 16adbe61cfb2..2f052265122f 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -498,7 +498,6 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
 		slot->async_tx.tx_submit = iop_adma_tx_submit;
 		INIT_LIST_HEAD(&slot->chain_node);
 		INIT_LIST_HEAD(&slot->slot_node);
-		INIT_LIST_HEAD(&slot->async_tx.tx_list);
 		hw_desc = (char *) iop_chan->device->dma_desc_pool;
 		slot->async_tx.phys =
 			(dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index ae50a9d1a4e6..90773844cc89 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -28,6 +28,9 @@
 #define FS_VF_IN_VALID	0x00000002
 #define FS_ENC_IN_VALID	0x00000001
 
+static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
+			       bool wait_for_stop);
+
 /*
  * There can be only one, we could allocate it dynamically, but then we'd have
  * to add an extra parameter to some functions, and use something as ugly as
@@ -107,7 +110,7 @@ static uint32_t bytes_per_pixel(enum pixel_fmt fmt)
 	}
 }
 
-/* Enable / disable direct write to memory by the Camera Sensor Interface */
+/* Enable direct write to memory by the Camera Sensor Interface */
 static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel)
 {
 	uint32_t ic_conf, mask;
@@ -126,6 +129,7 @@ static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel)
 	idmac_write_icreg(ipu, ic_conf, IC_CONF);
 }
 
+/* Called under spin_lock_irqsave(&ipu_data.lock) */
 static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel)
 {
 	uint32_t ic_conf, mask;
@@ -422,7 +426,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
 		break;
 	default:
 		dev_err(ipu_data.dev,
-			"mxc ipu: unimplemented pixel format %d\n", pixel_fmt);
+			"mx3 ipu: unimplemented pixel format %d\n", pixel_fmt);
 		break;
 	}
 
@@ -433,20 +437,20 @@ static void ipu_ch_param_set_burst_size(union chan_param_mem *params,
 					uint16_t burst_pixels)
 {
 	params->pp.npb = burst_pixels - 1;
-};
+}
 
 static void ipu_ch_param_set_buffer(union chan_param_mem *params,
 				    dma_addr_t buf0, dma_addr_t buf1)
 {
 	params->pp.eba0 = buf0;
 	params->pp.eba1 = buf1;
-};
+}
 
 static void ipu_ch_param_set_rotation(union chan_param_mem *params,
 				      enum ipu_rotate_mode rotate)
 {
 	params->pp.bam = rotate;
-};
+}
 
 static void ipu_write_param_mem(uint32_t addr, uint32_t *data,
 				uint32_t num_words)
@@ -571,7 +575,7 @@ static uint32_t dma_param_addr(uint32_t dma_ch)
 {
 	/* Channel Parameter Memory */
 	return 0x10000 | (dma_ch << 4);
-};
+}
 
 static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel,
 				     bool prio)
@@ -611,7 +615,8 @@ static uint32_t ipu_channel_conf_mask(enum ipu_channel channel)
 
 /**
  * ipu_enable_channel() - enable an IPU channel.
- * @channel:	channel ID.
+ * @idmac:	IPU DMAC context.
+ * @ichan:	IDMAC channel.
  * @return:	0 on success or negative error code on failure.
  */
 static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
@@ -649,7 +654,7 @@
 
 /**
  * ipu_init_channel_buffer() - initialize a buffer for logical IPU channel.
- * @channel:	channel ID.
+ * @ichan:	IDMAC channel.
  * @pixel_fmt:	pixel format of buffer. Pixel format is a FOURCC ASCII code.
  * @width:	width of buffer in pixels.
  * @height:	height of buffer in pixels.
@@ -687,7 +692,7 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan,
 	}
 
 	/* IC channel's stride must be a multiple of 8 pixels */
-	if ((channel <= 13) && (stride % 8)) {
+	if ((channel <= IDMAC_IC_13) && (stride % 8)) {
 		dev_err(ipu->dev, "Stride must be 8 pixel multiple\n");
 		return -EINVAL;
 	}
@@ -752,7 +757,7 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
 
 /**
  * ipu_update_channel_buffer() - update physical address of a channel buffer.
- * @channel:	channel ID.
+ * @ichan:	IDMAC channel.
  * @buffer_n:	buffer number to update.
  *		0 or 1 are the only valid values.
  * @phyaddr:	buffer physical address.
@@ -760,9 +765,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
  * function will fail if the buffer is set to ready.
  */
 /* Called under spin_lock(_irqsave)(&ichan->lock) */
-static int ipu_update_channel_buffer(enum ipu_channel channel,
+static int ipu_update_channel_buffer(struct idmac_channel *ichan,
 				     int buffer_n, dma_addr_t phyaddr)
 {
+	enum ipu_channel channel = ichan->dma_chan.chan_id;
 	uint32_t reg;
 	unsigned long flags;
 
@@ -771,8 +777,8 @@
 	if (buffer_n == 0) {
 		reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
 		if (reg & (1UL << channel)) {
-			spin_unlock_irqrestore(&ipu_data.lock, flags);
-			return -EACCES;
+			ipu_ic_disable_task(&ipu_data, channel);
+			ichan->status = IPU_CHANNEL_READY;
 		}
 
 		/* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */
@@ -782,8 +788,8 @@
 	} else {
 		reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
 		if (reg & (1UL << channel)) {
-			spin_unlock_irqrestore(&ipu_data.lock, flags);
-			return -EACCES;
+			ipu_ic_disable_task(&ipu_data, channel);
+			ichan->status = IPU_CHANNEL_READY;
 		}
 
 		/* Check if double-buffering is already enabled */
@@ -805,6 +811,39 @@
 }
 
 /* Called under spin_lock_irqsave(&ichan->lock) */
+static int ipu_submit_buffer(struct idmac_channel *ichan,
+	struct idmac_tx_desc *desc, struct scatterlist *sg, int buf_idx)
+{
+	unsigned int chan_id = ichan->dma_chan.chan_id;
+	struct device *dev = &ichan->dma_chan.dev->device;
+	int ret;
+
+	if (async_tx_test_ack(&desc->txd))
+		return -EINTR;
+
+	/*
+	 * On first invocation this shouldn't be necessary, the call to
+	 * ipu_init_channel_buffer() above will set addresses for us, so we
+	 * could make it conditional on status >= IPU_CHANNEL_ENABLED, but
+	 * doing it again shouldn't hurt either.
+	 */
+	ret = ipu_update_channel_buffer(ichan, buf_idx,
+					sg_dma_address(sg));
+
+	if (ret < 0) {
+		dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n",
+			sg, chan_id, buf_idx);
+		return ret;
+	}
+
+	ipu_select_buffer(chan_id, buf_idx);
+	dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
+		sg, chan_id, buf_idx);
+
+	return 0;
+}
+
+/* Called under spin_lock_irqsave(&ichan->lock) */
 static int ipu_submit_channel_buffers(struct idmac_channel *ichan,
 				      struct idmac_tx_desc *desc)
 {
@@ -815,20 +854,10 @@ static int ipu_submit_channel_buffers(struct idmac_channel *ichan,
 		if (!ichan->sg[i]) {
 			ichan->sg[i] = sg;
 
-			/*
-			 * On first invocation this shouldn't be necessary, the
-			 * call to ipu_init_channel_buffer() above will set
-			 * addresses for us, so we could make it conditional
-			 * on status >= IPU_CHANNEL_ENABLED, but doing it again
-			 * shouldn't hurt either.
-			 */
-			ret = ipu_update_channel_buffer(ichan->dma_chan.chan_id, i,
-							sg_dma_address(sg));
+			ret = ipu_submit_buffer(ichan, desc, sg, i);
 			if (ret < 0)
 				return ret;
 
-			ipu_select_buffer(ichan->dma_chan.chan_id, i);
-
 			sg = sg_next(sg);
 		}
 	}
@@ -842,19 +871,22 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct idmac_channel *ichan = to_idmac_chan(tx->chan);
 	struct idmac *idmac = to_idmac(tx->chan->device);
 	struct ipu *ipu = to_ipu(idmac);
+	struct device *dev = &ichan->dma_chan.dev->device;
 	dma_cookie_t cookie;
 	unsigned long flags;
+	int ret;
 
 	/* Sanity check */
 	if (!list_empty(&desc->list)) {
 		/* The descriptor doesn't belong to client */
-		dev_err(&ichan->dma_chan.dev->device,
-			"Descriptor %p not prepared!\n", tx);
+		dev_err(dev, "Descriptor %p not prepared!\n", tx);
 		return -EBUSY;
 	}
 
 	mutex_lock(&ichan->chan_mutex);
 
+	async_tx_clear_ack(tx);
+
 	if (ichan->status < IPU_CHANNEL_READY) {
 		struct idmac_video_param *video = &ichan->params.video;
 		/*
@@ -878,16 +910,7 @@
 		goto out;
 	}
 
-	/* ipu->lock can be taken under ichan->lock, but not v.v. */
-	spin_lock_irqsave(&ichan->lock, flags);
-
-	/* submit_buffers() atomically verifies and fills empty sg slots */
-	cookie = ipu_submit_channel_buffers(ichan, desc);
-
-	spin_unlock_irqrestore(&ichan->lock, flags);
-
-	if (cookie < 0)
-		goto out;
+	dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]);
 
 	cookie = ichan->dma_chan.cookie;
 
@@ -897,24 +920,40 @@
 	/* from dmaengine.h: "last cookie value returned to client" */
 	ichan->dma_chan.cookie = cookie;
 	tx->cookie = cookie;
+
+	/* ipu->lock can be taken under ichan->lock, but not v.v. */
 	spin_lock_irqsave(&ichan->lock, flags);
+
 	list_add_tail(&desc->list, &ichan->queue);
+	/* submit_buffers() atomically verifies and fills empty sg slots */
+	ret = ipu_submit_channel_buffers(ichan, desc);
+
 	spin_unlock_irqrestore(&ichan->lock, flags);
 
+	if (ret < 0) {
+		cookie = ret;
+		goto dequeue;
+	}
+
 	if (ichan->status < IPU_CHANNEL_ENABLED) {
-		int ret = ipu_enable_channel(idmac, ichan);
+		ret = ipu_enable_channel(idmac, ichan);
 		if (ret < 0) {
 			cookie = ret;
-			spin_lock_irqsave(&ichan->lock, flags);
-			list_del_init(&desc->list);
-			spin_unlock_irqrestore(&ichan->lock, flags);
-			tx->cookie = cookie;
-			ichan->dma_chan.cookie = cookie;
+			goto dequeue;
 		}
 	}
 
 	dump_idmac_reg(ipu);
 
+dequeue:
+	if (cookie < 0) {
+		spin_lock_irqsave(&ichan->lock, flags);
+		list_del_init(&desc->list);
+		spin_unlock_irqrestore(&ichan->lock, flags);
953 tx->cookie = cookie;
954 ichan->dma_chan.cookie = cookie;
955 }
956
918out: 957out:
919 mutex_unlock(&ichan->chan_mutex); 958 mutex_unlock(&ichan->chan_mutex);
920 959
@@ -944,8 +983,6 @@ static int idmac_desc_alloc(struct idmac_channel *ichan, int n)
944 memset(txd, 0, sizeof(*txd)); 983 memset(txd, 0, sizeof(*txd));
945 dma_async_tx_descriptor_init(txd, &ichan->dma_chan); 984 dma_async_tx_descriptor_init(txd, &ichan->dma_chan);
946 txd->tx_submit = idmac_tx_submit; 985 txd->tx_submit = idmac_tx_submit;
947 txd->chan = &ichan->dma_chan;
948 INIT_LIST_HEAD(&txd->tx_list);
949 986
950 list_add(&desc->list, &ichan->free_list); 987 list_add(&desc->list, &ichan->free_list);
951 988
@@ -1161,6 +1198,24 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
1161 return 0; 1198 return 0;
1162} 1199}
1163 1200
1201static struct scatterlist *idmac_sg_next(struct idmac_channel *ichan,
1202 struct idmac_tx_desc **desc, struct scatterlist *sg)
1203{
1204 struct scatterlist *sgnew = sg ? sg_next(sg) : NULL;
1205
1206 if (sgnew)
1207 /* next sg-element in this list */
1208 return sgnew;
1209
1210 if ((*desc)->list.next == &ichan->queue)
1211 /* No more descriptors on the queue */
1212 return NULL;
1213
1214 /* Fetch next descriptor */
1215 *desc = list_entry((*desc)->list.next, struct idmac_tx_desc, list);
1216 return (*desc)->sg;
1217}
1218
1164/* 1219/*
1165 * We have several possibilities here: 1220 * We have several possibilities here:
1166 * current BUF next BUF 1221 * current BUF next BUF
@@ -1176,23 +1231,46 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
1176static irqreturn_t idmac_interrupt(int irq, void *dev_id) 1231static irqreturn_t idmac_interrupt(int irq, void *dev_id)
1177{ 1232{
1178 struct idmac_channel *ichan = dev_id; 1233 struct idmac_channel *ichan = dev_id;
1234 struct device *dev = &ichan->dma_chan.dev->device;
1179 unsigned int chan_id = ichan->dma_chan.chan_id; 1235 unsigned int chan_id = ichan->dma_chan.chan_id;
1180 struct scatterlist **sg, *sgnext, *sgnew = NULL; 1236 struct scatterlist **sg, *sgnext, *sgnew = NULL;
1181 /* Next transfer descriptor */ 1237 /* Next transfer descriptor */
1182 struct idmac_tx_desc *desc = NULL, *descnew; 1238 struct idmac_tx_desc *desc, *descnew;
1183 dma_async_tx_callback callback; 1239 dma_async_tx_callback callback;
1184 void *callback_param; 1240 void *callback_param;
1185 bool done = false; 1241 bool done = false;
1186 u32 ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY), 1242 u32 ready0, ready1, curbuf, err;
1187 ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY), 1243 unsigned long flags;
1188 curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
1189 1244
1190 /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */ 1245 /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */
1191 1246
1192 pr_debug("IDMAC irq %d\n", irq); 1247 dev_dbg(dev, "IDMAC irq %d, buf %d\n", irq, ichan->active_buffer);
1248
1249 spin_lock_irqsave(&ipu_data.lock, flags);
1250
1251 ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
1252 ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
1253 curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
1254 err = idmac_read_ipureg(&ipu_data, IPU_INT_STAT_4);
1255
1256 if (err & (1 << chan_id)) {
1257 idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4);
1258 spin_unlock_irqrestore(&ipu_data.lock, flags);
1259 /*
 1260 * By doing
 1261 * ichan->sg[0] = ichan->sg[1] = NULL;
 1262 * you could force a channel re-enable on the next tx_submit(), but
1263 * this is dirty - think about descriptors with multiple
1264 * sg elements.
1265 */
1266 dev_warn(dev, "NFB4EOF on channel %d, ready %x, %x, cur %x\n",
1267 chan_id, ready0, ready1, curbuf);
1268 return IRQ_HANDLED;
1269 }
1270 spin_unlock_irqrestore(&ipu_data.lock, flags);
1271
1193 /* Other interrupts do not interfere with this channel */ 1272 /* Other interrupts do not interfere with this channel */
1194 spin_lock(&ichan->lock); 1273 spin_lock(&ichan->lock);
1195
1196 if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 && 1274 if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
1197 ((curbuf >> chan_id) & 1) == ichan->active_buffer)) { 1275 ((curbuf >> chan_id) & 1) == ichan->active_buffer)) {
1198 int i = 100; 1276 int i = 100;
@@ -1207,19 +1285,23 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
1207 1285
1208 if (!i) { 1286 if (!i) {
1209 spin_unlock(&ichan->lock); 1287 spin_unlock(&ichan->lock);
1210 dev_dbg(ichan->dma_chan.device->dev, 1288 dev_dbg(dev,
1211 "IRQ on active buffer on channel %x, active " 1289 "IRQ on active buffer on channel %x, active "
1212 "%d, ready %x, %x, current %x!\n", chan_id, 1290 "%d, ready %x, %x, current %x!\n", chan_id,
1213 ichan->active_buffer, ready0, ready1, curbuf); 1291 ichan->active_buffer, ready0, ready1, curbuf);
1214 return IRQ_NONE; 1292 return IRQ_NONE;
1215 } 1293 } else
1294 dev_dbg(dev,
1295 "Buffer deactivated on channel %x, active "
1296 "%d, ready %x, %x, current %x, rest %d!\n", chan_id,
1297 ichan->active_buffer, ready0, ready1, curbuf, i);
1216 } 1298 }
1217 1299
1218 if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) || 1300 if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
1219 (!ichan->active_buffer && (ready0 >> chan_id) & 1) 1301 (!ichan->active_buffer && (ready0 >> chan_id) & 1)
1220 )) { 1302 )) {
1221 spin_unlock(&ichan->lock); 1303 spin_unlock(&ichan->lock);
1222 dev_dbg(ichan->dma_chan.device->dev, 1304 dev_dbg(dev,
1223 "IRQ with active buffer still ready on channel %x, " 1305 "IRQ with active buffer still ready on channel %x, "
1224 "active %d, ready %x, %x!\n", chan_id, 1306 "active %d, ready %x, %x!\n", chan_id,
1225 ichan->active_buffer, ready0, ready1); 1307 ichan->active_buffer, ready0, ready1);
@@ -1227,8 +1309,9 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
1227 } 1309 }
1228 1310
1229 if (unlikely(list_empty(&ichan->queue))) { 1311 if (unlikely(list_empty(&ichan->queue))) {
1312 ichan->sg[ichan->active_buffer] = NULL;
1230 spin_unlock(&ichan->lock); 1313 spin_unlock(&ichan->lock);
1231 dev_err(ichan->dma_chan.device->dev, 1314 dev_err(dev,
1232 "IRQ without queued buffers on channel %x, active %d, " 1315 "IRQ without queued buffers on channel %x, active %d, "
1233 "ready %x, %x!\n", chan_id, 1316 "ready %x, %x!\n", chan_id,
1234 ichan->active_buffer, ready0, ready1); 1317 ichan->active_buffer, ready0, ready1);
@@ -1243,40 +1326,44 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
1243 sg = &ichan->sg[ichan->active_buffer]; 1326 sg = &ichan->sg[ichan->active_buffer];
1244 sgnext = ichan->sg[!ichan->active_buffer]; 1327 sgnext = ichan->sg[!ichan->active_buffer];
1245 1328
1329 if (!*sg) {
1330 spin_unlock(&ichan->lock);
1331 return IRQ_HANDLED;
1332 }
1333
1334 desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
1335 descnew = desc;
1336
1337 dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n",
1338 irq, sg_dma_address(*sg), sgnext ? sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf);
1339
1340 /* Find the descriptor of sgnext */
1341 sgnew = idmac_sg_next(ichan, &descnew, *sg);
1342 if (sgnext != sgnew)
1343 dev_err(dev, "Submitted buffer %p, next buffer %p\n", sgnext, sgnew);
1344
1246 /* 1345 /*
1247 * if sgnext == NULL sg must be the last element in a scatterlist and 1346 * if sgnext == NULL sg must be the last element in a scatterlist and
1248 * queue must be empty 1347 * queue must be empty
1249 */ 1348 */
1250 if (unlikely(!sgnext)) { 1349 if (unlikely(!sgnext)) {
1251 if (unlikely(sg_next(*sg))) { 1350 if (!WARN_ON(sg_next(*sg)))
1252 dev_err(ichan->dma_chan.device->dev, 1351 dev_dbg(dev, "Underrun on channel %x\n", chan_id);
1253 "Broken buffer-update locking on channel %x!\n", 1352 ichan->sg[!ichan->active_buffer] = sgnew;
1254 chan_id); 1353
1255 /* We'll let the user catch up */ 1354 if (unlikely(sgnew)) {
1355 ipu_submit_buffer(ichan, descnew, sgnew, !ichan->active_buffer);
1256 } else { 1356 } else {
1257 /* Underrun */ 1357 spin_lock_irqsave(&ipu_data.lock, flags);
1258 ipu_ic_disable_task(&ipu_data, chan_id); 1358 ipu_ic_disable_task(&ipu_data, chan_id);
1259 dev_dbg(ichan->dma_chan.device->dev, 1359 spin_unlock_irqrestore(&ipu_data.lock, flags);
1260 "Underrun on channel %x\n", chan_id);
1261 ichan->status = IPU_CHANNEL_READY; 1360 ichan->status = IPU_CHANNEL_READY;
1262 /* Continue to check for complete descriptor */ 1361 /* Continue to check for complete descriptor */
1263 } 1362 }
1264 } 1363 }
1265 1364
1266 desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list); 1365 /* Calculate and submit the next sg element */
1267 1366 sgnew = idmac_sg_next(ichan, &descnew, sgnew);
1268 /* First calculate and submit the next sg element */
1269 if (likely(sgnext))
1270 sgnew = sg_next(sgnext);
1271
1272 if (unlikely(!sgnew)) {
1273 /* Start a new scatterlist, if any queued */
1274 if (likely(desc->list.next != &ichan->queue)) {
1275 descnew = list_entry(desc->list.next,
1276 struct idmac_tx_desc, list);
1277 sgnew = &descnew->sg[0];
1278 }
1279 }
1280 1367
1281 if (unlikely(!sg_next(*sg)) || !sgnext) { 1368 if (unlikely(!sg_next(*sg)) || !sgnext) {
1282 /* 1369 /*
@@ -1289,17 +1376,13 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
1289 1376
1290 *sg = sgnew; 1377 *sg = sgnew;
1291 1378
1292 if (likely(sgnew)) { 1379 if (likely(sgnew) &&
1293 int ret; 1380 ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
1294 1381 callback = desc->txd.callback;
1295 ret = ipu_update_channel_buffer(chan_id, ichan->active_buffer, 1382 callback_param = desc->txd.callback_param;
1296 sg_dma_address(*sg)); 1383 spin_unlock(&ichan->lock);
1297 if (ret < 0) 1384 callback(callback_param);
1298 dev_err(ichan->dma_chan.device->dev, 1385 spin_lock(&ichan->lock);
1299 "Failed to update buffer on channel %x buffer %d!\n",
1300 chan_id, ichan->active_buffer);
1301 else
1302 ipu_select_buffer(chan_id, ichan->active_buffer);
1303 } 1386 }
1304 1387
1305 /* Flip the active buffer - even if update above failed */ 1388 /* Flip the active buffer - even if update above failed */
@@ -1327,13 +1410,20 @@ static void ipu_gc_tasklet(unsigned long arg)
1327 struct idmac_channel *ichan = ipu->channel + i; 1410 struct idmac_channel *ichan = ipu->channel + i;
1328 struct idmac_tx_desc *desc; 1411 struct idmac_tx_desc *desc;
1329 unsigned long flags; 1412 unsigned long flags;
1330 int j; 1413 struct scatterlist *sg;
1414 int j, k;
1331 1415
1332 for (j = 0; j < ichan->n_tx_desc; j++) { 1416 for (j = 0; j < ichan->n_tx_desc; j++) {
1333 desc = ichan->desc + j; 1417 desc = ichan->desc + j;
1334 spin_lock_irqsave(&ichan->lock, flags); 1418 spin_lock_irqsave(&ichan->lock, flags);
1335 if (async_tx_test_ack(&desc->txd)) { 1419 if (async_tx_test_ack(&desc->txd)) {
1336 list_move(&desc->list, &ichan->free_list); 1420 list_move(&desc->list, &ichan->free_list);
1421 for_each_sg(desc->sg, sg, desc->sg_len, k) {
1422 if (ichan->sg[0] == sg)
1423 ichan->sg[0] = NULL;
1424 else if (ichan->sg[1] == sg)
1425 ichan->sg[1] = NULL;
1426 }
1337 async_tx_clear_ack(&desc->txd); 1427 async_tx_clear_ack(&desc->txd);
1338 } 1428 }
1339 spin_unlock_irqrestore(&ichan->lock, flags); 1429 spin_unlock_irqrestore(&ichan->lock, flags);
@@ -1341,13 +1431,7 @@ static void ipu_gc_tasklet(unsigned long arg)
1341 } 1431 }
1342} 1432}
1343 1433
1344/* 1434/* Allocate and initialise a transfer descriptor. */
1345 * At the time .device_alloc_chan_resources() method is called, we cannot know,
1346 * whether the client will accept the channel. Thus we must only check, if we
1347 * can satisfy client's request but the only real criterion to verify, whether
1348 * the client has accepted our offer is the client_count. That's why we have to
1349 * perform the rest of our allocation tasks on the first call to this function.
1350 */
1351static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan, 1435static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
1352 struct scatterlist *sgl, unsigned int sg_len, 1436 struct scatterlist *sgl, unsigned int sg_len,
1353 enum dma_data_direction direction, unsigned long tx_flags) 1437 enum dma_data_direction direction, unsigned long tx_flags)
@@ -1358,8 +1442,8 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan
1358 unsigned long flags; 1442 unsigned long flags;
1359 1443
 1360 /* We can only handle these three channels so far */ 1444
1361 if (ichan->dma_chan.chan_id != IDMAC_SDC_0 && ichan->dma_chan.chan_id != IDMAC_SDC_1 && 1445 if (chan->chan_id != IDMAC_SDC_0 && chan->chan_id != IDMAC_SDC_1 &&
1362 ichan->dma_chan.chan_id != IDMAC_IC_7) 1446 chan->chan_id != IDMAC_IC_7)
1363 return NULL; 1447 return NULL;
1364 1448
1365 if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) { 1449 if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) {
@@ -1400,7 +1484,7 @@ static void idmac_issue_pending(struct dma_chan *chan)
1400 1484
1401 /* This is not always needed, but doesn't hurt either */ 1485 /* This is not always needed, but doesn't hurt either */
1402 spin_lock_irqsave(&ipu->lock, flags); 1486 spin_lock_irqsave(&ipu->lock, flags);
1403 ipu_select_buffer(ichan->dma_chan.chan_id, ichan->active_buffer); 1487 ipu_select_buffer(chan->chan_id, ichan->active_buffer);
1404 spin_unlock_irqrestore(&ipu->lock, flags); 1488 spin_unlock_irqrestore(&ipu->lock, flags);
1405 1489
1406 /* 1490 /*
@@ -1432,8 +1516,7 @@ static void __idmac_terminate_all(struct dma_chan *chan)
1432 struct idmac_tx_desc *desc = ichan->desc + i; 1516 struct idmac_tx_desc *desc = ichan->desc + i;
1433 if (list_empty(&desc->list)) 1517 if (list_empty(&desc->list))
1434 /* Descriptor was prepared, but not submitted */ 1518 /* Descriptor was prepared, but not submitted */
1435 list_add(&desc->list, 1519 list_add(&desc->list, &ichan->free_list);
1436 &ichan->free_list);
1437 1520
1438 async_tx_clear_ack(&desc->txd); 1521 async_tx_clear_ack(&desc->txd);
1439 } 1522 }
@@ -1458,6 +1541,28 @@ static void idmac_terminate_all(struct dma_chan *chan)
1458 mutex_unlock(&ichan->chan_mutex); 1541 mutex_unlock(&ichan->chan_mutex);
1459} 1542}
1460 1543
1544#ifdef DEBUG
1545static irqreturn_t ic_sof_irq(int irq, void *dev_id)
1546{
1547 struct idmac_channel *ichan = dev_id;
1548 printk(KERN_DEBUG "Got SOF IRQ %d on Channel %d\n",
1549 irq, ichan->dma_chan.chan_id);
1550 disable_irq(irq);
1551 return IRQ_HANDLED;
1552}
1553
1554static irqreturn_t ic_eof_irq(int irq, void *dev_id)
1555{
1556 struct idmac_channel *ichan = dev_id;
1557 printk(KERN_DEBUG "Got EOF IRQ %d on Channel %d\n",
1558 irq, ichan->dma_chan.chan_id);
1559 disable_irq(irq);
1560 return IRQ_HANDLED;
1561}
1562
1563static int ic_sof = -EINVAL, ic_eof = -EINVAL;
1564#endif
1565
1461static int idmac_alloc_chan_resources(struct dma_chan *chan) 1566static int idmac_alloc_chan_resources(struct dma_chan *chan)
1462{ 1567{
1463 struct idmac_channel *ichan = to_idmac_chan(chan); 1568 struct idmac_channel *ichan = to_idmac_chan(chan);
@@ -1471,31 +1576,49 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan)
1471 chan->cookie = 1; 1576 chan->cookie = 1;
1472 ichan->completed = -ENXIO; 1577 ichan->completed = -ENXIO;
1473 1578
1474 ret = ipu_irq_map(ichan->dma_chan.chan_id); 1579 ret = ipu_irq_map(chan->chan_id);
1475 if (ret < 0) 1580 if (ret < 0)
1476 goto eimap; 1581 goto eimap;
1477 1582
1478 ichan->eof_irq = ret; 1583 ichan->eof_irq = ret;
1584
1585 /*
 1586 * Important to disable the channel first, in case someone used
 1587 * it before us, e.g., the bootloader
1588 */
1589 ipu_disable_channel(idmac, ichan, true);
1590
1591 ret = ipu_init_channel(idmac, ichan);
1592 if (ret < 0)
1593 goto eichan;
1594
1479 ret = request_irq(ichan->eof_irq, idmac_interrupt, 0, 1595 ret = request_irq(ichan->eof_irq, idmac_interrupt, 0,
1480 ichan->eof_name, ichan); 1596 ichan->eof_name, ichan);
1481 if (ret < 0) 1597 if (ret < 0)
1482 goto erirq; 1598 goto erirq;
1483 1599
1484 ret = ipu_init_channel(idmac, ichan); 1600#ifdef DEBUG
1485 if (ret < 0) 1601 if (chan->chan_id == IDMAC_IC_7) {
1486 goto eichan; 1602 ic_sof = ipu_irq_map(69);
1603 if (ic_sof > 0)
1604 request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan);
1605 ic_eof = ipu_irq_map(70);
1606 if (ic_eof > 0)
1607 request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan);
1608 }
1609#endif
1487 1610
1488 ichan->status = IPU_CHANNEL_INITIALIZED; 1611 ichan->status = IPU_CHANNEL_INITIALIZED;
1489 1612
1490 dev_dbg(&ichan->dma_chan.dev->device, "Found channel 0x%x, irq %d\n", 1613 dev_dbg(&chan->dev->device, "Found channel 0x%x, irq %d\n",
1491 ichan->dma_chan.chan_id, ichan->eof_irq); 1614 chan->chan_id, ichan->eof_irq);
1492 1615
1493 return ret; 1616 return ret;
1494 1617
1495eichan:
1496 free_irq(ichan->eof_irq, ichan);
1497erirq: 1618erirq:
1498 ipu_irq_unmap(ichan->dma_chan.chan_id); 1619 ipu_uninit_channel(idmac, ichan);
1620eichan:
1621 ipu_irq_unmap(chan->chan_id);
1499eimap: 1622eimap:
1500 return ret; 1623 return ret;
1501} 1624}
@@ -1510,8 +1633,22 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
1510 __idmac_terminate_all(chan); 1633 __idmac_terminate_all(chan);
1511 1634
1512 if (ichan->status > IPU_CHANNEL_FREE) { 1635 if (ichan->status > IPU_CHANNEL_FREE) {
1636#ifdef DEBUG
1637 if (chan->chan_id == IDMAC_IC_7) {
1638 if (ic_sof > 0) {
1639 free_irq(ic_sof, ichan);
1640 ipu_irq_unmap(69);
1641 ic_sof = -EINVAL;
1642 }
1643 if (ic_eof > 0) {
1644 free_irq(ic_eof, ichan);
1645 ipu_irq_unmap(70);
1646 ic_eof = -EINVAL;
1647 }
1648 }
1649#endif
1513 free_irq(ichan->eof_irq, ichan); 1650 free_irq(ichan->eof_irq, ichan);
1514 ipu_irq_unmap(ichan->dma_chan.chan_id); 1651 ipu_irq_unmap(chan->chan_id);
1515 } 1652 }
1516 1653
1517 ichan->status = IPU_CHANNEL_FREE; 1654 ichan->status = IPU_CHANNEL_FREE;
@@ -1573,7 +1710,7 @@ static int __init ipu_idmac_init(struct ipu *ipu)
1573 dma_chan->device = &idmac->dma; 1710 dma_chan->device = &idmac->dma;
1574 dma_chan->cookie = 1; 1711 dma_chan->cookie = 1;
1575 dma_chan->chan_id = i; 1712 dma_chan->chan_id = i;
1576 list_add_tail(&ichan->dma_chan.device_node, &dma->channels); 1713 list_add_tail(&dma_chan->device_node, &dma->channels);
1577 } 1714 }
1578 1715
1579 idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF); 1716 idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF);
@@ -1581,7 +1718,7 @@ static int __init ipu_idmac_init(struct ipu *ipu)
1581 return dma_async_device_register(&idmac->dma); 1718 return dma_async_device_register(&idmac->dma);
1582} 1719}
1583 1720
1584static void ipu_idmac_exit(struct ipu *ipu) 1721static void __exit ipu_idmac_exit(struct ipu *ipu)
1585{ 1722{
1586 int i; 1723 int i;
1587 struct idmac *idmac = &ipu->idmac; 1724 struct idmac *idmac = &ipu->idmac;
@@ -1600,7 +1737,7 @@ static void ipu_idmac_exit(struct ipu *ipu)
1600 * IPU common probe / remove 1737 * IPU common probe / remove
1601 */ 1738 */
1602 1739
1603static int ipu_probe(struct platform_device *pdev) 1740static int __init ipu_probe(struct platform_device *pdev)
1604{ 1741{
1605 struct ipu_platform_data *pdata = pdev->dev.platform_data; 1742 struct ipu_platform_data *pdata = pdev->dev.platform_data;
1606 struct resource *mem_ipu, *mem_ic; 1743 struct resource *mem_ipu, *mem_ic;
@@ -1700,7 +1837,7 @@ err_noirq:
1700 return ret; 1837 return ret;
1701} 1838}
1702 1839
1703static int ipu_remove(struct platform_device *pdev) 1840static int __exit ipu_remove(struct platform_device *pdev)
1704{ 1841{
1705 struct ipu *ipu = platform_get_drvdata(pdev); 1842 struct ipu *ipu = platform_get_drvdata(pdev);
1706 1843
@@ -1725,7 +1862,7 @@ static struct platform_driver ipu_platform_driver = {
1725 .name = "ipu-core", 1862 .name = "ipu-core",
1726 .owner = THIS_MODULE, 1863 .owner = THIS_MODULE,
1727 }, 1864 },
1728 .remove = ipu_remove, 1865 .remove = __exit_p(ipu_remove),
1729}; 1866};
1730 1867
1731static int __init ipu_init(void) 1868static int __init ipu_init(void)
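
Editor's note: the reworked IDMAC code above funnels all scatterlist advancement through the new idmac_sg_next() helper: step to the next element of the current descriptor, and when that list is exhausted fall through to the first element of the next descriptor on the queue. The following standalone sketch models that walk with simplified placeholder types (plain int elements and a hand-rolled queue, not the kernel's struct scatterlist), so it illustrates the control flow only, not the driver code:

#include <stddef.h>
#include <stdio.h>

struct desc {
	struct desc *next;	/* next descriptor on the queue, or NULL */
	int sg[4];		/* stand-in for the scatterlist */
	int sg_len;		/* number of valid elements in sg[] */
};

/* Return the element after sg within *dp, advancing *dp when the
 * current descriptor's list is exhausted. */
static int *sg_walk_next(struct desc **dp, int *sg)
{
	struct desc *d = *dp;
	ptrdiff_t idx = (sg - d->sg) + 1;

	if (idx < d->sg_len)
		return &d->sg[idx];	/* next element, same descriptor */
	if (!d->next)
		return NULL;		/* no more descriptors queued */
	*dp = d->next;			/* fetch the next descriptor */
	return (*dp)->sg;
}

int main(void)
{
	struct desc d2 = { NULL, { 30, 31 }, 2 };
	struct desc d1 = { &d2, { 10, 11, 12 }, 3 };
	struct desc *d = &d1;

	for (int *sg = d->sg; sg; sg = sg_walk_next(&d, sg))
		printf("%d\n", *sg);	/* prints 10 11 12 30 31 */
	return 0;
}

Centralizing the walk lets both the interrupt handler and the error paths ask one question ("what is the next buffer to program?") instead of duplicating the queue-crossing logic that the removed open-coded block carried.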
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index 83f532cc767f..dd8ebc75b667 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -352,7 +352,7 @@ static struct irq_chip ipu_irq_chip = {
352}; 352};
353 353
354/* Install the IRQ handler */ 354/* Install the IRQ handler */
355int ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev) 355int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev)
356{ 356{
357 struct ipu_platform_data *pdata = dev->dev.platform_data; 357 struct ipu_platform_data *pdata = dev->dev.platform_data;
358 unsigned int irq, irq_base, i; 358 unsigned int irq, irq_base, i;
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index cb7f26fb9f18..ddab94f51224 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -632,7 +632,6 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
632 slot->async_tx.tx_submit = mv_xor_tx_submit; 632 slot->async_tx.tx_submit = mv_xor_tx_submit;
633 INIT_LIST_HEAD(&slot->chain_node); 633 INIT_LIST_HEAD(&slot->chain_node);
634 INIT_LIST_HEAD(&slot->slot_node); 634 INIT_LIST_HEAD(&slot->slot_node);
635 INIT_LIST_HEAD(&slot->async_tx.tx_list);
636 hw_desc = (char *) mv_chan->device->dma_desc_pool; 635 hw_desc = (char *) mv_chan->device->dma_desc_pool;
637 slot->async_tx.phys = 636 slot->async_tx.phys =
638 (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; 637 (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 6dab63bdc4c1..6d21b9e48b89 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1105,7 +1105,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1105 1024 * 1024, 1105 1024 * 1024,
1106 MTRR_TYPE_WRCOMB, 1); 1106 MTRR_TYPE_WRCOMB, 1);
1107 if (dev_priv->mm.gtt_mtrr < 0) { 1107 if (dev_priv->mm.gtt_mtrr < 0) {
1108 DRM_INFO("MTRR allocation failed\n. Graphics " 1108 DRM_INFO("MTRR allocation failed. Graphics "
1109 "performance may suffer.\n"); 1109 "performance may suffer.\n");
1110 } 1110 }
1111 1111
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 17fa40858d26..d6cc9861e0a1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -279,7 +279,6 @@ typedef struct drm_i915_private {
279 u8 saveAR_INDEX; 279 u8 saveAR_INDEX;
280 u8 saveAR[21]; 280 u8 saveAR[21];
281 u8 saveDACMASK; 281 u8 saveDACMASK;
282 u8 saveDACDATA[256*3]; /* 256 3-byte colors */
283 u8 saveCR[37]; 282 u8 saveCR[37];
284 283
285 struct { 284 struct {
@@ -457,6 +456,12 @@ struct drm_i915_gem_object {
457 456
458 /** for phy allocated objects */ 457 /** for phy allocated objects */
459 struct drm_i915_gem_phys_object *phys_obj; 458 struct drm_i915_gem_phys_object *phys_obj;
459
460 /**
461 * Used for checking the object doesn't appear more than once
462 * in an execbuffer object list.
463 */
464 int in_execbuffer;
460}; 465};
461 466
462/** 467/**
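
Editor's note: the in_execbuffer flag added above gives the driver an O(1)-per-object way to reject an object listed twice in one execbuffer call. A standalone sketch of the same mark-and-unwind pattern (hypothetical names, plain C, not the i915 code itself):

#include <stdbool.h>
#include <stdio.h>

struct obj {
	int id;
	bool in_execbuffer;
};

static int validate_list(struct obj **list, int n)
{
	int ret = 0;

	for (int i = 0; i < n; i++) {
		if (list[i]->in_execbuffer) {
			fprintf(stderr, "object %d appears twice\n",
				list[i]->id);
			ret = -1;
			break;
		}
		list[i]->in_execbuffer = true;	/* mark on first sighting */
	}

	/* Unwind: clear every mark, mirroring the driver's err: path. */
	for (int i = 0; i < n; i++)
		list[i]->in_execbuffer = false;

	return ret;
}

int main(void)
{
	struct obj a = { 1, false }, b = { 2, false };
	struct obj *ok[] = { &a, &b };
	struct obj *dup[] = { &a, &a };

	printf("%d %d\n", validate_list(ok, 2), validate_list(dup, 2));
	/* prints "0 -1" */
	return 0;
}

Storing the mark in the object itself avoids the O(n^2) pairwise comparison a flag-free duplicate check would need.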
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 85685bfd12da..37427e4016cb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1476,7 +1476,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
1476 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1476 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1477 int regnum = obj_priv->fence_reg; 1477 int regnum = obj_priv->fence_reg;
1478 int tile_width; 1478 int tile_width;
1479 uint32_t val; 1479 uint32_t fence_reg, val;
1480 uint32_t pitch_val; 1480 uint32_t pitch_val;
1481 1481
1482 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || 1482 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
@@ -1503,7 +1503,11 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
1503 val |= pitch_val << I830_FENCE_PITCH_SHIFT; 1503 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
1504 val |= I830_FENCE_REG_VALID; 1504 val |= I830_FENCE_REG_VALID;
1505 1505
1506 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); 1506 if (regnum < 8)
1507 fence_reg = FENCE_REG_830_0 + (regnum * 4);
1508 else
1509 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
1510 I915_WRITE(fence_reg, val);
1507} 1511}
1508 1512
1509static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) 1513static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
@@ -1557,7 +1561,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
1557 struct drm_i915_private *dev_priv = dev->dev_private; 1561 struct drm_i915_private *dev_priv = dev->dev_private;
1558 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1562 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1559 struct drm_i915_fence_reg *reg = NULL; 1563 struct drm_i915_fence_reg *reg = NULL;
1560 int i, ret; 1564 struct drm_i915_gem_object *old_obj_priv = NULL;
1565 int i, ret, avail;
1561 1566
1562 switch (obj_priv->tiling_mode) { 1567 switch (obj_priv->tiling_mode) {
1563 case I915_TILING_NONE: 1568 case I915_TILING_NONE:
@@ -1580,25 +1585,46 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
1580 } 1585 }
1581 1586
1582 /* First try to find a free reg */ 1587 /* First try to find a free reg */
1588try_again:
1589 avail = 0;
1583 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { 1590 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
1584 reg = &dev_priv->fence_regs[i]; 1591 reg = &dev_priv->fence_regs[i];
1585 if (!reg->obj) 1592 if (!reg->obj)
1586 break; 1593 break;
1594
1595 old_obj_priv = reg->obj->driver_private;
1596 if (!old_obj_priv->pin_count)
1597 avail++;
1587 } 1598 }
1588 1599
1589 /* None available, try to steal one or wait for a user to finish */ 1600 /* None available, try to steal one or wait for a user to finish */
1590 if (i == dev_priv->num_fence_regs) { 1601 if (i == dev_priv->num_fence_regs) {
1591 struct drm_i915_gem_object *old_obj_priv = NULL; 1602 uint32_t seqno = dev_priv->mm.next_gem_seqno;
1592 loff_t offset; 1603 loff_t offset;
1593 1604
1594try_again: 1605 if (avail == 0)
1595 /* Could try to use LRU here instead... */ 1606 return -ENOMEM;
1607
1596 for (i = dev_priv->fence_reg_start; 1608 for (i = dev_priv->fence_reg_start;
1597 i < dev_priv->num_fence_regs; i++) { 1609 i < dev_priv->num_fence_regs; i++) {
1610 uint32_t this_seqno;
1611
1598 reg = &dev_priv->fence_regs[i]; 1612 reg = &dev_priv->fence_regs[i];
1599 old_obj_priv = reg->obj->driver_private; 1613 old_obj_priv = reg->obj->driver_private;
1600 if (!old_obj_priv->pin_count) 1614
1615 if (old_obj_priv->pin_count)
1616 continue;
1617
1618 /* i915 uses fences for GPU access to tiled buffers */
1619 if (IS_I965G(dev) || !old_obj_priv->active)
1601 break; 1620 break;
1621
1622 /* find the seqno of the first available fence */
1623 this_seqno = old_obj_priv->last_rendering_seqno;
1624 if (this_seqno != 0 &&
1625 reg->obj->write_domain == 0 &&
1626 i915_seqno_passed(seqno, this_seqno))
1627 seqno = this_seqno;
1602 } 1628 }
1603 1629
1604 /* 1630 /*
@@ -1606,15 +1632,25 @@ try_again:
1606 * objects to finish before trying again. 1632 * objects to finish before trying again.
1607 */ 1633 */
1608 if (i == dev_priv->num_fence_regs) { 1634 if (i == dev_priv->num_fence_regs) {
1609 ret = i915_gem_object_set_to_gtt_domain(reg->obj, 0); 1635 if (seqno == dev_priv->mm.next_gem_seqno) {
1610 if (ret) { 1636 i915_gem_flush(dev,
1611 WARN(ret != -ERESTARTSYS, 1637 I915_GEM_GPU_DOMAINS,
1612 "switch to GTT domain failed: %d\n", ret); 1638 I915_GEM_GPU_DOMAINS);
1613 return ret; 1639 seqno = i915_add_request(dev,
1640 I915_GEM_GPU_DOMAINS);
1641 if (seqno == 0)
1642 return -ENOMEM;
1614 } 1643 }
1644
1645 ret = i915_wait_request(dev, seqno);
1646 if (ret)
1647 return ret;
1615 goto try_again; 1648 goto try_again;
1616 } 1649 }
1617 1650
1651 BUG_ON(old_obj_priv->active ||
1652 (reg->obj->write_domain & I915_GEM_GPU_DOMAINS));
1653
1618 /* 1654 /*
1619 * Zap this virtual mapping so we can set up a fence again 1655 * Zap this virtual mapping so we can set up a fence again
1620 * for this object next time we need it. 1656 * for this object next time we need it.
@@ -1655,8 +1691,17 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
1655 1691
1656 if (IS_I965G(dev)) 1692 if (IS_I965G(dev))
1657 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); 1693 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
1658 else 1694 else {
1659 I915_WRITE(FENCE_REG_830_0 + (obj_priv->fence_reg * 4), 0); 1695 uint32_t fence_reg;
1696
1697 if (obj_priv->fence_reg < 8)
1698 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
1699 else
1700 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
1701 8) * 4;
1702
1703 I915_WRITE(fence_reg, 0);
1704 }
1660 1705
1661 dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL; 1706 dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
1662 obj_priv->fence_reg = I915_FENCE_REG_NONE; 1707 obj_priv->fence_reg = I915_FENCE_REG_NONE;
@@ -2469,6 +2514,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2469 struct drm_i915_gem_exec_object *exec_list = NULL; 2514 struct drm_i915_gem_exec_object *exec_list = NULL;
2470 struct drm_gem_object **object_list = NULL; 2515 struct drm_gem_object **object_list = NULL;
2471 struct drm_gem_object *batch_obj; 2516 struct drm_gem_object *batch_obj;
2517 struct drm_i915_gem_object *obj_priv;
2472 int ret, i, pinned = 0; 2518 int ret, i, pinned = 0;
2473 uint64_t exec_offset; 2519 uint64_t exec_offset;
2474 uint32_t seqno, flush_domains; 2520 uint32_t seqno, flush_domains;
@@ -2533,6 +2579,15 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2533 ret = -EBADF; 2579 ret = -EBADF;
2534 goto err; 2580 goto err;
2535 } 2581 }
2582
2583 obj_priv = object_list[i]->driver_private;
2584 if (obj_priv->in_execbuffer) {
2585 DRM_ERROR("Object %p appears more than once in object list\n",
2586 object_list[i]);
2587 ret = -EBADF;
2588 goto err;
2589 }
2590 obj_priv->in_execbuffer = true;
2536 } 2591 }
2537 2592
2538 /* Pin and relocate */ 2593 /* Pin and relocate */
@@ -2674,8 +2729,13 @@ err:
2674 for (i = 0; i < pinned; i++) 2729 for (i = 0; i < pinned; i++)
2675 i915_gem_object_unpin(object_list[i]); 2730 i915_gem_object_unpin(object_list[i]);
2676 2731
2677 for (i = 0; i < args->buffer_count; i++) 2732 for (i = 0; i < args->buffer_count; i++) {
2733 if (object_list[i]) {
2734 obj_priv = object_list[i]->driver_private;
2735 obj_priv->in_execbuffer = false;
2736 }
2678 drm_gem_object_unreference(object_list[i]); 2737 drm_gem_object_unreference(object_list[i]);
2738 }
2679 2739
2680 mutex_unlock(&dev->struct_mutex); 2740 mutex_unlock(&dev->struct_mutex);
2681 2741
@@ -2712,17 +2772,24 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
2712 ret = i915_gem_object_bind_to_gtt(obj, alignment); 2772 ret = i915_gem_object_bind_to_gtt(obj, alignment);
2713 if (ret != 0) { 2773 if (ret != 0) {
2714 if (ret != -EBUSY && ret != -ERESTARTSYS) 2774 if (ret != -EBUSY && ret != -ERESTARTSYS)
2715 DRM_ERROR("Failure to bind: %d", ret); 2775 DRM_ERROR("Failure to bind: %d\n", ret);
2776 return ret;
2777 }
2778 }
2779 /*
2780 * Pre-965 chips need a fence register set up in order to
2781 * properly handle tiled surfaces.
2782 */
2783 if (!IS_I965G(dev) &&
2784 obj_priv->fence_reg == I915_FENCE_REG_NONE &&
2785 obj_priv->tiling_mode != I915_TILING_NONE) {
2786 ret = i915_gem_object_get_fence_reg(obj, true);
2787 if (ret != 0) {
2788 if (ret != -EBUSY && ret != -ERESTARTSYS)
2789 DRM_ERROR("Failure to install fence: %d\n",
2790 ret);
2716 return ret; 2791 return ret;
2717 } 2792 }
2718 /*
2719 * Pre-965 chips need a fence register set up in order to
2720 * properly handle tiled surfaces.
2721 */
2722 if (!IS_I965G(dev) &&
2723 obj_priv->fence_reg == I915_FENCE_REG_NONE &&
2724 obj_priv->tiling_mode != I915_TILING_NONE)
2725 i915_gem_object_get_fence_reg(obj, true);
2726 } 2793 }
2727 obj_priv->pin_count++; 2794 obj_priv->pin_count++;
2728 2795
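
Editor's note: the fence-register changes in this file, together with the FENCE_REG_945_8 definition below, encode a split register file: the first eight pre-965 fence registers sit at 0x2000 and the additional 945-class registers at 0x3000, each 4 bytes wide. A small self-checking sketch of that address computation (illustration only; constants taken from the diff):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FENCE_REG_830_0	0x2000u
#define FENCE_REG_945_8	0x3000u

static uint32_t fence_reg_offset(int regnum)
{
	if (regnum < 8)
		return FENCE_REG_830_0 + regnum * 4;
	return FENCE_REG_945_8 + (regnum - 8) * 4;
}

int main(void)
{
	assert(fence_reg_offset(0) == 0x2000);
	assert(fence_reg_offset(7) == 0x201c);
	assert(fence_reg_offset(8) == 0x3000);	/* not 0x2020 */
	assert(fence_reg_offset(15) == 0x301c);
	puts("fence register offsets ok");
	return 0;
}

This split is why both i915_write_fence_reg() and i915_gem_clear_fence_reg() now branch on regnum < 8 rather than indexing a single contiguous bank.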
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9d6539a868b3..90600d899413 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -184,6 +184,7 @@
184 * Fence registers 184 * Fence registers
185 */ 185 */
186#define FENCE_REG_830_0 0x2000 186#define FENCE_REG_830_0 0x2000
187#define FENCE_REG_945_8 0x3000
187#define I830_FENCE_START_MASK 0x07f80000 188#define I830_FENCE_START_MASK 0x07f80000
188#define I830_FENCE_TILING_Y_SHIFT 12 189#define I830_FENCE_TILING_Y_SHIFT 12
189#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) 190#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 5d84027ee8f3..d669cc2b42c0 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -119,11 +119,6 @@ static void i915_save_vga(struct drm_device *dev)
119 119
120 /* VGA color palette registers */ 120 /* VGA color palette registers */
121 dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK); 121 dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
122 /* DACCRX automatically increments during read */
123 I915_WRITE8(VGA_DACRX, 0);
124 /* Read 3 bytes of color data from each index */
125 for (i = 0; i < 256 * 3; i++)
126 dev_priv->saveDACDATA[i] = I915_READ8(VGA_DACDATA);
127 122
128 /* MSR bits */ 123 /* MSR bits */
129 dev_priv->saveMSR = I915_READ8(VGA_MSR_READ); 124 dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
@@ -225,12 +220,6 @@ static void i915_restore_vga(struct drm_device *dev)
225 220
226 /* VGA color palette registers */ 221 /* VGA color palette registers */
227 I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK); 222 I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
228 /* DACCRX automatically increments during read */
229 I915_WRITE8(VGA_DACWX, 0);
230 /* Read 3 bytes of color data from each index */
231 for (i = 0; i < 256 * 3; i++)
232 I915_WRITE8(VGA_DACDATA, dev_priv->saveDACDATA[i]);
233
234} 223}
235 224
236int i915_save_state(struct drm_device *dev) 225int i915_save_state(struct drm_device *dev)
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index b84bf066879b..b4eea0292c1a 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -543,8 +543,8 @@ config SENSORS_LM90
543 help 543 help
544 If you say yes here you get support for National Semiconductor LM90, 544 If you say yes here you get support for National Semiconductor LM90,
545 LM86, LM89 and LM99, Analog Devices ADM1032 and ADT7461, and Maxim 545 LM86, LM89 and LM99, Analog Devices ADM1032 and ADT7461, and Maxim
546 MAX6646, MAX6647, MAX6649, MAX6657, MAX6658, MAX6659, MAX6680 and 546 MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659,
547 MAX6681 sensor chips. 547 MAX6680, MAX6681 and MAX6692 sensor chips.
548 548
549 This driver can also be built as a module. If so, the module 549 This driver can also be built as a module. If so, the module
550 will be called lm90. 550 will be called lm90.
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index e52b38806d03..ad2b3431b725 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -760,8 +760,11 @@ static int abituguru3_read_increment_offset(struct abituguru3_data *data,
760 760
761 for (i = 0; i < offset_count; i++) 761 for (i = 0; i < offset_count; i++)
762 if ((x = abituguru3_read(data, bank, offset + i, count, 762 if ((x = abituguru3_read(data, bank, offset + i, count,
763 buf + i * count)) != count) 763 buf + i * count)) != count) {
764 return i * count + (i && (x < 0)) ? 0 : x; 764 if (x < 0)
765 return x;
766 return i * count + x;
767 }
765 768
766 return i * count; 769 return i * count;
767} 770}
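
Editor's note: the abituguru3 hunk above fixes an operator-precedence bug. In C the conditional operator binds more loosely than +, so the old expression "i * count + (i && (x < 0)) ? 0 : x" parsed as "(i * count + (i && (x < 0))) ? 0 : x" and returned 0 for almost any partial read. A minimal standalone demonstration with made-up values:

#include <stdio.h>

int main(void)
{
	int i = 2, count = 32, x = 32;	/* a successful partial read */

	int buggy = i * count + (i && (x < 0)) ? 0 : x;
	int intended = i * count + ((i && (x < 0)) ? 0 : x);

	/* buggy evaluates (64 + 0) ? 0 : x, i.e. 0; intended is 96. */
	printf("buggy=%d intended=%d\n", buggy, intended);
	return 0;
}

The replacement code sidesteps the expression entirely: it returns the negative error code as-is, and i * count + x otherwise.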
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 1692de369969..18a1ba888165 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -617,7 +617,7 @@ static void f75375_init(struct i2c_client *client, struct f75375_data *data,
617static int f75375_probe(struct i2c_client *client, 617static int f75375_probe(struct i2c_client *client,
618 const struct i2c_device_id *id) 618 const struct i2c_device_id *id)
619{ 619{
620 struct f75375_data *data = i2c_get_clientdata(client); 620 struct f75375_data *data;
621 struct f75375s_platform_data *f75375s_pdata = client->dev.platform_data; 621 struct f75375s_platform_data *f75375s_pdata = client->dev.platform_data;
622 int err; 622 int err;
623 623
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 95a99c590da2..9157247fed8e 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -213,7 +213,7 @@ static inline u16 FAN16_TO_REG(long rpm)
213 213
214#define TEMP_TO_REG(val) (SENSORS_LIMIT(((val)<0?(((val)-500)/1000):\ 214#define TEMP_TO_REG(val) (SENSORS_LIMIT(((val)<0?(((val)-500)/1000):\
215 ((val)+500)/1000),-128,127)) 215 ((val)+500)/1000),-128,127))
216#define TEMP_FROM_REG(val) (((val)>0x80?(val)-0x100:(val))*1000) 216#define TEMP_FROM_REG(val) ((val) * 1000)
217 217
218#define PWM_TO_REG(val) ((val) >> 1) 218#define PWM_TO_REG(val) ((val) >> 1)
219#define PWM_FROM_REG(val) (((val)&0x7f) << 1) 219#define PWM_FROM_REG(val) (((val)&0x7f) << 1)
@@ -267,9 +267,9 @@ struct it87_data {
267 u8 has_fan; /* Bitfield, fans enabled */ 267 u8 has_fan; /* Bitfield, fans enabled */
268 u16 fan[5]; /* Register values, possibly combined */ 268 u16 fan[5]; /* Register values, possibly combined */
269 u16 fan_min[5]; /* Register values, possibly combined */ 269 u16 fan_min[5]; /* Register values, possibly combined */
270 u8 temp[3]; /* Register value */ 270 s8 temp[3]; /* Register value */
271 u8 temp_high[3]; /* Register value */ 271 s8 temp_high[3]; /* Register value */
272 u8 temp_low[3]; /* Register value */ 272 s8 temp_low[3]; /* Register value */
273 u8 sensor; /* Register value */ 273 u8 sensor; /* Register value */
274 u8 fan_div[3]; /* Register encoding, shifted right */ 274 u8 fan_div[3]; /* Register encoding, shifted right */
275 u8 vid; /* Register encoding, combined */ 275 u8 vid; /* Register encoding, combined */
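
Editor's note: the two it87 fixes above are related. The temperature registers are now stored as s8 instead of u8, and TEMP_FROM_REG() drops its hand-rolled two's-complement conversion, because ordinary integer promotion of a signed byte already performs the sign extension. The old macro also mishandled the boundary value 0x80 ("> 0x80" where ">= 0x80" was needed). A standalone demonstration:

#include <stdio.h>

#define OLD_TEMP_FROM_REG(val) (((val) > 0x80 ? (val) - 0x100 : (val)) * 1000)
#define NEW_TEMP_FROM_REG(val) ((val) * 1000)

int main(void)
{
	unsigned char raw_u = 0xFE;		/* old storage: u8 */
	signed char raw_s = (signed char)0xFE;	/* new storage: s8, i.e. -2 */

	printf("old: %d\n", OLD_TEMP_FROM_REG(raw_u));	/* -2000 */
	printf("new: %d\n", NEW_TEMP_FROM_REG(raw_s));	/* -2000 */

	/* The boundary register value 0x80 (-128 degrees) differs: */
	printf("old 0x80: %d\n", OLD_TEMP_FROM_REG(0x80));		/* 128000 */
	printf("new 0x80: %d\n", NEW_TEMP_FROM_REG((signed char)0x80));	/* -128000 */
	return 0;
}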
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index cfc1ee90f5a3..b251d8674b41 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -72,6 +72,7 @@ I2C_CLIENT_INSMOD_7(lm85b, lm85c, adm1027, adt7463, adt7468, emc6d100,
72#define LM85_COMPANY_SMSC 0x5c 72#define LM85_COMPANY_SMSC 0x5c
73#define LM85_VERSTEP_VMASK 0xf0 73#define LM85_VERSTEP_VMASK 0xf0
74#define LM85_VERSTEP_GENERIC 0x60 74#define LM85_VERSTEP_GENERIC 0x60
75#define LM85_VERSTEP_GENERIC2 0x70
75#define LM85_VERSTEP_LM85C 0x60 76#define LM85_VERSTEP_LM85C 0x60
76#define LM85_VERSTEP_LM85B 0x62 77#define LM85_VERSTEP_LM85B 0x62
77#define LM85_VERSTEP_ADM1027 0x60 78#define LM85_VERSTEP_ADM1027 0x60
@@ -334,6 +335,7 @@ static struct lm85_data *lm85_update_device(struct device *dev);
334static const struct i2c_device_id lm85_id[] = { 335static const struct i2c_device_id lm85_id[] = {
335 { "adm1027", adm1027 }, 336 { "adm1027", adm1027 },
336 { "adt7463", adt7463 }, 337 { "adt7463", adt7463 },
338 { "adt7468", adt7468 },
337 { "lm85", any_chip }, 339 { "lm85", any_chip },
338 { "lm85b", lm85b }, 340 { "lm85b", lm85b },
339 { "lm85c", lm85c }, 341 { "lm85c", lm85c },
@@ -408,7 +410,8 @@ static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
408 struct lm85_data *data = lm85_update_device(dev); 410 struct lm85_data *data = lm85_update_device(dev);
409 int vid; 411 int vid;
410 412
411 if (data->type == adt7463 && (data->vid & 0x80)) { 413 if ((data->type == adt7463 || data->type == adt7468) &&
414 (data->vid & 0x80)) {
412 /* 6-pin VID (VRM 10) */ 415 /* 6-pin VID (VRM 10) */
413 vid = vid_from_reg(data->vid & 0x3f, data->vrm); 416 vid = vid_from_reg(data->vid & 0x3f, data->vrm);
414 } else { 417 } else {
@@ -1153,7 +1156,8 @@ static int lm85_detect(struct i2c_client *client, int kind,
1153 address, company, verstep); 1156 address, company, verstep);
1154 1157
1155 /* All supported chips have the version in common */ 1158 /* All supported chips have the version in common */
1156 if ((verstep & LM85_VERSTEP_VMASK) != LM85_VERSTEP_GENERIC) { 1159 if ((verstep & LM85_VERSTEP_VMASK) != LM85_VERSTEP_GENERIC &&
1160 (verstep & LM85_VERSTEP_VMASK) != LM85_VERSTEP_GENERIC2) {
1157 dev_dbg(&adapter->dev, "Autodetection failed: " 1161 dev_dbg(&adapter->dev, "Autodetection failed: "
1158 "unsupported version\n"); 1162 "unsupported version\n");
1159 return -ENODEV; 1163 return -ENODEV;
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 96a701866726..1aff7575799d 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -32,10 +32,10 @@
32 * supported by this driver. These chips lack the remote temperature 32 * supported by this driver. These chips lack the remote temperature
33 * offset feature. 33 * offset feature.
34 * 34 *
35 * This driver also supports the MAX6646, MAX6647 and MAX6649 chips 35 * This driver also supports the MAX6646, MAX6647, MAX6648, MAX6649 and
36 * made by Maxim. These are again similar to the LM86, but they use 36 * MAX6692 chips made by Maxim. These are again similar to the LM86,
37 * unsigned temperature values and can report temperatures from 0 to 37 * but they use unsigned temperature values and can report temperatures
38 * 145 degrees. 38 * from 0 to 145 degrees.
39 * 39 *
40 * This driver also supports the MAX6680 and MAX6681, two other sensor 40 * This driver also supports the MAX6680 and MAX6681, two other sensor
41 * chips made by Maxim. These are quite similar to the other Maxim 41 * chips made by Maxim. These are quite similar to the other Maxim
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index a01b4488208b..4a65b96db2c8 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -2490,12 +2490,14 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
2490 int ret = 0; 2490 int ret = 0;
2491 struct nes_vnic *nesvnic; 2491 struct nes_vnic *nesvnic;
2492 struct nes_device *nesdev; 2492 struct nes_device *nesdev;
2493 struct nes_ib_device *nesibdev;
2493 2494
2494 nesvnic = to_nesvnic(nesqp->ibqp.device); 2495 nesvnic = to_nesvnic(nesqp->ibqp.device);
2495 if (!nesvnic) 2496 if (!nesvnic)
2496 return -EINVAL; 2497 return -EINVAL;
2497 2498
2498 nesdev = nesvnic->nesdev; 2499 nesdev = nesvnic->nesdev;
2500 nesibdev = nesvnic->nesibdev;
2499 2501
2500 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n", 2502 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
2501 atomic_read(&nesvnic->netdev->refcnt)); 2503 atomic_read(&nesvnic->netdev->refcnt));
@@ -2507,6 +2509,8 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
2507 } else { 2509 } else {
2508 /* Need to free the Last Streaming Mode Message */ 2510 /* Need to free the Last Streaming Mode Message */
2509 if (nesqp->ietf_frame) { 2511 if (nesqp->ietf_frame) {
2512 if (nesqp->lsmm_mr)
2513 nesibdev->ibdev.dereg_mr(nesqp->lsmm_mr);
2510 pci_free_consistent(nesdev->pcidev, 2514 pci_free_consistent(nesdev->pcidev,
2511 nesqp->private_data_len+sizeof(struct ietf_mpa_frame), 2515 nesqp->private_data_len+sizeof(struct ietf_mpa_frame),
2512 nesqp->ietf_frame, nesqp->ietf_frame_pbase); 2516 nesqp->ietf_frame, nesqp->ietf_frame_pbase);
@@ -2543,6 +2547,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2543 u32 crc_value; 2547 u32 crc_value;
2544 int ret; 2548 int ret;
2545 int passive_state; 2549 int passive_state;
2550 struct nes_ib_device *nesibdev;
2551 struct ib_mr *ibmr = NULL;
2552 struct ib_phys_buf ibphysbuf;
2553 struct nes_pd *nespd;
2554
2555
2546 2556
2547 ibqp = nes_get_qp(cm_id->device, conn_param->qpn); 2557 ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
2548 if (!ibqp) 2558 if (!ibqp)
@@ -2601,6 +2611,26 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2601 if (cm_id->remote_addr.sin_addr.s_addr != 2611 if (cm_id->remote_addr.sin_addr.s_addr !=
2602 cm_id->local_addr.sin_addr.s_addr) { 2612 cm_id->local_addr.sin_addr.s_addr) {
2603 u64temp = (unsigned long)nesqp; 2613 u64temp = (unsigned long)nesqp;
2614 nesibdev = nesvnic->nesibdev;
2615 nespd = nesqp->nespd;
2616 ibphysbuf.addr = nesqp->ietf_frame_pbase;
2617 ibphysbuf.size = conn_param->private_data_len +
2618 sizeof(struct ietf_mpa_frame);
2619 ibmr = nesibdev->ibdev.reg_phys_mr((struct ib_pd *)nespd,
2620 &ibphysbuf, 1,
2621 IB_ACCESS_LOCAL_WRITE,
2622 (u64 *)&nesqp->ietf_frame);
2623 if (!ibmr) {
 2624 nes_debug(NES_DBG_CM, "Unable to register memory region "
 2625 "for LSMM for cm_node = %p\n",
2626 cm_node);
2627 return -ENOMEM;
2628 }
2629
2630 ibmr->pd = &nespd->ibpd;
2631 ibmr->device = nespd->ibpd.device;
2632 nesqp->lsmm_mr = ibmr;
2633
2604 u64temp |= NES_SW_CONTEXT_ALIGN>>1; 2634 u64temp |= NES_SW_CONTEXT_ALIGN>>1;
2605 set_wqe_64bit_value(wqe->wqe_words, 2635 set_wqe_64bit_value(wqe->wqe_words,
2606 NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, 2636 NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX,
@@ -2611,14 +2641,13 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2611 wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 2641 wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] =
2612 cpu_to_le32(conn_param->private_data_len + 2642 cpu_to_le32(conn_param->private_data_len +
2613 sizeof(struct ietf_mpa_frame)); 2643 sizeof(struct ietf_mpa_frame));
2614 wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = 2644 set_wqe_64bit_value(wqe->wqe_words,
2615 cpu_to_le32((u32)nesqp->ietf_frame_pbase); 2645 NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
2616 wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = 2646 (u64)nesqp->ietf_frame);
2617 cpu_to_le32((u32)((u64)nesqp->ietf_frame_pbase >> 32));
2618 wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 2647 wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] =
2619 cpu_to_le32(conn_param->private_data_len + 2648 cpu_to_le32(conn_param->private_data_len +
2620 sizeof(struct ietf_mpa_frame)); 2649 sizeof(struct ietf_mpa_frame));
2621 wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0; 2650 wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = ibmr->lkey;
2622 2651
2623 nesqp->nesqp_context->ird_ord_sizes |= 2652 nesqp->nesqp_context->ird_ord_sizes |=
2624 cpu_to_le32(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | 2653 cpu_to_le32(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 4fdb72454f94..d93a6562817c 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1360,8 +1360,10 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
1360 NES_QPCONTEXT_MISC_RQ_SIZE_SHIFT); 1360 NES_QPCONTEXT_MISC_RQ_SIZE_SHIFT);
1361 nesqp->nesqp_context->misc |= cpu_to_le32((u32)nesqp->hwqp.sq_encoded_size << 1361 nesqp->nesqp_context->misc |= cpu_to_le32((u32)nesqp->hwqp.sq_encoded_size <<
1362 NES_QPCONTEXT_MISC_SQ_SIZE_SHIFT); 1362 NES_QPCONTEXT_MISC_SQ_SIZE_SHIFT);
1363 if (!udata) {
1363 nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_PRIV_EN); 1364 nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_PRIV_EN);
1364 nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_FAST_REGISTER_EN); 1365 nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_FAST_REGISTER_EN);
1366 }
1365 nesqp->nesqp_context->cqs = cpu_to_le32(nesqp->nesscq->hw_cq.cq_number + 1367 nesqp->nesqp_context->cqs = cpu_to_le32(nesqp->nesscq->hw_cq.cq_number +
1366 ((u32)nesqp->nesrcq->hw_cq.cq_number << 16)); 1368 ((u32)nesqp->nesrcq->hw_cq.cq_number << 16));
1367 u64temp = (u64)nesqp->hwqp.sq_pbase; 1369 u64temp = (u64)nesqp->hwqp.sq_pbase;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
index 6c6b4da5184f..ae0ca9bc83bd 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.h
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -134,6 +134,7 @@ struct nes_qp {
134 struct ietf_mpa_frame *ietf_frame; 134 struct ietf_mpa_frame *ietf_frame;
135 dma_addr_t ietf_frame_pbase; 135 dma_addr_t ietf_frame_pbase;
136 wait_queue_head_t state_waitq; 136 wait_queue_head_t state_waitq;
137 struct ib_mr *lsmm_mr;
137 unsigned long socket; 138 unsigned long socket;
138 struct nes_hw_qp hwqp; 139 struct nes_hw_qp hwqp;
139 struct work_struct work; 140 struct work_struct work;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2be574c0a27a..ed5727c089a9 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -269,12 +269,7 @@ static inline mddev_t *mddev_get(mddev_t *mddev)
269 return mddev; 269 return mddev;
270} 270}
271 271
272static void mddev_delayed_delete(struct work_struct *ws) 272static void mddev_delayed_delete(struct work_struct *ws);
273{
274 mddev_t *mddev = container_of(ws, mddev_t, del_work);
275 kobject_del(&mddev->kobj);
276 kobject_put(&mddev->kobj);
277}
278 273
279static void mddev_put(mddev_t *mddev) 274static void mddev_put(mddev_t *mddev)
280{ 275{
@@ -3811,6 +3806,21 @@ static struct kobj_type md_ktype = {
3811 3806
3812int mdp_major = 0; 3807int mdp_major = 0;
3813 3808
3809static void mddev_delayed_delete(struct work_struct *ws)
3810{
3811 mddev_t *mddev = container_of(ws, mddev_t, del_work);
3812
3813 if (mddev->private == &md_redundancy_group) {
3814 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
3815 if (mddev->sysfs_action)
3816 sysfs_put(mddev->sysfs_action);
3817 mddev->sysfs_action = NULL;
3818 mddev->private = NULL;
3819 }
3820 kobject_del(&mddev->kobj);
3821 kobject_put(&mddev->kobj);
3822}
3823
3814static int md_alloc(dev_t dev, char *name) 3824static int md_alloc(dev_t dev, char *name)
3815{ 3825{
3816 static DEFINE_MUTEX(disks_mutex); 3826 static DEFINE_MUTEX(disks_mutex);
@@ -4313,13 +4323,9 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4313 mddev->queue->merge_bvec_fn = NULL; 4323 mddev->queue->merge_bvec_fn = NULL;
4314 mddev->queue->unplug_fn = NULL; 4324 mddev->queue->unplug_fn = NULL;
4315 mddev->queue->backing_dev_info.congested_fn = NULL; 4325 mddev->queue->backing_dev_info.congested_fn = NULL;
4316 if (mddev->pers->sync_request) {
4317 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
4318 if (mddev->sysfs_action)
4319 sysfs_put(mddev->sysfs_action);
4320 mddev->sysfs_action = NULL;
4321 }
4322 module_put(mddev->pers->owner); 4326 module_put(mddev->pers->owner);
4327 if (mddev->pers->sync_request)
4328 mddev->private = &md_redundancy_group;
4323 mddev->pers = NULL; 4329 mddev->pers = NULL;
4324 /* tell userspace to handle 'inactive' */ 4330 /* tell userspace to handle 'inactive' */
4325 sysfs_notify_dirent(mddev->sysfs_state); 4331 sysfs_notify_dirent(mddev->sysfs_state);
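
Editor's note: the md change above moves the sysfs group removal out of do_md_stop() and into the deferred mddev_delayed_delete() worker, using mddev->private pointed at md_redundancy_group as the "cleanup pending" flag, so the removal runs outside the caller's locking context. A hedged, simplified kernel-style sketch of that deferral pattern (placeholder struct, not the md code itself):

#include <linux/workqueue.h>
#include <linux/kobject.h>

struct mydev {
	struct kobject kobj;
	struct work_struct del_work;
};

static void mydev_delayed_delete(struct work_struct *ws)
{
	struct mydev *dev = container_of(ws, struct mydev, del_work);

	/* Safe here: we run from the workqueue, outside the locks and
	 * sysfs context the original caller held. */
	kobject_del(&dev->kobj);
	kobject_put(&dev->kobj);
}

static void mydev_schedule_delete(struct mydev *dev)
{
	INIT_WORK(&dev->del_work, mydev_delayed_delete);
	schedule_work(&dev->del_work);	/* teardown happens later */
}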
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index 84d5ea1ec171..b457a05b28d9 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -1383,6 +1383,11 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq,
1383 wm8350->power.rev_g_coeff = 1; 1383 wm8350->power.rev_g_coeff = 1;
1384 break; 1384 break;
1385 1385
1386 case 1:
1387 dev_info(wm8350->dev, "WM8351 Rev B\n");
1388 wm8350->power.rev_g_coeff = 1;
1389 break;
1390
1386 default: 1391 default:
1387 dev_err(wm8350->dev, "Unknown WM8351 CHIP_REV\n"); 1392 dev_err(wm8350->dev, "Unknown WM8351 CHIP_REV\n");
1388 ret = -ENODEV; 1393 ret = -ENODEV;
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index f4a67c65d301..2db166b7096f 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -793,8 +793,7 @@ static void s3cmci_dma_setup(struct s3cmci_host *host,
793 host->mem->start + host->sdidata); 793 host->mem->start + host->sdidata);
794 794
795 if (!setup_ok) { 795 if (!setup_ok) {
796 s3c2410_dma_config(host->dma, 4, 796 s3c2410_dma_config(host->dma, 4, 0);
797 (S3C2410_DCON_HWTRIG | S3C2410_DCON_CH0_SDI));
798 s3c2410_dma_set_buffdone_fn(host->dma, 797 s3c2410_dma_set_buffdone_fn(host->dma,
799 s3cmci_dma_done_callback); 798 s3cmci_dma_done_callback);
800 s3c2410_dma_setflags(host->dma, S3C2410_DMAF_AUTOSTART); 799 s3c2410_dma_setflags(host->dma, S3C2410_DMAF_AUTOSTART);
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index d44f741ae229..6d9f810565c8 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -821,7 +821,8 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
821 if (!(info->flags & IS_POW2PS)) 821 if (!(info->flags & IS_POW2PS))
822 return info; 822 return info;
823 } 823 }
824 } 824 } else
825 return info;
825 } 826 }
826 } 827 }
827 828
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 4b122e7ab4b3..229718222db7 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -46,16 +46,19 @@ static int physmap_flash_remove(struct platform_device *dev)

 	physmap_data = dev->dev.platform_data;

+	if (info->cmtd) {
 #ifdef CONFIG_MTD_PARTITIONS
-	if (info->nr_parts) {
-		del_mtd_partitions(info->cmtd);
-		kfree(info->parts);
-	} else if (physmap_data->nr_parts)
-		del_mtd_partitions(info->cmtd);
-	else
-		del_mtd_device(info->cmtd);
+		if (info->nr_parts || physmap_data->nr_parts)
+			del_mtd_partitions(info->cmtd);
+		else
+			del_mtd_device(info->cmtd);
 #else
-	del_mtd_device(info->cmtd);
+		del_mtd_device(info->cmtd);
+#endif
+	}
+#ifdef CONFIG_MTD_PARTITIONS
+	if (info->nr_parts)
+		kfree(info->parts);
 #endif

 #ifdef CONFIG_MTD_CONCAT
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index d4fb4acdbebd..4e9bd380a5c2 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -2649,8 +2649,6 @@ static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
 	int err = -ENODEV;

 	sbus_dp = to_of_device(op->dev.parent)->node;
-	if (is_qfe)
-		sbus_dp = to_of_device(op->dev.parent->parent)->node;

 	/* We can match PCI devices too, do not accept those here. */
 	if (strcmp(sbus_dp->name, "sbus"))
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index eacfb13998bb..9aa4fe100a0d 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -143,7 +143,7 @@ config HOTPLUG_PCI_SHPC

 config HOTPLUG_PCI_RPA
 	tristate "RPA PCI Hotplug driver"
-	depends on PPC_PSERIES && PPC64 && !HOTPLUG_PCI_FAKE
+	depends on PPC_PSERIES && EEH && !HOTPLUG_PCI_FAKE
 	help
 	  Say Y here if you have a RPA system that supports PCI Hotplug.

diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index d0c973685868..382575007382 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -133,6 +133,9 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev,
 					  bool enable)
 {
 	set_device_error_reporting(dev, &enable);
+
+	if (!dev->subordinate)
+		return;
 	pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
 }

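
The early return added to set_downstream_devices_error_reporting() matters because dev->subordinate is only non-NULL for bridges; passing NULL to pci_walk_bus() for an endpoint would dereference a NULL bus. A reduced sketch of the guard pattern with generic stand-in types (not the PCI structures):

#include <stdio.h>

/* Sketch: a recursive subtree walk guarded against leaf nodes,
 * mirroring the !dev->subordinate check above. */
struct node {
	const char *name;
	struct node *subordinate;	/* NULL for a leaf/endpoint */
};

static void apply(struct node *n)
{
	printf("configuring %s\n", n->name);
}

static void apply_downstream(struct node *n)
{
	apply(n);

	if (!n->subordinate)	/* endpoint: nothing below, don't walk NULL */
		return;
	apply_downstream(n->subordinate);
}

int main(void)
{
	struct node leaf = { "endpoint", NULL };
	struct node bridge = { "bridge", &leaf };

	apply_downstream(&bridge);	/* safe for both shapes */
	return 0;
}
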
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 248b4db91552..5ea566e20b37 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -103,6 +103,7 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev,
 static void pcie_portdrv_remove (struct pci_dev *dev)
 {
 	pcie_port_device_remove(dev);
+	pci_disable_device(dev);
 	kfree(pci_get_drvdata(dev));
 }

diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index f20d55368edb..92b9efe9bcaf 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -23,6 +23,7 @@
 #include <linux/acpi.h>
 #include <linux/kallsyms.h>
 #include <linux/dmi.h>
+#include <linux/pci-aspm.h>
 #include "pci.h"

 int isa_dma_bridge_buggy;
@@ -1749,6 +1750,30 @@ static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_e100_interrupt);

+/*
+ * The 82575 and 82598 may experience data corruption issues when transitioning
+ * out of L0S.  To prevent this we need to disable L0S on the pci-e link
+ */
+static void __devinit quirk_disable_aspm_l0s(struct pci_dev *dev)
+{
+	dev_info(&dev->dev, "Disabling L0s\n");
+	pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
+
 static void __devinit fixup_rev1_53c810(struct pci_dev* dev)
 {
 	/* rev 1 ncr53c810 chips don't set the class at all which means
@@ -2097,7 +2122,7 @@ static void __devinit ht_disable_msi_mapping(struct pci_dev *dev)

 	if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
 				 &flags) == 0) {
-		dev_info(&dev->dev, "Enabling HT MSI Mapping\n");
+		dev_info(&dev->dev, "Disabling HT MSI Mapping\n");

 		pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
 				      flags & ~HT_MSI_FLAGS_ENABLE);
@@ -2141,6 +2166,10 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
 	int pos;
 	int found;

+	/* Enabling HT MSI mapping on this device breaks MCP51 */
+	if (dev->device == 0x270)
+		return;
+
 	/* check if there is HT MSI cap or enabled on this device */
 	found = ht_check_msi_mapping(dev);

diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 94c9f911824e..6bcca616a704 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -1297,7 +1297,7 @@ static int __init acer_wmi_init(void)

 	set_quirks();

-	if (!acpi_video_backlight_support() && has_cap(ACER_CAP_BRIGHTNESS)) {
+	if (acpi_video_backlight_support() && has_cap(ACER_CAP_BRIGHTNESS)) {
 		interface->capability &= ~ACER_CAP_BRIGHTNESS;
 		printk(ACER_INFO "Brightness must be controlled by "
 			"generic video driver\n");
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index 1d768928e0bb..a52d4a11652d 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -180,10 +180,13 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di)
 	di->empty_uAh = battery_interpolate(scale, di->temp_C / 10);
 	di->empty_uAh *= 1000; /* convert to µAh */

-	/* From Maxim Application Note 131: remaining capacity =
-	 * ((ICA - Empty Value) / (Full Value - Empty Value)) x 100% */
-	di->rem_capacity = ((di->accum_current_uAh - di->empty_uAh) * 100L) /
-			   (di->full_active_uAh - di->empty_uAh);
+	if (di->full_active_uAh == di->empty_uAh)
+		di->rem_capacity = 0;
+	else
+		/* From Maxim Application Note 131: remaining capacity =
+		 * ((ICA - Empty Value) / (Full Value - Empty Value)) x 100% */
+		di->rem_capacity = ((di->accum_current_uAh - di->empty_uAh) * 100L) /
+				   (di->full_active_uAh - di->empty_uAh);

 	if (di->rem_capacity < 0)
 		di->rem_capacity = 0;
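
The ds2760 hunk keeps the Maxim Application Note 131 formula but guards its denominator against the degenerate full == empty case. A standalone worked example of the arithmetic, with invented register values:

#include <stdio.h>

/* Remaining capacity per Maxim App Note 131:
 *   rem% = (ICA - empty) / (full - empty) * 100
 * Guard the division exactly as the patch does. */
int main(void)
{
	long accum_current_uAh = 450000;	/* ICA reading (invented) */
	long empty_uAh         = 100000;
	long full_active_uAh   = 900000;
	long rem_capacity;

	if (full_active_uAh == empty_uAh)
		rem_capacity = 0;	/* avoid a divide-by-zero */
	else
		rem_capacity = ((accum_current_uAh - empty_uAh) * 100L) /
			       (full_active_uAh - empty_uAh);

	if (rem_capacity < 0)
		rem_capacity = 0;

	printf("remaining capacity: %ld%%\n", rem_capacity);	/* 43% */
	return 0;
}
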
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index f08e169ba1b5..7e30e5f6e032 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -129,7 +129,7 @@ static int wait_for_pin(struct bbc_i2c_bus *bp, u8 *status)
 	bp->waiting = 1;
 	add_wait_queue(&bp->wq, &wait);
 	while (limit-- > 0) {
-		unsigned long val;
+		long val;

 		val = wait_event_interruptible_timeout(
 			bp->wq,
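
The bbc_i2c change is a signedness fix: wait_event_interruptible_timeout() returns a long that is negative (-ERESTARTSYS) when a signal arrives, zero on timeout, and positive otherwise. Stored in an unsigned long, the negative case is unrepresentable and any `val < 0` style check becomes dead code. A standalone illustration:

#include <stdio.h>

#define ERESTARTSYS 512

int main(void)
{
	unsigned long uval = -ERESTARTSYS;	/* the old, buggy type */
	long sval = -ERESTARTSYS;		/* the fixed type */

	/* With an unsigned variable, -512 silently becomes a huge
	 * positive number, so the "interrupted?" test never fires. */
	printf("uval < 0: %d (uval = %lu)\n", uval < 0, uval);
	printf("sval < 0: %d\n", sval < 0);	/* 1: signal detected */
	return 0;
}
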
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index a9a9893a5f95..e6d1fc8c54f1 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -38,9 +38,6 @@
 #include <linux/string.h>
 #include <linux/genhd.h>
 #include <linux/blkdev.h>
-
-#define MAJOR_NR JSFD_MAJOR
-
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/io.h>
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 2181ce4d7ebd..35e8eb02b9e9 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -1853,13 +1853,14 @@ static void aty128_bl_exit(struct backlight_device *bd)
  * Initialisation
  */

-#ifdef CONFIG_PPC_PMAC
+#ifdef CONFIG_PPC_PMAC__disabled
 static void aty128_early_resume(void *data)
 {
 	struct aty128fb_par *par = data;

 	if (try_acquire_console_sem())
 		return;
+	pci_restore_state(par->pdev);
 	aty128_do_resume(par->pdev);
 	release_console_sem();
 }
@@ -1907,7 +1908,14 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i
 	/* Indicate sleep capability */
 	if (par->chip_gen == rage_M3) {
 		pmac_call_feature(PMAC_FTR_DEVICE_CAN_WAKE, NULL, 0, 1);
+#if 0 /* Disable the early video resume hack for now as it's causing problems, among
+       * others we now rely on the PCI core restoring the config space for us, which
+       * isn't the case with that hack, and that code path causes various things to
+       * be called with interrupts off while they shouldn't. I'm leaving the code in
+       * as it can be useful for debugging purposes
+       */
 		pmac_set_early_video_resume(aty128_early_resume, par);
+#endif
 	}

 	/* Find default mode */
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index ca5f0dc28546..81603f85e17e 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -2762,12 +2762,13 @@ int radeonfb_pci_resume(struct pci_dev *pdev)
 	return rc;
 }

-#ifdef CONFIG_PPC_OF
+#ifdef CONFIG_PPC_OF__disabled
 static void radeonfb_early_resume(void *data)
 {
 	struct radeonfb_info *rinfo = data;

 	rinfo->no_schedule = 1;
+	pci_restore_state(rinfo->pdev);
 	radeonfb_pci_resume(rinfo->pdev);
 	rinfo->no_schedule = 0;
 }
@@ -2834,7 +2835,14 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk, int ignore_devlis
 	 */
 	if (rinfo->pm_mode != radeon_pm_none) {
 		pmac_call_feature(PMAC_FTR_DEVICE_CAN_WAKE, rinfo->of_node, 0, 1);
+#if 0 /* Disable the early video resume hack for now as it's causing problems, among
+       * others we now rely on the PCI core restoring the config space for us, which
+       * isn't the case with that hack, and that code path causes various things to
+       * be called with interrupts off while they shouldn't. I'm leaving the code in
+       * as it can be useful for debugging purposes
+       */
 		pmac_set_early_video_resume(radeonfb_early_resume, rinfo);
+#endif
 	}

 #if 0
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index a24e680d2b9c..2e940199fc89 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -993,6 +993,7 @@ static int i810_check_params(struct fb_var_screeninfo *var,
 	struct i810fb_par *par = info->par;
 	int line_length, vidmem, mode_valid = 0, retval = 0;
 	u32 vyres = var->yres_virtual, vxres = var->xres_virtual;
+
 	/*
 	 * Memory limit
 	 */
@@ -1002,12 +1003,12 @@ static int i810_check_params(struct fb_var_screeninfo *var,
 	if (vidmem > par->fb.size) {
 		vyres = par->fb.size/line_length;
 		if (vyres < var->yres) {
-			vyres = yres;
+			vyres = info->var.yres;
 			vxres = par->fb.size/vyres;
 			vxres /= var->bits_per_pixel >> 3;
 			line_length = get_line_length(par, vxres,
 						      var->bits_per_pixel);
-			vidmem = line_length * yres;
+			vidmem = line_length * info->var.yres;
 			if (vxres < var->xres) {
 				printk("i810fb: required video memory, "
 				       "%d bytes, for %dx%d-%d (virtual) "
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 0e2b8fd24df1..2c5d069e5f06 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -446,7 +446,6 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
 {
 	struct sh_mobile_lcdc_chan *ch;
 	struct sh_mobile_lcdc_board_cfg *board_cfg;
-	unsigned long tmp;
 	int k;

 	/* tell the board code to disable the panel */
@@ -456,9 +455,8 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
 		if (board_cfg->display_off)
 			board_cfg->display_off(board_cfg->board_data);

-		/* cleanup deferred io if SYS bus */
-		tmp = ch->cfg.sys_bus_cfg.deferred_io_msec;
-		if (ch->ldmt1r_value & (1 << 12) && tmp) {
+		/* cleanup deferred io if enabled */
+		if (ch->info.fbdefio) {
 			fb_deferred_io_cleanup(&ch->info);
 			ch->info.fbdefio = NULL;
 		}
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index 9e1138a75e8b..a411702413d6 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -39,7 +39,7 @@ static u8 w1_gpio_read_bit(void *data)
 {
 	struct w1_gpio_platform_data *pdata = data;

-	return gpio_get_value(pdata->pin);
+	return gpio_get_value(pdata->pin) ? 1 : 0;
 }

 static int __init w1_gpio_probe(struct platform_device *pdev)
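
On the w1-gpio fix: gpio_get_value() only guarantees zero/nonzero, and some GPIO controllers return the raw port bit (0x100 for pin 8, say). Since w1_gpio_read_bit() returns u8, an unnormalized value can truncate a logical 1 to 0; the added `? 1 : 0` avoids that. A standalone illustration with a fake controller:

#include <stdio.h>

typedef unsigned char u8;

/* Pretend the controller hands back the raw bit mask for the pin. */
static int fake_gpio_get_value(unsigned pin)
{
	return 1 << pin;	/* 0x100 for pin 8: nonzero, i.e. "high" */
}

static u8 read_bit_buggy(unsigned pin)
{
	return fake_gpio_get_value(pin);	/* 0x100 truncates to 0 */
}

static u8 read_bit_fixed(unsigned pin)
{
	return fake_gpio_get_value(pin) ? 1 : 0;
}

int main(void)
{
	/* prints "buggy: 0, fixed: 1" */
	printf("buggy: %d, fixed: %d\n", read_bit_buggy(8), read_bit_fixed(8));
	return 0;
}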