aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorPaul Mundt <lethal@linux-sh.org>2011-03-24 02:17:25 -0400
committerPaul Mundt <lethal@linux-sh.org>2011-03-24 02:17:25 -0400
commita3d3362287fbe96fe90abdb5c6d1a35471129a8c (patch)
treead3c85ed1feef470c66599eb514e30f43c2db5dd /drivers
parentfb7f045ace0624f1e59a7db8497e460bd54b1cbc (diff)
parent4bbba111d94781d34081c37856bbc5eb33f6c72a (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into sh-latest
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/video.c16
-rw-r--r--drivers/block/rbd.c361
-rw-r--r--drivers/dca/dca-core.c6
-rw-r--r--drivers/dma/Kconfig12
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/dmatest.c14
-rw-r--r--drivers/dma/dw_dmac.c103
-rw-r--r--drivers/dma/dw_dmac_regs.h12
-rw-r--r--drivers/dma/fsldma.c551
-rw-r--r--drivers/dma/fsldma.h6
-rw-r--r--drivers/dma/mxs-dma.c724
-rw-r--r--drivers/dma/pch_dma.c35
-rw-r--r--drivers/dma/ste_dma40.c1402
-rw-r--r--drivers/dma/ste_dma40_ll.c218
-rw-r--r--drivers/dma/ste_dma40_ll.h66
-rw-r--r--drivers/firmware/Kconfig12
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/sigma.c115
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c26
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c6
-rw-r--r--drivers/gpu/drm/radeon/Kconfig1
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c257
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h6
-rw-r--r--drivers/hid/hid-picolcd.c1
-rw-r--r--drivers/i2c/busses/Kconfig32
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-diolan-u2c.c535
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c161
-rw-r--r--drivers/i2c/busses/i2c-mxs.c4
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c176
-rw-r--r--drivers/i2c/busses/i2c-pxa.c115
-rw-r--r--drivers/leds/Kconfig10
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-bd2802.c47
-rw-r--r--drivers/leds/leds-lm3530.c378
-rw-r--r--drivers/leds/leds-lp5521.c14
-rw-r--r--drivers/leds/leds-lp5523.c20
-rw-r--r--drivers/leds/leds-net5501.c2
-rw-r--r--drivers/macintosh/via-pmu-backlight.c1
-rw-r--r--drivers/misc/Kconfig10
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/apds9802als.c17
-rw-r--r--drivers/misc/atmel_tclib.c4
-rw-r--r--drivers/misc/bh1780gli.c19
-rw-r--r--drivers/misc/bmp085.c6
-rw-r--r--drivers/misc/ep93xx_pwm.c6
-rw-r--r--drivers/misc/hmc6352.c4
-rw-r--r--drivers/misc/pch_phub.c1
-rw-r--r--drivers/misc/spear13xx_pcie_gadget.c908
-rw-r--r--drivers/mmc/host/omap.c6
-rw-r--r--drivers/mmc/host/omap_hsmmc.c7
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/can/c_can/c_can.c6
-rw-r--r--drivers/net/ftmac100.c2
-rw-r--r--drivers/net/gianfar.c16
-rw-r--r--drivers/net/gianfar.h1
-rw-r--r--drivers/net/macvlan.c18
-rw-r--r--drivers/net/niu.c2
-rw-r--r--drivers/net/ppp_deflate.c2
-rw-r--r--drivers/net/veth.c2
-rw-r--r--drivers/platform/x86/acer-wmi.c1
-rw-r--r--drivers/platform/x86/asus-laptop.c1
-rw-r--r--drivers/platform/x86/asus_acpi.c1
-rw-r--r--drivers/platform/x86/classmate-laptop.c1
-rw-r--r--drivers/platform/x86/compal-laptop.c1
-rw-r--r--drivers/platform/x86/dell-laptop.c1
-rw-r--r--drivers/platform/x86/eeepc-laptop.c1
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c1
-rw-r--r--drivers/platform/x86/msi-laptop.c1
-rw-r--r--drivers/platform/x86/msi-wmi.c1
-rw-r--r--drivers/platform/x86/panasonic-laptop.c1
-rw-r--r--drivers/platform/x86/sony-laptop.c3
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c1
-rw-r--r--drivers/platform/x86/toshiba_acpi.c1
-rw-r--r--drivers/pnp/base.h2
-rw-r--r--drivers/pnp/manager.c7
-rw-r--r--drivers/pnp/resource.c7
-rw-r--r--drivers/pps/generators/pps_gen_parport.c5
-rw-r--r--drivers/rtc/Kconfig10
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-ds1374.c19
-rw-r--r--drivers/rtc/rtc-ds1511.c2
-rw-r--r--drivers/rtc/rtc-isl1208.c176
-rw-r--r--drivers/rtc/rtc-tegra.c488
-rw-r--r--drivers/scsi/sd.c1
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c1
-rw-r--r--drivers/staging/samsung-laptop/samsung-laptop.c1
-rw-r--r--drivers/tty/tty_buffer.c14
-rw-r--r--drivers/tty/tty_ldisc.c14
-rw-r--r--drivers/usb/misc/appledisplay.c1
-rw-r--r--drivers/vhost/net.c159
-rw-r--r--drivers/vhost/vhost.c55
-rw-r--r--drivers/video/atmel_lcdfb.c1
-rw-r--r--drivers/video/aty/aty128fb.c1
-rw-r--r--drivers/video/aty/atyfb_base.c1
-rw-r--r--drivers/video/aty/radeon_backlight.c1
-rw-r--r--drivers/video/backlight/88pm860x_bl.c1
-rw-r--r--drivers/video/backlight/Kconfig18
-rw-r--r--drivers/video/backlight/Makefile3
-rw-r--r--drivers/video/backlight/adp5520_bl.c1
-rw-r--r--drivers/video/backlight/adp8860_bl.c1
-rw-r--r--drivers/video/backlight/adx_bl.c1
-rw-r--r--drivers/video/backlight/apple_bl.c241
-rw-r--r--drivers/video/backlight/atmel-pwm-bl.c1
-rw-r--r--drivers/video/backlight/backlight.c24
-rw-r--r--drivers/video/backlight/corgi_lcd.c1
-rw-r--r--drivers/video/backlight/cr_bllcd.c1
-rw-r--r--drivers/video/backlight/da903x_bl.c1
-rw-r--r--drivers/video/backlight/ep93xx_bl.c1
-rw-r--r--drivers/video/backlight/generic_bl.c1
-rw-r--r--drivers/video/backlight/hp680_bl.c1
-rw-r--r--drivers/video/backlight/jornada720_bl.c5
-rw-r--r--drivers/video/backlight/jornada720_lcd.c4
-rw-r--r--drivers/video/backlight/kb3886_bl.c1
-rw-r--r--drivers/video/backlight/ld9040.c819
-rw-r--r--drivers/video/backlight/ld9040_gamma.h200
-rw-r--r--drivers/video/backlight/locomolcd.c1
-rw-r--r--drivers/video/backlight/max8925_bl.c1
-rw-r--r--drivers/video/backlight/mbp_nvidia_bl.c400
-rw-r--r--drivers/video/backlight/omap1_bl.c1
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c1
-rw-r--r--drivers/video/backlight/progear_bl.c1
-rw-r--r--drivers/video/backlight/pwm_bl.c12
-rw-r--r--drivers/video/backlight/s6e63m0.c1
-rw-r--r--drivers/video/backlight/tosa_bl.c1
-rw-r--r--drivers/video/backlight/wm831x_bl.c1
-rw-r--r--drivers/video/bf54x-lq043fb.c1
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c1
-rw-r--r--drivers/video/imxfb.c1
-rw-r--r--drivers/video/nvidia/nv_backlight.c1
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c1
-rw-r--r--drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c1
-rw-r--r--drivers/video/omap2/displays/panel-taal.c2
-rw-r--r--drivers/video/riva/fbdev.c1
-rw-r--r--drivers/video/via/viafbdev.h3
138 files changed, 7178 insertions, 2085 deletions
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 90f8f7676d1f..a18e497f1c3c 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -782,6 +782,9 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
782 782
783 if (acpi_video_backlight_support()) { 783 if (acpi_video_backlight_support()) {
784 struct backlight_properties props; 784 struct backlight_properties props;
785 struct pci_dev *pdev;
786 acpi_handle acpi_parent;
787 struct device *parent = NULL;
785 int result; 788 int result;
786 static int count = 0; 789 static int count = 0;
787 char *name; 790 char *name;
@@ -794,9 +797,20 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
794 return; 797 return;
795 count++; 798 count++;
796 799
800 acpi_get_parent(device->dev->handle, &acpi_parent);
801
802 pdev = acpi_get_pci_dev(acpi_parent);
803 if (pdev) {
804 parent = &pdev->dev;
805 pci_dev_put(pdev);
806 }
807
797 memset(&props, 0, sizeof(struct backlight_properties)); 808 memset(&props, 0, sizeof(struct backlight_properties));
809 props.type = BACKLIGHT_FIRMWARE;
798 props.max_brightness = device->brightness->count - 3; 810 props.max_brightness = device->brightness->count - 3;
799 device->backlight = backlight_device_register(name, NULL, device, 811 device->backlight = backlight_device_register(name,
812 parent,
813 device,
800 &acpi_backlight_ops, 814 &acpi_backlight_ops,
801 &props); 815 &props);
802 kfree(name); 816 kfree(name);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index e1e38b11f48a..16dc3645291c 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -31,6 +31,7 @@
31#include <linux/ceph/osd_client.h> 31#include <linux/ceph/osd_client.h>
32#include <linux/ceph/mon_client.h> 32#include <linux/ceph/mon_client.h>
33#include <linux/ceph/decode.h> 33#include <linux/ceph/decode.h>
34#include <linux/parser.h>
34 35
35#include <linux/kernel.h> 36#include <linux/kernel.h>
36#include <linux/device.h> 37#include <linux/device.h>
@@ -54,6 +55,8 @@
54 55
55#define DEV_NAME_LEN 32 56#define DEV_NAME_LEN 32
56 57
58#define RBD_NOTIFY_TIMEOUT_DEFAULT 10
59
57/* 60/*
58 * block device image metadata (in-memory version) 61 * block device image metadata (in-memory version)
59 */ 62 */
@@ -71,6 +74,12 @@ struct rbd_image_header {
71 74
72 char *snap_names; 75 char *snap_names;
73 u64 *snap_sizes; 76 u64 *snap_sizes;
77
78 u64 obj_version;
79};
80
81struct rbd_options {
82 int notify_timeout;
74}; 83};
75 84
76/* 85/*
@@ -78,6 +87,7 @@ struct rbd_image_header {
78 */ 87 */
79struct rbd_client { 88struct rbd_client {
80 struct ceph_client *client; 89 struct ceph_client *client;
90 struct rbd_options *rbd_opts;
81 struct kref kref; 91 struct kref kref;
82 struct list_head node; 92 struct list_head node;
83}; 93};
@@ -124,6 +134,9 @@ struct rbd_device {
124 char pool_name[RBD_MAX_POOL_NAME_LEN]; 134 char pool_name[RBD_MAX_POOL_NAME_LEN];
125 int poolid; 135 int poolid;
126 136
137 struct ceph_osd_event *watch_event;
138 struct ceph_osd_request *watch_request;
139
127 char snap_name[RBD_MAX_SNAP_NAME_LEN]; 140 char snap_name[RBD_MAX_SNAP_NAME_LEN];
128 u32 cur_snap; /* index+1 of current snapshot within snap context 141 u32 cur_snap; /* index+1 of current snapshot within snap context
129 0 - for the head */ 142 0 - for the head */
@@ -177,6 +190,8 @@ static void rbd_put_dev(struct rbd_device *rbd_dev)
177 put_device(&rbd_dev->dev); 190 put_device(&rbd_dev->dev);
178} 191}
179 192
193static int __rbd_update_snaps(struct rbd_device *rbd_dev);
194
180static int rbd_open(struct block_device *bdev, fmode_t mode) 195static int rbd_open(struct block_device *bdev, fmode_t mode)
181{ 196{
182 struct gendisk *disk = bdev->bd_disk; 197 struct gendisk *disk = bdev->bd_disk;
@@ -211,7 +226,8 @@ static const struct block_device_operations rbd_bd_ops = {
211 * Initialize an rbd client instance. 226 * Initialize an rbd client instance.
212 * We own *opt. 227 * We own *opt.
213 */ 228 */
214static struct rbd_client *rbd_client_create(struct ceph_options *opt) 229static struct rbd_client *rbd_client_create(struct ceph_options *opt,
230 struct rbd_options *rbd_opts)
215{ 231{
216 struct rbd_client *rbdc; 232 struct rbd_client *rbdc;
217 int ret = -ENOMEM; 233 int ret = -ENOMEM;
@@ -233,6 +249,8 @@ static struct rbd_client *rbd_client_create(struct ceph_options *opt)
233 if (ret < 0) 249 if (ret < 0)
234 goto out_err; 250 goto out_err;
235 251
252 rbdc->rbd_opts = rbd_opts;
253
236 spin_lock(&node_lock); 254 spin_lock(&node_lock);
237 list_add_tail(&rbdc->node, &rbd_client_list); 255 list_add_tail(&rbdc->node, &rbd_client_list);
238 spin_unlock(&node_lock); 256 spin_unlock(&node_lock);
@@ -267,6 +285,59 @@ static struct rbd_client *__rbd_client_find(struct ceph_options *opt)
267} 285}
268 286
269/* 287/*
288 * mount options
289 */
290enum {
291 Opt_notify_timeout,
292 Opt_last_int,
293 /* int args above */
294 Opt_last_string,
295 /* string args above */
296};
297
298static match_table_t rbdopt_tokens = {
299 {Opt_notify_timeout, "notify_timeout=%d"},
300 /* int args above */
301 /* string args above */
302 {-1, NULL}
303};
304
305static int parse_rbd_opts_token(char *c, void *private)
306{
307 struct rbd_options *rbdopt = private;
308 substring_t argstr[MAX_OPT_ARGS];
309 int token, intval, ret;
310
311 token = match_token((char *)c, rbdopt_tokens, argstr);
312 if (token < 0)
313 return -EINVAL;
314
315 if (token < Opt_last_int) {
316 ret = match_int(&argstr[0], &intval);
317 if (ret < 0) {
318 pr_err("bad mount option arg (not int) "
319 "at '%s'\n", c);
320 return ret;
321 }
322 dout("got int token %d val %d\n", token, intval);
323 } else if (token > Opt_last_int && token < Opt_last_string) {
324 dout("got string token %d val %s\n", token,
325 argstr[0].from);
326 } else {
327 dout("got token %d\n", token);
328 }
329
330 switch (token) {
331 case Opt_notify_timeout:
332 rbdopt->notify_timeout = intval;
333 break;
334 default:
335 BUG_ON(token);
336 }
337 return 0;
338}
339
340/*
270 * Get a ceph client with specific addr and configuration, if one does 341 * Get a ceph client with specific addr and configuration, if one does
271 * not exist create it. 342 * not exist create it.
272 */ 343 */
@@ -276,11 +347,18 @@ static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr,
276 struct rbd_client *rbdc; 347 struct rbd_client *rbdc;
277 struct ceph_options *opt; 348 struct ceph_options *opt;
278 int ret; 349 int ret;
350 struct rbd_options *rbd_opts;
351
352 rbd_opts = kzalloc(sizeof(*rbd_opts), GFP_KERNEL);
353 if (!rbd_opts)
354 return -ENOMEM;
355
356 rbd_opts->notify_timeout = RBD_NOTIFY_TIMEOUT_DEFAULT;
279 357
280 ret = ceph_parse_options(&opt, options, mon_addr, 358 ret = ceph_parse_options(&opt, options, mon_addr,
281 mon_addr + strlen(mon_addr), NULL, NULL); 359 mon_addr + strlen(mon_addr), parse_rbd_opts_token, rbd_opts);
282 if (ret < 0) 360 if (ret < 0)
283 return ret; 361 goto done_err;
284 362
285 spin_lock(&node_lock); 363 spin_lock(&node_lock);
286 rbdc = __rbd_client_find(opt); 364 rbdc = __rbd_client_find(opt);
@@ -296,13 +374,18 @@ static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr,
296 } 374 }
297 spin_unlock(&node_lock); 375 spin_unlock(&node_lock);
298 376
299 rbdc = rbd_client_create(opt); 377 rbdc = rbd_client_create(opt, rbd_opts);
300 if (IS_ERR(rbdc)) 378 if (IS_ERR(rbdc)) {
301 return PTR_ERR(rbdc); 379 ret = PTR_ERR(rbdc);
380 goto done_err;
381 }
302 382
303 rbd_dev->rbd_client = rbdc; 383 rbd_dev->rbd_client = rbdc;
304 rbd_dev->client = rbdc->client; 384 rbd_dev->client = rbdc->client;
305 return 0; 385 return 0;
386done_err:
387 kfree(rbd_opts);
388 return ret;
306} 389}
307 390
308/* 391/*
@@ -318,6 +401,7 @@ static void rbd_client_release(struct kref *kref)
318 spin_unlock(&node_lock); 401 spin_unlock(&node_lock);
319 402
320 ceph_destroy_client(rbdc->client); 403 ceph_destroy_client(rbdc->client);
404 kfree(rbdc->rbd_opts);
321 kfree(rbdc); 405 kfree(rbdc);
322} 406}
323 407
@@ -666,7 +750,9 @@ static int rbd_do_request(struct request *rq,
666 struct ceph_osd_req_op *ops, 750 struct ceph_osd_req_op *ops,
667 int num_reply, 751 int num_reply,
668 void (*rbd_cb)(struct ceph_osd_request *req, 752 void (*rbd_cb)(struct ceph_osd_request *req,
669 struct ceph_msg *msg)) 753 struct ceph_msg *msg),
754 struct ceph_osd_request **linger_req,
755 u64 *ver)
670{ 756{
671 struct ceph_osd_request *req; 757 struct ceph_osd_request *req;
672 struct ceph_file_layout *layout; 758 struct ceph_file_layout *layout;
@@ -729,12 +815,20 @@ static int rbd_do_request(struct request *rq,
729 req->r_oid, req->r_oid_len); 815 req->r_oid, req->r_oid_len);
730 up_read(&header->snap_rwsem); 816 up_read(&header->snap_rwsem);
731 817
818 if (linger_req) {
819 ceph_osdc_set_request_linger(&dev->client->osdc, req);
820 *linger_req = req;
821 }
822
732 ret = ceph_osdc_start_request(&dev->client->osdc, req, false); 823 ret = ceph_osdc_start_request(&dev->client->osdc, req, false);
733 if (ret < 0) 824 if (ret < 0)
734 goto done_err; 825 goto done_err;
735 826
736 if (!rbd_cb) { 827 if (!rbd_cb) {
737 ret = ceph_osdc_wait_request(&dev->client->osdc, req); 828 ret = ceph_osdc_wait_request(&dev->client->osdc, req);
829 if (ver)
830 *ver = le64_to_cpu(req->r_reassert_version.version);
831 dout("reassert_ver=%lld\n", le64_to_cpu(req->r_reassert_version.version));
738 ceph_osdc_put_request(req); 832 ceph_osdc_put_request(req);
739 } 833 }
740 return ret; 834 return ret;
@@ -789,6 +883,11 @@ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
789 kfree(req_data); 883 kfree(req_data);
790} 884}
791 885
886static void rbd_simple_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
887{
888 ceph_osdc_put_request(req);
889}
890
792/* 891/*
793 * Do a synchronous ceph osd operation 892 * Do a synchronous ceph osd operation
794 */ 893 */
@@ -801,7 +900,9 @@ static int rbd_req_sync_op(struct rbd_device *dev,
801 int num_reply, 900 int num_reply,
802 const char *obj, 901 const char *obj,
803 u64 ofs, u64 len, 902 u64 ofs, u64 len,
804 char *buf) 903 char *buf,
904 struct ceph_osd_request **linger_req,
905 u64 *ver)
805{ 906{
806 int ret; 907 int ret;
807 struct page **pages; 908 struct page **pages;
@@ -833,7 +934,8 @@ static int rbd_req_sync_op(struct rbd_device *dev,
833 flags, 934 flags,
834 ops, 935 ops,
835 2, 936 2,
836 NULL); 937 NULL,
938 linger_req, ver);
837 if (ret < 0) 939 if (ret < 0)
838 goto done_ops; 940 goto done_ops;
839 941
@@ -893,7 +995,7 @@ static int rbd_do_op(struct request *rq,
893 flags, 995 flags,
894 ops, 996 ops,
895 num_reply, 997 num_reply,
896 rbd_req_cb); 998 rbd_req_cb, 0, NULL);
897done: 999done:
898 kfree(seg_name); 1000 kfree(seg_name);
899 return ret; 1001 return ret;
@@ -940,18 +1042,174 @@ static int rbd_req_sync_read(struct rbd_device *dev,
940 u64 snapid, 1042 u64 snapid,
941 const char *obj, 1043 const char *obj,
942 u64 ofs, u64 len, 1044 u64 ofs, u64 len,
943 char *buf) 1045 char *buf,
1046 u64 *ver)
944{ 1047{
945 return rbd_req_sync_op(dev, NULL, 1048 return rbd_req_sync_op(dev, NULL,
946 (snapid ? snapid : CEPH_NOSNAP), 1049 (snapid ? snapid : CEPH_NOSNAP),
947 CEPH_OSD_OP_READ, 1050 CEPH_OSD_OP_READ,
948 CEPH_OSD_FLAG_READ, 1051 CEPH_OSD_FLAG_READ,
949 NULL, 1052 NULL,
950 1, obj, ofs, len, buf); 1053 1, obj, ofs, len, buf, NULL, ver);
951} 1054}
952 1055
953/* 1056/*
954 * Request sync osd read 1057 * Request sync osd watch
1058 */
1059static int rbd_req_sync_notify_ack(struct rbd_device *dev,
1060 u64 ver,
1061 u64 notify_id,
1062 const char *obj)
1063{
1064 struct ceph_osd_req_op *ops;
1065 struct page **pages = NULL;
1066 int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0);
1067 if (ret < 0)
1068 return ret;
1069
1070 ops[0].watch.ver = cpu_to_le64(dev->header.obj_version);
1071 ops[0].watch.cookie = notify_id;
1072 ops[0].watch.flag = 0;
1073
1074 ret = rbd_do_request(NULL, dev, NULL, CEPH_NOSNAP,
1075 obj, 0, 0, NULL,
1076 pages, 0,
1077 CEPH_OSD_FLAG_READ,
1078 ops,
1079 1,
1080 rbd_simple_req_cb, 0, NULL);
1081
1082 rbd_destroy_ops(ops);
1083 return ret;
1084}
1085
1086static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
1087{
1088 struct rbd_device *dev = (struct rbd_device *)data;
1089 if (!dev)
1090 return;
1091
1092 dout("rbd_watch_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name,
1093 notify_id, (int)opcode);
1094 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1095 __rbd_update_snaps(dev);
1096 mutex_unlock(&ctl_mutex);
1097
1098 rbd_req_sync_notify_ack(dev, ver, notify_id, dev->obj_md_name);
1099}
1100
1101/*
1102 * Request sync osd watch
1103 */
1104static int rbd_req_sync_watch(struct rbd_device *dev,
1105 const char *obj,
1106 u64 ver)
1107{
1108 struct ceph_osd_req_op *ops;
1109 struct ceph_osd_client *osdc = &dev->client->osdc;
1110
1111 int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_WATCH, 0);
1112 if (ret < 0)
1113 return ret;
1114
1115 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, 0,
1116 (void *)dev, &dev->watch_event);
1117 if (ret < 0)
1118 goto fail;
1119
1120 ops[0].watch.ver = cpu_to_le64(ver);
1121 ops[0].watch.cookie = cpu_to_le64(dev->watch_event->cookie);
1122 ops[0].watch.flag = 1;
1123
1124 ret = rbd_req_sync_op(dev, NULL,
1125 CEPH_NOSNAP,
1126 0,
1127 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
1128 ops,
1129 1, obj, 0, 0, NULL,
1130 &dev->watch_request, NULL);
1131
1132 if (ret < 0)
1133 goto fail_event;
1134
1135 rbd_destroy_ops(ops);
1136 return 0;
1137
1138fail_event:
1139 ceph_osdc_cancel_event(dev->watch_event);
1140 dev->watch_event = NULL;
1141fail:
1142 rbd_destroy_ops(ops);
1143 return ret;
1144}
1145
1146struct rbd_notify_info {
1147 struct rbd_device *dev;
1148};
1149
1150static void rbd_notify_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
1151{
1152 struct rbd_device *dev = (struct rbd_device *)data;
1153 if (!dev)
1154 return;
1155
1156 dout("rbd_notify_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name,
1157 notify_id, (int)opcode);
1158}
1159
1160/*
1161 * Request sync osd notify
1162 */
1163static int rbd_req_sync_notify(struct rbd_device *dev,
1164 const char *obj)
1165{
1166 struct ceph_osd_req_op *ops;
1167 struct ceph_osd_client *osdc = &dev->client->osdc;
1168 struct ceph_osd_event *event;
1169 struct rbd_notify_info info;
1170 int payload_len = sizeof(u32) + sizeof(u32);
1171 int ret;
1172
1173 ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY, payload_len);
1174 if (ret < 0)
1175 return ret;
1176
1177 info.dev = dev;
1178
1179 ret = ceph_osdc_create_event(osdc, rbd_notify_cb, 1,
1180 (void *)&info, &event);
1181 if (ret < 0)
1182 goto fail;
1183
1184 ops[0].watch.ver = 1;
1185 ops[0].watch.flag = 1;
1186 ops[0].watch.cookie = event->cookie;
1187 ops[0].watch.prot_ver = RADOS_NOTIFY_VER;
1188 ops[0].watch.timeout = 12;
1189
1190 ret = rbd_req_sync_op(dev, NULL,
1191 CEPH_NOSNAP,
1192 0,
1193 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
1194 ops,
1195 1, obj, 0, 0, NULL, NULL, NULL);
1196 if (ret < 0)
1197 goto fail_event;
1198
1199 ret = ceph_osdc_wait_event(event, CEPH_OSD_TIMEOUT_DEFAULT);
1200 dout("ceph_osdc_wait_event returned %d\n", ret);
1201 rbd_destroy_ops(ops);
1202 return 0;
1203
1204fail_event:
1205 ceph_osdc_cancel_event(event);
1206fail:
1207 rbd_destroy_ops(ops);
1208 return ret;
1209}
1210
1211/*
1212 * Request sync osd rollback
955 */ 1213 */
956static int rbd_req_sync_rollback_obj(struct rbd_device *dev, 1214static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
957 u64 snapid, 1215 u64 snapid,
@@ -969,13 +1227,10 @@ static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
969 0, 1227 0,
970 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, 1228 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
971 ops, 1229 ops,
972 1, obj, 0, 0, NULL); 1230 1, obj, 0, 0, NULL, NULL, NULL);
973 1231
974 rbd_destroy_ops(ops); 1232 rbd_destroy_ops(ops);
975 1233
976 if (ret < 0)
977 return ret;
978
979 return ret; 1234 return ret;
980} 1235}
981 1236
@@ -987,7 +1242,8 @@ static int rbd_req_sync_exec(struct rbd_device *dev,
987 const char *cls, 1242 const char *cls,
988 const char *method, 1243 const char *method,
989 const char *data, 1244 const char *data,
990 int len) 1245 int len,
1246 u64 *ver)
991{ 1247{
992 struct ceph_osd_req_op *ops; 1248 struct ceph_osd_req_op *ops;
993 int cls_len = strlen(cls); 1249 int cls_len = strlen(cls);
@@ -1010,7 +1266,7 @@ static int rbd_req_sync_exec(struct rbd_device *dev,
1010 0, 1266 0,
1011 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, 1267 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
1012 ops, 1268 ops,
1013 1, obj, 0, 0, NULL); 1269 1, obj, 0, 0, NULL, NULL, ver);
1014 1270
1015 rbd_destroy_ops(ops); 1271 rbd_destroy_ops(ops);
1016 1272
@@ -1156,6 +1412,7 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
1156 struct rbd_image_header_ondisk *dh; 1412 struct rbd_image_header_ondisk *dh;
1157 int snap_count = 0; 1413 int snap_count = 0;
1158 u64 snap_names_len = 0; 1414 u64 snap_names_len = 0;
1415 u64 ver;
1159 1416
1160 while (1) { 1417 while (1) {
1161 int len = sizeof(*dh) + 1418 int len = sizeof(*dh) +
@@ -1171,7 +1428,7 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
1171 NULL, CEPH_NOSNAP, 1428 NULL, CEPH_NOSNAP,
1172 rbd_dev->obj_md_name, 1429 rbd_dev->obj_md_name,
1173 0, len, 1430 0, len,
1174 (char *)dh); 1431 (char *)dh, &ver);
1175 if (rc < 0) 1432 if (rc < 0)
1176 goto out_dh; 1433 goto out_dh;
1177 1434
@@ -1188,6 +1445,7 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
1188 } 1445 }
1189 break; 1446 break;
1190 } 1447 }
1448 header->obj_version = ver;
1191 1449
1192out_dh: 1450out_dh:
1193 kfree(dh); 1451 kfree(dh);
@@ -1205,6 +1463,7 @@ static int rbd_header_add_snap(struct rbd_device *dev,
1205 u64 new_snapid; 1463 u64 new_snapid;
1206 int ret; 1464 int ret;
1207 void *data, *data_start, *data_end; 1465 void *data, *data_start, *data_end;
1466 u64 ver;
1208 1467
1209 /* we should create a snapshot only if we're pointing at the head */ 1468 /* we should create a snapshot only if we're pointing at the head */
1210 if (dev->cur_snap) 1469 if (dev->cur_snap)
@@ -1227,7 +1486,7 @@ static int rbd_header_add_snap(struct rbd_device *dev,
1227 ceph_encode_64_safe(&data, data_end, new_snapid, bad); 1486 ceph_encode_64_safe(&data, data_end, new_snapid, bad);
1228 1487
1229 ret = rbd_req_sync_exec(dev, dev->obj_md_name, "rbd", "snap_add", 1488 ret = rbd_req_sync_exec(dev, dev->obj_md_name, "rbd", "snap_add",
1230 data_start, data - data_start); 1489 data_start, data - data_start, &ver);
1231 1490
1232 kfree(data_start); 1491 kfree(data_start);
1233 1492
@@ -1259,6 +1518,7 @@ static int __rbd_update_snaps(struct rbd_device *rbd_dev)
1259 int ret; 1518 int ret;
1260 struct rbd_image_header h; 1519 struct rbd_image_header h;
1261 u64 snap_seq; 1520 u64 snap_seq;
1521 int follow_seq = 0;
1262 1522
1263 ret = rbd_read_header(rbd_dev, &h); 1523 ret = rbd_read_header(rbd_dev, &h);
1264 if (ret < 0) 1524 if (ret < 0)
@@ -1267,6 +1527,11 @@ static int __rbd_update_snaps(struct rbd_device *rbd_dev)
1267 down_write(&rbd_dev->header.snap_rwsem); 1527 down_write(&rbd_dev->header.snap_rwsem);
1268 1528
1269 snap_seq = rbd_dev->header.snapc->seq; 1529 snap_seq = rbd_dev->header.snapc->seq;
1530 if (rbd_dev->header.total_snaps &&
1531 rbd_dev->header.snapc->snaps[0] == snap_seq)
1532 /* pointing at the head, will need to follow that
1533 if head moves */
1534 follow_seq = 1;
1270 1535
1271 kfree(rbd_dev->header.snapc); 1536 kfree(rbd_dev->header.snapc);
1272 kfree(rbd_dev->header.snap_names); 1537 kfree(rbd_dev->header.snap_names);
@@ -1277,7 +1542,10 @@ static int __rbd_update_snaps(struct rbd_device *rbd_dev)
1277 rbd_dev->header.snap_names = h.snap_names; 1542 rbd_dev->header.snap_names = h.snap_names;
1278 rbd_dev->header.snap_names_len = h.snap_names_len; 1543 rbd_dev->header.snap_names_len = h.snap_names_len;
1279 rbd_dev->header.snap_sizes = h.snap_sizes; 1544 rbd_dev->header.snap_sizes = h.snap_sizes;
1280 rbd_dev->header.snapc->seq = snap_seq; 1545 if (follow_seq)
1546 rbd_dev->header.snapc->seq = rbd_dev->header.snapc->snaps[0];
1547 else
1548 rbd_dev->header.snapc->seq = snap_seq;
1281 1549
1282 ret = __rbd_init_snaps_header(rbd_dev); 1550 ret = __rbd_init_snaps_header(rbd_dev);
1283 1551
@@ -1699,7 +1967,28 @@ static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
1699 device_unregister(&rbd_dev->dev); 1967 device_unregister(&rbd_dev->dev);
1700} 1968}
1701 1969
1702static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count) 1970static int rbd_init_watch_dev(struct rbd_device *rbd_dev)
1971{
1972 int ret, rc;
1973
1974 do {
1975 ret = rbd_req_sync_watch(rbd_dev, rbd_dev->obj_md_name,
1976 rbd_dev->header.obj_version);
1977 if (ret == -ERANGE) {
1978 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1979 rc = __rbd_update_snaps(rbd_dev);
1980 mutex_unlock(&ctl_mutex);
1981 if (rc < 0)
1982 return rc;
1983 }
1984 } while (ret == -ERANGE);
1985
1986 return ret;
1987}
1988
1989static ssize_t rbd_add(struct bus_type *bus,
1990 const char *buf,
1991 size_t count)
1703{ 1992{
1704 struct ceph_osd_client *osdc; 1993 struct ceph_osd_client *osdc;
1705 struct rbd_device *rbd_dev; 1994 struct rbd_device *rbd_dev;
@@ -1797,6 +2086,10 @@ static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count)
1797 if (rc) 2086 if (rc)
1798 goto err_out_bus; 2087 goto err_out_bus;
1799 2088
2089 rc = rbd_init_watch_dev(rbd_dev);
2090 if (rc)
2091 goto err_out_bus;
2092
1800 return count; 2093 return count;
1801 2094
1802err_out_bus: 2095err_out_bus:
@@ -1849,6 +2142,12 @@ static void rbd_dev_release(struct device *dev)
1849 struct rbd_device *rbd_dev = 2142 struct rbd_device *rbd_dev =
1850 container_of(dev, struct rbd_device, dev); 2143 container_of(dev, struct rbd_device, dev);
1851 2144
2145 if (rbd_dev->watch_request)
2146 ceph_osdc_unregister_linger_request(&rbd_dev->client->osdc,
2147 rbd_dev->watch_request);
2148 if (rbd_dev->watch_event)
2149 ceph_osdc_cancel_event(rbd_dev->watch_event);
2150
1852 rbd_put_client(rbd_dev); 2151 rbd_put_client(rbd_dev);
1853 2152
1854 /* clean up and free blkdev */ 2153 /* clean up and free blkdev */
@@ -1914,14 +2213,24 @@ static ssize_t rbd_snap_add(struct device *dev,
1914 ret = rbd_header_add_snap(rbd_dev, 2213 ret = rbd_header_add_snap(rbd_dev,
1915 name, GFP_KERNEL); 2214 name, GFP_KERNEL);
1916 if (ret < 0) 2215 if (ret < 0)
1917 goto done_unlock; 2216 goto err_unlock;
1918 2217
1919 ret = __rbd_update_snaps(rbd_dev); 2218 ret = __rbd_update_snaps(rbd_dev);
1920 if (ret < 0) 2219 if (ret < 0)
1921 goto done_unlock; 2220 goto err_unlock;
2221
2222 /* shouldn't hold ctl_mutex when notifying.. notify might
2223 trigger a watch callback that would need to get that mutex */
2224 mutex_unlock(&ctl_mutex);
2225
2226 /* make a best effort, don't error if failed */
2227 rbd_req_sync_notify(rbd_dev, rbd_dev->obj_md_name);
1922 2228
1923 ret = count; 2229 ret = count;
1924done_unlock: 2230 kfree(name);
2231 return ret;
2232
2233err_unlock:
1925 mutex_unlock(&ctl_mutex); 2234 mutex_unlock(&ctl_mutex);
1926 kfree(name); 2235 kfree(name);
1927 return ret; 2236 return ret;
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index c461eda62411..4abd089a094f 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -111,10 +111,8 @@ static void unregister_dca_providers(void)
111 /* at this point only one domain in the list is expected */ 111 /* at this point only one domain in the list is expected */
112 domain = list_first_entry(&dca_domains, struct dca_domain, node); 112 domain = list_first_entry(&dca_domains, struct dca_domain, node);
113 113
114 list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) { 114 list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
115 list_del(&dca->node); 115 list_move(&dca->node, &unregistered_providers);
116 list_add(&dca->node, &unregistered_providers);
117 }
118 116
119 dca_free_domain(domain); 117 dca_free_domain(domain);
120 118
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 1c28816152fa..a572600e44eb 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -82,7 +82,7 @@ config INTEL_IOP_ADMA
82 82
83config DW_DMAC 83config DW_DMAC
84 tristate "Synopsys DesignWare AHB DMA support" 84 tristate "Synopsys DesignWare AHB DMA support"
85 depends on AVR32 85 depends on HAVE_CLK
86 select DMA_ENGINE 86 select DMA_ENGINE
87 default y if CPU_AT32AP7000 87 default y if CPU_AT32AP7000
88 help 88 help
@@ -221,12 +221,20 @@ config IMX_SDMA
221 221
222config IMX_DMA 222config IMX_DMA
223 tristate "i.MX DMA support" 223 tristate "i.MX DMA support"
224 depends on ARCH_MX1 || ARCH_MX21 || MACH_MX27 224 depends on IMX_HAVE_DMA_V1
225 select DMA_ENGINE 225 select DMA_ENGINE
226 help 226 help
227 Support the i.MX DMA engine. This engine is integrated into 227 Support the i.MX DMA engine. This engine is integrated into
228 Freescale i.MX1/21/27 chips. 228 Freescale i.MX1/21/27 chips.
229 229
230config MXS_DMA
231 bool "MXS DMA support"
232 depends on SOC_IMX23 || SOC_IMX28
233 select DMA_ENGINE
234 help
235 Support the MXS DMA engine. This engine including APBH-DMA
236 and APBX-DMA is integrated into Freescale i.MX23/28 chips.
237
230config DMA_ENGINE 238config DMA_ENGINE
231 bool 239 bool
232 240
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 1be065a62f8c..836095ab3c5c 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
19obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ 19obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
20obj-$(CONFIG_IMX_SDMA) += imx-sdma.o 20obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
21obj-$(CONFIG_IMX_DMA) += imx-dma.o 21obj-$(CONFIG_IMX_DMA) += imx-dma.o
22obj-$(CONFIG_MXS_DMA) += mxs-dma.o
22obj-$(CONFIG_TIMB_DMA) += timb_dma.o 23obj-$(CONFIG_TIMB_DMA) += timb_dma.o
23obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o 24obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
24obj-$(CONFIG_PL330_DMA) += pl330.o 25obj-$(CONFIG_PL330_DMA) += pl330.o
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 5589358b684d..e0888cb538d4 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -54,6 +54,11 @@ module_param(pq_sources, uint, S_IRUGO);
54MODULE_PARM_DESC(pq_sources, 54MODULE_PARM_DESC(pq_sources,
55 "Number of p+q source buffers (default: 3)"); 55 "Number of p+q source buffers (default: 3)");
56 56
57static int timeout = 3000;
58module_param(timeout, uint, S_IRUGO);
59MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), \
60 Pass -1 for infinite timeout");
61
57/* 62/*
58 * Initialization patterns. All bytes in the source buffer has bit 7 63 * Initialization patterns. All bytes in the source buffer has bit 7
59 * set, all bytes in the destination buffer has bit 7 cleared. 64 * set, all bytes in the destination buffer has bit 7 cleared.
@@ -285,7 +290,12 @@ static int dmatest_func(void *data)
285 290
286 set_user_nice(current, 10); 291 set_user_nice(current, 10);
287 292
288 flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT; 293 /*
294 * src buffers are freed by the DMAEngine code with dma_unmap_single()
295 * dst buffers are freed by ourselves below
296 */
297 flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
298 | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
289 299
290 while (!kthread_should_stop() 300 while (!kthread_should_stop()
291 && !(iterations && total_tests >= iterations)) { 301 && !(iterations && total_tests >= iterations)) {
@@ -294,7 +304,7 @@ static int dmatest_func(void *data)
294 dma_addr_t dma_srcs[src_cnt]; 304 dma_addr_t dma_srcs[src_cnt];
295 dma_addr_t dma_dsts[dst_cnt]; 305 dma_addr_t dma_dsts[dst_cnt];
296 struct completion cmp; 306 struct completion cmp;
297 unsigned long tmo = msecs_to_jiffies(3000); 307 unsigned long tmo = msecs_to_jiffies(timeout);
298 u8 align = 0; 308 u8 align = 0;
299 309
300 total_tests++; 310 total_tests++;
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index a3991ab0d67e..9c25c7d099e4 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -32,26 +32,30 @@
32 * which does not support descriptor writeback. 32 * which does not support descriptor writeback.
33 */ 33 */
34 34
35/* NOTE: DMS+SMS is system-specific. We should get this information 35#define DWC_DEFAULT_CTLLO(private) ({ \
36 * from the platform code somehow. 36 struct dw_dma_slave *__slave = (private); \
37 */ 37 int dms = __slave ? __slave->dst_master : 0; \
38#define DWC_DEFAULT_CTLLO (DWC_CTLL_DST_MSIZE(0) \ 38 int sms = __slave ? __slave->src_master : 1; \
39 | DWC_CTLL_SRC_MSIZE(0) \ 39 u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \
40 | DWC_CTLL_DMS(0) \ 40 u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \
41 | DWC_CTLL_SMS(1) \ 41 \
42 | DWC_CTLL_LLP_D_EN \ 42 (DWC_CTLL_DST_MSIZE(dmsize) \
43 | DWC_CTLL_LLP_S_EN) 43 | DWC_CTLL_SRC_MSIZE(smsize) \
44 | DWC_CTLL_LLP_D_EN \
45 | DWC_CTLL_LLP_S_EN \
46 | DWC_CTLL_DMS(dms) \
47 | DWC_CTLL_SMS(sms)); \
48 })
44 49
45/* 50/*
46 * This is configuration-dependent and usually a funny size like 4095. 51 * This is configuration-dependent and usually a funny size like 4095.
47 * Let's round it down to the nearest power of two.
48 * 52 *
49 * Note that this is a transfer count, i.e. if we transfer 32-bit 53 * Note that this is a transfer count, i.e. if we transfer 32-bit
50 * words, we can do 8192 bytes per descriptor. 54 * words, we can do 16380 bytes per descriptor.
51 * 55 *
52 * This parameter is also system-specific. 56 * This parameter is also system-specific.
53 */ 57 */
54#define DWC_MAX_COUNT 2048U 58#define DWC_MAX_COUNT 4095U
55 59
56/* 60/*
57 * Number of descriptors to allocate for each channel. This should be 61 * Number of descriptors to allocate for each channel. This should be
@@ -84,11 +88,6 @@ static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
84 return list_entry(dwc->active_list.next, struct dw_desc, desc_node); 88 return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
85} 89}
86 90
87static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc)
88{
89 return list_entry(dwc->queue.next, struct dw_desc, desc_node);
90}
91
92static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) 91static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
93{ 92{
94 struct dw_desc *desc, *_desc; 93 struct dw_desc *desc, *_desc;
@@ -201,6 +200,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
201 dma_async_tx_callback callback; 200 dma_async_tx_callback callback;
202 void *param; 201 void *param;
203 struct dma_async_tx_descriptor *txd = &desc->txd; 202 struct dma_async_tx_descriptor *txd = &desc->txd;
203 struct dw_desc *child;
204 204
205 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); 205 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
206 206
@@ -209,6 +209,12 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
209 param = txd->callback_param; 209 param = txd->callback_param;
210 210
211 dwc_sync_desc_for_cpu(dwc, desc); 211 dwc_sync_desc_for_cpu(dwc, desc);
212
213 /* async_tx_ack */
214 list_for_each_entry(child, &desc->tx_list, desc_node)
215 async_tx_ack(&child->txd);
216 async_tx_ack(&desc->txd);
217
212 list_splice_init(&desc->tx_list, &dwc->free_list); 218 list_splice_init(&desc->tx_list, &dwc->free_list);
213 list_move(&desc->desc_node, &dwc->free_list); 219 list_move(&desc->desc_node, &dwc->free_list);
214 220
@@ -259,10 +265,11 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
259 * Submit queued descriptors ASAP, i.e. before we go through 265 * Submit queued descriptors ASAP, i.e. before we go through
260 * the completed ones. 266 * the completed ones.
261 */ 267 */
262 if (!list_empty(&dwc->queue))
263 dwc_dostart(dwc, dwc_first_queued(dwc));
264 list_splice_init(&dwc->active_list, &list); 268 list_splice_init(&dwc->active_list, &list);
265 list_splice_init(&dwc->queue, &dwc->active_list); 269 if (!list_empty(&dwc->queue)) {
270 list_move(dwc->queue.next, &dwc->active_list);
271 dwc_dostart(dwc, dwc_first_active(dwc));
272 }
266 273
267 list_for_each_entry_safe(desc, _desc, &list, desc_node) 274 list_for_each_entry_safe(desc, _desc, &list, desc_node)
268 dwc_descriptor_complete(dwc, desc); 275 dwc_descriptor_complete(dwc, desc);
@@ -291,6 +298,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
291 return; 298 return;
292 } 299 }
293 300
301 if (list_empty(&dwc->active_list))
302 return;
303
294 dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); 304 dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
295 305
296 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { 306 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
@@ -319,8 +329,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
319 cpu_relax(); 329 cpu_relax();
320 330
321 if (!list_empty(&dwc->queue)) { 331 if (!list_empty(&dwc->queue)) {
322 dwc_dostart(dwc, dwc_first_queued(dwc)); 332 list_move(dwc->queue.next, &dwc->active_list);
323 list_splice_init(&dwc->queue, &dwc->active_list); 333 dwc_dostart(dwc, dwc_first_active(dwc));
324 } 334 }
325} 335}
326 336
@@ -346,7 +356,7 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
346 */ 356 */
347 bad_desc = dwc_first_active(dwc); 357 bad_desc = dwc_first_active(dwc);
348 list_del_init(&bad_desc->desc_node); 358 list_del_init(&bad_desc->desc_node);
349 list_splice_init(&dwc->queue, dwc->active_list.prev); 359 list_move(dwc->queue.next, dwc->active_list.prev);
350 360
351 /* Clear the error flag and try to restart the controller */ 361 /* Clear the error flag and try to restart the controller */
352 dma_writel(dw, CLEAR.ERROR, dwc->mask); 362 dma_writel(dw, CLEAR.ERROR, dwc->mask);
@@ -541,8 +551,8 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
541 if (list_empty(&dwc->active_list)) { 551 if (list_empty(&dwc->active_list)) {
542 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", 552 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
543 desc->txd.cookie); 553 desc->txd.cookie);
544 dwc_dostart(dwc, desc);
545 list_add_tail(&desc->desc_node, &dwc->active_list); 554 list_add_tail(&desc->desc_node, &dwc->active_list);
555 dwc_dostart(dwc, dwc_first_active(dwc));
546 } else { 556 } else {
547 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", 557 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
548 desc->txd.cookie); 558 desc->txd.cookie);
@@ -581,14 +591,16 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
581 * We can be a lot more clever here, but this should take care 591 * We can be a lot more clever here, but this should take care
582 * of the most common optimization. 592 * of the most common optimization.
583 */ 593 */
584 if (!((src | dest | len) & 3)) 594 if (!((src | dest | len) & 7))
595 src_width = dst_width = 3;
596 else if (!((src | dest | len) & 3))
585 src_width = dst_width = 2; 597 src_width = dst_width = 2;
586 else if (!((src | dest | len) & 1)) 598 else if (!((src | dest | len) & 1))
587 src_width = dst_width = 1; 599 src_width = dst_width = 1;
588 else 600 else
589 src_width = dst_width = 0; 601 src_width = dst_width = 0;
590 602
591 ctllo = DWC_DEFAULT_CTLLO 603 ctllo = DWC_DEFAULT_CTLLO(chan->private)
592 | DWC_CTLL_DST_WIDTH(dst_width) 604 | DWC_CTLL_DST_WIDTH(dst_width)
593 | DWC_CTLL_SRC_WIDTH(src_width) 605 | DWC_CTLL_SRC_WIDTH(src_width)
594 | DWC_CTLL_DST_INC 606 | DWC_CTLL_DST_INC
@@ -669,11 +681,11 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
669 681
670 switch (direction) { 682 switch (direction) {
671 case DMA_TO_DEVICE: 683 case DMA_TO_DEVICE:
672 ctllo = (DWC_DEFAULT_CTLLO 684 ctllo = (DWC_DEFAULT_CTLLO(chan->private)
673 | DWC_CTLL_DST_WIDTH(reg_width) 685 | DWC_CTLL_DST_WIDTH(reg_width)
674 | DWC_CTLL_DST_FIX 686 | DWC_CTLL_DST_FIX
675 | DWC_CTLL_SRC_INC 687 | DWC_CTLL_SRC_INC
676 | DWC_CTLL_FC_M2P); 688 | DWC_CTLL_FC(dws->fc));
677 reg = dws->tx_reg; 689 reg = dws->tx_reg;
678 for_each_sg(sgl, sg, sg_len, i) { 690 for_each_sg(sgl, sg, sg_len, i) {
679 struct dw_desc *desc; 691 struct dw_desc *desc;
@@ -714,11 +726,11 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
714 } 726 }
715 break; 727 break;
716 case DMA_FROM_DEVICE: 728 case DMA_FROM_DEVICE:
717 ctllo = (DWC_DEFAULT_CTLLO 729 ctllo = (DWC_DEFAULT_CTLLO(chan->private)
718 | DWC_CTLL_SRC_WIDTH(reg_width) 730 | DWC_CTLL_SRC_WIDTH(reg_width)
719 | DWC_CTLL_DST_INC 731 | DWC_CTLL_DST_INC
720 | DWC_CTLL_SRC_FIX 732 | DWC_CTLL_SRC_FIX
721 | DWC_CTLL_FC_P2M); 733 | DWC_CTLL_FC(dws->fc));
722 734
723 reg = dws->rx_reg; 735 reg = dws->rx_reg;
724 for_each_sg(sgl, sg, sg_len, i) { 736 for_each_sg(sgl, sg, sg_len, i) {
@@ -834,7 +846,9 @@ dwc_tx_status(struct dma_chan *chan,
834 846
835 ret = dma_async_is_complete(cookie, last_complete, last_used); 847 ret = dma_async_is_complete(cookie, last_complete, last_used);
836 if (ret != DMA_SUCCESS) { 848 if (ret != DMA_SUCCESS) {
849 spin_lock_bh(&dwc->lock);
837 dwc_scan_descriptors(to_dw_dma(chan->device), dwc); 850 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
851 spin_unlock_bh(&dwc->lock);
838 852
839 last_complete = dwc->completed; 853 last_complete = dwc->completed;
840 last_used = chan->cookie; 854 last_used = chan->cookie;
@@ -889,8 +903,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
889 BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); 903 BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
890 904
891 cfghi = dws->cfg_hi; 905 cfghi = dws->cfg_hi;
892 cfglo = dws->cfg_lo; 906 cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
893 } 907 }
908
909 cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);
910
894 channel_writel(dwc, CFG_LO, cfglo); 911 channel_writel(dwc, CFG_LO, cfglo);
895 channel_writel(dwc, CFG_HI, cfghi); 912 channel_writel(dwc, CFG_HI, cfghi);
896 913
@@ -1126,23 +1143,23 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1126 case DMA_TO_DEVICE: 1143 case DMA_TO_DEVICE:
1127 desc->lli.dar = dws->tx_reg; 1144 desc->lli.dar = dws->tx_reg;
1128 desc->lli.sar = buf_addr + (period_len * i); 1145 desc->lli.sar = buf_addr + (period_len * i);
1129 desc->lli.ctllo = (DWC_DEFAULT_CTLLO 1146 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
1130 | DWC_CTLL_DST_WIDTH(reg_width) 1147 | DWC_CTLL_DST_WIDTH(reg_width)
1131 | DWC_CTLL_SRC_WIDTH(reg_width) 1148 | DWC_CTLL_SRC_WIDTH(reg_width)
1132 | DWC_CTLL_DST_FIX 1149 | DWC_CTLL_DST_FIX
1133 | DWC_CTLL_SRC_INC 1150 | DWC_CTLL_SRC_INC
1134 | DWC_CTLL_FC_M2P 1151 | DWC_CTLL_FC(dws->fc)
1135 | DWC_CTLL_INT_EN); 1152 | DWC_CTLL_INT_EN);
1136 break; 1153 break;
1137 case DMA_FROM_DEVICE: 1154 case DMA_FROM_DEVICE:
1138 desc->lli.dar = buf_addr + (period_len * i); 1155 desc->lli.dar = buf_addr + (period_len * i);
1139 desc->lli.sar = dws->rx_reg; 1156 desc->lli.sar = dws->rx_reg;
1140 desc->lli.ctllo = (DWC_DEFAULT_CTLLO 1157 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
1141 | DWC_CTLL_SRC_WIDTH(reg_width) 1158 | DWC_CTLL_SRC_WIDTH(reg_width)
1142 | DWC_CTLL_DST_WIDTH(reg_width) 1159 | DWC_CTLL_DST_WIDTH(reg_width)
1143 | DWC_CTLL_DST_INC 1160 | DWC_CTLL_DST_INC
1144 | DWC_CTLL_SRC_FIX 1161 | DWC_CTLL_SRC_FIX
1145 | DWC_CTLL_FC_P2M 1162 | DWC_CTLL_FC(dws->fc)
1146 | DWC_CTLL_INT_EN); 1163 | DWC_CTLL_INT_EN);
1147 break; 1164 break;
1148 default: 1165 default:
@@ -1307,7 +1324,17 @@ static int __init dw_probe(struct platform_device *pdev)
1307 dwc->chan.device = &dw->dma; 1324 dwc->chan.device = &dw->dma;
1308 dwc->chan.cookie = dwc->completed = 1; 1325 dwc->chan.cookie = dwc->completed = 1;
1309 dwc->chan.chan_id = i; 1326 dwc->chan.chan_id = i;
1310 list_add_tail(&dwc->chan.device_node, &dw->dma.channels); 1327 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
1328 list_add_tail(&dwc->chan.device_node,
1329 &dw->dma.channels);
1330 else
1331 list_add(&dwc->chan.device_node, &dw->dma.channels);
1332
1333 /* 7 is highest priority & 0 is lowest. */
1334 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
1335 dwc->priority = 7 - i;
1336 else
1337 dwc->priority = i;
1311 1338
1312 dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; 1339 dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
1313 spin_lock_init(&dwc->lock); 1340 spin_lock_init(&dwc->lock);
@@ -1335,6 +1362,8 @@ static int __init dw_probe(struct platform_device *pdev)
1335 1362
1336 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); 1363 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1337 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); 1364 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1365 if (pdata->is_private)
1366 dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
1338 dw->dma.dev = &pdev->dev; 1367 dw->dma.dev = &pdev->dev;
1339 dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; 1368 dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
1340 dw->dma.device_free_chan_resources = dwc_free_chan_resources; 1369 dw->dma.device_free_chan_resources = dwc_free_chan_resources;
@@ -1447,7 +1476,7 @@ static int __init dw_init(void)
1447{ 1476{
1448 return platform_driver_probe(&dw_driver, dw_probe); 1477 return platform_driver_probe(&dw_driver, dw_probe);
1449} 1478}
1450module_init(dw_init); 1479subsys_initcall(dw_init);
1451 1480
1452static void __exit dw_exit(void) 1481static void __exit dw_exit(void)
1453{ 1482{
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index d9a939f67f46..720f821527f8 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -86,6 +86,7 @@ struct dw_dma_regs {
86#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14) 86#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14)
87#define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */ 87#define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */
88#define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */ 88#define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */
89#define DWC_CTLL_FC(n) ((n) << 20)
89#define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */ 90#define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */
90#define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */ 91#define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */
91#define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */ 92#define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */
@@ -101,6 +102,8 @@ struct dw_dma_regs {
101#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff 102#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff
102 103
103/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */ 104/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
105#define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */
106#define DWC_CFGL_CH_PRIOR(x) ((x) << 5) /* priority */
104#define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */ 107#define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */
105#define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */ 108#define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */
106#define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */ 109#define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */
@@ -134,6 +137,7 @@ struct dw_dma_chan {
134 struct dma_chan chan; 137 struct dma_chan chan;
135 void __iomem *ch_regs; 138 void __iomem *ch_regs;
136 u8 mask; 139 u8 mask;
140 u8 priority;
137 141
138 spinlock_t lock; 142 spinlock_t lock;
139 143
@@ -155,9 +159,9 @@ __dwc_regs(struct dw_dma_chan *dwc)
155} 159}
156 160
157#define channel_readl(dwc, name) \ 161#define channel_readl(dwc, name) \
158 __raw_readl(&(__dwc_regs(dwc)->name)) 162 readl(&(__dwc_regs(dwc)->name))
159#define channel_writel(dwc, name, val) \ 163#define channel_writel(dwc, name, val) \
160 __raw_writel((val), &(__dwc_regs(dwc)->name)) 164 writel((val), &(__dwc_regs(dwc)->name))
161 165
162static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) 166static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
163{ 167{
@@ -181,9 +185,9 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
181} 185}
182 186
183#define dma_readl(dw, name) \ 187#define dma_readl(dw, name) \
184 __raw_readl(&(__dw_regs(dw)->name)) 188 readl(&(__dw_regs(dw)->name))
185#define dma_writel(dw, name, val) \ 189#define dma_writel(dw, name, val) \
186 __raw_writel((val), &(__dw_regs(dw)->name)) 190 writel((val), &(__dw_regs(dw)->name))
187 191
188#define channel_set_bit(dw, reg, mask) \ 192#define channel_set_bit(dw, reg, mask) \
189 dma_writel(dw, reg, ((mask) << 8) | (mask)) 193 dma_writel(dw, reg, ((mask) << 8) | (mask))
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index e3854a8f0de0..6b396759e7f5 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -37,35 +37,16 @@
37 37
38#include "fsldma.h" 38#include "fsldma.h"
39 39
40static const char msg_ld_oom[] = "No free memory for link descriptor\n"; 40#define chan_dbg(chan, fmt, arg...) \
41 dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
42#define chan_err(chan, fmt, arg...) \
43 dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
41 44
42static void dma_init(struct fsldma_chan *chan) 45static const char msg_ld_oom[] = "No free memory for link descriptor";
43{
44 /* Reset the channel */
45 DMA_OUT(chan, &chan->regs->mr, 0, 32);
46 46
47 switch (chan->feature & FSL_DMA_IP_MASK) { 47/*
48 case FSL_DMA_IP_85XX: 48 * Register Helpers
49 /* Set the channel to below modes: 49 */
50 * EIE - Error interrupt enable
51 * EOSIE - End of segments interrupt enable (basic mode)
52 * EOLNIE - End of links interrupt enable
53 * BWC - Bandwidth sharing among channels
54 */
55 DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
56 | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE
57 | FSL_DMA_MR_EOSIE, 32);
58 break;
59 case FSL_DMA_IP_83XX:
60 /* Set the channel to below modes:
61 * EOTIE - End-of-transfer interrupt enable
62 * PRC_RM - PCI read multiple
63 */
64 DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
65 | FSL_DMA_MR_PRC_RM, 32);
66 break;
67 }
68}
69 50
70static void set_sr(struct fsldma_chan *chan, u32 val) 51static void set_sr(struct fsldma_chan *chan, u32 val)
71{ 52{
@@ -77,14 +58,38 @@ static u32 get_sr(struct fsldma_chan *chan)
77 return DMA_IN(chan, &chan->regs->sr, 32); 58 return DMA_IN(chan, &chan->regs->sr, 32);
78} 59}
79 60
61static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
62{
63 DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
64}
65
66static dma_addr_t get_cdar(struct fsldma_chan *chan)
67{
68 return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
69}
70
71static u32 get_bcr(struct fsldma_chan *chan)
72{
73 return DMA_IN(chan, &chan->regs->bcr, 32);
74}
75
76/*
77 * Descriptor Helpers
78 */
79
80static void set_desc_cnt(struct fsldma_chan *chan, 80static void set_desc_cnt(struct fsldma_chan *chan,
81 struct fsl_dma_ld_hw *hw, u32 count) 81 struct fsl_dma_ld_hw *hw, u32 count)
82{ 82{
83 hw->count = CPU_TO_DMA(chan, count, 32); 83 hw->count = CPU_TO_DMA(chan, count, 32);
84} 84}
85 85
86static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
87{
88 return DMA_TO_CPU(chan, desc->hw.count, 32);
89}
90
86static void set_desc_src(struct fsldma_chan *chan, 91static void set_desc_src(struct fsldma_chan *chan,
87 struct fsl_dma_ld_hw *hw, dma_addr_t src) 92 struct fsl_dma_ld_hw *hw, dma_addr_t src)
88{ 93{
89 u64 snoop_bits; 94 u64 snoop_bits;
90 95
@@ -93,8 +98,18 @@ static void set_desc_src(struct fsldma_chan *chan,
93 hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); 98 hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
94} 99}
95 100
101static dma_addr_t get_desc_src(struct fsldma_chan *chan,
102 struct fsl_desc_sw *desc)
103{
104 u64 snoop_bits;
105
106 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
107 ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
108 return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
109}
110
96static void set_desc_dst(struct fsldma_chan *chan, 111static void set_desc_dst(struct fsldma_chan *chan,
97 struct fsl_dma_ld_hw *hw, dma_addr_t dst) 112 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
98{ 113{
99 u64 snoop_bits; 114 u64 snoop_bits;
100 115
@@ -103,8 +118,18 @@ static void set_desc_dst(struct fsldma_chan *chan,
103 hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); 118 hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
104} 119}
105 120
121static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
122 struct fsl_desc_sw *desc)
123{
124 u64 snoop_bits;
125
126 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
127 ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
128 return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
129}
130
106static void set_desc_next(struct fsldma_chan *chan, 131static void set_desc_next(struct fsldma_chan *chan,
107 struct fsl_dma_ld_hw *hw, dma_addr_t next) 132 struct fsl_dma_ld_hw *hw, dma_addr_t next)
108{ 133{
109 u64 snoop_bits; 134 u64 snoop_bits;
110 135
@@ -113,24 +138,46 @@ static void set_desc_next(struct fsldma_chan *chan,
113 hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64); 138 hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
114} 139}
115 140
116static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) 141static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
117{ 142{
118 DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); 143 u64 snoop_bits;
119}
120 144
121static dma_addr_t get_cdar(struct fsldma_chan *chan) 145 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
122{ 146 ? FSL_DMA_SNEN : 0;
123 return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
124}
125 147
126static dma_addr_t get_ndar(struct fsldma_chan *chan) 148 desc->hw.next_ln_addr = CPU_TO_DMA(chan,
127{ 149 DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
128 return DMA_IN(chan, &chan->regs->ndar, 64); 150 | snoop_bits, 64);
129} 151}
130 152
131static u32 get_bcr(struct fsldma_chan *chan) 153/*
154 * DMA Engine Hardware Control Helpers
155 */
156
157static void dma_init(struct fsldma_chan *chan)
132{ 158{
133 return DMA_IN(chan, &chan->regs->bcr, 32); 159 /* Reset the channel */
160 DMA_OUT(chan, &chan->regs->mr, 0, 32);
161
162 switch (chan->feature & FSL_DMA_IP_MASK) {
163 case FSL_DMA_IP_85XX:
164 /* Set the channel to below modes:
165 * EIE - Error interrupt enable
166 * EOLNIE - End of links interrupt enable
167 * BWC - Bandwidth sharing among channels
168 */
169 DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
170 | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
171 break;
172 case FSL_DMA_IP_83XX:
173 /* Set the channel to below modes:
174 * EOTIE - End-of-transfer interrupt enable
175 * PRC_RM - PCI read multiple
176 */
177 DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
178 | FSL_DMA_MR_PRC_RM, 32);
179 break;
180 }
134} 181}
135 182
136static int dma_is_idle(struct fsldma_chan *chan) 183static int dma_is_idle(struct fsldma_chan *chan)
@@ -139,25 +186,32 @@ static int dma_is_idle(struct fsldma_chan *chan)
139 return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); 186 return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
140} 187}
141 188
189/*
190 * Start the DMA controller
191 *
192 * Preconditions:
193 * - the CDAR register must point to the start descriptor
194 * - the MRn[CS] bit must be cleared
195 */
142static void dma_start(struct fsldma_chan *chan) 196static void dma_start(struct fsldma_chan *chan)
143{ 197{
144 u32 mode; 198 u32 mode;
145 199
146 mode = DMA_IN(chan, &chan->regs->mr, 32); 200 mode = DMA_IN(chan, &chan->regs->mr, 32);
147 201
148 if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { 202 if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
149 if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { 203 DMA_OUT(chan, &chan->regs->bcr, 0, 32);
150 DMA_OUT(chan, &chan->regs->bcr, 0, 32); 204 mode |= FSL_DMA_MR_EMP_EN;
151 mode |= FSL_DMA_MR_EMP_EN; 205 } else {
152 } else { 206 mode &= ~FSL_DMA_MR_EMP_EN;
153 mode &= ~FSL_DMA_MR_EMP_EN;
154 }
155 } 207 }
156 208
157 if (chan->feature & FSL_DMA_CHAN_START_EXT) 209 if (chan->feature & FSL_DMA_CHAN_START_EXT) {
158 mode |= FSL_DMA_MR_EMS_EN; 210 mode |= FSL_DMA_MR_EMS_EN;
159 else 211 } else {
212 mode &= ~FSL_DMA_MR_EMS_EN;
160 mode |= FSL_DMA_MR_CS; 213 mode |= FSL_DMA_MR_CS;
214 }
161 215
162 DMA_OUT(chan, &chan->regs->mr, mode, 32); 216 DMA_OUT(chan, &chan->regs->mr, mode, 32);
163} 217}
@@ -167,13 +221,26 @@ static void dma_halt(struct fsldma_chan *chan)
167 u32 mode; 221 u32 mode;
168 int i; 222 int i;
169 223
224 /* read the mode register */
170 mode = DMA_IN(chan, &chan->regs->mr, 32); 225 mode = DMA_IN(chan, &chan->regs->mr, 32);
171 mode |= FSL_DMA_MR_CA;
172 DMA_OUT(chan, &chan->regs->mr, mode, 32);
173 226
174 mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA); 227 /*
228 * The 85xx controller supports channel abort, which will stop
229 * the current transfer. On 83xx, this bit is the transfer error
230 * mask bit, which should not be changed.
231 */
232 if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
233 mode |= FSL_DMA_MR_CA;
234 DMA_OUT(chan, &chan->regs->mr, mode, 32);
235
236 mode &= ~FSL_DMA_MR_CA;
237 }
238
239 /* stop the DMA controller */
240 mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
175 DMA_OUT(chan, &chan->regs->mr, mode, 32); 241 DMA_OUT(chan, &chan->regs->mr, mode, 32);
176 242
243 /* wait for the DMA controller to become idle */
177 for (i = 0; i < 100; i++) { 244 for (i = 0; i < 100; i++) {
178 if (dma_is_idle(chan)) 245 if (dma_is_idle(chan))
179 return; 246 return;
@@ -182,20 +249,7 @@ static void dma_halt(struct fsldma_chan *chan)
182 } 249 }
183 250
184 if (!dma_is_idle(chan)) 251 if (!dma_is_idle(chan))
185 dev_err(chan->dev, "DMA halt timeout!\n"); 252 chan_err(chan, "DMA halt timeout!\n");
186}
187
188static void set_ld_eol(struct fsldma_chan *chan,
189 struct fsl_desc_sw *desc)
190{
191 u64 snoop_bits;
192
193 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
194 ? FSL_DMA_SNEN : 0;
195
196 desc->hw.next_ln_addr = CPU_TO_DMA(chan,
197 DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
198 | snoop_bits, 64);
199} 253}
200 254
201/** 255/**
@@ -321,8 +375,7 @@ static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
321 chan->feature &= ~FSL_DMA_CHAN_START_EXT; 375 chan->feature &= ~FSL_DMA_CHAN_START_EXT;
322} 376}
323 377
324static void append_ld_queue(struct fsldma_chan *chan, 378static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
325 struct fsl_desc_sw *desc)
326{ 379{
327 struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev); 380 struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);
328 381
@@ -363,8 +416,8 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
363 cookie = chan->common.cookie; 416 cookie = chan->common.cookie;
364 list_for_each_entry(child, &desc->tx_list, node) { 417 list_for_each_entry(child, &desc->tx_list, node) {
365 cookie++; 418 cookie++;
366 if (cookie < 0) 419 if (cookie < DMA_MIN_COOKIE)
367 cookie = 1; 420 cookie = DMA_MIN_COOKIE;
368 421
369 child->async_tx.cookie = cookie; 422 child->async_tx.cookie = cookie;
370 } 423 }
@@ -385,15 +438,14 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
385 * 438 *
386 * Return - The descriptor allocated. NULL for failed. 439 * Return - The descriptor allocated. NULL for failed.
387 */ 440 */
388static struct fsl_desc_sw *fsl_dma_alloc_descriptor( 441static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
389 struct fsldma_chan *chan)
390{ 442{
391 struct fsl_desc_sw *desc; 443 struct fsl_desc_sw *desc;
392 dma_addr_t pdesc; 444 dma_addr_t pdesc;
393 445
394 desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); 446 desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
395 if (!desc) { 447 if (!desc) {
396 dev_dbg(chan->dev, "out of memory for link desc\n"); 448 chan_dbg(chan, "out of memory for link descriptor\n");
397 return NULL; 449 return NULL;
398 } 450 }
399 451
@@ -403,10 +455,13 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
403 desc->async_tx.tx_submit = fsl_dma_tx_submit; 455 desc->async_tx.tx_submit = fsl_dma_tx_submit;
404 desc->async_tx.phys = pdesc; 456 desc->async_tx.phys = pdesc;
405 457
458#ifdef FSL_DMA_LD_DEBUG
459 chan_dbg(chan, "LD %p allocated\n", desc);
460#endif
461
406 return desc; 462 return desc;
407} 463}
408 464
409
410/** 465/**
411 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. 466 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
412 * @chan : Freescale DMA channel 467 * @chan : Freescale DMA channel
@@ -427,13 +482,11 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
427 * We need the descriptor to be aligned to 32bytes 482 * We need the descriptor to be aligned to 32bytes
428 * for meeting FSL DMA specification requirement. 483 * for meeting FSL DMA specification requirement.
429 */ 484 */
430 chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", 485 chan->desc_pool = dma_pool_create(chan->name, chan->dev,
431 chan->dev,
432 sizeof(struct fsl_desc_sw), 486 sizeof(struct fsl_desc_sw),
433 __alignof__(struct fsl_desc_sw), 0); 487 __alignof__(struct fsl_desc_sw), 0);
434 if (!chan->desc_pool) { 488 if (!chan->desc_pool) {
435 dev_err(chan->dev, "unable to allocate channel %d " 489 chan_err(chan, "unable to allocate descriptor pool\n");
436 "descriptor pool\n", chan->id);
437 return -ENOMEM; 490 return -ENOMEM;
438 } 491 }
439 492
@@ -455,6 +508,9 @@ static void fsldma_free_desc_list(struct fsldma_chan *chan,
455 508
456 list_for_each_entry_safe(desc, _desc, list, node) { 509 list_for_each_entry_safe(desc, _desc, list, node) {
457 list_del(&desc->node); 510 list_del(&desc->node);
511#ifdef FSL_DMA_LD_DEBUG
512 chan_dbg(chan, "LD %p free\n", desc);
513#endif
458 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); 514 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
459 } 515 }
460} 516}
@@ -466,6 +522,9 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
466 522
467 list_for_each_entry_safe_reverse(desc, _desc, list, node) { 523 list_for_each_entry_safe_reverse(desc, _desc, list, node) {
468 list_del(&desc->node); 524 list_del(&desc->node);
525#ifdef FSL_DMA_LD_DEBUG
526 chan_dbg(chan, "LD %p free\n", desc);
527#endif
469 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); 528 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
470 } 529 }
471} 530}
@@ -479,7 +538,7 @@ static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
479 struct fsldma_chan *chan = to_fsl_chan(dchan); 538 struct fsldma_chan *chan = to_fsl_chan(dchan);
480 unsigned long flags; 539 unsigned long flags;
481 540
482 dev_dbg(chan->dev, "Free all channel resources.\n"); 541 chan_dbg(chan, "free all channel resources\n");
483 spin_lock_irqsave(&chan->desc_lock, flags); 542 spin_lock_irqsave(&chan->desc_lock, flags);
484 fsldma_free_desc_list(chan, &chan->ld_pending); 543 fsldma_free_desc_list(chan, &chan->ld_pending);
485 fsldma_free_desc_list(chan, &chan->ld_running); 544 fsldma_free_desc_list(chan, &chan->ld_running);
@@ -502,7 +561,7 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
502 561
503 new = fsl_dma_alloc_descriptor(chan); 562 new = fsl_dma_alloc_descriptor(chan);
504 if (!new) { 563 if (!new) {
505 dev_err(chan->dev, msg_ld_oom); 564 chan_err(chan, "%s\n", msg_ld_oom);
506 return NULL; 565 return NULL;
507 } 566 }
508 567
@@ -512,14 +571,15 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
512 /* Insert the link descriptor to the LD ring */ 571 /* Insert the link descriptor to the LD ring */
513 list_add_tail(&new->node, &new->tx_list); 572 list_add_tail(&new->node, &new->tx_list);
514 573
515 /* Set End-of-link to the last link descriptor of new list*/ 574 /* Set End-of-link to the last link descriptor of new list */
516 set_ld_eol(chan, new); 575 set_ld_eol(chan, new);
517 576
518 return &new->async_tx; 577 return &new->async_tx;
519} 578}
520 579
521static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( 580static struct dma_async_tx_descriptor *
522 struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src, 581fsl_dma_prep_memcpy(struct dma_chan *dchan,
582 dma_addr_t dma_dst, dma_addr_t dma_src,
523 size_t len, unsigned long flags) 583 size_t len, unsigned long flags)
524{ 584{
525 struct fsldma_chan *chan; 585 struct fsldma_chan *chan;
@@ -539,12 +599,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
539 /* Allocate the link descriptor from DMA pool */ 599 /* Allocate the link descriptor from DMA pool */
540 new = fsl_dma_alloc_descriptor(chan); 600 new = fsl_dma_alloc_descriptor(chan);
541 if (!new) { 601 if (!new) {
542 dev_err(chan->dev, msg_ld_oom); 602 chan_err(chan, "%s\n", msg_ld_oom);
543 goto fail; 603 goto fail;
544 } 604 }
545#ifdef FSL_DMA_LD_DEBUG
546 dev_dbg(chan->dev, "new link desc alloc %p\n", new);
547#endif
548 605
549 copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); 606 copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
550 607
@@ -572,7 +629,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
572 new->async_tx.flags = flags; /* client is in control of this ack */ 629 new->async_tx.flags = flags; /* client is in control of this ack */
573 new->async_tx.cookie = -EBUSY; 630 new->async_tx.cookie = -EBUSY;
574 631
575 /* Set End-of-link to the last link descriptor of new list*/ 632 /* Set End-of-link to the last link descriptor of new list */
576 set_ld_eol(chan, new); 633 set_ld_eol(chan, new);
577 634
578 return &first->async_tx; 635 return &first->async_tx;
@@ -627,12 +684,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
627 /* allocate and populate the descriptor */ 684 /* allocate and populate the descriptor */
628 new = fsl_dma_alloc_descriptor(chan); 685 new = fsl_dma_alloc_descriptor(chan);
629 if (!new) { 686 if (!new) {
630 dev_err(chan->dev, msg_ld_oom); 687 chan_err(chan, "%s\n", msg_ld_oom);
631 goto fail; 688 goto fail;
632 } 689 }
633#ifdef FSL_DMA_LD_DEBUG
634 dev_dbg(chan->dev, "new link desc alloc %p\n", new);
635#endif
636 690
637 set_desc_cnt(chan, &new->hw, len); 691 set_desc_cnt(chan, &new->hw, len);
638 set_desc_src(chan, &new->hw, src); 692 set_desc_src(chan, &new->hw, src);
@@ -744,14 +798,15 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
744 798
745 switch (cmd) { 799 switch (cmd) {
746 case DMA_TERMINATE_ALL: 800 case DMA_TERMINATE_ALL:
801 spin_lock_irqsave(&chan->desc_lock, flags);
802
747 /* Halt the DMA engine */ 803 /* Halt the DMA engine */
748 dma_halt(chan); 804 dma_halt(chan);
749 805
750 spin_lock_irqsave(&chan->desc_lock, flags);
751
752 /* Remove and free all of the descriptors in the LD queue */ 806 /* Remove and free all of the descriptors in the LD queue */
753 fsldma_free_desc_list(chan, &chan->ld_pending); 807 fsldma_free_desc_list(chan, &chan->ld_pending);
754 fsldma_free_desc_list(chan, &chan->ld_running); 808 fsldma_free_desc_list(chan, &chan->ld_running);
809 chan->idle = true;
755 810
756 spin_unlock_irqrestore(&chan->desc_lock, flags); 811 spin_unlock_irqrestore(&chan->desc_lock, flags);
757 return 0; 812 return 0;
@@ -789,140 +844,87 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
789} 844}
790 845
791/** 846/**
792 * fsl_dma_update_completed_cookie - Update the completed cookie. 847 * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
793 * @chan : Freescale DMA channel
794 *
795 * CONTEXT: hardirq
796 */
797static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
798{
799 struct fsl_desc_sw *desc;
800 unsigned long flags;
801 dma_cookie_t cookie;
802
803 spin_lock_irqsave(&chan->desc_lock, flags);
804
805 if (list_empty(&chan->ld_running)) {
806 dev_dbg(chan->dev, "no running descriptors\n");
807 goto out_unlock;
808 }
809
810 /* Get the last descriptor, update the cookie to that */
811 desc = to_fsl_desc(chan->ld_running.prev);
812 if (dma_is_idle(chan))
813 cookie = desc->async_tx.cookie;
814 else {
815 cookie = desc->async_tx.cookie - 1;
816 if (unlikely(cookie < DMA_MIN_COOKIE))
817 cookie = DMA_MAX_COOKIE;
818 }
819
820 chan->completed_cookie = cookie;
821
822out_unlock:
823 spin_unlock_irqrestore(&chan->desc_lock, flags);
824}
825
826/**
827 * fsldma_desc_status - Check the status of a descriptor
828 * @chan: Freescale DMA channel 848 * @chan: Freescale DMA channel
829 * @desc: DMA SW descriptor 849 * @desc: descriptor to cleanup and free
830 *
831 * This function will return the status of the given descriptor
832 */
833static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
834 struct fsl_desc_sw *desc)
835{
836 return dma_async_is_complete(desc->async_tx.cookie,
837 chan->completed_cookie,
838 chan->common.cookie);
839}
840
841/**
842 * fsl_chan_ld_cleanup - Clean up link descriptors
843 * @chan : Freescale DMA channel
844 * 850 *
845 * This function clean up the ld_queue of DMA channel. 851 * This function is used on a descriptor which has been executed by the DMA
852 * controller. It will run any callbacks, submit any dependencies, and then
853 * free the descriptor.
846 */ 854 */
847static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) 855static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
856 struct fsl_desc_sw *desc)
848{ 857{
849 struct fsl_desc_sw *desc, *_desc; 858 struct dma_async_tx_descriptor *txd = &desc->async_tx;
850 unsigned long flags; 859 struct device *dev = chan->common.device->dev;
851 860 dma_addr_t src = get_desc_src(chan, desc);
852 spin_lock_irqsave(&chan->desc_lock, flags); 861 dma_addr_t dst = get_desc_dst(chan, desc);
853 862 u32 len = get_desc_cnt(chan, desc);
854 dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie); 863
855 list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) { 864 /* Run the link descriptor callback function */
856 dma_async_tx_callback callback; 865 if (txd->callback) {
857 void *callback_param; 866#ifdef FSL_DMA_LD_DEBUG
858 867 chan_dbg(chan, "LD %p callback\n", desc);
859 if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS) 868#endif
860 break; 869 txd->callback(txd->callback_param);
870 }
861 871
862 /* Remove from the list of running transactions */ 872 /* Run any dependencies */
863 list_del(&desc->node); 873 dma_run_dependencies(txd);
864 874
865 /* Run the link descriptor callback function */ 875 /* Unmap the dst buffer, if requested */
866 callback = desc->async_tx.callback; 876 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
867 callback_param = desc->async_tx.callback_param; 877 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
868 if (callback) { 878 dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
869 spin_unlock_irqrestore(&chan->desc_lock, flags); 879 else
870 dev_dbg(chan->dev, "LD %p callback\n", desc); 880 dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
871 callback(callback_param); 881 }
872 spin_lock_irqsave(&chan->desc_lock, flags);
873 }
874 882
875 /* Run any dependencies, then free the descriptor */ 883 /* Unmap the src buffer, if requested */
876 dma_run_dependencies(&desc->async_tx); 884 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
877 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); 885 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
886 dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
887 else
888 dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
878 } 889 }
879 890
880 spin_unlock_irqrestore(&chan->desc_lock, flags); 891#ifdef FSL_DMA_LD_DEBUG
892 chan_dbg(chan, "LD %p free\n", desc);
893#endif
894 dma_pool_free(chan->desc_pool, desc, txd->phys);
881} 895}
882 896
883/** 897/**
884 * fsl_chan_xfer_ld_queue - transfer any pending transactions 898 * fsl_chan_xfer_ld_queue - transfer any pending transactions
885 * @chan : Freescale DMA channel 899 * @chan : Freescale DMA channel
886 * 900 *
887 * This will make sure that any pending transactions will be run. 901 * HARDWARE STATE: idle
888 * If the DMA controller is idle, it will be started. Otherwise, 902 * LOCKING: must hold chan->desc_lock
889 * the DMA controller's interrupt handler will start any pending
890 * transactions when it becomes idle.
891 */ 903 */
892static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) 904static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
893{ 905{
894 struct fsl_desc_sw *desc; 906 struct fsl_desc_sw *desc;
895 unsigned long flags;
896
897 spin_lock_irqsave(&chan->desc_lock, flags);
898 907
899 /* 908 /*
900 * If the list of pending descriptors is empty, then we 909 * If the list of pending descriptors is empty, then we
901 * don't need to do any work at all 910 * don't need to do any work at all
902 */ 911 */
903 if (list_empty(&chan->ld_pending)) { 912 if (list_empty(&chan->ld_pending)) {
904 dev_dbg(chan->dev, "no pending LDs\n"); 913 chan_dbg(chan, "no pending LDs\n");
905 goto out_unlock; 914 return;
906 } 915 }
907 916
908 /* 917 /*
909 * The DMA controller is not idle, which means the interrupt 918 * The DMA controller is not idle, which means that the interrupt
910 * handler will start any queued transactions when it runs 919 * handler will start any queued transactions when it runs after
911 * at the end of the current transaction 920 * this transaction finishes
912 */ 921 */
913 if (!dma_is_idle(chan)) { 922 if (!chan->idle) {
914 dev_dbg(chan->dev, "DMA controller still busy\n"); 923 chan_dbg(chan, "DMA controller still busy\n");
915 goto out_unlock; 924 return;
916 } 925 }
917 926
918 /* 927 /*
919 * TODO:
920 * make sure the dma_halt() function really un-wedges the
921 * controller as much as possible
922 */
923 dma_halt(chan);
924
925 /*
926 * If there are some link descriptors which have not been 928 * If there are some link descriptors which have not been
927 * transferred, we need to start the controller 929 * transferred, we need to start the controller
928 */ 930 */
@@ -931,18 +933,32 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
931 * Move all elements from the queue of pending transactions 933 * Move all elements from the queue of pending transactions
932 * onto the list of running transactions 934 * onto the list of running transactions
933 */ 935 */
936 chan_dbg(chan, "idle, starting controller\n");
934 desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); 937 desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
935 list_splice_tail_init(&chan->ld_pending, &chan->ld_running); 938 list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
936 939
937 /* 940 /*
941 * The 85xx DMA controller doesn't clear the channel start bit
942 * automatically at the end of a transfer. Therefore we must clear
943 * it in software before starting the transfer.
944 */
945 if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
946 u32 mode;
947
948 mode = DMA_IN(chan, &chan->regs->mr, 32);
949 mode &= ~FSL_DMA_MR_CS;
950 DMA_OUT(chan, &chan->regs->mr, mode, 32);
951 }
952
953 /*
938 * Program the descriptor's address into the DMA controller, 954 * Program the descriptor's address into the DMA controller,
939 * then start the DMA transaction 955 * then start the DMA transaction
940 */ 956 */
941 set_cdar(chan, desc->async_tx.phys); 957 set_cdar(chan, desc->async_tx.phys);
942 dma_start(chan); 958 get_cdar(chan);
943 959
944out_unlock: 960 dma_start(chan);
945 spin_unlock_irqrestore(&chan->desc_lock, flags); 961 chan->idle = false;
946} 962}
947 963
948/** 964/**
@@ -952,7 +968,11 @@ out_unlock:
952static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) 968static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
953{ 969{
954 struct fsldma_chan *chan = to_fsl_chan(dchan); 970 struct fsldma_chan *chan = to_fsl_chan(dchan);
971 unsigned long flags;
972
973 spin_lock_irqsave(&chan->desc_lock, flags);
955 fsl_chan_xfer_ld_queue(chan); 974 fsl_chan_xfer_ld_queue(chan);
975 spin_unlock_irqrestore(&chan->desc_lock, flags);
956} 976}
957 977
958/** 978/**
@@ -964,16 +984,18 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
964 struct dma_tx_state *txstate) 984 struct dma_tx_state *txstate)
965{ 985{
966 struct fsldma_chan *chan = to_fsl_chan(dchan); 986 struct fsldma_chan *chan = to_fsl_chan(dchan);
967 dma_cookie_t last_used;
968 dma_cookie_t last_complete; 987 dma_cookie_t last_complete;
988 dma_cookie_t last_used;
989 unsigned long flags;
969 990
970 fsl_chan_ld_cleanup(chan); 991 spin_lock_irqsave(&chan->desc_lock, flags);
971 992
972 last_used = dchan->cookie;
973 last_complete = chan->completed_cookie; 993 last_complete = chan->completed_cookie;
994 last_used = dchan->cookie;
974 995
975 dma_set_tx_state(txstate, last_complete, last_used, 0); 996 spin_unlock_irqrestore(&chan->desc_lock, flags);
976 997
998 dma_set_tx_state(txstate, last_complete, last_used, 0);
977 return dma_async_is_complete(cookie, last_complete, last_used); 999 return dma_async_is_complete(cookie, last_complete, last_used);
978} 1000}
979 1001
@@ -984,21 +1006,20 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
984static irqreturn_t fsldma_chan_irq(int irq, void *data) 1006static irqreturn_t fsldma_chan_irq(int irq, void *data)
985{ 1007{
986 struct fsldma_chan *chan = data; 1008 struct fsldma_chan *chan = data;
987 int update_cookie = 0;
988 int xfer_ld_q = 0;
989 u32 stat; 1009 u32 stat;
990 1010
991 /* save and clear the status register */ 1011 /* save and clear the status register */
992 stat = get_sr(chan); 1012 stat = get_sr(chan);
993 set_sr(chan, stat); 1013 set_sr(chan, stat);
994 dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat); 1014 chan_dbg(chan, "irq: stat = 0x%x\n", stat);
995 1015
1016 /* check that this was really our device */
996 stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); 1017 stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
997 if (!stat) 1018 if (!stat)
998 return IRQ_NONE; 1019 return IRQ_NONE;
999 1020
1000 if (stat & FSL_DMA_SR_TE) 1021 if (stat & FSL_DMA_SR_TE)
1001 dev_err(chan->dev, "Transfer Error!\n"); 1022 chan_err(chan, "Transfer Error!\n");
1002 1023
1003 /* 1024 /*
1004 * Programming Error 1025 * Programming Error
@@ -1006,29 +1027,10 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
1006 * triger a PE interrupt. 1027 * triger a PE interrupt.
1007 */ 1028 */
1008 if (stat & FSL_DMA_SR_PE) { 1029 if (stat & FSL_DMA_SR_PE) {
1009 dev_dbg(chan->dev, "irq: Programming Error INT\n"); 1030 chan_dbg(chan, "irq: Programming Error INT\n");
1010 if (get_bcr(chan) == 0) {
1011 /* BCR register is 0, this is a DMA_INTERRUPT async_tx.
1012 * Now, update the completed cookie, and continue the
1013 * next uncompleted transfer.
1014 */
1015 update_cookie = 1;
1016 xfer_ld_q = 1;
1017 }
1018 stat &= ~FSL_DMA_SR_PE; 1031 stat &= ~FSL_DMA_SR_PE;
1019 } 1032 if (get_bcr(chan) != 0)
1020 1033 chan_err(chan, "Programming Error!\n");
1021 /*
1022 * If the link descriptor segment transfer finishes,
1023 * we will recycle the used descriptor.
1024 */
1025 if (stat & FSL_DMA_SR_EOSI) {
1026 dev_dbg(chan->dev, "irq: End-of-segments INT\n");
1027 dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n",
1028 (unsigned long long)get_cdar(chan),
1029 (unsigned long long)get_ndar(chan));
1030 stat &= ~FSL_DMA_SR_EOSI;
1031 update_cookie = 1;
1032 } 1034 }
1033 1035
1034 /* 1036 /*
@@ -1036,10 +1038,8 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
1036 * and start the next transfer if it exist. 1038 * and start the next transfer if it exist.
1037 */ 1039 */
1038 if (stat & FSL_DMA_SR_EOCDI) { 1040 if (stat & FSL_DMA_SR_EOCDI) {
1039 dev_dbg(chan->dev, "irq: End-of-Chain link INT\n"); 1041 chan_dbg(chan, "irq: End-of-Chain link INT\n");
1040 stat &= ~FSL_DMA_SR_EOCDI; 1042 stat &= ~FSL_DMA_SR_EOCDI;
1041 update_cookie = 1;
1042 xfer_ld_q = 1;
1043 } 1043 }
1044 1044
1045 /* 1045 /*
@@ -1048,27 +1048,79 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
1048 * prepare next transfer. 1048 * prepare next transfer.
1049 */ 1049 */
1050 if (stat & FSL_DMA_SR_EOLNI) { 1050 if (stat & FSL_DMA_SR_EOLNI) {
1051 dev_dbg(chan->dev, "irq: End-of-link INT\n"); 1051 chan_dbg(chan, "irq: End-of-link INT\n");
1052 stat &= ~FSL_DMA_SR_EOLNI; 1052 stat &= ~FSL_DMA_SR_EOLNI;
1053 xfer_ld_q = 1;
1054 } 1053 }
1055 1054
1056 if (update_cookie) 1055 /* check that the DMA controller is really idle */
1057 fsl_dma_update_completed_cookie(chan); 1056 if (!dma_is_idle(chan))
1058 if (xfer_ld_q) 1057 chan_err(chan, "irq: controller not idle!\n");
1059 fsl_chan_xfer_ld_queue(chan); 1058
1059 /* check that we handled all of the bits */
1060 if (stat) 1060 if (stat)
1061 dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat); 1061 chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);
1062 1062
1063 dev_dbg(chan->dev, "irq: Exit\n"); 1063 /*
1064 * Schedule the tasklet to handle all cleanup of the current
1065 * transaction. It will start a new transaction if there is
1066 * one pending.
1067 */
1064 tasklet_schedule(&chan->tasklet); 1068 tasklet_schedule(&chan->tasklet);
1069 chan_dbg(chan, "irq: Exit\n");
1065 return IRQ_HANDLED; 1070 return IRQ_HANDLED;
1066} 1071}
1067 1072
1068static void dma_do_tasklet(unsigned long data) 1073static void dma_do_tasklet(unsigned long data)
1069{ 1074{
1070 struct fsldma_chan *chan = (struct fsldma_chan *)data; 1075 struct fsldma_chan *chan = (struct fsldma_chan *)data;
1071 fsl_chan_ld_cleanup(chan); 1076 struct fsl_desc_sw *desc, *_desc;
1077 LIST_HEAD(ld_cleanup);
1078 unsigned long flags;
1079
1080 chan_dbg(chan, "tasklet entry\n");
1081
1082 spin_lock_irqsave(&chan->desc_lock, flags);
1083
1084 /* update the cookie if we have some descriptors to cleanup */
1085 if (!list_empty(&chan->ld_running)) {
1086 dma_cookie_t cookie;
1087
1088 desc = to_fsl_desc(chan->ld_running.prev);
1089 cookie = desc->async_tx.cookie;
1090
1091 chan->completed_cookie = cookie;
1092 chan_dbg(chan, "completed_cookie=%d\n", cookie);
1093 }
1094
1095 /*
1096 * move the descriptors to a temporary list so we can drop the lock
1097 * during the entire cleanup operation
1098 */
1099 list_splice_tail_init(&chan->ld_running, &ld_cleanup);
1100
1101 /* the hardware is now idle and ready for more */
1102 chan->idle = true;
1103
1104 /*
1105 * Start any pending transactions automatically
1106 *
1107 * In the ideal case, we keep the DMA controller busy while we go
1108 * ahead and free the descriptors below.
1109 */
1110 fsl_chan_xfer_ld_queue(chan);
1111 spin_unlock_irqrestore(&chan->desc_lock, flags);
1112
1113 /* Run the callback for each descriptor, in order */
1114 list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
1115
1116 /* Remove from the list of transactions */
1117 list_del(&desc->node);
1118
1119 /* Run all cleanup for this descriptor */
1120 fsldma_cleanup_descriptor(chan, desc);
1121 }
1122
1123 chan_dbg(chan, "tasklet exit\n");
1072} 1124}
1073 1125
1074static irqreturn_t fsldma_ctrl_irq(int irq, void *data) 1126static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
@@ -1116,7 +1168,7 @@ static void fsldma_free_irqs(struct fsldma_device *fdev)
1116 for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { 1168 for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1117 chan = fdev->chan[i]; 1169 chan = fdev->chan[i];
1118 if (chan && chan->irq != NO_IRQ) { 1170 if (chan && chan->irq != NO_IRQ) {
1119 dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id); 1171 chan_dbg(chan, "free per-channel IRQ\n");
1120 free_irq(chan->irq, chan); 1172 free_irq(chan->irq, chan);
1121 } 1173 }
1122 } 1174 }
@@ -1143,19 +1195,16 @@ static int fsldma_request_irqs(struct fsldma_device *fdev)
1143 continue; 1195 continue;
1144 1196
1145 if (chan->irq == NO_IRQ) { 1197 if (chan->irq == NO_IRQ) {
1146 dev_err(fdev->dev, "no interrupts property defined for " 1198 chan_err(chan, "interrupts property missing in device tree\n");
1147 "DMA channel %d. Please fix your "
1148 "device tree\n", chan->id);
1149 ret = -ENODEV; 1199 ret = -ENODEV;
1150 goto out_unwind; 1200 goto out_unwind;
1151 } 1201 }
1152 1202
1153 dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id); 1203 chan_dbg(chan, "request per-channel IRQ\n");
1154 ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED, 1204 ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
1155 "fsldma-chan", chan); 1205 "fsldma-chan", chan);
1156 if (ret) { 1206 if (ret) {
1157 dev_err(fdev->dev, "unable to request IRQ for DMA " 1207 chan_err(chan, "unable to request per-channel IRQ\n");
1158 "channel %d\n", chan->id);
1159 goto out_unwind; 1208 goto out_unwind;
1160 } 1209 }
1161 } 1210 }
@@ -1230,6 +1279,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
1230 1279
1231 fdev->chan[chan->id] = chan; 1280 fdev->chan[chan->id] = chan;
1232 tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); 1281 tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
1282 snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);
1233 1283
1234 /* Initialize the channel */ 1284 /* Initialize the channel */
1235 dma_init(chan); 1285 dma_init(chan);
@@ -1250,6 +1300,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
1250 spin_lock_init(&chan->desc_lock); 1300 spin_lock_init(&chan->desc_lock);
1251 INIT_LIST_HEAD(&chan->ld_pending); 1301 INIT_LIST_HEAD(&chan->ld_pending);
1252 INIT_LIST_HEAD(&chan->ld_running); 1302 INIT_LIST_HEAD(&chan->ld_running);
1303 chan->idle = true;
1253 1304
1254 chan->common.device = &fdev->common; 1305 chan->common.device = &fdev->common;
1255 1306
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index ba9f403c0fbe..9cb5aa57c677 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -102,8 +102,8 @@ struct fsl_desc_sw {
102} __attribute__((aligned(32))); 102} __attribute__((aligned(32)));
103 103
104struct fsldma_chan_regs { 104struct fsldma_chan_regs {
105 u32 mr; /* 0x00 - Mode Register */ 105 u32 mr; /* 0x00 - Mode Register */
106 u32 sr; /* 0x04 - Status Register */ 106 u32 sr; /* 0x04 - Status Register */
107 u64 cdar; /* 0x08 - Current descriptor address register */ 107 u64 cdar; /* 0x08 - Current descriptor address register */
108 u64 sar; /* 0x10 - Source Address Register */ 108 u64 sar; /* 0x10 - Source Address Register */
109 u64 dar; /* 0x18 - Destination Address Register */ 109 u64 dar; /* 0x18 - Destination Address Register */
@@ -135,6 +135,7 @@ struct fsldma_device {
135#define FSL_DMA_CHAN_START_EXT 0x00002000 135#define FSL_DMA_CHAN_START_EXT 0x00002000
136 136
137struct fsldma_chan { 137struct fsldma_chan {
138 char name[8]; /* Channel name */
138 struct fsldma_chan_regs __iomem *regs; 139 struct fsldma_chan_regs __iomem *regs;
139 dma_cookie_t completed_cookie; /* The maximum cookie completed */ 140 dma_cookie_t completed_cookie; /* The maximum cookie completed */
140 spinlock_t desc_lock; /* Descriptor operation lock */ 141 spinlock_t desc_lock; /* Descriptor operation lock */
@@ -147,6 +148,7 @@ struct fsldma_chan {
147 int id; /* Raw id of this channel */ 148 int id; /* Raw id of this channel */
148 struct tasklet_struct tasklet; 149 struct tasklet_struct tasklet;
149 u32 feature; 150 u32 feature;
151 bool idle; /* DMA controller is idle */
150 152
151 void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable); 153 void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
152 void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable); 154 void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
new file mode 100644
index 000000000000..88aad4f54002
--- /dev/null
+++ b/drivers/dma/mxs-dma.c
@@ -0,0 +1,724 @@
1/*
2 * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
3 *
4 * Refer to drivers/dma/imx-sdma.c
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/init.h>
12#include <linux/types.h>
13#include <linux/mm.h>
14#include <linux/interrupt.h>
15#include <linux/clk.h>
16#include <linux/wait.h>
17#include <linux/sched.h>
18#include <linux/semaphore.h>
19#include <linux/device.h>
20#include <linux/dma-mapping.h>
21#include <linux/slab.h>
22#include <linux/platform_device.h>
23#include <linux/dmaengine.h>
24#include <linux/delay.h>
25
26#include <asm/irq.h>
27#include <mach/mxs.h>
28#include <mach/dma.h>
29#include <mach/common.h>
30
31/*
32 * NOTE: The term "PIO" throughout the mxs-dma implementation means
33 * PIO mode of mxs apbh-dma and apbx-dma. With this working mode,
34 * dma can program the controller registers of peripheral devices.
35 */
36
37#define MXS_DMA_APBH 0
38#define MXS_DMA_APBX 1
39#define dma_is_apbh() (mxs_dma->dev_id == MXS_DMA_APBH)
40
41#define APBH_VERSION_LATEST 3
42#define apbh_is_old() (mxs_dma->version < APBH_VERSION_LATEST)
43
44#define HW_APBHX_CTRL0 0x000
45#define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29)
46#define BM_APBH_CTRL0_APB_BURST_EN (1 << 28)
47#define BP_APBH_CTRL0_CLKGATE_CHANNEL 8
48#define BP_APBH_CTRL0_RESET_CHANNEL 16
49#define HW_APBHX_CTRL1 0x010
50#define HW_APBHX_CTRL2 0x020
51#define HW_APBHX_CHANNEL_CTRL 0x030
52#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16
53#define HW_APBH_VERSION (cpu_is_mx23() ? 0x3f0 : 0x800)
54#define HW_APBX_VERSION 0x800
55#define BP_APBHX_VERSION_MAJOR 24
56#define HW_APBHX_CHn_NXTCMDAR(n) \
57 (((dma_is_apbh() && apbh_is_old()) ? 0x050 : 0x110) + (n) * 0x70)
58#define HW_APBHX_CHn_SEMA(n) \
59 (((dma_is_apbh() && apbh_is_old()) ? 0x080 : 0x140) + (n) * 0x70)
60
61/*
62 * ccw bits definitions
63 *
64 * COMMAND: 0..1 (2)
65 * CHAIN: 2 (1)
66 * IRQ: 3 (1)
67 * NAND_LOCK: 4 (1) - not implemented
68 * NAND_WAIT4READY: 5 (1) - not implemented
69 * DEC_SEM: 6 (1)
70 * WAIT4END: 7 (1)
71 * HALT_ON_TERMINATE: 8 (1)
72 * TERMINATE_FLUSH: 9 (1)
73 * RESERVED: 10..11 (2)
74 * PIO_NUM: 12..15 (4)
75 */
76#define BP_CCW_COMMAND 0
77#define BM_CCW_COMMAND (3 << 0)
78#define CCW_CHAIN (1 << 2)
79#define CCW_IRQ (1 << 3)
80#define CCW_DEC_SEM (1 << 6)
81#define CCW_WAIT4END (1 << 7)
82#define CCW_HALT_ON_TERM (1 << 8)
83#define CCW_TERM_FLUSH (1 << 9)
84#define BP_CCW_PIO_NUM 12
85#define BM_CCW_PIO_NUM (0xf << 12)
86
87#define BF_CCW(value, field) (((value) << BP_CCW_##field) & BM_CCW_##field)
88
89#define MXS_DMA_CMD_NO_XFER 0
90#define MXS_DMA_CMD_WRITE 1
91#define MXS_DMA_CMD_READ 2
92#define MXS_DMA_CMD_DMA_SENSE 3 /* not implemented */
93
94struct mxs_dma_ccw {
95 u32 next;
96 u16 bits;
97 u16 xfer_bytes;
98#define MAX_XFER_BYTES 0xff00
99 u32 bufaddr;
100#define MXS_PIO_WORDS 16
101 u32 pio_words[MXS_PIO_WORDS];
102};
103
104#define NUM_CCW (int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw))
105
106struct mxs_dma_chan {
107 struct mxs_dma_engine *mxs_dma;
108 struct dma_chan chan;
109 struct dma_async_tx_descriptor desc;
110 struct tasklet_struct tasklet;
111 int chan_irq;
112 struct mxs_dma_ccw *ccw;
113 dma_addr_t ccw_phys;
114 dma_cookie_t last_completed;
115 enum dma_status status;
116 unsigned int flags;
117#define MXS_DMA_SG_LOOP (1 << 0)
118};
119
120#define MXS_DMA_CHANNELS 16
121#define MXS_DMA_CHANNELS_MASK 0xffff
122
123struct mxs_dma_engine {
124 int dev_id;
125 unsigned int version;
126 void __iomem *base;
127 struct clk *clk;
128 struct dma_device dma_device;
129 struct device_dma_parameters dma_parms;
130 struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
131};
132
133static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
134{
135 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
136 int chan_id = mxs_chan->chan.chan_id;
137
138 if (dma_is_apbh() && apbh_is_old())
139 writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
140 mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
141 else
142 writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
143 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
144}
145
146static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
147{
148 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
149 int chan_id = mxs_chan->chan.chan_id;
150
151 /* set cmd_addr up */
152 writel(mxs_chan->ccw_phys,
153 mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
154
155 /* enable apbh channel clock */
156 if (dma_is_apbh()) {
157 if (apbh_is_old())
158 writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
159 mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
160 else
161 writel(1 << chan_id,
162 mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
163 }
164
165 /* write 1 to SEMA to kick off the channel */
166 writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id));
167}
168
169static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
170{
171 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
172 int chan_id = mxs_chan->chan.chan_id;
173
174 /* disable apbh channel clock */
175 if (dma_is_apbh()) {
176 if (apbh_is_old())
177 writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
178 mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
179 else
180 writel(1 << chan_id,
181 mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
182 }
183
184 mxs_chan->status = DMA_SUCCESS;
185}
186
187static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
188{
189 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
190 int chan_id = mxs_chan->chan.chan_id;
191
192 /* freeze the channel */
193 if (dma_is_apbh() && apbh_is_old())
194 writel(1 << chan_id,
195 mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
196 else
197 writel(1 << chan_id,
198 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
199
200 mxs_chan->status = DMA_PAUSED;
201}
202
203static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
204{
205 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
206 int chan_id = mxs_chan->chan.chan_id;
207
208 /* unfreeze the channel */
209 if (dma_is_apbh() && apbh_is_old())
210 writel(1 << chan_id,
211 mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
212 else
213 writel(1 << chan_id,
214 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_CLR_ADDR);
215
216 mxs_chan->status = DMA_IN_PROGRESS;
217}
218
219static dma_cookie_t mxs_dma_assign_cookie(struct mxs_dma_chan *mxs_chan)
220{
221 dma_cookie_t cookie = mxs_chan->chan.cookie;
222
223 if (++cookie < 0)
224 cookie = 1;
225
226 mxs_chan->chan.cookie = cookie;
227 mxs_chan->desc.cookie = cookie;
228
229 return cookie;
230}
231
/* Map a generic dmaengine channel back to its mxs container. */
static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct mxs_dma_chan, chan);
}
236
237static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
238{
239 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(tx->chan);
240
241 mxs_dma_enable_chan(mxs_chan);
242
243 return mxs_dma_assign_cookie(mxs_chan);
244}
245
246static void mxs_dma_tasklet(unsigned long data)
247{
248 struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;
249
250 if (mxs_chan->desc.callback)
251 mxs_chan->desc.callback(mxs_chan->desc.callback_param);
252}
253
/*
 * Interrupt handler shared by all channels of one engine instance.
 *
 * Acknowledges the completion (CTRL1) and error (CTRL2) status, folds
 * both into one word (errors in the upper MXS_DMA_CHANNELS bits,
 * completions in the lower ones), then walks the set bits updating
 * each affected channel and scheduling its tasklet.
 */
static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
{
	struct mxs_dma_engine *mxs_dma = dev_id;
	u32 stat1, stat2;

	/* completion status: read then write-1-to-clear */
	stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1);
	stat1 &= MXS_DMA_CHANNELS_MASK;
	writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + MXS_CLR_ADDR);

	/* error status: read then write-1-to-clear */
	stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2);
	writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + MXS_CLR_ADDR);

	/*
	 * When the completion and termination bits are set at the same
	 * time, we do not take it as an error.  IOW, it only becomes an
	 * error we need to handle here if it is either (1) a bus error
	 * or (2) a termination error with no completion.
	 */
	stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
		(~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */

	/* combine error and completion status for checking */
	stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1;
	while (stat1) {
		/* handle the highest pending bit first */
		int channel = fls(stat1) - 1;
		struct mxs_dma_chan *mxs_chan =
			&mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS];

		if (channel >= MXS_DMA_CHANNELS) {
			/* upper half of the word: an error on this channel */
			dev_dbg(mxs_dma->dma_device.dev,
				"%s: error in channel %d\n", __func__,
				channel - MXS_DMA_CHANNELS);
			mxs_chan->status = DMA_ERROR;
			mxs_dma_reset_chan(mxs_chan);
		} else {
			/* cyclic transfers keep running after each period */
			if (mxs_chan->flags & MXS_DMA_SG_LOOP)
				mxs_chan->status = DMA_IN_PROGRESS;
			else
				mxs_chan->status = DMA_SUCCESS;
		}

		stat1 &= ~(1 << channel);

		if (mxs_chan->status == DMA_SUCCESS)
			mxs_chan->last_completed = mxs_chan->desc.cookie;

		/* schedule tasklet on this channel */
		tasklet_schedule(&mxs_chan->tasklet);
	}

	return IRQ_HANDLED;
}
308
/*
 * Allocate per-channel resources: one page of coherent memory for the
 * CCW command chain, the channel interrupt, and the engine clock.
 *
 * The channel's irq number arrives via chan->private (a struct
 * mxs_dma_data supplied by the client), so a NULL private pointer is
 * rejected with -EINVAL.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is released in reverse order.
 */
static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_data *data = chan->private;
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int ret;

	if (!data)
		return -EINVAL;

	mxs_chan->chan_irq = data->chan_irq;

	/* one page holds the whole CCW chain for this channel */
	mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
				&mxs_chan->ccw_phys, GFP_KERNEL);
	if (!mxs_chan->ccw) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	memset(mxs_chan->ccw, 0, PAGE_SIZE);

	/* the handler receives the whole engine and demuxes per channel */
	ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
			0, "mxs-dma", mxs_dma);
	if (ret)
		goto err_irq;

	ret = clk_enable(mxs_dma->clk);
	if (ret)
		goto err_clk;

	mxs_dma_reset_chan(mxs_chan);

	dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
	mxs_chan->desc.tx_submit = mxs_dma_tx_submit;

	/* the descriptor is ready */
	async_tx_ack(&mxs_chan->desc);

	return 0;

err_clk:
	free_irq(mxs_chan->chan_irq, mxs_dma);
err_irq:
	dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
			mxs_chan->ccw, mxs_chan->ccw_phys);
err_alloc:
	return ret;
}
357
358static void mxs_dma_free_chan_resources(struct dma_chan *chan)
359{
360 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
361 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
362
363 mxs_dma_disable_chan(mxs_chan);
364
365 free_irq(mxs_chan->chan_irq, mxs_dma);
366
367 dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
368 mxs_chan->ccw, mxs_chan->ccw_phys);
369
370 clk_disable(mxs_dma->clk);
371}
372
/*
 * Prepare a slave scatter/gather transfer.
 *
 * Two modes, selected by @direction:
 *  - DMA_NONE: @sgl carries raw PIO words that are written to the
 *    peripheral's registers via a single "no transfer" command;
 *  - otherwise: each sg entry becomes one chained CCW, with the last
 *    entry getting IRQ/DEC_SEM/WAIT4END so the chain terminates.
 *
 * @append chains the new CCWs onto the previously prepared ones
 * instead of starting a fresh chain.
 *
 * Returns the channel's single descriptor, or NULL on error (the
 * channel status is then set to DMA_ERROR).
 */
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long append)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	struct mxs_dma_ccw *ccw;
	struct scatterlist *sg;
	int i, j;
	u32 *pio;
	/*
	 * NOTE(review): this index is function-static and therefore
	 * shared by ALL channels of ALL engine instances.  Concurrent
	 * preparation on two channels would corrupt each other's CCW
	 * chains; it presumably belongs in struct mxs_dma_chan —
	 * confirm and fix separately.
	 */
	static int idx;

	if (mxs_chan->status == DMA_IN_PROGRESS && !append)
		return NULL;

	if (sg_len + (append ? idx : 0) > NUM_CCW) {
		dev_err(mxs_dma->dma_device.dev,
				"maximum number of sg exceeded: %d > %d\n",
				sg_len, NUM_CCW);
		goto err_out;
	}

	mxs_chan->status = DMA_IN_PROGRESS;
	mxs_chan->flags = 0;

	/*
	 * If the sg is prepared with append flag set, the sg
	 * will be appended to the last prepared sg.
	 */
	if (append) {
		BUG_ON(idx < 1);
		/* re-link the previous tail into the new chain */
		ccw = &mxs_chan->ccw[idx - 1];
		ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
		ccw->bits |= CCW_CHAIN;
		/* the old tail no longer terminates the chain */
		ccw->bits &= ~CCW_IRQ;
		ccw->bits &= ~CCW_DEC_SEM;
		ccw->bits &= ~CCW_WAIT4END;
	} else {
		idx = 0;
	}

	if (direction == DMA_NONE) {
		/* PIO mode: sgl is reinterpreted as an array of register words */
		ccw = &mxs_chan->ccw[idx++];
		pio = (u32 *) sgl;

		for (j = 0; j < sg_len;)
			ccw->pio_words[j++] = *pio++;

		ccw->bits = 0;
		ccw->bits |= CCW_IRQ;
		ccw->bits |= CCW_DEC_SEM;
		ccw->bits |= CCW_WAIT4END;
		ccw->bits |= CCW_HALT_ON_TERM;
		ccw->bits |= CCW_TERM_FLUSH;
		ccw->bits |= BF_CCW(sg_len, PIO_NUM);
		ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
	} else {
		for_each_sg(sgl, sg, sg_len, i) {
			if (sg->length > MAX_XFER_BYTES) {
				dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
						sg->length, MAX_XFER_BYTES);
				goto err_out;
			}

			ccw = &mxs_chan->ccw[idx++];

			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
			ccw->bufaddr = sg->dma_address;
			ccw->xfer_bytes = sg->length;

			ccw->bits = 0;
			ccw->bits |= CCW_CHAIN;
			ccw->bits |= CCW_HALT_ON_TERM;
			ccw->bits |= CCW_TERM_FLUSH;
			ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
					MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
					COMMAND);

			/* last entry terminates the chain */
			if (i + 1 == sg_len) {
				ccw->bits &= ~CCW_CHAIN;
				ccw->bits |= CCW_IRQ;
				ccw->bits |= CCW_DEC_SEM;
				ccw->bits |= CCW_WAIT4END;
			}
		}
	}

	return &mxs_chan->desc;

err_out:
	mxs_chan->status = DMA_ERROR;
	return NULL;
}
467
468static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
469 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
470 size_t period_len, enum dma_data_direction direction)
471{
472 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
473 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
474 int num_periods = buf_len / period_len;
475 int i = 0, buf = 0;
476
477 if (mxs_chan->status == DMA_IN_PROGRESS)
478 return NULL;
479
480 mxs_chan->status = DMA_IN_PROGRESS;
481 mxs_chan->flags |= MXS_DMA_SG_LOOP;
482
483 if (num_periods > NUM_CCW) {
484 dev_err(mxs_dma->dma_device.dev,
485 "maximum number of sg exceeded: %d > %d\n",
486 num_periods, NUM_CCW);
487 goto err_out;
488 }
489
490 if (period_len > MAX_XFER_BYTES) {
491 dev_err(mxs_dma->dma_device.dev,
492 "maximum period size exceeded: %d > %d\n",
493 period_len, MAX_XFER_BYTES);
494 goto err_out;
495 }
496
497 while (buf < buf_len) {
498 struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i];
499
500 if (i + 1 == num_periods)
501 ccw->next = mxs_chan->ccw_phys;
502 else
503 ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1);
504
505 ccw->bufaddr = dma_addr;
506 ccw->xfer_bytes = period_len;
507
508 ccw->bits = 0;
509 ccw->bits |= CCW_CHAIN;
510 ccw->bits |= CCW_IRQ;
511 ccw->bits |= CCW_HALT_ON_TERM;
512 ccw->bits |= CCW_TERM_FLUSH;
513 ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
514 MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
515
516 dma_addr += period_len;
517 buf += period_len;
518
519 i++;
520 }
521
522 return &mxs_chan->desc;
523
524err_out:
525 mxs_chan->status = DMA_ERROR;
526 return NULL;
527}
528
529static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
530 unsigned long arg)
531{
532 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
533 int ret = 0;
534
535 switch (cmd) {
536 case DMA_TERMINATE_ALL:
537 mxs_dma_disable_chan(mxs_chan);
538 break;
539 case DMA_PAUSE:
540 mxs_dma_pause_chan(mxs_chan);
541 break;
542 case DMA_RESUME:
543 mxs_dma_resume_chan(mxs_chan);
544 break;
545 default:
546 ret = -ENOSYS;
547 }
548
549 return ret;
550}
551
552static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
553 dma_cookie_t cookie, struct dma_tx_state *txstate)
554{
555 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
556 dma_cookie_t last_used;
557
558 last_used = chan->cookie;
559 dma_set_tx_state(txstate, mxs_chan->last_completed, last_used, 0);
560
561 return mxs_chan->status;
562}
563
/*
 * No-op: each channel runs a single descriptor, which is started
 * directly from tx_submit(), so there is nothing left to kick here.
 */
static void mxs_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * Nothing to do. We only have a single descriptor.
	 */
}
570
571static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
572{
573 int ret;
574
575 ret = clk_enable(mxs_dma->clk);
576 if (ret)
577 goto err_out;
578
579 ret = mxs_reset_block(mxs_dma->base);
580 if (ret)
581 goto err_out;
582
583 /* only major version matters */
584 mxs_dma->version = readl(mxs_dma->base +
585 ((mxs_dma->dev_id == MXS_DMA_APBX) ?
586 HW_APBX_VERSION : HW_APBH_VERSION)) >>
587 BP_APBHX_VERSION_MAJOR;
588
589 /* enable apbh burst */
590 if (dma_is_apbh()) {
591 writel(BM_APBH_CTRL0_APB_BURST_EN,
592 mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
593 writel(BM_APBH_CTRL0_APB_BURST8_EN,
594 mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
595 }
596
597 /* enable irq for all the channels */
598 writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
599 mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);
600
601 clk_disable(mxs_dma->clk);
602
603 return 0;
604
605err_out:
606 return ret;
607}
608
609static int __init mxs_dma_probe(struct platform_device *pdev)
610{
611 const struct platform_device_id *id_entry =
612 platform_get_device_id(pdev);
613 struct mxs_dma_engine *mxs_dma;
614 struct resource *iores;
615 int ret, i;
616
617 mxs_dma = kzalloc(sizeof(*mxs_dma), GFP_KERNEL);
618 if (!mxs_dma)
619 return -ENOMEM;
620
621 mxs_dma->dev_id = id_entry->driver_data;
622
623 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
624
625 if (!request_mem_region(iores->start, resource_size(iores),
626 pdev->name)) {
627 ret = -EBUSY;
628 goto err_request_region;
629 }
630
631 mxs_dma->base = ioremap(iores->start, resource_size(iores));
632 if (!mxs_dma->base) {
633 ret = -ENOMEM;
634 goto err_ioremap;
635 }
636
637 mxs_dma->clk = clk_get(&pdev->dev, NULL);
638 if (IS_ERR(mxs_dma->clk)) {
639 ret = PTR_ERR(mxs_dma->clk);
640 goto err_clk;
641 }
642
643 dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask);
644 dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask);
645
646 INIT_LIST_HEAD(&mxs_dma->dma_device.channels);
647
648 /* Initialize channel parameters */
649 for (i = 0; i < MXS_DMA_CHANNELS; i++) {
650 struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i];
651
652 mxs_chan->mxs_dma = mxs_dma;
653 mxs_chan->chan.device = &mxs_dma->dma_device;
654
655 tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
656 (unsigned long) mxs_chan);
657
658
659 /* Add the channel to mxs_chan list */
660 list_add_tail(&mxs_chan->chan.device_node,
661 &mxs_dma->dma_device.channels);
662 }
663
664 ret = mxs_dma_init(mxs_dma);
665 if (ret)
666 goto err_init;
667
668 mxs_dma->dma_device.dev = &pdev->dev;
669
670 /* mxs_dma gets 65535 bytes maximum sg size */
671 mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms;
672 dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);
673
674 mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
675 mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources;
676 mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
677 mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
678 mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
679 mxs_dma->dma_device.device_control = mxs_dma_control;
680 mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending;
681
682 ret = dma_async_device_register(&mxs_dma->dma_device);
683 if (ret) {
684 dev_err(mxs_dma->dma_device.dev, "unable to register\n");
685 goto err_init;
686 }
687
688 dev_info(mxs_dma->dma_device.dev, "initialized\n");
689
690 return 0;
691
692err_init:
693 clk_put(mxs_dma->clk);
694err_clk:
695 iounmap(mxs_dma->base);
696err_ioremap:
697 release_mem_region(iores->start, resource_size(iores));
698err_request_region:
699 kfree(mxs_dma);
700 return ret;
701}
702
703static struct platform_device_id mxs_dma_type[] = {
704 {
705 .name = "mxs-dma-apbh",
706 .driver_data = MXS_DMA_APBH,
707 }, {
708 .name = "mxs-dma-apbx",
709 .driver_data = MXS_DMA_APBX,
710 }
711};
712
/*
 * No .probe member here: mxs_dma_probe() is __init and is passed to
 * platform_driver_probe() at registration time instead.
 */
static struct platform_driver mxs_dma_driver = {
	.driver		= {
		.name	= "mxs-dma",
	},
	.id_table	= mxs_dma_type,
};
719
/*
 * Register the driver via platform_driver_probe() (one-shot probe of
 * already-present devices).  Runs at subsys_initcall time so the DMA
 * engine is available before client drivers probe.
 */
static int __init mxs_dma_module_init(void)
{
	return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
}
subsys_initcall(mxs_dma_module_init);
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 1c38418ae61f..8d8fef1480a9 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -82,7 +82,7 @@ struct pch_dma_regs {
82 u32 dma_sts1; 82 u32 dma_sts1;
83 u32 reserved2; 83 u32 reserved2;
84 u32 reserved3; 84 u32 reserved3;
85 struct pch_dma_desc_regs desc[0]; 85 struct pch_dma_desc_regs desc[MAX_CHAN_NR];
86}; 86};
87 87
88struct pch_dma_desc { 88struct pch_dma_desc {
@@ -124,7 +124,7 @@ struct pch_dma {
124 struct pci_pool *pool; 124 struct pci_pool *pool;
125 struct pch_dma_regs regs; 125 struct pch_dma_regs regs;
126 struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR]; 126 struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
127 struct pch_dma_chan channels[0]; 127 struct pch_dma_chan channels[MAX_CHAN_NR];
128}; 128};
129 129
130#define PCH_DMA_CTL0 0x00 130#define PCH_DMA_CTL0 0x00
@@ -366,7 +366,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
366 struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan); 366 struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
367 dma_cookie_t cookie; 367 dma_cookie_t cookie;
368 368
369 spin_lock_bh(&pd_chan->lock); 369 spin_lock(&pd_chan->lock);
370 cookie = pdc_assign_cookie(pd_chan, desc); 370 cookie = pdc_assign_cookie(pd_chan, desc);
371 371
372 if (list_empty(&pd_chan->active_list)) { 372 if (list_empty(&pd_chan->active_list)) {
@@ -376,7 +376,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
376 list_add_tail(&desc->desc_node, &pd_chan->queue); 376 list_add_tail(&desc->desc_node, &pd_chan->queue);
377 } 377 }
378 378
379 spin_unlock_bh(&pd_chan->lock); 379 spin_unlock(&pd_chan->lock);
380 return 0; 380 return 0;
381} 381}
382 382
@@ -386,7 +386,7 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
386 struct pch_dma *pd = to_pd(chan->device); 386 struct pch_dma *pd = to_pd(chan->device);
387 dma_addr_t addr; 387 dma_addr_t addr;
388 388
389 desc = pci_pool_alloc(pd->pool, GFP_KERNEL, &addr); 389 desc = pci_pool_alloc(pd->pool, flags, &addr);
390 if (desc) { 390 if (desc) {
391 memset(desc, 0, sizeof(struct pch_dma_desc)); 391 memset(desc, 0, sizeof(struct pch_dma_desc));
392 INIT_LIST_HEAD(&desc->tx_list); 392 INIT_LIST_HEAD(&desc->tx_list);
@@ -405,7 +405,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
405 struct pch_dma_desc *ret = NULL; 405 struct pch_dma_desc *ret = NULL;
406 int i; 406 int i;
407 407
408 spin_lock_bh(&pd_chan->lock); 408 spin_lock(&pd_chan->lock);
409 list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) { 409 list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
410 i++; 410 i++;
411 if (async_tx_test_ack(&desc->txd)) { 411 if (async_tx_test_ack(&desc->txd)) {
@@ -415,15 +415,15 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
415 } 415 }
416 dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc); 416 dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
417 } 417 }
418 spin_unlock_bh(&pd_chan->lock); 418 spin_unlock(&pd_chan->lock);
419 dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i); 419 dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
420 420
421 if (!ret) { 421 if (!ret) {
422 ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO); 422 ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
423 if (ret) { 423 if (ret) {
424 spin_lock_bh(&pd_chan->lock); 424 spin_lock(&pd_chan->lock);
425 pd_chan->descs_allocated++; 425 pd_chan->descs_allocated++;
426 spin_unlock_bh(&pd_chan->lock); 426 spin_unlock(&pd_chan->lock);
427 } else { 427 } else {
428 dev_err(chan2dev(&pd_chan->chan), 428 dev_err(chan2dev(&pd_chan->chan),
429 "failed to alloc desc\n"); 429 "failed to alloc desc\n");
@@ -437,10 +437,10 @@ static void pdc_desc_put(struct pch_dma_chan *pd_chan,
437 struct pch_dma_desc *desc) 437 struct pch_dma_desc *desc)
438{ 438{
439 if (desc) { 439 if (desc) {
440 spin_lock_bh(&pd_chan->lock); 440 spin_lock(&pd_chan->lock);
441 list_splice_init(&desc->tx_list, &pd_chan->free_list); 441 list_splice_init(&desc->tx_list, &pd_chan->free_list);
442 list_add(&desc->desc_node, &pd_chan->free_list); 442 list_add(&desc->desc_node, &pd_chan->free_list);
443 spin_unlock_bh(&pd_chan->lock); 443 spin_unlock(&pd_chan->lock);
444 } 444 }
445} 445}
446 446
@@ -530,9 +530,9 @@ static void pd_issue_pending(struct dma_chan *chan)
530 struct pch_dma_chan *pd_chan = to_pd_chan(chan); 530 struct pch_dma_chan *pd_chan = to_pd_chan(chan);
531 531
532 if (pdc_is_idle(pd_chan)) { 532 if (pdc_is_idle(pd_chan)) {
533 spin_lock_bh(&pd_chan->lock); 533 spin_lock(&pd_chan->lock);
534 pdc_advance_work(pd_chan); 534 pdc_advance_work(pd_chan);
535 spin_unlock_bh(&pd_chan->lock); 535 spin_unlock(&pd_chan->lock);
536 } 536 }
537} 537}
538 538
@@ -592,7 +592,6 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
592 goto err_desc_get; 592 goto err_desc_get;
593 } 593 }
594 594
595
596 if (!first) { 595 if (!first) {
597 first = desc; 596 first = desc;
598 } else { 597 } else {
@@ -641,13 +640,13 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
641 640
642 spin_unlock_bh(&pd_chan->lock); 641 spin_unlock_bh(&pd_chan->lock);
643 642
644
645 return 0; 643 return 0;
646} 644}
647 645
648static void pdc_tasklet(unsigned long data) 646static void pdc_tasklet(unsigned long data)
649{ 647{
650 struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data; 648 struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
649 unsigned long flags;
651 650
652 if (!pdc_is_idle(pd_chan)) { 651 if (!pdc_is_idle(pd_chan)) {
653 dev_err(chan2dev(&pd_chan->chan), 652 dev_err(chan2dev(&pd_chan->chan),
@@ -655,12 +654,12 @@ static void pdc_tasklet(unsigned long data)
655 return; 654 return;
656 } 655 }
657 656
658 spin_lock_bh(&pd_chan->lock); 657 spin_lock_irqsave(&pd_chan->lock, flags);
659 if (test_and_clear_bit(0, &pd_chan->err_status)) 658 if (test_and_clear_bit(0, &pd_chan->err_status))
660 pdc_handle_error(pd_chan); 659 pdc_handle_error(pd_chan);
661 else 660 else
662 pdc_advance_work(pd_chan); 661 pdc_advance_work(pd_chan);
663 spin_unlock_bh(&pd_chan->lock); 662 spin_unlock_irqrestore(&pd_chan->lock, flags);
664} 663}
665 664
666static irqreturn_t pd_irq(int irq, void *devid) 665static irqreturn_t pd_irq(int irq, void *devid)
@@ -694,6 +693,7 @@ static irqreturn_t pd_irq(int irq, void *devid)
694 return ret; 693 return ret;
695} 694}
696 695
696#ifdef CONFIG_PM
697static void pch_dma_save_regs(struct pch_dma *pd) 697static void pch_dma_save_regs(struct pch_dma *pd)
698{ 698{
699 struct pch_dma_chan *pd_chan; 699 struct pch_dma_chan *pd_chan;
@@ -771,6 +771,7 @@ static int pch_dma_resume(struct pci_dev *pdev)
771 771
772 return 0; 772 return 0;
773} 773}
774#endif
774 775
775static int __devinit pch_dma_probe(struct pci_dev *pdev, 776static int __devinit pch_dma_probe(struct pci_dev *pdev,
776 const struct pci_device_id *id) 777 const struct pci_device_id *id)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 6e1d46a65d0e..af955de035f4 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -68,6 +68,7 @@ enum d40_command {
68 * @base: Pointer to memory area when the pre_alloc_lli's are not large 68 * @base: Pointer to memory area when the pre_alloc_lli's are not large
69 * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if 69 * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if
70 * pre_alloc_lli is used. 70 * pre_alloc_lli is used.
71 * @dma_addr: DMA address, if mapped
71 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli. 72 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
72 * @pre_alloc_lli: Pre allocated area for the most common case of transfers, 73 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
73 * one buffer to one buffer. 74 * one buffer to one buffer.
@@ -75,6 +76,7 @@ enum d40_command {
75struct d40_lli_pool { 76struct d40_lli_pool {
76 void *base; 77 void *base;
77 int size; 78 int size;
79 dma_addr_t dma_addr;
78 /* Space for dst and src, plus an extra for padding */ 80 /* Space for dst and src, plus an extra for padding */
79 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; 81 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
80}; 82};
@@ -94,7 +96,6 @@ struct d40_lli_pool {
94 * during a transfer. 96 * during a transfer.
95 * @node: List entry. 97 * @node: List entry.
96 * @is_in_client_list: true if the client owns this descriptor. 98 * @is_in_client_list: true if the client owns this descriptor.
97 * @is_hw_linked: true if this job will automatically be continued for
98 * the previous one. 99 * the previous one.
99 * 100 *
100 * This descriptor is used for both logical and physical transfers. 101 * This descriptor is used for both logical and physical transfers.
@@ -114,7 +115,7 @@ struct d40_desc {
114 struct list_head node; 115 struct list_head node;
115 116
116 bool is_in_client_list; 117 bool is_in_client_list;
117 bool is_hw_linked; 118 bool cyclic;
118}; 119};
119 120
120/** 121/**
@@ -130,6 +131,7 @@ struct d40_desc {
130 */ 131 */
131struct d40_lcla_pool { 132struct d40_lcla_pool {
132 void *base; 133 void *base;
134 dma_addr_t dma_addr;
133 void *base_unaligned; 135 void *base_unaligned;
134 int pages; 136 int pages;
135 spinlock_t lock; 137 spinlock_t lock;
@@ -303,9 +305,37 @@ struct d40_reg_val {
303 unsigned int val; 305 unsigned int val;
304}; 306};
305 307
306static int d40_pool_lli_alloc(struct d40_desc *d40d, 308static struct device *chan2dev(struct d40_chan *d40c)
307 int lli_len, bool is_log)
308{ 309{
310 return &d40c->chan.dev->device;
311}
312
313static bool chan_is_physical(struct d40_chan *chan)
314{
315 return chan->log_num == D40_PHY_CHAN;
316}
317
318static bool chan_is_logical(struct d40_chan *chan)
319{
320 return !chan_is_physical(chan);
321}
322
323static void __iomem *chan_base(struct d40_chan *chan)
324{
325 return chan->base->virtbase + D40_DREG_PCBASE +
326 chan->phy_chan->num * D40_DREG_PCDELTA;
327}
328
329#define d40_err(dev, format, arg...) \
330 dev_err(dev, "[%s] " format, __func__, ## arg)
331
332#define chan_err(d40c, format, arg...) \
333 d40_err(chan2dev(d40c), format, ## arg)
334
335static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
336 int lli_len)
337{
338 bool is_log = chan_is_logical(d40c);
309 u32 align; 339 u32 align;
310 void *base; 340 void *base;
311 341
@@ -319,7 +349,7 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d,
319 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); 349 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
320 d40d->lli_pool.base = NULL; 350 d40d->lli_pool.base = NULL;
321 } else { 351 } else {
322 d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align); 352 d40d->lli_pool.size = lli_len * 2 * align;
323 353
324 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); 354 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
325 d40d->lli_pool.base = base; 355 d40d->lli_pool.base = base;
@@ -329,22 +359,37 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d,
329 } 359 }
330 360
331 if (is_log) { 361 if (is_log) {
332 d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base, 362 d40d->lli_log.src = PTR_ALIGN(base, align);
333 align); 363 d40d->lli_log.dst = d40d->lli_log.src + lli_len;
334 d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len, 364
335 align); 365 d40d->lli_pool.dma_addr = 0;
336 } else { 366 } else {
337 d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base, 367 d40d->lli_phy.src = PTR_ALIGN(base, align);
338 align); 368 d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;
339 d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len, 369
340 align); 370 d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
371 d40d->lli_phy.src,
372 d40d->lli_pool.size,
373 DMA_TO_DEVICE);
374
375 if (dma_mapping_error(d40c->base->dev,
376 d40d->lli_pool.dma_addr)) {
377 kfree(d40d->lli_pool.base);
378 d40d->lli_pool.base = NULL;
379 d40d->lli_pool.dma_addr = 0;
380 return -ENOMEM;
381 }
341 } 382 }
342 383
343 return 0; 384 return 0;
344} 385}
345 386
346static void d40_pool_lli_free(struct d40_desc *d40d) 387static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
347{ 388{
389 if (d40d->lli_pool.dma_addr)
390 dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
391 d40d->lli_pool.size, DMA_TO_DEVICE);
392
348 kfree(d40d->lli_pool.base); 393 kfree(d40d->lli_pool.base);
349 d40d->lli_pool.base = NULL; 394 d40d->lli_pool.base = NULL;
350 d40d->lli_pool.size = 0; 395 d40d->lli_pool.size = 0;
@@ -391,7 +436,7 @@ static int d40_lcla_free_all(struct d40_chan *d40c,
391 int i; 436 int i;
392 int ret = -EINVAL; 437 int ret = -EINVAL;
393 438
394 if (d40c->log_num == D40_PHY_CHAN) 439 if (chan_is_physical(d40c))
395 return 0; 440 return 0;
396 441
397 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); 442 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
@@ -430,7 +475,7 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
430 475
431 list_for_each_entry_safe(d, _d, &d40c->client, node) 476 list_for_each_entry_safe(d, _d, &d40c->client, node)
432 if (async_tx_test_ack(&d->txd)) { 477 if (async_tx_test_ack(&d->txd)) {
433 d40_pool_lli_free(d); 478 d40_pool_lli_free(d40c, d);
434 d40_desc_remove(d); 479 d40_desc_remove(d);
435 desc = d; 480 desc = d;
436 memset(desc, 0, sizeof(*desc)); 481 memset(desc, 0, sizeof(*desc));
@@ -450,6 +495,7 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
450static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) 495static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
451{ 496{
452 497
498 d40_pool_lli_free(d40c, d40d);
453 d40_lcla_free_all(d40c, d40d); 499 d40_lcla_free_all(d40c, d40d);
454 kmem_cache_free(d40c->base->desc_slab, d40d); 500 kmem_cache_free(d40c->base->desc_slab, d40d);
455} 501}
@@ -459,57 +505,128 @@ static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
459 list_add_tail(&desc->node, &d40c->active); 505 list_add_tail(&desc->node, &d40c->active);
460} 506}
461 507
462static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) 508static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
463{ 509{
464 int curr_lcla = -EINVAL, next_lcla; 510 struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
511 struct d40_phy_lli *lli_src = desc->lli_phy.src;
512 void __iomem *base = chan_base(chan);
513
514 writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
515 writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
516 writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
517 writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);
518
519 writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
520 writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
521 writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
522 writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
523}
465 524
466 if (d40c->log_num == D40_PHY_CHAN) { 525static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
467 d40_phy_lli_write(d40c->base->virtbase, 526{
468 d40c->phy_chan->num, 527 struct d40_lcla_pool *pool = &chan->base->lcla_pool;
469 d40d->lli_phy.dst, 528 struct d40_log_lli_bidir *lli = &desc->lli_log;
470 d40d->lli_phy.src); 529 int lli_current = desc->lli_current;
471 d40d->lli_current = d40d->lli_len; 530 int lli_len = desc->lli_len;
472 } else { 531 bool cyclic = desc->cyclic;
532 int curr_lcla = -EINVAL;
533 int first_lcla = 0;
534 bool linkback;
473 535
474 if ((d40d->lli_len - d40d->lli_current) > 1) 536 /*
475 curr_lcla = d40_lcla_alloc_one(d40c, d40d); 537 * We may have partially running cyclic transfers, in case we did't get
538 * enough LCLA entries.
539 */
540 linkback = cyclic && lli_current == 0;
476 541
477 d40_log_lli_lcpa_write(d40c->lcpa, 542 /*
478 &d40d->lli_log.dst[d40d->lli_current], 543 * For linkback, we need one LCLA even with only one link, because we
479 &d40d->lli_log.src[d40d->lli_current], 544 * can't link back to the one in LCPA space
480 curr_lcla); 545 */
546 if (linkback || (lli_len - lli_current > 1)) {
547 curr_lcla = d40_lcla_alloc_one(chan, desc);
548 first_lcla = curr_lcla;
549 }
481 550
482 d40d->lli_current++; 551 /*
483 for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) { 552 * For linkback, we normally load the LCPA in the loop since we need to
484 struct d40_log_lli *lcla; 553 * link it to the second LCLA and not the first. However, if we
554 * couldn't even get a first LCLA, then we have to run in LCPA and
555 * reload manually.
556 */
557 if (!linkback || curr_lcla == -EINVAL) {
558 unsigned int flags = 0;
485 559
486 if (d40d->lli_current + 1 < d40d->lli_len) 560 if (curr_lcla == -EINVAL)
487 next_lcla = d40_lcla_alloc_one(d40c, d40d); 561 flags |= LLI_TERM_INT;
488 else
489 next_lcla = -EINVAL;
490 562
491 lcla = d40c->base->lcla_pool.base + 563 d40_log_lli_lcpa_write(chan->lcpa,
492 d40c->phy_chan->num * 1024 + 564 &lli->dst[lli_current],
493 8 * curr_lcla * 2; 565 &lli->src[lli_current],
566 curr_lcla,
567 flags);
568 lli_current++;
569 }
494 570
495 d40_log_lli_lcla_write(lcla, 571 if (curr_lcla < 0)
496 &d40d->lli_log.dst[d40d->lli_current], 572 goto out;
497 &d40d->lli_log.src[d40d->lli_current],
498 next_lcla);
499 573
500 (void) dma_map_single(d40c->base->dev, lcla, 574 for (; lli_current < lli_len; lli_current++) {
501 2 * sizeof(struct d40_log_lli), 575 unsigned int lcla_offset = chan->phy_chan->num * 1024 +
502 DMA_TO_DEVICE); 576 8 * curr_lcla * 2;
577 struct d40_log_lli *lcla = pool->base + lcla_offset;
578 unsigned int flags = 0;
579 int next_lcla;
503 580
504 curr_lcla = next_lcla; 581 if (lli_current + 1 < lli_len)
582 next_lcla = d40_lcla_alloc_one(chan, desc);
583 else
584 next_lcla = linkback ? first_lcla : -EINVAL;
505 585
506 if (curr_lcla == -EINVAL) { 586 if (cyclic || next_lcla == -EINVAL)
507 d40d->lli_current++; 587 flags |= LLI_TERM_INT;
508 break; 588
509 } 589 if (linkback && curr_lcla == first_lcla) {
590 /* First link goes in both LCPA and LCLA */
591 d40_log_lli_lcpa_write(chan->lcpa,
592 &lli->dst[lli_current],
593 &lli->src[lli_current],
594 next_lcla, flags);
595 }
596
597 /*
598 * One unused LCLA in the cyclic case if the very first
599 * next_lcla fails...
600 */
601 d40_log_lli_lcla_write(lcla,
602 &lli->dst[lli_current],
603 &lli->src[lli_current],
604 next_lcla, flags);
605
606 dma_sync_single_range_for_device(chan->base->dev,
607 pool->dma_addr, lcla_offset,
608 2 * sizeof(struct d40_log_lli),
609 DMA_TO_DEVICE);
510 610
611 curr_lcla = next_lcla;
612
613 if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
614 lli_current++;
615 break;
511 } 616 }
512 } 617 }
618
619out:
620 desc->lli_current = lli_current;
621}
622
623static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
624{
625 if (chan_is_physical(d40c)) {
626 d40_phy_lli_load(d40c, d40d);
627 d40d->lli_current = d40d->lli_len;
628 } else
629 d40_log_lli_to_lcxa(d40c, d40d);
513} 630}
514 631
515static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) 632static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
@@ -543,18 +660,6 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
543 return d; 660 return d;
544} 661}
545 662
546static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
547{
548 struct d40_desc *d;
549
550 if (list_empty(&d40c->queue))
551 return NULL;
552 list_for_each_entry(d, &d40c->queue, node)
553 if (list_is_last(&d->node, &d40c->queue))
554 break;
555 return d;
556}
557
558static int d40_psize_2_burst_size(bool is_log, int psize) 663static int d40_psize_2_burst_size(bool is_log, int psize)
559{ 664{
560 if (is_log) { 665 if (is_log) {
@@ -666,9 +771,9 @@ static int d40_channel_execute_command(struct d40_chan *d40c,
666 } 771 }
667 772
668 if (i == D40_SUSPEND_MAX_IT) { 773 if (i == D40_SUSPEND_MAX_IT) {
669 dev_err(&d40c->chan.dev->device, 774 chan_err(d40c,
670 "[%s]: unable to suspend the chl %d (log: %d) status %x\n", 775 "unable to suspend the chl %d (log: %d) status %x\n",
671 __func__, d40c->phy_chan->num, d40c->log_num, 776 d40c->phy_chan->num, d40c->log_num,
672 status); 777 status);
673 dump_stack(); 778 dump_stack();
674 ret = -EBUSY; 779 ret = -EBUSY;
@@ -701,17 +806,45 @@ static void d40_term_all(struct d40_chan *d40c)
701 d40c->busy = false; 806 d40c->busy = false;
702} 807}
703 808
809static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
810 u32 event, int reg)
811{
812 void __iomem *addr = chan_base(d40c) + reg;
813 int tries;
814
815 if (!enable) {
816 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
817 | ~D40_EVENTLINE_MASK(event), addr);
818 return;
819 }
820
821 /*
822 * The hardware sometimes doesn't register the enable when src and dst
823 * event lines are active on the same logical channel. Retry to ensure
824 * it does. Usually only one retry is sufficient.
825 */
826 tries = 100;
827 while (--tries) {
828 writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
829 | ~D40_EVENTLINE_MASK(event), addr);
830
831 if (readl(addr) & D40_EVENTLINE_MASK(event))
832 break;
833 }
834
835 if (tries != 99)
836 dev_dbg(chan2dev(d40c),
837 "[%s] workaround enable S%cLNK (%d tries)\n",
838 __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
839 100 - tries);
840
841 WARN_ON(!tries);
842}
843
704static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) 844static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
705{ 845{
706 u32 val;
707 unsigned long flags; 846 unsigned long flags;
708 847
709 /* Notice, that disable requires the physical channel to be stopped */
710 if (do_enable)
711 val = D40_ACTIVATE_EVENTLINE;
712 else
713 val = D40_DEACTIVATE_EVENTLINE;
714
715 spin_lock_irqsave(&d40c->phy_chan->lock, flags); 848 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
716 849
717 /* Enable event line connected to device (or memcpy) */ 850 /* Enable event line connected to device (or memcpy) */
@@ -719,20 +852,15 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
719 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { 852 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
720 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 853 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
721 854
722 writel((val << D40_EVENTLINE_POS(event)) | 855 __d40_config_set_event(d40c, do_enable, event,
723 ~D40_EVENTLINE_MASK(event), 856 D40_CHAN_REG_SSLNK);
724 d40c->base->virtbase + D40_DREG_PCBASE +
725 d40c->phy_chan->num * D40_DREG_PCDELTA +
726 D40_CHAN_REG_SSLNK);
727 } 857 }
858
728 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { 859 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
729 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); 860 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
730 861
731 writel((val << D40_EVENTLINE_POS(event)) | 862 __d40_config_set_event(d40c, do_enable, event,
732 ~D40_EVENTLINE_MASK(event), 863 D40_CHAN_REG_SDLNK);
733 d40c->base->virtbase + D40_DREG_PCBASE +
734 d40c->phy_chan->num * D40_DREG_PCDELTA +
735 D40_CHAN_REG_SDLNK);
736 } 864 }
737 865
738 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); 866 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
@@ -740,15 +868,12 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
740 868
741static u32 d40_chan_has_events(struct d40_chan *d40c) 869static u32 d40_chan_has_events(struct d40_chan *d40c)
742{ 870{
871 void __iomem *chanbase = chan_base(d40c);
743 u32 val; 872 u32 val;
744 873
745 val = readl(d40c->base->virtbase + D40_DREG_PCBASE + 874 val = readl(chanbase + D40_CHAN_REG_SSLNK);
746 d40c->phy_chan->num * D40_DREG_PCDELTA + 875 val |= readl(chanbase + D40_CHAN_REG_SDLNK);
747 D40_CHAN_REG_SSLNK);
748 876
749 val |= readl(d40c->base->virtbase + D40_DREG_PCBASE +
750 d40c->phy_chan->num * D40_DREG_PCDELTA +
751 D40_CHAN_REG_SDLNK);
752 return val; 877 return val;
753} 878}
754 879
@@ -771,7 +896,7 @@ static u32 d40_get_prmo(struct d40_chan *d40c)
771 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG, 896 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
772 }; 897 };
773 898
774 if (d40c->log_num == D40_PHY_CHAN) 899 if (chan_is_physical(d40c))
775 return phy_map[d40c->dma_cfg.mode_opt]; 900 return phy_map[d40c->dma_cfg.mode_opt];
776 else 901 else
777 return log_map[d40c->dma_cfg.mode_opt]; 902 return log_map[d40c->dma_cfg.mode_opt];
@@ -785,7 +910,7 @@ static void d40_config_write(struct d40_chan *d40c)
785 /* Odd addresses are even addresses + 4 */ 910 /* Odd addresses are even addresses + 4 */
786 addr_base = (d40c->phy_chan->num % 2) * 4; 911 addr_base = (d40c->phy_chan->num % 2) * 4;
787 /* Setup channel mode to logical or physical */ 912 /* Setup channel mode to logical or physical */
788 var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) << 913 var = ((u32)(chan_is_logical(d40c)) + 1) <<
789 D40_CHAN_POS(d40c->phy_chan->num); 914 D40_CHAN_POS(d40c->phy_chan->num);
790 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); 915 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
791 916
@@ -794,30 +919,18 @@ static void d40_config_write(struct d40_chan *d40c)
794 919
795 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); 920 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
796 921
797 if (d40c->log_num != D40_PHY_CHAN) { 922 if (chan_is_logical(d40c)) {
923 int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
924 & D40_SREG_ELEM_LOG_LIDX_MASK;
925 void __iomem *chanbase = chan_base(d40c);
926
798 /* Set default config for CFG reg */ 927 /* Set default config for CFG reg */
799 writel(d40c->src_def_cfg, 928 writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
800 d40c->base->virtbase + D40_DREG_PCBASE + 929 writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
801 d40c->phy_chan->num * D40_DREG_PCDELTA +
802 D40_CHAN_REG_SSCFG);
803 writel(d40c->dst_def_cfg,
804 d40c->base->virtbase + D40_DREG_PCBASE +
805 d40c->phy_chan->num * D40_DREG_PCDELTA +
806 D40_CHAN_REG_SDCFG);
807 930
808 /* Set LIDX for lcla */ 931 /* Set LIDX for lcla */
809 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & 932 writel(lidx, chanbase + D40_CHAN_REG_SSELT);
810 D40_SREG_ELEM_LOG_LIDX_MASK, 933 writel(lidx, chanbase + D40_CHAN_REG_SDELT);
811 d40c->base->virtbase + D40_DREG_PCBASE +
812 d40c->phy_chan->num * D40_DREG_PCDELTA +
813 D40_CHAN_REG_SDELT);
814
815 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
816 D40_SREG_ELEM_LOG_LIDX_MASK,
817 d40c->base->virtbase + D40_DREG_PCBASE +
818 d40c->phy_chan->num * D40_DREG_PCDELTA +
819 D40_CHAN_REG_SSELT);
820
821 } 934 }
822} 935}
823 936
@@ -825,15 +938,15 @@ static u32 d40_residue(struct d40_chan *d40c)
825{ 938{
826 u32 num_elt; 939 u32 num_elt;
827 940
828 if (d40c->log_num != D40_PHY_CHAN) 941 if (chan_is_logical(d40c))
829 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) 942 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
830 >> D40_MEM_LCSP2_ECNT_POS; 943 >> D40_MEM_LCSP2_ECNT_POS;
831 else 944 else {
832 num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + 945 u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
833 d40c->phy_chan->num * D40_DREG_PCDELTA + 946 num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
834 D40_CHAN_REG_SDELT) & 947 >> D40_SREG_ELEM_PHY_ECNT_POS;
835 D40_SREG_ELEM_PHY_ECNT_MASK) >> 948 }
836 D40_SREG_ELEM_PHY_ECNT_POS; 949
837 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); 950 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
838} 951}
839 952
@@ -841,20 +954,17 @@ static bool d40_tx_is_linked(struct d40_chan *d40c)
841{ 954{
842 bool is_link; 955 bool is_link;
843 956
844 if (d40c->log_num != D40_PHY_CHAN) 957 if (chan_is_logical(d40c))
845 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; 958 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
846 else 959 else
847 is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE + 960 is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
848 d40c->phy_chan->num * D40_DREG_PCDELTA + 961 & D40_SREG_LNK_PHYS_LNK_MASK;
849 D40_CHAN_REG_SDLNK) & 962
850 D40_SREG_LNK_PHYS_LNK_MASK;
851 return is_link; 963 return is_link;
852} 964}
853 965
854static int d40_pause(struct dma_chan *chan) 966static int d40_pause(struct d40_chan *d40c)
855{ 967{
856 struct d40_chan *d40c =
857 container_of(chan, struct d40_chan, chan);
858 int res = 0; 968 int res = 0;
859 unsigned long flags; 969 unsigned long flags;
860 970
@@ -865,7 +975,7 @@ static int d40_pause(struct dma_chan *chan)
865 975
866 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 976 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
867 if (res == 0) { 977 if (res == 0) {
868 if (d40c->log_num != D40_PHY_CHAN) { 978 if (chan_is_logical(d40c)) {
869 d40_config_set_event(d40c, false); 979 d40_config_set_event(d40c, false);
870 /* Resume the other logical channels if any */ 980 /* Resume the other logical channels if any */
871 if (d40_chan_has_events(d40c)) 981 if (d40_chan_has_events(d40c))
@@ -878,10 +988,8 @@ static int d40_pause(struct dma_chan *chan)
878 return res; 988 return res;
879} 989}
880 990
881static int d40_resume(struct dma_chan *chan) 991static int d40_resume(struct d40_chan *d40c)
882{ 992{
883 struct d40_chan *d40c =
884 container_of(chan, struct d40_chan, chan);
885 int res = 0; 993 int res = 0;
886 unsigned long flags; 994 unsigned long flags;
887 995
@@ -891,7 +999,7 @@ static int d40_resume(struct dma_chan *chan)
891 spin_lock_irqsave(&d40c->lock, flags); 999 spin_lock_irqsave(&d40c->lock, flags);
892 1000
893 if (d40c->base->rev == 0) 1001 if (d40c->base->rev == 0)
894 if (d40c->log_num != D40_PHY_CHAN) { 1002 if (chan_is_logical(d40c)) {
895 res = d40_channel_execute_command(d40c, 1003 res = d40_channel_execute_command(d40c,
896 D40_DMA_SUSPEND_REQ); 1004 D40_DMA_SUSPEND_REQ);
897 goto no_suspend; 1005 goto no_suspend;
@@ -900,7 +1008,7 @@ static int d40_resume(struct dma_chan *chan)
900 /* If bytes left to transfer or linked tx resume job */ 1008 /* If bytes left to transfer or linked tx resume job */
901 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { 1009 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
902 1010
903 if (d40c->log_num != D40_PHY_CHAN) 1011 if (chan_is_logical(d40c))
904 d40_config_set_event(d40c, true); 1012 d40_config_set_event(d40c, true);
905 1013
906 res = d40_channel_execute_command(d40c, D40_DMA_RUN); 1014 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
@@ -911,75 +1019,20 @@ no_suspend:
911 return res; 1019 return res;
912} 1020}
913 1021
914static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d) 1022static int d40_terminate_all(struct d40_chan *chan)
915{ 1023{
916 /* TODO: Write */ 1024 unsigned long flags;
917} 1025 int ret = 0;
918
919static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d)
920{
921 struct d40_desc *d40d_prev = NULL;
922 int i;
923 u32 val;
924
925 if (!list_empty(&d40c->queue))
926 d40d_prev = d40_last_queued(d40c);
927 else if (!list_empty(&d40c->active))
928 d40d_prev = d40_first_active_get(d40c);
929
930 if (!d40d_prev)
931 return;
932
933 /* Here we try to join this job with previous jobs */
934 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
935 d40c->phy_chan->num * D40_DREG_PCDELTA +
936 D40_CHAN_REG_SSLNK);
937
938 /* Figure out which link we're currently transmitting */
939 for (i = 0; i < d40d_prev->lli_len; i++)
940 if (val == d40d_prev->lli_phy.src[i].reg_lnk)
941 break;
942
943 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
944 d40c->phy_chan->num * D40_DREG_PCDELTA +
945 D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS;
946
947 if (i == (d40d_prev->lli_len - 1) && val > 0) {
948 /* Change the current one */
949 writel(virt_to_phys(d40d->lli_phy.src),
950 d40c->base->virtbase + D40_DREG_PCBASE +
951 d40c->phy_chan->num * D40_DREG_PCDELTA +
952 D40_CHAN_REG_SSLNK);
953 writel(virt_to_phys(d40d->lli_phy.dst),
954 d40c->base->virtbase + D40_DREG_PCBASE +
955 d40c->phy_chan->num * D40_DREG_PCDELTA +
956 D40_CHAN_REG_SDLNK);
957
958 d40d->is_hw_linked = true;
959
960 } else if (i < d40d_prev->lli_len) {
961 (void) dma_unmap_single(d40c->base->dev,
962 virt_to_phys(d40d_prev->lli_phy.src),
963 d40d_prev->lli_pool.size,
964 DMA_TO_DEVICE);
965 1026
966 /* Keep the settings */ 1027 ret = d40_pause(chan);
967 val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk & 1028 if (!ret && chan_is_physical(chan))
968 ~D40_SREG_LNK_PHYS_LNK_MASK; 1029 ret = d40_channel_execute_command(chan, D40_DMA_STOP);
969 d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk =
970 val | virt_to_phys(d40d->lli_phy.src);
971 1030
972 val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk & 1031 spin_lock_irqsave(&chan->lock, flags);
973 ~D40_SREG_LNK_PHYS_LNK_MASK; 1032 d40_term_all(chan);
974 d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk = 1033 spin_unlock_irqrestore(&chan->lock, flags);
975 val | virt_to_phys(d40d->lli_phy.dst);
976 1034
977 (void) dma_map_single(d40c->base->dev, 1035 return ret;
978 d40d_prev->lli_phy.src,
979 d40d_prev->lli_pool.size,
980 DMA_TO_DEVICE);
981 d40d->is_hw_linked = true;
982 }
983} 1036}
984 1037
985static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) 1038static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -990,8 +1043,6 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
990 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); 1043 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
991 unsigned long flags; 1044 unsigned long flags;
992 1045
993 (void) d40_pause(&d40c->chan);
994
995 spin_lock_irqsave(&d40c->lock, flags); 1046 spin_lock_irqsave(&d40c->lock, flags);
996 1047
997 d40c->chan.cookie++; 1048 d40c->chan.cookie++;
@@ -1001,17 +1052,10 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1001 1052
1002 d40d->txd.cookie = d40c->chan.cookie; 1053 d40d->txd.cookie = d40c->chan.cookie;
1003 1054
1004 if (d40c->log_num == D40_PHY_CHAN)
1005 d40_tx_submit_phy(d40c, d40d);
1006 else
1007 d40_tx_submit_log(d40c, d40d);
1008
1009 d40_desc_queue(d40c, d40d); 1055 d40_desc_queue(d40c, d40d);
1010 1056
1011 spin_unlock_irqrestore(&d40c->lock, flags); 1057 spin_unlock_irqrestore(&d40c->lock, flags);
1012 1058
1013 (void) d40_resume(&d40c->chan);
1014
1015 return tx->cookie; 1059 return tx->cookie;
1016} 1060}
1017 1061
@@ -1020,7 +1064,7 @@ static int d40_start(struct d40_chan *d40c)
1020 if (d40c->base->rev == 0) { 1064 if (d40c->base->rev == 0) {
1021 int err; 1065 int err;
1022 1066
1023 if (d40c->log_num != D40_PHY_CHAN) { 1067 if (chan_is_logical(d40c)) {
1024 err = d40_channel_execute_command(d40c, 1068 err = d40_channel_execute_command(d40c,
1025 D40_DMA_SUSPEND_REQ); 1069 D40_DMA_SUSPEND_REQ);
1026 if (err) 1070 if (err)
@@ -1028,7 +1072,7 @@ static int d40_start(struct d40_chan *d40c)
1028 } 1072 }
1029 } 1073 }
1030 1074
1031 if (d40c->log_num != D40_PHY_CHAN) 1075 if (chan_is_logical(d40c))
1032 d40_config_set_event(d40c, true); 1076 d40_config_set_event(d40c, true);
1033 1077
1034 return d40_channel_execute_command(d40c, D40_DMA_RUN); 1078 return d40_channel_execute_command(d40c, D40_DMA_RUN);
@@ -1051,21 +1095,14 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
1051 /* Add to active queue */ 1095 /* Add to active queue */
1052 d40_desc_submit(d40c, d40d); 1096 d40_desc_submit(d40c, d40d);
1053 1097
1054 /* 1098 /* Initiate DMA job */
1055 * If this job is already linked in hw, 1099 d40_desc_load(d40c, d40d);
1056 * do not submit it.
1057 */
1058
1059 if (!d40d->is_hw_linked) {
1060 /* Initiate DMA job */
1061 d40_desc_load(d40c, d40d);
1062 1100
1063 /* Start dma job */ 1101 /* Start dma job */
1064 err = d40_start(d40c); 1102 err = d40_start(d40c);
1065 1103
1066 if (err) 1104 if (err)
1067 return NULL; 1105 return NULL;
1068 }
1069 } 1106 }
1070 1107
1071 return d40d; 1108 return d40d;
@@ -1082,17 +1119,36 @@ static void dma_tc_handle(struct d40_chan *d40c)
1082 if (d40d == NULL) 1119 if (d40d == NULL)
1083 return; 1120 return;
1084 1121
1085 d40_lcla_free_all(d40c, d40d); 1122 if (d40d->cyclic) {
1123 /*
1124 * If this was a paritially loaded list, we need to reloaded
1125 * it, and only when the list is completed. We need to check
1126 * for done because the interrupt will hit for every link, and
1127 * not just the last one.
1128 */
1129 if (d40d->lli_current < d40d->lli_len
1130 && !d40_tx_is_linked(d40c)
1131 && !d40_residue(d40c)) {
1132 d40_lcla_free_all(d40c, d40d);
1133 d40_desc_load(d40c, d40d);
1134 (void) d40_start(d40c);
1086 1135
1087 if (d40d->lli_current < d40d->lli_len) { 1136 if (d40d->lli_current == d40d->lli_len)
1088 d40_desc_load(d40c, d40d); 1137 d40d->lli_current = 0;
1089 /* Start dma job */ 1138 }
1090 (void) d40_start(d40c); 1139 } else {
1091 return; 1140 d40_lcla_free_all(d40c, d40d);
1092 }
1093 1141
1094 if (d40_queue_start(d40c) == NULL) 1142 if (d40d->lli_current < d40d->lli_len) {
1095 d40c->busy = false; 1143 d40_desc_load(d40c, d40d);
1144 /* Start dma job */
1145 (void) d40_start(d40c);
1146 return;
1147 }
1148
1149 if (d40_queue_start(d40c) == NULL)
1150 d40c->busy = false;
1151 }
1096 1152
1097 d40c->pending_tx++; 1153 d40c->pending_tx++;
1098 tasklet_schedule(&d40c->tasklet); 1154 tasklet_schedule(&d40c->tasklet);
@@ -1111,11 +1167,11 @@ static void dma_tasklet(unsigned long data)
1111 1167
1112 /* Get first active entry from list */ 1168 /* Get first active entry from list */
1113 d40d = d40_first_active_get(d40c); 1169 d40d = d40_first_active_get(d40c);
1114
1115 if (d40d == NULL) 1170 if (d40d == NULL)
1116 goto err; 1171 goto err;
1117 1172
1118 d40c->completed = d40d->txd.cookie; 1173 if (!d40d->cyclic)
1174 d40c->completed = d40d->txd.cookie;
1119 1175
1120 /* 1176 /*
1121 * If terminating a channel pending_tx is set to zero. 1177 * If terminating a channel pending_tx is set to zero.
@@ -1130,16 +1186,18 @@ static void dma_tasklet(unsigned long data)
1130 callback = d40d->txd.callback; 1186 callback = d40d->txd.callback;
1131 callback_param = d40d->txd.callback_param; 1187 callback_param = d40d->txd.callback_param;
1132 1188
1133 if (async_tx_test_ack(&d40d->txd)) { 1189 if (!d40d->cyclic) {
1134 d40_pool_lli_free(d40d); 1190 if (async_tx_test_ack(&d40d->txd)) {
1135 d40_desc_remove(d40d); 1191 d40_pool_lli_free(d40c, d40d);
1136 d40_desc_free(d40c, d40d);
1137 } else {
1138 if (!d40d->is_in_client_list) {
1139 d40_desc_remove(d40d); 1192 d40_desc_remove(d40d);
1140 d40_lcla_free_all(d40c, d40d); 1193 d40_desc_free(d40c, d40d);
1141 list_add_tail(&d40d->node, &d40c->client); 1194 } else {
1142 d40d->is_in_client_list = true; 1195 if (!d40d->is_in_client_list) {
1196 d40_desc_remove(d40d);
1197 d40_lcla_free_all(d40c, d40d);
1198 list_add_tail(&d40d->node, &d40c->client);
1199 d40d->is_in_client_list = true;
1200 }
1143 } 1201 }
1144 } 1202 }
1145 1203
@@ -1216,9 +1274,8 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
1216 if (!il[row].is_error) 1274 if (!il[row].is_error)
1217 dma_tc_handle(d40c); 1275 dma_tc_handle(d40c);
1218 else 1276 else
1219 dev_err(base->dev, 1277 d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
1220 "[%s] IRQ chan: %ld offset %d idx %d\n", 1278 chan, il[row].offset, idx);
1221 __func__, chan, il[row].offset, idx);
1222 1279
1223 spin_unlock(&d40c->lock); 1280 spin_unlock(&d40c->lock);
1224 } 1281 }
@@ -1237,8 +1294,7 @@ static int d40_validate_conf(struct d40_chan *d40c,
1237 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; 1294 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
1238 1295
1239 if (!conf->dir) { 1296 if (!conf->dir) {
1240 dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n", 1297 chan_err(d40c, "Invalid direction.\n");
1241 __func__);
1242 res = -EINVAL; 1298 res = -EINVAL;
1243 } 1299 }
1244 1300
@@ -1246,46 +1302,40 @@ static int d40_validate_conf(struct d40_chan *d40c,
1246 d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 && 1302 d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
1247 d40c->runtime_addr == 0) { 1303 d40c->runtime_addr == 0) {
1248 1304
1249 dev_err(&d40c->chan.dev->device, 1305 chan_err(d40c, "Invalid TX channel address (%d)\n",
1250 "[%s] Invalid TX channel address (%d)\n", 1306 conf->dst_dev_type);
1251 __func__, conf->dst_dev_type);
1252 res = -EINVAL; 1307 res = -EINVAL;
1253 } 1308 }
1254 1309
1255 if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY && 1310 if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
1256 d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 && 1311 d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
1257 d40c->runtime_addr == 0) { 1312 d40c->runtime_addr == 0) {
1258 dev_err(&d40c->chan.dev->device, 1313 chan_err(d40c, "Invalid RX channel address (%d)\n",
1259 "[%s] Invalid RX channel address (%d)\n", 1314 conf->src_dev_type);
1260 __func__, conf->src_dev_type);
1261 res = -EINVAL; 1315 res = -EINVAL;
1262 } 1316 }
1263 1317
1264 if (conf->dir == STEDMA40_MEM_TO_PERIPH && 1318 if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
1265 dst_event_group == STEDMA40_DEV_DST_MEMORY) { 1319 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
1266 dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n", 1320 chan_err(d40c, "Invalid dst\n");
1267 __func__);
1268 res = -EINVAL; 1321 res = -EINVAL;
1269 } 1322 }
1270 1323
1271 if (conf->dir == STEDMA40_PERIPH_TO_MEM && 1324 if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
1272 src_event_group == STEDMA40_DEV_SRC_MEMORY) { 1325 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
1273 dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n", 1326 chan_err(d40c, "Invalid src\n");
1274 __func__);
1275 res = -EINVAL; 1327 res = -EINVAL;
1276 } 1328 }
1277 1329
1278 if (src_event_group == STEDMA40_DEV_SRC_MEMORY && 1330 if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
1279 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { 1331 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
1280 dev_err(&d40c->chan.dev->device, 1332 chan_err(d40c, "No event line\n");
1281 "[%s] No event line\n", __func__);
1282 res = -EINVAL; 1333 res = -EINVAL;
1283 } 1334 }
1284 1335
1285 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH && 1336 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
1286 (src_event_group != dst_event_group)) { 1337 (src_event_group != dst_event_group)) {
1287 dev_err(&d40c->chan.dev->device, 1338 chan_err(d40c, "Invalid event group\n");
1288 "[%s] Invalid event group\n", __func__);
1289 res = -EINVAL; 1339 res = -EINVAL;
1290 } 1340 }
1291 1341
@@ -1294,9 +1344,7 @@ static int d40_validate_conf(struct d40_chan *d40c,
1294 * DMAC HW supports it. Will be added to this driver, 1344 * DMAC HW supports it. Will be added to this driver,
1295 * in case any dma client requires it. 1345 * in case any dma client requires it.
1296 */ 1346 */
1297 dev_err(&d40c->chan.dev->device, 1347 chan_err(d40c, "periph to periph not supported\n");
1298 "[%s] periph to periph not supported\n",
1299 __func__);
1300 res = -EINVAL; 1348 res = -EINVAL;
1301 } 1349 }
1302 1350
@@ -1309,9 +1357,7 @@ static int d40_validate_conf(struct d40_chan *d40c,
1309 * src (burst x width) == dst (burst x width) 1357 * src (burst x width) == dst (burst x width)
1310 */ 1358 */
1311 1359
1312 dev_err(&d40c->chan.dev->device, 1360 chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1313 "[%s] src (burst x width) != dst (burst x width)\n",
1314 __func__);
1315 res = -EINVAL; 1361 res = -EINVAL;
1316 } 1362 }
1317 1363
@@ -1514,8 +1560,7 @@ static int d40_config_memcpy(struct d40_chan *d40c)
1514 dma_has_cap(DMA_SLAVE, cap)) { 1560 dma_has_cap(DMA_SLAVE, cap)) {
1515 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; 1561 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1516 } else { 1562 } else {
1517 dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n", 1563 chan_err(d40c, "No memcpy\n");
1518 __func__);
1519 return -EINVAL; 1564 return -EINVAL;
1520 } 1565 }
1521 1566
@@ -1540,21 +1585,19 @@ static int d40_free_dma(struct d40_chan *d40c)
1540 /* Release client owned descriptors */ 1585 /* Release client owned descriptors */
1541 if (!list_empty(&d40c->client)) 1586 if (!list_empty(&d40c->client))
1542 list_for_each_entry_safe(d, _d, &d40c->client, node) { 1587 list_for_each_entry_safe(d, _d, &d40c->client, node) {
1543 d40_pool_lli_free(d); 1588 d40_pool_lli_free(d40c, d);
1544 d40_desc_remove(d); 1589 d40_desc_remove(d);
1545 d40_desc_free(d40c, d); 1590 d40_desc_free(d40c, d);
1546 } 1591 }
1547 1592
1548 if (phy == NULL) { 1593 if (phy == NULL) {
1549 dev_err(&d40c->chan.dev->device, "[%s] phy == null\n", 1594 chan_err(d40c, "phy == null\n");
1550 __func__);
1551 return -EINVAL; 1595 return -EINVAL;
1552 } 1596 }
1553 1597
1554 if (phy->allocated_src == D40_ALLOC_FREE && 1598 if (phy->allocated_src == D40_ALLOC_FREE &&
1555 phy->allocated_dst == D40_ALLOC_FREE) { 1599 phy->allocated_dst == D40_ALLOC_FREE) {
1556 dev_err(&d40c->chan.dev->device, "[%s] channel already free\n", 1600 chan_err(d40c, "channel already free\n");
1557 __func__);
1558 return -EINVAL; 1601 return -EINVAL;
1559 } 1602 }
1560 1603
@@ -1566,19 +1609,17 @@ static int d40_free_dma(struct d40_chan *d40c)
1566 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 1609 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1567 is_src = true; 1610 is_src = true;
1568 } else { 1611 } else {
1569 dev_err(&d40c->chan.dev->device, 1612 chan_err(d40c, "Unknown direction\n");
1570 "[%s] Unknown direction\n", __func__);
1571 return -EINVAL; 1613 return -EINVAL;
1572 } 1614 }
1573 1615
1574 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1616 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1575 if (res) { 1617 if (res) {
1576 dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n", 1618 chan_err(d40c, "suspend failed\n");
1577 __func__);
1578 return res; 1619 return res;
1579 } 1620 }
1580 1621
1581 if (d40c->log_num != D40_PHY_CHAN) { 1622 if (chan_is_logical(d40c)) {
1582 /* Release logical channel, deactivate the event line */ 1623 /* Release logical channel, deactivate the event line */
1583 1624
1584 d40_config_set_event(d40c, false); 1625 d40_config_set_event(d40c, false);
@@ -1594,9 +1635,8 @@ static int d40_free_dma(struct d40_chan *d40c)
1594 res = d40_channel_execute_command(d40c, 1635 res = d40_channel_execute_command(d40c,
1595 D40_DMA_RUN); 1636 D40_DMA_RUN);
1596 if (res) { 1637 if (res) {
1597 dev_err(&d40c->chan.dev->device, 1638 chan_err(d40c,
1598 "[%s] Executing RUN command\n", 1639 "Executing RUN command\n");
1599 __func__);
1600 return res; 1640 return res;
1601 } 1641 }
1602 } 1642 }
@@ -1609,8 +1649,7 @@ static int d40_free_dma(struct d40_chan *d40c)
1609 /* Release physical channel */ 1649 /* Release physical channel */
1610 res = d40_channel_execute_command(d40c, D40_DMA_STOP); 1650 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1611 if (res) { 1651 if (res) {
1612 dev_err(&d40c->chan.dev->device, 1652 chan_err(d40c, "Failed to stop channel\n");
1613 "[%s] Failed to stop channel\n", __func__);
1614 return res; 1653 return res;
1615 } 1654 }
1616 d40c->phy_chan = NULL; 1655 d40c->phy_chan = NULL;
@@ -1622,6 +1661,7 @@ static int d40_free_dma(struct d40_chan *d40c)
1622 1661
1623static bool d40_is_paused(struct d40_chan *d40c) 1662static bool d40_is_paused(struct d40_chan *d40c)
1624{ 1663{
1664 void __iomem *chanbase = chan_base(d40c);
1625 bool is_paused = false; 1665 bool is_paused = false;
1626 unsigned long flags; 1666 unsigned long flags;
1627 void __iomem *active_reg; 1667 void __iomem *active_reg;
@@ -1630,7 +1670,7 @@ static bool d40_is_paused(struct d40_chan *d40c)
1630 1670
1631 spin_lock_irqsave(&d40c->lock, flags); 1671 spin_lock_irqsave(&d40c->lock, flags);
1632 1672
1633 if (d40c->log_num == D40_PHY_CHAN) { 1673 if (chan_is_physical(d40c)) {
1634 if (d40c->phy_chan->num % 2 == 0) 1674 if (d40c->phy_chan->num % 2 == 0)
1635 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; 1675 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1636 else 1676 else
@@ -1648,17 +1688,12 @@ static bool d40_is_paused(struct d40_chan *d40c)
1648 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 1688 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1649 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 1689 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1650 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); 1690 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1651 status = readl(d40c->base->virtbase + D40_DREG_PCBASE + 1691 status = readl(chanbase + D40_CHAN_REG_SDLNK);
1652 d40c->phy_chan->num * D40_DREG_PCDELTA +
1653 D40_CHAN_REG_SDLNK);
1654 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { 1692 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1655 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 1693 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1656 status = readl(d40c->base->virtbase + D40_DREG_PCBASE + 1694 status = readl(chanbase + D40_CHAN_REG_SSLNK);
1657 d40c->phy_chan->num * D40_DREG_PCDELTA +
1658 D40_CHAN_REG_SSLNK);
1659 } else { 1695 } else {
1660 dev_err(&d40c->chan.dev->device, 1696 chan_err(d40c, "Unknown direction\n");
1661 "[%s] Unknown direction\n", __func__);
1662 goto _exit; 1697 goto _exit;
1663 } 1698 }
1664 1699
@@ -1688,114 +1723,184 @@ static u32 stedma40_residue(struct dma_chan *chan)
1688 return bytes_left; 1723 return bytes_left;
1689} 1724}
1690 1725
1691struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, 1726static int
1692 struct scatterlist *sgl_dst, 1727d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
1693 struct scatterlist *sgl_src, 1728 struct scatterlist *sg_src, struct scatterlist *sg_dst,
1694 unsigned int sgl_len, 1729 unsigned int sg_len, dma_addr_t src_dev_addr,
1695 unsigned long dma_flags) 1730 dma_addr_t dst_dev_addr)
1696{ 1731{
1697 int res; 1732 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
1698 struct d40_desc *d40d; 1733 struct stedma40_half_channel_info *src_info = &cfg->src_info;
1699 struct d40_chan *d40c = container_of(chan, struct d40_chan, 1734 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
1700 chan); 1735 int ret;
1701 unsigned long flags;
1702 1736
1703 if (d40c->phy_chan == NULL) { 1737 ret = d40_log_sg_to_lli(sg_src, sg_len,
1704 dev_err(&d40c->chan.dev->device, 1738 src_dev_addr,
1705 "[%s] Unallocated channel.\n", __func__); 1739 desc->lli_log.src,
1706 return ERR_PTR(-EINVAL); 1740 chan->log_def.lcsp1,
1707 } 1741 src_info->data_width,
1742 dst_info->data_width);
1708 1743
1709 spin_lock_irqsave(&d40c->lock, flags); 1744 ret = d40_log_sg_to_lli(sg_dst, sg_len,
1710 d40d = d40_desc_get(d40c); 1745 dst_dev_addr,
1746 desc->lli_log.dst,
1747 chan->log_def.lcsp3,
1748 dst_info->data_width,
1749 src_info->data_width);
1711 1750
1712 if (d40d == NULL) 1751 return ret < 0 ? ret : 0;
1752}
1753
1754static int
1755d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
1756 struct scatterlist *sg_src, struct scatterlist *sg_dst,
1757 unsigned int sg_len, dma_addr_t src_dev_addr,
1758 dma_addr_t dst_dev_addr)
1759{
1760 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
1761 struct stedma40_half_channel_info *src_info = &cfg->src_info;
1762 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
1763 unsigned long flags = 0;
1764 int ret;
1765
1766 if (desc->cyclic)
1767 flags |= LLI_CYCLIC | LLI_TERM_INT;
1768
1769 ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
1770 desc->lli_phy.src,
1771 virt_to_phys(desc->lli_phy.src),
1772 chan->src_def_cfg,
1773 src_info, dst_info, flags);
1774
1775 ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
1776 desc->lli_phy.dst,
1777 virt_to_phys(desc->lli_phy.dst),
1778 chan->dst_def_cfg,
1779 dst_info, src_info, flags);
1780
1781 dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
1782 desc->lli_pool.size, DMA_TO_DEVICE);
1783
1784 return ret < 0 ? ret : 0;
1785}
1786
1787
1788static struct d40_desc *
1789d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
1790 unsigned int sg_len, unsigned long dma_flags)
1791{
1792 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
1793 struct d40_desc *desc;
1794 int ret;
1795
1796 desc = d40_desc_get(chan);
1797 if (!desc)
1798 return NULL;
1799
1800 desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
1801 cfg->dst_info.data_width);
1802 if (desc->lli_len < 0) {
1803 chan_err(chan, "Unaligned size\n");
1713 goto err; 1804 goto err;
1805 }
1714 1806
1715 d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len, 1807 ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
1716 d40c->dma_cfg.src_info.data_width, 1808 if (ret < 0) {
1717 d40c->dma_cfg.dst_info.data_width); 1809 chan_err(chan, "Could not allocate lli\n");
1718 if (d40d->lli_len < 0) {
1719 dev_err(&d40c->chan.dev->device,
1720 "[%s] Unaligned size\n", __func__);
1721 goto err; 1810 goto err;
1722 } 1811 }
1723 1812
1724 d40d->lli_current = 0;
1725 d40d->txd.flags = dma_flags;
1726 1813
1727 if (d40c->log_num != D40_PHY_CHAN) { 1814 desc->lli_current = 0;
1815 desc->txd.flags = dma_flags;
1816 desc->txd.tx_submit = d40_tx_submit;
1728 1817
1729 if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { 1818 dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
1730 dev_err(&d40c->chan.dev->device,
1731 "[%s] Out of memory\n", __func__);
1732 goto err;
1733 }
1734 1819
1735 (void) d40_log_sg_to_lli(sgl_src, 1820 return desc;
1736 sgl_len, 1821
1737 d40d->lli_log.src, 1822err:
1738 d40c->log_def.lcsp1, 1823 d40_desc_free(chan, desc);
1739 d40c->dma_cfg.src_info.data_width, 1824 return NULL;
1740 d40c->dma_cfg.dst_info.data_width); 1825}
1741 1826
1742 (void) d40_log_sg_to_lli(sgl_dst, 1827static dma_addr_t
1743 sgl_len, 1828d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
1744 d40d->lli_log.dst, 1829{
1745 d40c->log_def.lcsp3, 1830 struct stedma40_platform_data *plat = chan->base->plat_data;
1746 d40c->dma_cfg.dst_info.data_width, 1831 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
1747 d40c->dma_cfg.src_info.data_width); 1832 dma_addr_t addr;
1748 } else {
1749 if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
1750 dev_err(&d40c->chan.dev->device,
1751 "[%s] Out of memory\n", __func__);
1752 goto err;
1753 }
1754 1833
1755 res = d40_phy_sg_to_lli(sgl_src, 1834 if (chan->runtime_addr)
1756 sgl_len, 1835 return chan->runtime_addr;
1757 0,
1758 d40d->lli_phy.src,
1759 virt_to_phys(d40d->lli_phy.src),
1760 d40c->src_def_cfg,
1761 d40c->dma_cfg.src_info.data_width,
1762 d40c->dma_cfg.dst_info.data_width,
1763 d40c->dma_cfg.src_info.psize);
1764 1836
1765 if (res < 0) 1837 if (direction == DMA_FROM_DEVICE)
1766 goto err; 1838 addr = plat->dev_rx[cfg->src_dev_type];
1839 else if (direction == DMA_TO_DEVICE)
1840 addr = plat->dev_tx[cfg->dst_dev_type];
1767 1841
1768 res = d40_phy_sg_to_lli(sgl_dst, 1842 return addr;
1769 sgl_len, 1843}
1770 0,
1771 d40d->lli_phy.dst,
1772 virt_to_phys(d40d->lli_phy.dst),
1773 d40c->dst_def_cfg,
1774 d40c->dma_cfg.dst_info.data_width,
1775 d40c->dma_cfg.src_info.data_width,
1776 d40c->dma_cfg.dst_info.psize);
1777 1844
1778 if (res < 0) 1845static struct dma_async_tx_descriptor *
1779 goto err; 1846d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
1847 struct scatterlist *sg_dst, unsigned int sg_len,
1848 enum dma_data_direction direction, unsigned long dma_flags)
1849{
1850 struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
1851 dma_addr_t src_dev_addr = 0;
1852 dma_addr_t dst_dev_addr = 0;
1853 struct d40_desc *desc;
1854 unsigned long flags;
1855 int ret;
1780 1856
1781 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, 1857 if (!chan->phy_chan) {
1782 d40d->lli_pool.size, DMA_TO_DEVICE); 1858 chan_err(chan, "Cannot prepare unallocated channel\n");
1859 return NULL;
1783 } 1860 }
1784 1861
1785 dma_async_tx_descriptor_init(&d40d->txd, chan);
1786 1862
1787 d40d->txd.tx_submit = d40_tx_submit; 1863 spin_lock_irqsave(&chan->lock, flags);
1788 1864
1789 spin_unlock_irqrestore(&d40c->lock, flags); 1865 desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
1866 if (desc == NULL)
1867 goto err;
1868
1869 if (sg_next(&sg_src[sg_len - 1]) == sg_src)
1870 desc->cyclic = true;
1871
1872 if (direction != DMA_NONE) {
1873 dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
1874
1875 if (direction == DMA_FROM_DEVICE)
1876 src_dev_addr = dev_addr;
1877 else if (direction == DMA_TO_DEVICE)
1878 dst_dev_addr = dev_addr;
1879 }
1880
1881 if (chan_is_logical(chan))
1882 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
1883 sg_len, src_dev_addr, dst_dev_addr);
1884 else
1885 ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
1886 sg_len, src_dev_addr, dst_dev_addr);
1887
1888 if (ret) {
1889 chan_err(chan, "Failed to prepare %s sg job: %d\n",
1890 chan_is_logical(chan) ? "log" : "phy", ret);
1891 goto err;
1892 }
1893
1894 spin_unlock_irqrestore(&chan->lock, flags);
1895
1896 return &desc->txd;
1790 1897
1791 return &d40d->txd;
1792err: 1898err:
1793 if (d40d) 1899 if (desc)
1794 d40_desc_free(d40c, d40d); 1900 d40_desc_free(chan, desc);
1795 spin_unlock_irqrestore(&d40c->lock, flags); 1901 spin_unlock_irqrestore(&chan->lock, flags);
1796 return NULL; 1902 return NULL;
1797} 1903}
1798EXPORT_SYMBOL(stedma40_memcpy_sg);
1799 1904
1800bool stedma40_filter(struct dma_chan *chan, void *data) 1905bool stedma40_filter(struct dma_chan *chan, void *data)
1801{ 1906{
@@ -1818,6 +1923,38 @@ bool stedma40_filter(struct dma_chan *chan, void *data)
1818} 1923}
1819EXPORT_SYMBOL(stedma40_filter); 1924EXPORT_SYMBOL(stedma40_filter);
1820 1925
1926static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
1927{
1928 bool realtime = d40c->dma_cfg.realtime;
1929 bool highprio = d40c->dma_cfg.high_priority;
1930 u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
1931 u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
1932 u32 event = D40_TYPE_TO_EVENT(dev_type);
1933 u32 group = D40_TYPE_TO_GROUP(dev_type);
1934 u32 bit = 1 << event;
1935
1936 /* Destination event lines are stored in the upper halfword */
1937 if (!src)
1938 bit <<= 16;
1939
1940 writel(bit, d40c->base->virtbase + prioreg + group * 4);
1941 writel(bit, d40c->base->virtbase + rtreg + group * 4);
1942}
1943
1944static void d40_set_prio_realtime(struct d40_chan *d40c)
1945{
1946 if (d40c->base->rev < 3)
1947 return;
1948
1949 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
1950 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
1951 __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);
1952
1953 if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
1954 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
1955 __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
1956}
1957
1821/* DMA ENGINE functions */ 1958/* DMA ENGINE functions */
1822static int d40_alloc_chan_resources(struct dma_chan *chan) 1959static int d40_alloc_chan_resources(struct dma_chan *chan)
1823{ 1960{
@@ -1834,9 +1971,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
1834 if (!d40c->configured) { 1971 if (!d40c->configured) {
1835 err = d40_config_memcpy(d40c); 1972 err = d40_config_memcpy(d40c);
1836 if (err) { 1973 if (err) {
1837 dev_err(&d40c->chan.dev->device, 1974 chan_err(d40c, "Failed to configure memcpy channel\n");
1838 "[%s] Failed to configure memcpy channel\n",
1839 __func__);
1840 goto fail; 1975 goto fail;
1841 } 1976 }
1842 } 1977 }
@@ -1844,16 +1979,17 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
1844 1979
1845 err = d40_allocate_channel(d40c); 1980 err = d40_allocate_channel(d40c);
1846 if (err) { 1981 if (err) {
1847 dev_err(&d40c->chan.dev->device, 1982 chan_err(d40c, "Failed to allocate channel\n");
1848 "[%s] Failed to allocate channel\n", __func__);
1849 goto fail; 1983 goto fail;
1850 } 1984 }
1851 1985
1852 /* Fill in basic CFG register values */ 1986 /* Fill in basic CFG register values */
1853 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, 1987 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1854 &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN); 1988 &d40c->dst_def_cfg, chan_is_logical(d40c));
1855 1989
1856 if (d40c->log_num != D40_PHY_CHAN) { 1990 d40_set_prio_realtime(d40c);
1991
1992 if (chan_is_logical(d40c)) {
1857 d40_log_cfg(&d40c->dma_cfg, 1993 d40_log_cfg(&d40c->dma_cfg,
1858 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); 1994 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1859 1995
@@ -1886,8 +2022,7 @@ static void d40_free_chan_resources(struct dma_chan *chan)
1886 unsigned long flags; 2022 unsigned long flags;
1887 2023
1888 if (d40c->phy_chan == NULL) { 2024 if (d40c->phy_chan == NULL) {
1889 dev_err(&d40c->chan.dev->device, 2025 chan_err(d40c, "Cannot free unallocated channel\n");
1890 "[%s] Cannot free unallocated channel\n", __func__);
1891 return; 2026 return;
1892 } 2027 }
1893 2028
@@ -1897,8 +2032,7 @@ static void d40_free_chan_resources(struct dma_chan *chan)
1897 err = d40_free_dma(d40c); 2032 err = d40_free_dma(d40c);
1898 2033
1899 if (err) 2034 if (err)
1900 dev_err(&d40c->chan.dev->device, 2035 chan_err(d40c, "Failed to free channel\n");
1901 "[%s] Failed to free channel\n", __func__);
1902 spin_unlock_irqrestore(&d40c->lock, flags); 2036 spin_unlock_irqrestore(&d40c->lock, flags);
1903} 2037}
1904 2038
@@ -1908,251 +2042,31 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1908 size_t size, 2042 size_t size,
1909 unsigned long dma_flags) 2043 unsigned long dma_flags)
1910{ 2044{
1911 struct d40_desc *d40d; 2045 struct scatterlist dst_sg;
1912 struct d40_chan *d40c = container_of(chan, struct d40_chan, 2046 struct scatterlist src_sg;
1913 chan);
1914 unsigned long flags;
1915
1916 if (d40c->phy_chan == NULL) {
1917 dev_err(&d40c->chan.dev->device,
1918 "[%s] Channel is not allocated.\n", __func__);
1919 return ERR_PTR(-EINVAL);
1920 }
1921
1922 spin_lock_irqsave(&d40c->lock, flags);
1923 d40d = d40_desc_get(d40c);
1924
1925 if (d40d == NULL) {
1926 dev_err(&d40c->chan.dev->device,
1927 "[%s] Descriptor is NULL\n", __func__);
1928 goto err;
1929 }
1930 2047
1931 d40d->txd.flags = dma_flags; 2048 sg_init_table(&dst_sg, 1);
1932 d40d->lli_len = d40_size_2_dmalen(size, 2049 sg_init_table(&src_sg, 1);
1933 d40c->dma_cfg.src_info.data_width,
1934 d40c->dma_cfg.dst_info.data_width);
1935 if (d40d->lli_len < 0) {
1936 dev_err(&d40c->chan.dev->device,
1937 "[%s] Unaligned size\n", __func__);
1938 goto err;
1939 }
1940 2050
2051 sg_dma_address(&dst_sg) = dst;
2052 sg_dma_address(&src_sg) = src;
1941 2053
1942 dma_async_tx_descriptor_init(&d40d->txd, chan); 2054 sg_dma_len(&dst_sg) = size;
2055 sg_dma_len(&src_sg) = size;
1943 2056
1944 d40d->txd.tx_submit = d40_tx_submit; 2057 return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
1945
1946 if (d40c->log_num != D40_PHY_CHAN) {
1947
1948 if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
1949 dev_err(&d40c->chan.dev->device,
1950 "[%s] Out of memory\n", __func__);
1951 goto err;
1952 }
1953 d40d->lli_current = 0;
1954
1955 if (d40_log_buf_to_lli(d40d->lli_log.src,
1956 src,
1957 size,
1958 d40c->log_def.lcsp1,
1959 d40c->dma_cfg.src_info.data_width,
1960 d40c->dma_cfg.dst_info.data_width,
1961 true) == NULL)
1962 goto err;
1963
1964 if (d40_log_buf_to_lli(d40d->lli_log.dst,
1965 dst,
1966 size,
1967 d40c->log_def.lcsp3,
1968 d40c->dma_cfg.dst_info.data_width,
1969 d40c->dma_cfg.src_info.data_width,
1970 true) == NULL)
1971 goto err;
1972
1973 } else {
1974
1975 if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
1976 dev_err(&d40c->chan.dev->device,
1977 "[%s] Out of memory\n", __func__);
1978 goto err;
1979 }
1980
1981 if (d40_phy_buf_to_lli(d40d->lli_phy.src,
1982 src,
1983 size,
1984 d40c->dma_cfg.src_info.psize,
1985 0,
1986 d40c->src_def_cfg,
1987 true,
1988 d40c->dma_cfg.src_info.data_width,
1989 d40c->dma_cfg.dst_info.data_width,
1990 false) == NULL)
1991 goto err;
1992
1993 if (d40_phy_buf_to_lli(d40d->lli_phy.dst,
1994 dst,
1995 size,
1996 d40c->dma_cfg.dst_info.psize,
1997 0,
1998 d40c->dst_def_cfg,
1999 true,
2000 d40c->dma_cfg.dst_info.data_width,
2001 d40c->dma_cfg.src_info.data_width,
2002 false) == NULL)
2003 goto err;
2004
2005 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
2006 d40d->lli_pool.size, DMA_TO_DEVICE);
2007 }
2008
2009 spin_unlock_irqrestore(&d40c->lock, flags);
2010 return &d40d->txd;
2011
2012err:
2013 if (d40d)
2014 d40_desc_free(d40c, d40d);
2015 spin_unlock_irqrestore(&d40c->lock, flags);
2016 return NULL;
2017} 2058}
2018 2059
2019static struct dma_async_tx_descriptor * 2060static struct dma_async_tx_descriptor *
2020d40_prep_sg(struct dma_chan *chan, 2061d40_prep_memcpy_sg(struct dma_chan *chan,
2021 struct scatterlist *dst_sg, unsigned int dst_nents, 2062 struct scatterlist *dst_sg, unsigned int dst_nents,
2022 struct scatterlist *src_sg, unsigned int src_nents, 2063 struct scatterlist *src_sg, unsigned int src_nents,
2023 unsigned long dma_flags) 2064 unsigned long dma_flags)
2024{ 2065{
2025 if (dst_nents != src_nents) 2066 if (dst_nents != src_nents)
2026 return NULL; 2067 return NULL;
2027 2068
2028 return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags); 2069 return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
2029}
2030
2031static int d40_prep_slave_sg_log(struct d40_desc *d40d,
2032 struct d40_chan *d40c,
2033 struct scatterlist *sgl,
2034 unsigned int sg_len,
2035 enum dma_data_direction direction,
2036 unsigned long dma_flags)
2037{
2038 dma_addr_t dev_addr = 0;
2039 int total_size;
2040
2041 d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len,
2042 d40c->dma_cfg.src_info.data_width,
2043 d40c->dma_cfg.dst_info.data_width);
2044 if (d40d->lli_len < 0) {
2045 dev_err(&d40c->chan.dev->device,
2046 "[%s] Unaligned size\n", __func__);
2047 return -EINVAL;
2048 }
2049
2050 if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
2051 dev_err(&d40c->chan.dev->device,
2052 "[%s] Out of memory\n", __func__);
2053 return -ENOMEM;
2054 }
2055
2056 d40d->lli_current = 0;
2057
2058 if (direction == DMA_FROM_DEVICE)
2059 if (d40c->runtime_addr)
2060 dev_addr = d40c->runtime_addr;
2061 else
2062 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
2063 else if (direction == DMA_TO_DEVICE)
2064 if (d40c->runtime_addr)
2065 dev_addr = d40c->runtime_addr;
2066 else
2067 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
2068
2069 else
2070 return -EINVAL;
2071
2072 total_size = d40_log_sg_to_dev(sgl, sg_len,
2073 &d40d->lli_log,
2074 &d40c->log_def,
2075 d40c->dma_cfg.src_info.data_width,
2076 d40c->dma_cfg.dst_info.data_width,
2077 direction,
2078 dev_addr);
2079
2080 if (total_size < 0)
2081 return -EINVAL;
2082
2083 return 0;
2084}
2085
2086static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
2087 struct d40_chan *d40c,
2088 struct scatterlist *sgl,
2089 unsigned int sgl_len,
2090 enum dma_data_direction direction,
2091 unsigned long dma_flags)
2092{
2093 dma_addr_t src_dev_addr;
2094 dma_addr_t dst_dev_addr;
2095 int res;
2096
2097 d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len,
2098 d40c->dma_cfg.src_info.data_width,
2099 d40c->dma_cfg.dst_info.data_width);
2100 if (d40d->lli_len < 0) {
2101 dev_err(&d40c->chan.dev->device,
2102 "[%s] Unaligned size\n", __func__);
2103 return -EINVAL;
2104 }
2105
2106 if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
2107 dev_err(&d40c->chan.dev->device,
2108 "[%s] Out of memory\n", __func__);
2109 return -ENOMEM;
2110 }
2111
2112 d40d->lli_current = 0;
2113
2114 if (direction == DMA_FROM_DEVICE) {
2115 dst_dev_addr = 0;
2116 if (d40c->runtime_addr)
2117 src_dev_addr = d40c->runtime_addr;
2118 else
2119 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
2120 } else if (direction == DMA_TO_DEVICE) {
2121 if (d40c->runtime_addr)
2122 dst_dev_addr = d40c->runtime_addr;
2123 else
2124 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
2125 src_dev_addr = 0;
2126 } else
2127 return -EINVAL;
2128
2129 res = d40_phy_sg_to_lli(sgl,
2130 sgl_len,
2131 src_dev_addr,
2132 d40d->lli_phy.src,
2133 virt_to_phys(d40d->lli_phy.src),
2134 d40c->src_def_cfg,
2135 d40c->dma_cfg.src_info.data_width,
2136 d40c->dma_cfg.dst_info.data_width,
2137 d40c->dma_cfg.src_info.psize);
2138 if (res < 0)
2139 return res;
2140
2141 res = d40_phy_sg_to_lli(sgl,
2142 sgl_len,
2143 dst_dev_addr,
2144 d40d->lli_phy.dst,
2145 virt_to_phys(d40d->lli_phy.dst),
2146 d40c->dst_def_cfg,
2147 d40c->dma_cfg.dst_info.data_width,
2148 d40c->dma_cfg.src_info.data_width,
2149 d40c->dma_cfg.dst_info.psize);
2150 if (res < 0)
2151 return res;
2152
2153 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
2154 d40d->lli_pool.size, DMA_TO_DEVICE);
2155 return 0;
2156} 2070}
2157 2071
2158static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, 2072static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
@@ -2161,52 +2075,40 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2161 enum dma_data_direction direction, 2075 enum dma_data_direction direction,
2162 unsigned long dma_flags) 2076 unsigned long dma_flags)
2163{ 2077{
2164 struct d40_desc *d40d; 2078 if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
2165 struct d40_chan *d40c = container_of(chan, struct d40_chan, 2079 return NULL;
2166 chan);
2167 unsigned long flags;
2168 int err;
2169
2170 if (d40c->phy_chan == NULL) {
2171 dev_err(&d40c->chan.dev->device,
2172 "[%s] Cannot prepare unallocated channel\n", __func__);
2173 return ERR_PTR(-EINVAL);
2174 }
2175 2080
2176 spin_lock_irqsave(&d40c->lock, flags); 2081 return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
2177 d40d = d40_desc_get(d40c); 2082}
2178 2083
2179 if (d40d == NULL) 2084static struct dma_async_tx_descriptor *
2180 goto err; 2085dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2086 size_t buf_len, size_t period_len,
2087 enum dma_data_direction direction)
2088{
2089 unsigned int periods = buf_len / period_len;
2090 struct dma_async_tx_descriptor *txd;
2091 struct scatterlist *sg;
2092 int i;
2181 2093
2182 if (d40c->log_num != D40_PHY_CHAN) 2094 sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL);
2183 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, 2095 for (i = 0; i < periods; i++) {
2184 direction, dma_flags); 2096 sg_dma_address(&sg[i]) = dma_addr;
2185 else 2097 sg_dma_len(&sg[i]) = period_len;
2186 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, 2098 dma_addr += period_len;
2187 direction, dma_flags);
2188 if (err) {
2189 dev_err(&d40c->chan.dev->device,
2190 "[%s] Failed to prepare %s slave sg job: %d\n",
2191 __func__,
2192 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
2193 goto err;
2194 } 2099 }
2195 2100
2196 d40d->txd.flags = dma_flags; 2101 sg[periods].offset = 0;
2102 sg[periods].length = 0;
2103 sg[periods].page_link =
2104 ((unsigned long)sg | 0x01) & ~0x02;
2197 2105
2198 dma_async_tx_descriptor_init(&d40d->txd, chan); 2106 txd = d40_prep_sg(chan, sg, sg, periods, direction,
2107 DMA_PREP_INTERRUPT);
2199 2108
2200 d40d->txd.tx_submit = d40_tx_submit; 2109 kfree(sg);
2201 2110
2202 spin_unlock_irqrestore(&d40c->lock, flags); 2111 return txd;
2203 return &d40d->txd;
2204
2205err:
2206 if (d40d)
2207 d40_desc_free(d40c, d40d);
2208 spin_unlock_irqrestore(&d40c->lock, flags);
2209 return NULL;
2210} 2112}
2211 2113
2212static enum dma_status d40_tx_status(struct dma_chan *chan, 2114static enum dma_status d40_tx_status(struct dma_chan *chan,
@@ -2219,9 +2121,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
2219 int ret; 2121 int ret;
2220 2122
2221 if (d40c->phy_chan == NULL) { 2123 if (d40c->phy_chan == NULL) {
2222 dev_err(&d40c->chan.dev->device, 2124 chan_err(d40c, "Cannot read status of unallocated channel\n");
2223 "[%s] Cannot read status of unallocated channel\n",
2224 __func__);
2225 return -EINVAL; 2125 return -EINVAL;
2226 } 2126 }
2227 2127
@@ -2245,8 +2145,7 @@ static void d40_issue_pending(struct dma_chan *chan)
2245 unsigned long flags; 2145 unsigned long flags;
2246 2146
2247 if (d40c->phy_chan == NULL) { 2147 if (d40c->phy_chan == NULL) {
2248 dev_err(&d40c->chan.dev->device, 2148 chan_err(d40c, "Channel is not allocated!\n");
2249 "[%s] Channel is not allocated!\n", __func__);
2250 return; 2149 return;
2251 } 2150 }
2252 2151
@@ -2339,7 +2238,7 @@ static void d40_set_runtime_config(struct dma_chan *chan,
2339 return; 2238 return;
2340 } 2239 }
2341 2240
2342 if (d40c->log_num != D40_PHY_CHAN) { 2241 if (chan_is_logical(d40c)) {
2343 if (config_maxburst >= 16) 2242 if (config_maxburst >= 16)
2344 psize = STEDMA40_PSIZE_LOG_16; 2243 psize = STEDMA40_PSIZE_LOG_16;
2345 else if (config_maxburst >= 8) 2244 else if (config_maxburst >= 8)
@@ -2372,7 +2271,7 @@ static void d40_set_runtime_config(struct dma_chan *chan,
2372 cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; 2271 cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2373 2272
2374 /* Fill in register values */ 2273 /* Fill in register values */
2375 if (d40c->log_num != D40_PHY_CHAN) 2274 if (chan_is_logical(d40c))
2376 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); 2275 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2377 else 2276 else
2378 d40_phy_cfg(cfg, &d40c->src_def_cfg, 2277 d40_phy_cfg(cfg, &d40c->src_def_cfg,
@@ -2393,25 +2292,20 @@ static void d40_set_runtime_config(struct dma_chan *chan,
2393static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 2292static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2394 unsigned long arg) 2293 unsigned long arg)
2395{ 2294{
2396 unsigned long flags;
2397 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2295 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2398 2296
2399 if (d40c->phy_chan == NULL) { 2297 if (d40c->phy_chan == NULL) {
2400 dev_err(&d40c->chan.dev->device, 2298 chan_err(d40c, "Channel is not allocated!\n");
2401 "[%s] Channel is not allocated!\n", __func__);
2402 return -EINVAL; 2299 return -EINVAL;
2403 } 2300 }
2404 2301
2405 switch (cmd) { 2302 switch (cmd) {
2406 case DMA_TERMINATE_ALL: 2303 case DMA_TERMINATE_ALL:
2407 spin_lock_irqsave(&d40c->lock, flags); 2304 return d40_terminate_all(d40c);
2408 d40_term_all(d40c);
2409 spin_unlock_irqrestore(&d40c->lock, flags);
2410 return 0;
2411 case DMA_PAUSE: 2305 case DMA_PAUSE:
2412 return d40_pause(chan); 2306 return d40_pause(d40c);
2413 case DMA_RESUME: 2307 case DMA_RESUME:
2414 return d40_resume(chan); 2308 return d40_resume(d40c);
2415 case DMA_SLAVE_CONFIG: 2309 case DMA_SLAVE_CONFIG:
2416 d40_set_runtime_config(chan, 2310 d40_set_runtime_config(chan,
2417 (struct dma_slave_config *) arg); 2311 (struct dma_slave_config *) arg);
@@ -2456,6 +2350,35 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2456 } 2350 }
2457} 2351}
2458 2352
2353static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2354{
2355 if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
2356 dev->device_prep_slave_sg = d40_prep_slave_sg;
2357
2358 if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
2359 dev->device_prep_dma_memcpy = d40_prep_memcpy;
2360
2361 /*
2362 * This controller can only access address at even
2363 * 32bit boundaries, i.e. 2^2
2364 */
2365 dev->copy_align = 2;
2366 }
2367
2368 if (dma_has_cap(DMA_SG, dev->cap_mask))
2369 dev->device_prep_dma_sg = d40_prep_memcpy_sg;
2370
2371 if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
2372 dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
2373
2374 dev->device_alloc_chan_resources = d40_alloc_chan_resources;
2375 dev->device_free_chan_resources = d40_free_chan_resources;
2376 dev->device_issue_pending = d40_issue_pending;
2377 dev->device_tx_status = d40_tx_status;
2378 dev->device_control = d40_control;
2379 dev->dev = base->dev;
2380}
2381
2459static int __init d40_dmaengine_init(struct d40_base *base, 2382static int __init d40_dmaengine_init(struct d40_base *base,
2460 int num_reserved_chans) 2383 int num_reserved_chans)
2461{ 2384{
@@ -2466,23 +2389,14 @@ static int __init d40_dmaengine_init(struct d40_base *base,
2466 2389
2467 dma_cap_zero(base->dma_slave.cap_mask); 2390 dma_cap_zero(base->dma_slave.cap_mask);
2468 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); 2391 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2392 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2469 2393
2470 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources; 2394 d40_ops_init(base, &base->dma_slave);
2471 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2472 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2473 base->dma_slave.device_prep_dma_sg = d40_prep_sg;
2474 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2475 base->dma_slave.device_tx_status = d40_tx_status;
2476 base->dma_slave.device_issue_pending = d40_issue_pending;
2477 base->dma_slave.device_control = d40_control;
2478 base->dma_slave.dev = base->dev;
2479 2395
2480 err = dma_async_device_register(&base->dma_slave); 2396 err = dma_async_device_register(&base->dma_slave);
2481 2397
2482 if (err) { 2398 if (err) {
2483 dev_err(base->dev, 2399 d40_err(base->dev, "Failed to register slave channels\n");
2484 "[%s] Failed to register slave channels\n",
2485 __func__);
2486 goto failure1; 2400 goto failure1;
2487 } 2401 }
2488 2402
@@ -2491,29 +2405,15 @@ static int __init d40_dmaengine_init(struct d40_base *base,
2491 2405
2492 dma_cap_zero(base->dma_memcpy.cap_mask); 2406 dma_cap_zero(base->dma_memcpy.cap_mask);
2493 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); 2407 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2494 dma_cap_set(DMA_SG, base->dma_slave.cap_mask); 2408 dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
2495 2409
2496 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources; 2410 d40_ops_init(base, &base->dma_memcpy);
2497 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2498 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2499 base->dma_slave.device_prep_dma_sg = d40_prep_sg;
2500 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2501 base->dma_memcpy.device_tx_status = d40_tx_status;
2502 base->dma_memcpy.device_issue_pending = d40_issue_pending;
2503 base->dma_memcpy.device_control = d40_control;
2504 base->dma_memcpy.dev = base->dev;
2505 /*
2506 * This controller can only access address at even
2507 * 32bit boundaries, i.e. 2^2
2508 */
2509 base->dma_memcpy.copy_align = 2;
2510 2411
2511 err = dma_async_device_register(&base->dma_memcpy); 2412 err = dma_async_device_register(&base->dma_memcpy);
2512 2413
2513 if (err) { 2414 if (err) {
2514 dev_err(base->dev, 2415 d40_err(base->dev,
2515 "[%s] Failed to regsiter memcpy only channels\n", 2416 "Failed to regsiter memcpy only channels\n");
2516 __func__);
2517 goto failure2; 2417 goto failure2;
2518 } 2418 }
2519 2419
@@ -2523,24 +2423,15 @@ static int __init d40_dmaengine_init(struct d40_base *base,
2523 dma_cap_zero(base->dma_both.cap_mask); 2423 dma_cap_zero(base->dma_both.cap_mask);
2524 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); 2424 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2525 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); 2425 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2526 dma_cap_set(DMA_SG, base->dma_slave.cap_mask); 2426 dma_cap_set(DMA_SG, base->dma_both.cap_mask);
2527 2427 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2528 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources; 2428
2529 base->dma_both.device_free_chan_resources = d40_free_chan_resources; 2429 d40_ops_init(base, &base->dma_both);
2530 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2531 base->dma_slave.device_prep_dma_sg = d40_prep_sg;
2532 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2533 base->dma_both.device_tx_status = d40_tx_status;
2534 base->dma_both.device_issue_pending = d40_issue_pending;
2535 base->dma_both.device_control = d40_control;
2536 base->dma_both.dev = base->dev;
2537 base->dma_both.copy_align = 2;
2538 err = dma_async_device_register(&base->dma_both); 2430 err = dma_async_device_register(&base->dma_both);
2539 2431
2540 if (err) { 2432 if (err) {
2541 dev_err(base->dev, 2433 d40_err(base->dev,
2542 "[%s] Failed to register logical and physical capable channels\n", 2434 "Failed to register logical and physical capable channels\n");
2543 __func__);
2544 goto failure3; 2435 goto failure3;
2545 } 2436 }
2546 return 0; 2437 return 0;
@@ -2616,9 +2507,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2616 { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, 2507 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2617 /* 2508 /*
2618 * D40_DREG_PERIPHID2 Depends on HW revision: 2509 * D40_DREG_PERIPHID2 Depends on HW revision:
2619 * MOP500/HREF ED has 0x0008, 2510 * DB8500ed has 0x0008,
2620 * ? has 0x0018, 2511 * ? has 0x0018,
2621 * HREF V1 has 0x0028 2512 * DB8500v1 has 0x0028
2513 * DB8500v2 has 0x0038
2622 */ 2514 */
2623 { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, 2515 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2624 2516
@@ -2642,8 +2534,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2642 clk = clk_get(&pdev->dev, NULL); 2534 clk = clk_get(&pdev->dev, NULL);
2643 2535
2644 if (IS_ERR(clk)) { 2536 if (IS_ERR(clk)) {
2645 dev_err(&pdev->dev, "[%s] No matching clock found\n", 2537 d40_err(&pdev->dev, "No matching clock found\n");
2646 __func__);
2647 goto failure; 2538 goto failure;
2648 } 2539 }
2649 2540
@@ -2666,9 +2557,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2666 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { 2557 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2667 if (dma_id_regs[i].val != 2558 if (dma_id_regs[i].val !=
2668 readl(virtbase + dma_id_regs[i].reg)) { 2559 readl(virtbase + dma_id_regs[i].reg)) {
2669 dev_err(&pdev->dev, 2560 d40_err(&pdev->dev,
2670 "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", 2561 "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2671 __func__,
2672 dma_id_regs[i].val, 2562 dma_id_regs[i].val,
2673 dma_id_regs[i].reg, 2563 dma_id_regs[i].reg,
2674 readl(virtbase + dma_id_regs[i].reg)); 2564 readl(virtbase + dma_id_regs[i].reg));
@@ -2681,9 +2571,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2681 2571
2682 if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) != 2572 if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
2683 D40_HW_DESIGNER) { 2573 D40_HW_DESIGNER) {
2684 dev_err(&pdev->dev, 2574 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
2685 "[%s] Unknown designer! Got %x wanted %x\n", 2575 val & D40_DREG_PERIPHID2_DESIGNER_MASK,
2686 __func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK,
2687 D40_HW_DESIGNER); 2576 D40_HW_DESIGNER);
2688 goto failure; 2577 goto failure;
2689 } 2578 }
@@ -2713,7 +2602,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2713 sizeof(struct d40_chan), GFP_KERNEL); 2602 sizeof(struct d40_chan), GFP_KERNEL);
2714 2603
2715 if (base == NULL) { 2604 if (base == NULL) {
2716 dev_err(&pdev->dev, "[%s] Out of memory\n", __func__); 2605 d40_err(&pdev->dev, "Out of memory\n");
2717 goto failure; 2606 goto failure;
2718 } 2607 }
2719 2608
@@ -2860,6 +2749,7 @@ static void __init d40_hw_init(struct d40_base *base)
2860 2749
2861static int __init d40_lcla_allocate(struct d40_base *base) 2750static int __init d40_lcla_allocate(struct d40_base *base)
2862{ 2751{
2752 struct d40_lcla_pool *pool = &base->lcla_pool;
2863 unsigned long *page_list; 2753 unsigned long *page_list;
2864 int i, j; 2754 int i, j;
2865 int ret = 0; 2755 int ret = 0;
@@ -2885,9 +2775,8 @@ static int __init d40_lcla_allocate(struct d40_base *base)
2885 base->lcla_pool.pages); 2775 base->lcla_pool.pages);
2886 if (!page_list[i]) { 2776 if (!page_list[i]) {
2887 2777
2888 dev_err(base->dev, 2778 d40_err(base->dev, "Failed to allocate %d pages.\n",
2889 "[%s] Failed to allocate %d pages.\n", 2779 base->lcla_pool.pages);
2890 __func__, base->lcla_pool.pages);
2891 2780
2892 for (j = 0; j < i; j++) 2781 for (j = 0; j < i; j++)
2893 free_pages(page_list[j], base->lcla_pool.pages); 2782 free_pages(page_list[j], base->lcla_pool.pages);
@@ -2925,6 +2814,15 @@ static int __init d40_lcla_allocate(struct d40_base *base)
2925 LCLA_ALIGNMENT); 2814 LCLA_ALIGNMENT);
2926 } 2815 }
2927 2816
2817 pool->dma_addr = dma_map_single(base->dev, pool->base,
2818 SZ_1K * base->num_phy_chans,
2819 DMA_TO_DEVICE);
2820 if (dma_mapping_error(base->dev, pool->dma_addr)) {
2821 pool->dma_addr = 0;
2822 ret = -ENOMEM;
2823 goto failure;
2824 }
2825
2928 writel(virt_to_phys(base->lcla_pool.base), 2826 writel(virt_to_phys(base->lcla_pool.base),
2929 base->virtbase + D40_DREG_LCLA); 2827 base->virtbase + D40_DREG_LCLA);
2930failure: 2828failure:
@@ -2957,9 +2855,7 @@ static int __init d40_probe(struct platform_device *pdev)
2957 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); 2855 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2958 if (!res) { 2856 if (!res) {
2959 ret = -ENOENT; 2857 ret = -ENOENT;
2960 dev_err(&pdev->dev, 2858 d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
2961 "[%s] No \"lcpa\" memory resource\n",
2962 __func__);
2963 goto failure; 2859 goto failure;
2964 } 2860 }
2965 base->lcpa_size = resource_size(res); 2861 base->lcpa_size = resource_size(res);
@@ -2968,9 +2864,9 @@ static int __init d40_probe(struct platform_device *pdev)
2968 if (request_mem_region(res->start, resource_size(res), 2864 if (request_mem_region(res->start, resource_size(res),
2969 D40_NAME " I/O lcpa") == NULL) { 2865 D40_NAME " I/O lcpa") == NULL) {
2970 ret = -EBUSY; 2866 ret = -EBUSY;
2971 dev_err(&pdev->dev, 2867 d40_err(&pdev->dev,
2972 "[%s] Failed to request LCPA region 0x%x-0x%x\n", 2868 "Failed to request LCPA region 0x%x-0x%x\n",
2973 __func__, res->start, res->end); 2869 res->start, res->end);
2974 goto failure; 2870 goto failure;
2975 } 2871 }
2976 2872
@@ -2986,16 +2882,13 @@ static int __init d40_probe(struct platform_device *pdev)
2986 base->lcpa_base = ioremap(res->start, resource_size(res)); 2882 base->lcpa_base = ioremap(res->start, resource_size(res));
2987 if (!base->lcpa_base) { 2883 if (!base->lcpa_base) {
2988 ret = -ENOMEM; 2884 ret = -ENOMEM;
2989 dev_err(&pdev->dev, 2885 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
2990 "[%s] Failed to ioremap LCPA region\n",
2991 __func__);
2992 goto failure; 2886 goto failure;
2993 } 2887 }
2994 2888
2995 ret = d40_lcla_allocate(base); 2889 ret = d40_lcla_allocate(base);
2996 if (ret) { 2890 if (ret) {
2997 dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n", 2891 d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
2998 __func__);
2999 goto failure; 2892 goto failure;
3000 } 2893 }
3001 2894
@@ -3004,9 +2897,8 @@ static int __init d40_probe(struct platform_device *pdev)
3004 base->irq = platform_get_irq(pdev, 0); 2897 base->irq = platform_get_irq(pdev, 0);
3005 2898
3006 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); 2899 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
3007
3008 if (ret) { 2900 if (ret) {
3009 dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__); 2901 d40_err(&pdev->dev, "No IRQ defined\n");
3010 goto failure; 2902 goto failure;
3011 } 2903 }
3012 2904
@@ -3025,6 +2917,12 @@ failure:
3025 kmem_cache_destroy(base->desc_slab); 2917 kmem_cache_destroy(base->desc_slab);
3026 if (base->virtbase) 2918 if (base->virtbase)
3027 iounmap(base->virtbase); 2919 iounmap(base->virtbase);
2920
2921 if (base->lcla_pool.dma_addr)
2922 dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
2923 SZ_1K * base->num_phy_chans,
2924 DMA_TO_DEVICE);
2925
3028 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) 2926 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
3029 free_pages((unsigned long)base->lcla_pool.base, 2927 free_pages((unsigned long)base->lcla_pool.base,
3030 base->lcla_pool.pages); 2928 base->lcla_pool.pages);
@@ -3049,7 +2947,7 @@ failure:
3049 kfree(base); 2947 kfree(base);
3050 } 2948 }
3051 2949
3052 dev_err(&pdev->dev, "[%s] probe failed\n", __func__); 2950 d40_err(&pdev->dev, "probe failed\n");
3053 return ret; 2951 return ret;
3054} 2952}
3055 2953
@@ -3060,7 +2958,7 @@ static struct platform_driver d40_driver = {
3060 }, 2958 },
3061}; 2959};
3062 2960
3063int __init stedma40_init(void) 2961static int __init stedma40_init(void)
3064{ 2962{
3065 return platform_driver_probe(&d40_driver, d40_probe); 2963 return platform_driver_probe(&d40_driver, d40_probe);
3066} 2964}
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 0b096a38322d..cad9e1daedff 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -125,13 +125,15 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
125static int d40_phy_fill_lli(struct d40_phy_lli *lli, 125static int d40_phy_fill_lli(struct d40_phy_lli *lli,
126 dma_addr_t data, 126 dma_addr_t data,
127 u32 data_size, 127 u32 data_size,
128 int psize,
129 dma_addr_t next_lli, 128 dma_addr_t next_lli,
130 u32 reg_cfg, 129 u32 reg_cfg,
131 bool term_int, 130 struct stedma40_half_channel_info *info,
132 u32 data_width, 131 unsigned int flags)
133 bool is_device)
134{ 132{
133 bool addr_inc = flags & LLI_ADDR_INC;
134 bool term_int = flags & LLI_TERM_INT;
135 unsigned int data_width = info->data_width;
136 int psize = info->psize;
135 int num_elems; 137 int num_elems;
136 138
137 if (psize == STEDMA40_PSIZE_PHY_1) 139 if (psize == STEDMA40_PSIZE_PHY_1)
@@ -154,7 +156,7 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli,
154 * Distance to next element sized entry. 156 * Distance to next element sized entry.
155 * Usually the size of the element unless you want gaps. 157 * Usually the size of the element unless you want gaps.
156 */ 158 */
157 if (!is_device) 159 if (addr_inc)
158 lli->reg_elt |= (0x1 << data_width) << 160 lli->reg_elt |= (0x1 << data_width) <<
159 D40_SREG_ELEM_PHY_EIDX_POS; 161 D40_SREG_ELEM_PHY_EIDX_POS;
160 162
@@ -198,47 +200,51 @@ static int d40_seg_size(int size, int data_width1, int data_width2)
198 return seg_max; 200 return seg_max;
199} 201}
200 202
201struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli, 203static struct d40_phy_lli *
202 dma_addr_t addr, 204d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size,
203 u32 size, 205 dma_addr_t lli_phys, dma_addr_t first_phys, u32 reg_cfg,
204 int psize, 206 struct stedma40_half_channel_info *info,
205 dma_addr_t lli_phys, 207 struct stedma40_half_channel_info *otherinfo,
206 u32 reg_cfg, 208 unsigned long flags)
207 bool term_int,
208 u32 data_width1,
209 u32 data_width2,
210 bool is_device)
211{ 209{
210 bool lastlink = flags & LLI_LAST_LINK;
211 bool addr_inc = flags & LLI_ADDR_INC;
212 bool term_int = flags & LLI_TERM_INT;
213 bool cyclic = flags & LLI_CYCLIC;
212 int err; 214 int err;
213 dma_addr_t next = lli_phys; 215 dma_addr_t next = lli_phys;
214 int size_rest = size; 216 int size_rest = size;
215 int size_seg = 0; 217 int size_seg = 0;
216 218
219 /*
220 * This piece may be split up based on d40_seg_size(); we only want the
221 * term int on the last part.
222 */
223 if (term_int)
224 flags &= ~LLI_TERM_INT;
225
217 do { 226 do {
218 size_seg = d40_seg_size(size_rest, data_width1, data_width2); 227 size_seg = d40_seg_size(size_rest, info->data_width,
228 otherinfo->data_width);
219 size_rest -= size_seg; 229 size_rest -= size_seg;
220 230
221 if (term_int && size_rest == 0) 231 if (size_rest == 0 && term_int)
222 next = 0; 232 flags |= LLI_TERM_INT;
233
234 if (size_rest == 0 && lastlink)
235 next = cyclic ? first_phys : 0;
223 else 236 else
224 next = ALIGN(next + sizeof(struct d40_phy_lli), 237 next = ALIGN(next + sizeof(struct d40_phy_lli),
225 D40_LLI_ALIGN); 238 D40_LLI_ALIGN);
226 239
227 err = d40_phy_fill_lli(lli, 240 err = d40_phy_fill_lli(lli, addr, size_seg, next,
228 addr, 241 reg_cfg, info, flags);
229 size_seg,
230 psize,
231 next,
232 reg_cfg,
233 !next,
234 data_width1,
235 is_device);
236 242
237 if (err) 243 if (err)
238 goto err; 244 goto err;
239 245
240 lli++; 246 lli++;
241 if (!is_device) 247 if (addr_inc)
242 addr += size_seg; 248 addr += size_seg;
243 } while (size_rest); 249 } while (size_rest);
244 250
@@ -254,39 +260,35 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
254 struct d40_phy_lli *lli_sg, 260 struct d40_phy_lli *lli_sg,
255 dma_addr_t lli_phys, 261 dma_addr_t lli_phys,
256 u32 reg_cfg, 262 u32 reg_cfg,
257 u32 data_width1, 263 struct stedma40_half_channel_info *info,
258 u32 data_width2, 264 struct stedma40_half_channel_info *otherinfo,
259 int psize) 265 unsigned long flags)
260{ 266{
261 int total_size = 0; 267 int total_size = 0;
262 int i; 268 int i;
263 struct scatterlist *current_sg = sg; 269 struct scatterlist *current_sg = sg;
264 dma_addr_t dst;
265 struct d40_phy_lli *lli = lli_sg; 270 struct d40_phy_lli *lli = lli_sg;
266 dma_addr_t l_phys = lli_phys; 271 dma_addr_t l_phys = lli_phys;
267 272
273 if (!target)
274 flags |= LLI_ADDR_INC;
275
268 for_each_sg(sg, current_sg, sg_len, i) { 276 for_each_sg(sg, current_sg, sg_len, i) {
277 dma_addr_t sg_addr = sg_dma_address(current_sg);
278 unsigned int len = sg_dma_len(current_sg);
279 dma_addr_t dst = target ?: sg_addr;
269 280
270 total_size += sg_dma_len(current_sg); 281 total_size += sg_dma_len(current_sg);
271 282
272 if (target) 283 if (i == sg_len - 1)
273 dst = target; 284 flags |= LLI_TERM_INT | LLI_LAST_LINK;
274 else
275 dst = sg_phys(current_sg);
276 285
277 l_phys = ALIGN(lli_phys + (lli - lli_sg) * 286 l_phys = ALIGN(lli_phys + (lli - lli_sg) *
278 sizeof(struct d40_phy_lli), D40_LLI_ALIGN); 287 sizeof(struct d40_phy_lli), D40_LLI_ALIGN);
279 288
280 lli = d40_phy_buf_to_lli(lli, 289 lli = d40_phy_buf_to_lli(lli, dst, len, l_phys, lli_phys,
281 dst, 290 reg_cfg, info, otherinfo, flags);
282 sg_dma_len(current_sg), 291
283 psize,
284 l_phys,
285 reg_cfg,
286 sg_len - 1 == i,
287 data_width1,
288 data_width2,
289 target == dst);
290 if (lli == NULL) 292 if (lli == NULL)
291 return -EINVAL; 293 return -EINVAL;
292 } 294 }
@@ -295,45 +297,22 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
295} 297}
296 298
297 299
298void d40_phy_lli_write(void __iomem *virtbase,
299 u32 phy_chan_num,
300 struct d40_phy_lli *lli_dst,
301 struct d40_phy_lli *lli_src)
302{
303
304 writel(lli_src->reg_cfg, virtbase + D40_DREG_PCBASE +
305 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSCFG);
306 writel(lli_src->reg_elt, virtbase + D40_DREG_PCBASE +
307 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
308 writel(lli_src->reg_ptr, virtbase + D40_DREG_PCBASE +
309 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSPTR);
310 writel(lli_src->reg_lnk, virtbase + D40_DREG_PCBASE +
311 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSLNK);
312
313 writel(lli_dst->reg_cfg, virtbase + D40_DREG_PCBASE +
314 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDCFG);
315 writel(lli_dst->reg_elt, virtbase + D40_DREG_PCBASE +
316 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
317 writel(lli_dst->reg_ptr, virtbase + D40_DREG_PCBASE +
318 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDPTR);
319 writel(lli_dst->reg_lnk, virtbase + D40_DREG_PCBASE +
320 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDLNK);
321
322}
323
324/* DMA logical lli operations */ 300/* DMA logical lli operations */
325 301
326static void d40_log_lli_link(struct d40_log_lli *lli_dst, 302static void d40_log_lli_link(struct d40_log_lli *lli_dst,
327 struct d40_log_lli *lli_src, 303 struct d40_log_lli *lli_src,
328 int next) 304 int next, unsigned int flags)
329{ 305{
306 bool interrupt = flags & LLI_TERM_INT;
330 u32 slos = 0; 307 u32 slos = 0;
331 u32 dlos = 0; 308 u32 dlos = 0;
332 309
333 if (next != -EINVAL) { 310 if (next != -EINVAL) {
334 slos = next * 2; 311 slos = next * 2;
335 dlos = next * 2 + 1; 312 dlos = next * 2 + 1;
336 } else { 313 }
314
315 if (interrupt) {
337 lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK; 316 lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
338 lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK; 317 lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
339 } 318 }
@@ -348,9 +327,9 @@ static void d40_log_lli_link(struct d40_log_lli *lli_dst,
348void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, 327void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
349 struct d40_log_lli *lli_dst, 328 struct d40_log_lli *lli_dst,
350 struct d40_log_lli *lli_src, 329 struct d40_log_lli *lli_src,
351 int next) 330 int next, unsigned int flags)
352{ 331{
353 d40_log_lli_link(lli_dst, lli_src, next); 332 d40_log_lli_link(lli_dst, lli_src, next, flags);
354 333
355 writel(lli_src->lcsp02, &lcpa[0].lcsp0); 334 writel(lli_src->lcsp02, &lcpa[0].lcsp0);
356 writel(lli_src->lcsp13, &lcpa[0].lcsp1); 335 writel(lli_src->lcsp13, &lcpa[0].lcsp1);
@@ -361,9 +340,9 @@ void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
361void d40_log_lli_lcla_write(struct d40_log_lli *lcla, 340void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
362 struct d40_log_lli *lli_dst, 341 struct d40_log_lli *lli_dst,
363 struct d40_log_lli *lli_src, 342 struct d40_log_lli *lli_src,
364 int next) 343 int next, unsigned int flags)
365{ 344{
366 d40_log_lli_link(lli_dst, lli_src, next); 345 d40_log_lli_link(lli_dst, lli_src, next, flags);
367 346
368 writel(lli_src->lcsp02, &lcla[0].lcsp02); 347 writel(lli_src->lcsp02, &lcla[0].lcsp02);
369 writel(lli_src->lcsp13, &lcla[0].lcsp13); 348 writel(lli_src->lcsp13, &lcla[0].lcsp13);
@@ -375,8 +354,10 @@ static void d40_log_fill_lli(struct d40_log_lli *lli,
375 dma_addr_t data, u32 data_size, 354 dma_addr_t data, u32 data_size,
376 u32 reg_cfg, 355 u32 reg_cfg,
377 u32 data_width, 356 u32 data_width,
378 bool addr_inc) 357 unsigned int flags)
379{ 358{
359 bool addr_inc = flags & LLI_ADDR_INC;
360
380 lli->lcsp13 = reg_cfg; 361 lli->lcsp13 = reg_cfg;
381 362
382 /* The number of elements to transfer */ 363 /* The number of elements to transfer */
@@ -395,67 +376,15 @@ static void d40_log_fill_lli(struct d40_log_lli *lli,
395 376
396} 377}
397 378
398int d40_log_sg_to_dev(struct scatterlist *sg, 379static struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
399 int sg_len,
400 struct d40_log_lli_bidir *lli,
401 struct d40_def_lcsp *lcsp,
402 u32 src_data_width,
403 u32 dst_data_width,
404 enum dma_data_direction direction,
405 dma_addr_t dev_addr)
406{
407 int total_size = 0;
408 struct scatterlist *current_sg = sg;
409 int i;
410 struct d40_log_lli *lli_src = lli->src;
411 struct d40_log_lli *lli_dst = lli->dst;
412
413 for_each_sg(sg, current_sg, sg_len, i) {
414 total_size += sg_dma_len(current_sg);
415
416 if (direction == DMA_TO_DEVICE) {
417 lli_src =
418 d40_log_buf_to_lli(lli_src,
419 sg_phys(current_sg),
420 sg_dma_len(current_sg),
421 lcsp->lcsp1, src_data_width,
422 dst_data_width,
423 true);
424 lli_dst =
425 d40_log_buf_to_lli(lli_dst,
426 dev_addr,
427 sg_dma_len(current_sg),
428 lcsp->lcsp3, dst_data_width,
429 src_data_width,
430 false);
431 } else {
432 lli_dst =
433 d40_log_buf_to_lli(lli_dst,
434 sg_phys(current_sg),
435 sg_dma_len(current_sg),
436 lcsp->lcsp3, dst_data_width,
437 src_data_width,
438 true);
439 lli_src =
440 d40_log_buf_to_lli(lli_src,
441 dev_addr,
442 sg_dma_len(current_sg),
443 lcsp->lcsp1, src_data_width,
444 dst_data_width,
445 false);
446 }
447 }
448 return total_size;
449}
450
451struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
452 dma_addr_t addr, 380 dma_addr_t addr,
453 int size, 381 int size,
454 u32 lcsp13, /* src or dst*/ 382 u32 lcsp13, /* src or dst*/
455 u32 data_width1, 383 u32 data_width1,
456 u32 data_width2, 384 u32 data_width2,
457 bool addr_inc) 385 unsigned int flags)
458{ 386{
387 bool addr_inc = flags & LLI_ADDR_INC;
459 struct d40_log_lli *lli = lli_sg; 388 struct d40_log_lli *lli = lli_sg;
460 int size_rest = size; 389 int size_rest = size;
461 int size_seg = 0; 390 int size_seg = 0;
@@ -468,7 +397,7 @@ struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
468 addr, 397 addr,
469 size_seg, 398 size_seg,
470 lcsp13, data_width1, 399 lcsp13, data_width1,
471 addr_inc); 400 flags);
472 if (addr_inc) 401 if (addr_inc)
473 addr += size_seg; 402 addr += size_seg;
474 lli++; 403 lli++;
@@ -479,6 +408,7 @@ struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
479 408
480int d40_log_sg_to_lli(struct scatterlist *sg, 409int d40_log_sg_to_lli(struct scatterlist *sg,
481 int sg_len, 410 int sg_len,
411 dma_addr_t dev_addr,
482 struct d40_log_lli *lli_sg, 412 struct d40_log_lli *lli_sg,
483 u32 lcsp13, /* src or dst*/ 413 u32 lcsp13, /* src or dst*/
484 u32 data_width1, u32 data_width2) 414 u32 data_width1, u32 data_width2)
@@ -487,14 +417,24 @@ int d40_log_sg_to_lli(struct scatterlist *sg,
487 struct scatterlist *current_sg = sg; 417 struct scatterlist *current_sg = sg;
488 int i; 418 int i;
489 struct d40_log_lli *lli = lli_sg; 419 struct d40_log_lli *lli = lli_sg;
420 unsigned long flags = 0;
421
422 if (!dev_addr)
423 flags |= LLI_ADDR_INC;
490 424
491 for_each_sg(sg, current_sg, sg_len, i) { 425 for_each_sg(sg, current_sg, sg_len, i) {
426 dma_addr_t sg_addr = sg_dma_address(current_sg);
427 unsigned int len = sg_dma_len(current_sg);
428 dma_addr_t addr = dev_addr ?: sg_addr;
429
492 total_size += sg_dma_len(current_sg); 430 total_size += sg_dma_len(current_sg);
493 lli = d40_log_buf_to_lli(lli, 431
494 sg_phys(current_sg), 432 lli = d40_log_buf_to_lli(lli, addr, len,
495 sg_dma_len(current_sg),
496 lcsp13, 433 lcsp13,
497 data_width1, data_width2, true); 434 data_width1,
435 data_width2,
436 flags);
498 } 437 }
438
499 return total_size; 439 return total_size;
500} 440}
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 9cc43495bea2..195ee65ee7f3 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -163,6 +163,22 @@
163#define D40_DREG_LCEIS1 0x0B4 163#define D40_DREG_LCEIS1 0x0B4
164#define D40_DREG_LCEIS2 0x0B8 164#define D40_DREG_LCEIS2 0x0B8
165#define D40_DREG_LCEIS3 0x0BC 165#define D40_DREG_LCEIS3 0x0BC
166#define D40_DREG_PSEG1 0x110
167#define D40_DREG_PSEG2 0x114
168#define D40_DREG_PSEG3 0x118
169#define D40_DREG_PSEG4 0x11C
170#define D40_DREG_PCEG1 0x120
171#define D40_DREG_PCEG2 0x124
172#define D40_DREG_PCEG3 0x128
173#define D40_DREG_PCEG4 0x12C
174#define D40_DREG_RSEG1 0x130
175#define D40_DREG_RSEG2 0x134
176#define D40_DREG_RSEG3 0x138
177#define D40_DREG_RSEG4 0x13C
178#define D40_DREG_RCEG1 0x140
179#define D40_DREG_RCEG2 0x144
180#define D40_DREG_RCEG3 0x148
181#define D40_DREG_RCEG4 0x14C
166#define D40_DREG_STFU 0xFC8 182#define D40_DREG_STFU 0xFC8
167#define D40_DREG_ICFG 0xFCC 183#define D40_DREG_ICFG 0xFCC
168#define D40_DREG_PERIPHID0 0xFE0 184#define D40_DREG_PERIPHID0 0xFE0
@@ -277,6 +293,13 @@ struct d40_def_lcsp {
277 293
278/* Physical channels */ 294/* Physical channels */
279 295
296enum d40_lli_flags {
297 LLI_ADDR_INC = 1 << 0,
298 LLI_TERM_INT = 1 << 1,
299 LLI_CYCLIC = 1 << 2,
300 LLI_LAST_LINK = 1 << 3,
301};
302
280void d40_phy_cfg(struct stedma40_chan_cfg *cfg, 303void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
281 u32 *src_cfg, 304 u32 *src_cfg,
282 u32 *dst_cfg, 305 u32 *dst_cfg,
@@ -292,46 +315,15 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
292 struct d40_phy_lli *lli, 315 struct d40_phy_lli *lli,
293 dma_addr_t lli_phys, 316 dma_addr_t lli_phys,
294 u32 reg_cfg, 317 u32 reg_cfg,
295 u32 data_width1, 318 struct stedma40_half_channel_info *info,
296 u32 data_width2, 319 struct stedma40_half_channel_info *otherinfo,
297 int psize); 320 unsigned long flags);
298
299struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
300 dma_addr_t data,
301 u32 data_size,
302 int psize,
303 dma_addr_t next_lli,
304 u32 reg_cfg,
305 bool term_int,
306 u32 data_width1,
307 u32 data_width2,
308 bool is_device);
309
310void d40_phy_lli_write(void __iomem *virtbase,
311 u32 phy_chan_num,
312 struct d40_phy_lli *lli_dst,
313 struct d40_phy_lli *lli_src);
314 321
315/* Logical channels */ 322/* Logical channels */
316 323
317struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
318 dma_addr_t addr,
319 int size,
320 u32 lcsp13, /* src or dst*/
321 u32 data_width1, u32 data_width2,
322 bool addr_inc);
323
324int d40_log_sg_to_dev(struct scatterlist *sg,
325 int sg_len,
326 struct d40_log_lli_bidir *lli,
327 struct d40_def_lcsp *lcsp,
328 u32 src_data_width,
329 u32 dst_data_width,
330 enum dma_data_direction direction,
331 dma_addr_t dev_addr);
332
333int d40_log_sg_to_lli(struct scatterlist *sg, 324int d40_log_sg_to_lli(struct scatterlist *sg,
334 int sg_len, 325 int sg_len,
326 dma_addr_t dev_addr,
335 struct d40_log_lli *lli_sg, 327 struct d40_log_lli *lli_sg,
336 u32 lcsp13, /* src or dst*/ 328 u32 lcsp13, /* src or dst*/
337 u32 data_width1, u32 data_width2); 329 u32 data_width1, u32 data_width2);
@@ -339,11 +331,11 @@ int d40_log_sg_to_lli(struct scatterlist *sg,
339void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, 331void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
340 struct d40_log_lli *lli_dst, 332 struct d40_log_lli *lli_dst,
341 struct d40_log_lli *lli_src, 333 struct d40_log_lli *lli_src,
342 int next); 334 int next, unsigned int flags);
343 335
344void d40_log_lli_lcla_write(struct d40_log_lli *lcla, 336void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
345 struct d40_log_lli *lli_dst, 337 struct d40_log_lli *lli_dst,
346 struct d40_log_lli *lli_src, 338 struct d40_log_lli *lli_src,
347 int next); 339 int next, unsigned int flags);
348 340
349#endif /* STE_DMA40_LLI_H */ 341#endif /* STE_DMA40_LLI_H */
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 3c56afc5eb1b..b3a25a55ba23 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -145,4 +145,16 @@ config ISCSI_IBFT
145 detect iSCSI boot parameters dynamically during system boot, say Y. 145 detect iSCSI boot parameters dynamically during system boot, say Y.
146 Otherwise, say N. 146 Otherwise, say N.
147 147
148config SIGMA
149 tristate "SigmaStudio firmware loader"
150 depends on I2C
151 select CRC32
152 default n
153 help
154 Enable helper functions for working with Analog Devices SigmaDSP
155 parts and binary firmwares produced by Analog Devices SigmaStudio.
156
157 If unsure, say N here. Drivers that need these helpers will select
158 this option automatically.
159
148endmenu 160endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 20c17fca1232..00bb0b80a79f 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_DMIID) += dmi-id.o
12obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o 12obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
13obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o 13obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
14obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o 14obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
15obj-$(CONFIG_SIGMA) += sigma.o
diff --git a/drivers/firmware/sigma.c b/drivers/firmware/sigma.c
new file mode 100644
index 000000000000..c19cd2c39fa6
--- /dev/null
+++ b/drivers/firmware/sigma.c
@@ -0,0 +1,115 @@
1/*
2 * Load Analog Devices SigmaStudio firmware files
3 *
4 * Copyright 2009-2011 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/crc32.h>
10#include <linux/delay.h>
11#include <linux/firmware.h>
12#include <linux/kernel.h>
13#include <linux/i2c.h>
14#include <linux/sigma.h>
15
16/* Return: 0==OK, <0==error, =1 ==no more actions */
17static int
18process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
19{
20 struct sigma_action *sa = (void *)(ssfw->fw->data + ssfw->pos);
21 size_t len = sigma_action_len(sa);
22 int ret = 0;
23
24 pr_debug("%s: instr:%i addr:%#x len:%zu\n", __func__,
25 sa->instr, sa->addr, len);
26
27 switch (sa->instr) {
28 case SIGMA_ACTION_WRITEXBYTES:
29 case SIGMA_ACTION_WRITESINGLE:
30 case SIGMA_ACTION_WRITESAFELOAD:
31 if (ssfw->fw->size < ssfw->pos + len)
32 return -EINVAL;
33 ret = i2c_master_send(client, (void *)&sa->addr, len);
34 if (ret < 0)
35 return -EINVAL;
36 break;
37
38 case SIGMA_ACTION_DELAY:
39 ret = 0;
40 udelay(len);
41 len = 0;
42 break;
43
44 case SIGMA_ACTION_END:
45 return 1;
46
47 default:
48 return -EINVAL;
49 }
50
51 /* when arrive here ret=0 or sent data */
52 ssfw->pos += sigma_action_size(sa, len);
53 return ssfw->pos == ssfw->fw->size;
54}
55
56static int
57process_sigma_actions(struct i2c_client *client, struct sigma_firmware *ssfw)
58{
59 pr_debug("%s: processing %p\n", __func__, ssfw);
60
61 while (1) {
62 int ret = process_sigma_action(client, ssfw);
63 pr_debug("%s: action returned %i\n", __func__, ret);
64 if (ret == 1)
65 return 0;
66 else if (ret)
67 return ret;
68 }
69}
70
71int process_sigma_firmware(struct i2c_client *client, const char *name)
72{
73 int ret;
74 struct sigma_firmware_header *ssfw_head;
75 struct sigma_firmware ssfw;
76 const struct firmware *fw;
77 u32 crc;
78
79 pr_debug("%s: loading firmware %s\n", __func__, name);
80
81 /* first load the blob */
82 ret = request_firmware(&fw, name, &client->dev);
83 if (ret) {
84 pr_debug("%s: request_firmware() failed with %i\n", __func__, ret);
85 return ret;
86 }
87 ssfw.fw = fw;
88
89 /* then verify the header */
90 ret = -EINVAL;
91 if (fw->size < sizeof(*ssfw_head))
92 goto done;
93
94 ssfw_head = (void *)fw->data;
95 if (memcmp(ssfw_head->magic, SIGMA_MAGIC, ARRAY_SIZE(ssfw_head->magic)))
96 goto done;
97
98 crc = crc32(0, fw->data, fw->size);
99 pr_debug("%s: crc=%x\n", __func__, crc);
100 if (crc != ssfw_head->crc)
101 goto done;
102
103 ssfw.pos = sizeof(*ssfw_head);
104
105 /* finally process all of the actions */
106 ret = process_sigma_actions(client, &ssfw);
107
108 done:
109 release_firmware(fw);
110
111 pr_debug("%s: loaded %s\n", __func__, name);
112
113 return ret;
114}
115EXPORT_SYMBOL(process_sigma_firmware);
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index d3a9c6e02477..00a55dfdba82 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -88,18 +88,20 @@ static const struct backlight_ops nv50_bl_ops = {
88 .update_status = nv50_set_intensity, 88 .update_status = nv50_set_intensity,
89}; 89};
90 90
91static int nouveau_nv40_backlight_init(struct drm_device *dev) 91static int nouveau_nv40_backlight_init(struct drm_connector *connector)
92{ 92{
93 struct backlight_properties props; 93 struct drm_device *dev = connector->dev;
94 struct drm_nouveau_private *dev_priv = dev->dev_private; 94 struct drm_nouveau_private *dev_priv = dev->dev_private;
95 struct backlight_properties props;
95 struct backlight_device *bd; 96 struct backlight_device *bd;
96 97
97 if (!(nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)) 98 if (!(nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
98 return 0; 99 return 0;
99 100
100 memset(&props, 0, sizeof(struct backlight_properties)); 101 memset(&props, 0, sizeof(struct backlight_properties));
102 props.type = BACKLIGHT_RAW;
101 props.max_brightness = 31; 103 props.max_brightness = 31;
102 bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev, 104 bd = backlight_device_register("nv_backlight", &connector->kdev, dev,
103 &nv40_bl_ops, &props); 105 &nv40_bl_ops, &props);
104 if (IS_ERR(bd)) 106 if (IS_ERR(bd))
105 return PTR_ERR(bd); 107 return PTR_ERR(bd);
@@ -111,18 +113,20 @@ static int nouveau_nv40_backlight_init(struct drm_device *dev)
111 return 0; 113 return 0;
112} 114}
113 115
114static int nouveau_nv50_backlight_init(struct drm_device *dev) 116static int nouveau_nv50_backlight_init(struct drm_connector *connector)
115{ 117{
116 struct backlight_properties props; 118 struct drm_device *dev = connector->dev;
117 struct drm_nouveau_private *dev_priv = dev->dev_private; 119 struct drm_nouveau_private *dev_priv = dev->dev_private;
120 struct backlight_properties props;
118 struct backlight_device *bd; 121 struct backlight_device *bd;
119 122
120 if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT)) 123 if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT))
121 return 0; 124 return 0;
122 125
123 memset(&props, 0, sizeof(struct backlight_properties)); 126 memset(&props, 0, sizeof(struct backlight_properties));
127 props.type = BACKLIGHT_RAW;
124 props.max_brightness = 1025; 128 props.max_brightness = 1025;
125 bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev, 129 bd = backlight_device_register("nv_backlight", &connector->kdev, dev,
126 &nv50_bl_ops, &props); 130 &nv50_bl_ops, &props);
127 if (IS_ERR(bd)) 131 if (IS_ERR(bd))
128 return PTR_ERR(bd); 132 return PTR_ERR(bd);
@@ -133,8 +137,9 @@ static int nouveau_nv50_backlight_init(struct drm_device *dev)
133 return 0; 137 return 0;
134} 138}
135 139
136int nouveau_backlight_init(struct drm_device *dev) 140int nouveau_backlight_init(struct drm_connector *connector)
137{ 141{
142 struct drm_device *dev = connector->dev;
138 struct drm_nouveau_private *dev_priv = dev->dev_private; 143 struct drm_nouveau_private *dev_priv = dev->dev_private;
139 144
140#ifdef CONFIG_ACPI 145#ifdef CONFIG_ACPI
@@ -147,9 +152,9 @@ int nouveau_backlight_init(struct drm_device *dev)
147 152
148 switch (dev_priv->card_type) { 153 switch (dev_priv->card_type) {
149 case NV_40: 154 case NV_40:
150 return nouveau_nv40_backlight_init(dev); 155 return nouveau_nv40_backlight_init(connector);
151 case NV_50: 156 case NV_50:
152 return nouveau_nv50_backlight_init(dev); 157 return nouveau_nv50_backlight_init(connector);
153 default: 158 default:
154 break; 159 break;
155 } 160 }
@@ -157,8 +162,9 @@ int nouveau_backlight_init(struct drm_device *dev)
157 return 0; 162 return 0;
158} 163}
159 164
160void nouveau_backlight_exit(struct drm_device *dev) 165void nouveau_backlight_exit(struct drm_connector *connector)
161{ 166{
167 struct drm_device *dev = connector->dev;
162 struct drm_nouveau_private *dev_priv = dev->dev_private; 168 struct drm_nouveau_private *dev_priv = dev->dev_private;
163 169
164 if (dev_priv->backlight) { 170 if (dev_priv->backlight) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 390d82c3c4b0..7ae151109a66 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -116,6 +116,10 @@ nouveau_connector_destroy(struct drm_connector *connector)
116 nouveau_connector_hotplug, connector); 116 nouveau_connector_hotplug, connector);
117 } 117 }
118 118
119 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
120 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
121 nouveau_backlight_exit(connector);
122
119 kfree(nv_connector->edid); 123 kfree(nv_connector->edid);
120 drm_sysfs_connector_remove(connector); 124 drm_sysfs_connector_remove(connector);
121 drm_connector_cleanup(connector); 125 drm_connector_cleanup(connector);
@@ -894,6 +898,11 @@ nouveau_connector_create(struct drm_device *dev, int index)
894 } 898 }
895 899
896 drm_sysfs_connector_add(connector); 900 drm_sysfs_connector_add(connector);
901
902 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
903 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
904 nouveau_backlight_init(connector);
905
897 dcb->drm = connector; 906 dcb->drm = connector;
898 return dcb->drm; 907 return dcb->drm;
899 908
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 06111887b789..fff180a99867 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -999,15 +999,15 @@ static inline int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector
999 999
1000/* nouveau_backlight.c */ 1000/* nouveau_backlight.c */
1001#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT 1001#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
1002extern int nouveau_backlight_init(struct drm_device *); 1002extern int nouveau_backlight_init(struct drm_connector *);
1003extern void nouveau_backlight_exit(struct drm_device *); 1003extern void nouveau_backlight_exit(struct drm_connector *);
1004#else 1004#else
1005static inline int nouveau_backlight_init(struct drm_device *dev) 1005static inline int nouveau_backlight_init(struct drm_connector *dev)
1006{ 1006{
1007 return 0; 1007 return 0;
1008} 1008}
1009 1009
1010static inline void nouveau_backlight_exit(struct drm_device *dev) { } 1010static inline void nouveau_backlight_exit(struct drm_connector *dev) { }
1011#endif 1011#endif
1012 1012
1013/* nouveau_bios.c */ 1013/* nouveau_bios.c */
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 05294910e135..4fcbd091a117 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -704,10 +704,6 @@ nouveau_card_init(struct drm_device *dev)
704 goto out_fence; 704 goto out_fence;
705 } 705 }
706 706
707 ret = nouveau_backlight_init(dev);
708 if (ret)
709 NV_ERROR(dev, "Error %d registering backlight\n", ret);
710
711 nouveau_fbcon_init(dev); 707 nouveau_fbcon_init(dev);
712 drm_kms_helper_poll_init(dev); 708 drm_kms_helper_poll_init(dev);
713 return 0; 709 return 0;
@@ -759,8 +755,6 @@ static void nouveau_card_takedown(struct drm_device *dev)
759 struct drm_nouveau_private *dev_priv = dev->dev_private; 755 struct drm_nouveau_private *dev_priv = dev->dev_private;
760 struct nouveau_engine *engine = &dev_priv->engine; 756 struct nouveau_engine *engine = &dev_priv->engine;
761 757
762 nouveau_backlight_exit(dev);
763
764 if (!engine->graph.accel_blocked) { 758 if (!engine->graph.accel_blocked) {
765 nouveau_fence_fini(dev); 759 nouveau_fence_fini(dev);
766 nouveau_channel_put_unlocked(&dev_priv->channel); 760 nouveau_channel_put_unlocked(&dev_priv->channel);
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 1c02d23f6fcc..9746fee59f56 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,6 +1,7 @@
1config DRM_RADEON_KMS 1config DRM_RADEON_KMS
2 bool "Enable modesetting on radeon by default - NEW DRIVER" 2 bool "Enable modesetting on radeon by default - NEW DRIVER"
3 depends on DRM_RADEON 3 depends on DRM_RADEON
4 select BACKLIGHT_CLASS_DEVICE
4 help 5 help
5 Choose this option if you want kernel modesetting enabled by default. 6 Choose this option if you want kernel modesetting enabled by default.
6 7
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 3f3c9aac46cc..28c7961cd19b 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -40,6 +40,10 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
40 struct drm_encoder *encoder, 40 struct drm_encoder *encoder,
41 bool connected); 41 bool connected);
42 42
43extern void
44radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
45 struct drm_connector *drm_connector);
46
43void radeon_connector_hotplug(struct drm_connector *connector) 47void radeon_connector_hotplug(struct drm_connector *connector)
44{ 48{
45 struct drm_device *dev = connector->dev; 49 struct drm_device *dev = connector->dev;
@@ -1526,6 +1530,17 @@ radeon_add_legacy_connector(struct drm_device *dev,
1526 connector->polled = DRM_CONNECTOR_POLL_HPD; 1530 connector->polled = DRM_CONNECTOR_POLL_HPD;
1527 connector->display_info.subpixel_order = subpixel_order; 1531 connector->display_info.subpixel_order = subpixel_order;
1528 drm_sysfs_connector_add(connector); 1532 drm_sysfs_connector_add(connector);
1533 if (connector_type == DRM_MODE_CONNECTOR_LVDS) {
1534 struct drm_encoder *drm_encoder;
1535
1536 list_for_each_entry(drm_encoder, &dev->mode_config.encoder_list, head) {
1537 struct radeon_encoder *radeon_encoder;
1538
1539 radeon_encoder = to_radeon_encoder(drm_encoder);
1540 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_LVDS)
1541 radeon_legacy_backlight_init(radeon_encoder, connector);
1542 }
1543 }
1529 return; 1544 return;
1530 1545
1531failed: 1546failed:
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 59f834ba283d..5b54268ed6b2 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -28,6 +28,10 @@
28#include "radeon_drm.h" 28#include "radeon_drm.h"
29#include "radeon.h" 29#include "radeon.h"
30#include "atom.h" 30#include "atom.h"
31#include <linux/backlight.h>
32#ifdef CONFIG_PMAC_BACKLIGHT
33#include <asm/backlight.h>
34#endif
31 35
32static void radeon_legacy_encoder_disable(struct drm_encoder *encoder) 36static void radeon_legacy_encoder_disable(struct drm_encoder *encoder)
33{ 37{
@@ -39,7 +43,7 @@ static void radeon_legacy_encoder_disable(struct drm_encoder *encoder)
39 radeon_encoder->active_device = 0; 43 radeon_encoder->active_device = 0;
40} 44}
41 45
42static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) 46static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode)
43{ 47{
44 struct drm_device *dev = encoder->dev; 48 struct drm_device *dev = encoder->dev;
45 struct radeon_device *rdev = dev->dev_private; 49 struct radeon_device *rdev = dev->dev_private;
@@ -47,15 +51,23 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
47 uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man; 51 uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man;
48 int panel_pwr_delay = 2000; 52 int panel_pwr_delay = 2000;
49 bool is_mac = false; 53 bool is_mac = false;
54 uint8_t backlight_level;
50 DRM_DEBUG_KMS("\n"); 55 DRM_DEBUG_KMS("\n");
51 56
57 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
58 backlight_level = (lvds_gen_cntl >> RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
59
52 if (radeon_encoder->enc_priv) { 60 if (radeon_encoder->enc_priv) {
53 if (rdev->is_atom_bios) { 61 if (rdev->is_atom_bios) {
54 struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; 62 struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
55 panel_pwr_delay = lvds->panel_pwr_delay; 63 panel_pwr_delay = lvds->panel_pwr_delay;
64 if (lvds->bl_dev)
65 backlight_level = lvds->backlight_level;
56 } else { 66 } else {
57 struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; 67 struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
58 panel_pwr_delay = lvds->panel_pwr_delay; 68 panel_pwr_delay = lvds->panel_pwr_delay;
69 if (lvds->bl_dev)
70 backlight_level = lvds->backlight_level;
59 } 71 }
60 } 72 }
61 73
@@ -82,11 +94,13 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
82 lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET; 94 lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET;
83 WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl); 95 WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
84 96
85 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); 97 lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS |
86 lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON); 98 RADEON_LVDS_BL_MOD_LEVEL_MASK);
99 lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN |
100 RADEON_LVDS_DIGON | RADEON_LVDS_BLON |
101 (backlight_level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT));
87 if (is_mac) 102 if (is_mac)
88 lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN; 103 lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN;
89 lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS);
90 udelay(panel_pwr_delay * 1000); 104 udelay(panel_pwr_delay * 1000);
91 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); 105 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
92 break; 106 break;
@@ -95,7 +109,6 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
95 case DRM_MODE_DPMS_OFF: 109 case DRM_MODE_DPMS_OFF:
96 pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL); 110 pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
97 WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb); 111 WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb);
98 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
99 lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; 112 lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
100 if (is_mac) { 113 if (is_mac) {
101 lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN; 114 lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN;
@@ -119,6 +132,25 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
119 132
120} 133}
121 134
135static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
136{
137 struct radeon_device *rdev = encoder->dev->dev_private;
138 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
139 DRM_DEBUG("\n");
140
141 if (radeon_encoder->enc_priv) {
142 if (rdev->is_atom_bios) {
143 struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
144 lvds->dpms_mode = mode;
145 } else {
146 struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
147 lvds->dpms_mode = mode;
148 }
149 }
150
151 radeon_legacy_lvds_update(encoder, mode);
152}
153
122static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder) 154static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
123{ 155{
124 struct radeon_device *rdev = encoder->dev->dev_private; 156 struct radeon_device *rdev = encoder->dev->dev_private;
@@ -237,9 +269,222 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
237 .disable = radeon_legacy_encoder_disable, 269 .disable = radeon_legacy_encoder_disable,
238}; 270};
239 271
272#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
273
274#define MAX_RADEON_LEVEL 0xFF
275
276struct radeon_backlight_privdata {
277 struct radeon_encoder *encoder;
278 uint8_t negative;
279};
280
281static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
282{
283 struct radeon_backlight_privdata *pdata = bl_get_data(bd);
284 uint8_t level;
285
286 /* Convert brightness to hardware level */
287 if (bd->props.brightness < 0)
288 level = 0;
289 else if (bd->props.brightness > MAX_RADEON_LEVEL)
290 level = MAX_RADEON_LEVEL;
291 else
292 level = bd->props.brightness;
293
294 if (pdata->negative)
295 level = MAX_RADEON_LEVEL - level;
296
297 return level;
298}
299
300static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
301{
302 struct radeon_backlight_privdata *pdata = bl_get_data(bd);
303 struct radeon_encoder *radeon_encoder = pdata->encoder;
304 struct drm_device *dev = radeon_encoder->base.dev;
305 struct radeon_device *rdev = dev->dev_private;
306 int dpms_mode = DRM_MODE_DPMS_ON;
307
308 if (radeon_encoder->enc_priv) {
309 if (rdev->is_atom_bios) {
310 struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
311 dpms_mode = lvds->dpms_mode;
312 lvds->backlight_level = radeon_legacy_lvds_level(bd);
313 } else {
314 struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
315 dpms_mode = lvds->dpms_mode;
316 lvds->backlight_level = radeon_legacy_lvds_level(bd);
317 }
318 }
319
320 if (bd->props.brightness > 0)
321 radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
322 else
323 radeon_legacy_lvds_update(&radeon_encoder->base, DRM_MODE_DPMS_OFF);
324
325 return 0;
326}
327
328static int radeon_legacy_backlight_get_brightness(struct backlight_device *bd)
329{
330 struct radeon_backlight_privdata *pdata = bl_get_data(bd);
331 struct radeon_encoder *radeon_encoder = pdata->encoder;
332 struct drm_device *dev = radeon_encoder->base.dev;
333 struct radeon_device *rdev = dev->dev_private;
334 uint8_t backlight_level;
335
336 backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
337 RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
338
339 return pdata->negative ? MAX_RADEON_LEVEL - backlight_level : backlight_level;
340}
341
342static const struct backlight_ops radeon_backlight_ops = {
343 .get_brightness = radeon_legacy_backlight_get_brightness,
344 .update_status = radeon_legacy_backlight_update_status,
345};
346
347void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
348 struct drm_connector *drm_connector)
349{
350 struct drm_device *dev = radeon_encoder->base.dev;
351 struct radeon_device *rdev = dev->dev_private;
352 struct backlight_device *bd;
353 struct backlight_properties props;
354 struct radeon_backlight_privdata *pdata;
355 uint8_t backlight_level;
356
357 if (!radeon_encoder->enc_priv)
358 return;
359
360#ifdef CONFIG_PMAC_BACKLIGHT
361 if (!pmac_has_backlight_type("ati") &&
362 !pmac_has_backlight_type("mnca"))
363 return;
364#endif
365
366 pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL);
367 if (!pdata) {
368 DRM_ERROR("Memory allocation failed\n");
369 goto error;
370 }
371
372 props.max_brightness = MAX_RADEON_LEVEL;
373 props.type = BACKLIGHT_RAW;
374 bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
375 pdata, &radeon_backlight_ops, &props);
376 if (IS_ERR(bd)) {
377 DRM_ERROR("Backlight registration failed\n");
378 goto error;
379 }
380
381 pdata->encoder = radeon_encoder;
382
383 backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
384 RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
385
386 /* First, try to detect backlight level sense based on the assumption
387 * that firmware set it up at full brightness
388 */
389 if (backlight_level == 0)
390 pdata->negative = true;
391 else if (backlight_level == 0xff)
392 pdata->negative = false;
393 else {
394 /* XXX hack... maybe some day we can figure out in what direction
395 * backlight should work on a given panel?
396 */
397 pdata->negative = (rdev->family != CHIP_RV200 &&
398 rdev->family != CHIP_RV250 &&
399 rdev->family != CHIP_RV280 &&
400 rdev->family != CHIP_RV350);
401
402#ifdef CONFIG_PMAC_BACKLIGHT
403 pdata->negative = (pdata->negative ||
404 of_machine_is_compatible("PowerBook4,3") ||
405 of_machine_is_compatible("PowerBook6,3") ||
406 of_machine_is_compatible("PowerBook6,5"));
407#endif
408 }
409
410 if (rdev->is_atom_bios) {
411 struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
412 lvds->bl_dev = bd;
413 } else {
414 struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
415 lvds->bl_dev = bd;
416 }
417
418 bd->props.brightness = radeon_legacy_backlight_get_brightness(bd);
419 bd->props.power = FB_BLANK_UNBLANK;
420 backlight_update_status(bd);
421
422 DRM_INFO("radeon legacy LVDS backlight initialized\n");
423
424 return;
425
426error:
427 kfree(pdata);
428 return;
429}
430
431static void radeon_legacy_backlight_exit(struct radeon_encoder *radeon_encoder)
432{
433 struct drm_device *dev = radeon_encoder->base.dev;
434 struct radeon_device *rdev = dev->dev_private;
435 struct backlight_device *bd = NULL;
436
437 if (!radeon_encoder->enc_priv)
438 return;
439
440 if (rdev->is_atom_bios) {
441 struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
442 bd = lvds->bl_dev;
443 lvds->bl_dev = NULL;
444 } else {
445 struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
446 bd = lvds->bl_dev;
447 lvds->bl_dev = NULL;
448 }
449
450 if (bd) {
451 struct radeon_legacy_backlight_privdata *pdata;
452
453 pdata = bl_get_data(bd);
454 backlight_device_unregister(bd);
455 kfree(pdata);
456
457 DRM_INFO("radeon legacy LVDS backlight unloaded\n");
458 }
459}
460
461#else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */
462
463void radeon_legacy_backlight_init(struct radeon_encoder *encoder)
464{
465}
466
467static void radeon_legacy_backlight_exit(struct radeon_encoder *encoder)
468{
469}
470
471#endif
472
473
474static void radeon_lvds_enc_destroy(struct drm_encoder *encoder)
475{
476 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
477
478 if (radeon_encoder->enc_priv) {
479 radeon_legacy_backlight_exit(radeon_encoder);
480 kfree(radeon_encoder->enc_priv);
481 }
482 drm_encoder_cleanup(encoder);
483 kfree(radeon_encoder);
484}
240 485
241static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = { 486static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
242 .destroy = radeon_enc_destroy, 487 .destroy = radeon_lvds_enc_destroy,
243}; 488};
244 489
245static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode) 490static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 5067d18d0009..e4582814bb78 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -302,6 +302,9 @@ struct radeon_encoder_lvds {
302 uint32_t lvds_gen_cntl; 302 uint32_t lvds_gen_cntl;
303 /* panel mode */ 303 /* panel mode */
304 struct drm_display_mode native_mode; 304 struct drm_display_mode native_mode;
305 struct backlight_device *bl_dev;
306 int dpms_mode;
307 uint8_t backlight_level;
305}; 308};
306 309
307struct radeon_encoder_tv_dac { 310struct radeon_encoder_tv_dac {
@@ -355,6 +358,9 @@ struct radeon_encoder_atom_dig {
355 uint32_t lcd_ss_id; 358 uint32_t lcd_ss_id;
356 /* panel mode */ 359 /* panel mode */
357 struct drm_display_mode native_mode; 360 struct drm_display_mode native_mode;
361 struct backlight_device *bl_dev;
362 int dpms_mode;
363 uint8_t backlight_level;
358}; 364};
359 365
360struct radeon_encoder_atom_dac { 366struct radeon_encoder_atom_dac {
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
index de9cf21b3494..657da5a3d5c6 100644
--- a/drivers/hid/hid-picolcd.c
+++ b/drivers/hid/hid-picolcd.c
@@ -944,6 +944,7 @@ static int picolcd_init_backlight(struct picolcd_data *data, struct hid_report *
944 } 944 }
945 945
946 memset(&props, 0, sizeof(props)); 946 memset(&props, 0, sizeof(props));
947 props.type = BACKLIGHT_RAW;
947 props.max_brightness = 0xff; 948 props.max_brightness = 0xff;
948 bdev = backlight_device_register(dev_name(dev), dev, data, 949 bdev = backlight_device_register(dev_name(dev), dev, data,
949 &picolcd_blops, &props); 950 &picolcd_blops, &props);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index ad415e6ec5a1..326652f673f7 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -547,15 +547,18 @@ config I2C_PUV3
547 547
548config I2C_PXA 548config I2C_PXA
549 tristate "Intel PXA2XX I2C adapter" 549 tristate "Intel PXA2XX I2C adapter"
550 depends on ARCH_PXA || ARCH_MMP 550 depends on ARCH_PXA || ARCH_MMP || (X86_32 && PCI && OF)
551 help 551 help
552 If you have devices in the PXA I2C bus, say yes to this option. 552 If you have devices in the PXA I2C bus, say yes to this option.
553 This driver can also be built as a module. If so, the module 553 This driver can also be built as a module. If so, the module
554 will be called i2c-pxa. 554 will be called i2c-pxa.
555 555
556config I2C_PXA_PCI
557 def_bool I2C_PXA && X86_32 && PCI && OF
558
556config I2C_PXA_SLAVE 559config I2C_PXA_SLAVE
557 bool "Intel PXA2XX I2C Slave comms support" 560 bool "Intel PXA2XX I2C Slave comms support"
558 depends on I2C_PXA 561 depends on I2C_PXA && !X86_32
559 help 562 help
560 Support I2C slave mode communications on the PXA I2C bus. This 563 Support I2C slave mode communications on the PXA I2C bus. This
561 is necessary for systems where the PXA may be a target on the 564 is necessary for systems where the PXA may be a target on the
@@ -668,15 +671,28 @@ config I2C_XILINX
668 will be called xilinx_i2c. 671 will be called xilinx_i2c.
669 672
670config I2C_EG20T 673config I2C_EG20T
671 tristate "PCH I2C of Intel EG20T" 674 tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH"
672 depends on PCI 675 depends on PCI
673 help 676 help
674 This driver is for PCH(Platform controller Hub) I2C of EG20T which 677 This driver is for PCH(Platform controller Hub) I2C of EG20T which
675 is an IOH(Input/Output Hub) for x86 embedded processor. 678 is an IOH(Input/Output Hub) for x86 embedded processor.
676 This driver can access PCH I2C bus device. 679 This driver can access PCH I2C bus device.
680
681 This driver also supports the ML7213, a companion chip for the
682 Atom E6xx series and compatible with the Intel EG20T PCH.
677 683
678comment "External I2C/SMBus adapter drivers" 684comment "External I2C/SMBus adapter drivers"
679 685
686config I2C_DIOLAN_U2C
687 tristate "Diolan U2C-12 USB adapter"
688 depends on USB
689 help
690 If you say yes to this option, support will be included for Diolan
691 U2C-12, a USB to I2C interface.
692
693 This driver can also be built as a module. If so, the module
694 will be called i2c-diolan-u2c.
695
680config I2C_PARPORT 696config I2C_PARPORT
681 tristate "Parallel port adapter" 697 tristate "Parallel port adapter"
682 depends on PARPORT 698 depends on PARPORT
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 3878c959d4fa..e6cf294d3729 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -54,6 +54,7 @@ obj-$(CONFIG_I2C_PMCMSP) += i2c-pmcmsp.o
54obj-$(CONFIG_I2C_PNX) += i2c-pnx.o 54obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
55obj-$(CONFIG_I2C_PUV3) += i2c-puv3.o 55obj-$(CONFIG_I2C_PUV3) += i2c-puv3.o
56obj-$(CONFIG_I2C_PXA) += i2c-pxa.o 56obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
57obj-$(CONFIG_I2C_PXA_PCI) += i2c-pxa-pci.o
57obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o 58obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o
58obj-$(CONFIG_I2C_S6000) += i2c-s6000.o 59obj-$(CONFIG_I2C_S6000) += i2c-s6000.o
59obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o 60obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o
@@ -67,6 +68,7 @@ obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
67obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o 68obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o
68 69
69# External I2C/SMBus adapter drivers 70# External I2C/SMBus adapter drivers
71obj-$(CONFIG_I2C_DIOLAN_U2C) += i2c-diolan-u2c.o
70obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o 72obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
71obj-$(CONFIG_I2C_PARPORT_LIGHT) += i2c-parport-light.o 73obj-$(CONFIG_I2C_PARPORT_LIGHT) += i2c-parport-light.o
72obj-$(CONFIG_I2C_TAOS_EVM) += i2c-taos-evm.o 74obj-$(CONFIG_I2C_TAOS_EVM) += i2c-taos-evm.o
diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
new file mode 100644
index 000000000000..76366716a854
--- /dev/null
+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
@@ -0,0 +1,535 @@
1/*
2 * Driver for the Diolan u2c-12 USB-I2C adapter
3 *
4 * Copyright (c) 2010-2011 Ericsson AB
5 *
6 * Derived from:
7 * i2c-tiny-usb.c
8 * Copyright (C) 2006-2007 Till Harbaum (Till@Harbaum.org)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation, version 2.
13 */
14
15#include <linux/kernel.h>
16#include <linux/errno.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/slab.h>
20#include <linux/usb.h>
21#include <linux/i2c.h>
22
23#define DRIVER_NAME "i2c-diolan-u2c"
24
25#define USB_VENDOR_ID_DIOLAN 0x0abf
26#define USB_DEVICE_ID_DIOLAN_U2C 0x3370
27
28#define DIOLAN_OUT_EP 0x02
29#define DIOLAN_IN_EP 0x84
30
31/* commands via USB, must match command ids in the firmware */
32#define CMD_I2C_READ 0x01
33#define CMD_I2C_WRITE 0x02
34#define CMD_I2C_SCAN 0x03 /* Returns list of detected devices */
35#define CMD_I2C_RELEASE_SDA 0x04
36#define CMD_I2C_RELEASE_SCL 0x05
37#define CMD_I2C_DROP_SDA 0x06
38#define CMD_I2C_DROP_SCL 0x07
39#define CMD_I2C_READ_SDA 0x08
40#define CMD_I2C_READ_SCL 0x09
41#define CMD_GET_FW_VERSION 0x0a
42#define CMD_GET_SERIAL 0x0b
43#define CMD_I2C_START 0x0c
44#define CMD_I2C_STOP 0x0d
45#define CMD_I2C_REPEATED_START 0x0e
46#define CMD_I2C_PUT_BYTE 0x0f
47#define CMD_I2C_GET_BYTE 0x10
48#define CMD_I2C_PUT_ACK 0x11
49#define CMD_I2C_GET_ACK 0x12
50#define CMD_I2C_PUT_BYTE_ACK 0x13
51#define CMD_I2C_GET_BYTE_ACK 0x14
52#define CMD_I2C_SET_SPEED 0x1b
53#define CMD_I2C_GET_SPEED 0x1c
54#define CMD_I2C_SET_CLK_SYNC 0x24
55#define CMD_I2C_GET_CLK_SYNC 0x25
56#define CMD_I2C_SET_CLK_SYNC_TO 0x26
57#define CMD_I2C_GET_CLK_SYNC_TO 0x27
58
59#define RESP_OK 0x00
60#define RESP_FAILED 0x01
61#define RESP_BAD_MEMADDR 0x04
62#define RESP_DATA_ERR 0x05
63#define RESP_NOT_IMPLEMENTED 0x06
64#define RESP_NACK 0x07
65#define RESP_TIMEOUT 0x09
66
67#define U2C_I2C_SPEED_FAST 0 /* 400 kHz */
68#define U2C_I2C_SPEED_STD 1 /* 100 kHz */
69#define U2C_I2C_SPEED_2KHZ 242 /* 2 kHz, minimum speed */
70#define U2C_I2C_SPEED(f) ((DIV_ROUND_UP(1000000, (f)) - 10) / 2 + 1)
71
72#define U2C_I2C_FREQ_FAST 400000
73#define U2C_I2C_FREQ_STD 100000
74#define U2C_I2C_FREQ(s) (1000000 / (2 * (s - 1) + 10))
75
76#define DIOLAN_USB_TIMEOUT 100 /* in ms */
77#define DIOLAN_SYNC_TIMEOUT 20 /* in ms */
78
79#define DIOLAN_OUTBUF_LEN 128
80#define DIOLAN_FLUSH_LEN (DIOLAN_OUTBUF_LEN - 4)
81#define DIOLAN_INBUF_LEN 256 /* Maximum supported receive length */
82
83/* Structure to hold all of our device specific stuff */
84struct i2c_diolan_u2c {
85 u8 obuffer[DIOLAN_OUTBUF_LEN]; /* output buffer */
86 u8 ibuffer[DIOLAN_INBUF_LEN]; /* input buffer */
87 struct usb_device *usb_dev; /* the usb device for this device */
88 struct usb_interface *interface;/* the interface for this device */
89 struct i2c_adapter adapter; /* i2c related things */
90 int olen; /* Output buffer length */
91 int ocount; /* Number of enqueued messages */
92};
93
94static uint frequency = U2C_I2C_FREQ_STD; /* I2C clock frequency in Hz */
95
96module_param(frequency, uint, S_IRUGO | S_IWUSR);
97MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
98
99/* usb layer */
100
101/* Send command to device, and get response. */
102static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
103{
104 int ret = 0;
105 int actual;
106 int i;
107
108 if (!dev->olen || !dev->ocount)
109 return -EINVAL;
110
111 ret = usb_bulk_msg(dev->usb_dev,
112 usb_sndbulkpipe(dev->usb_dev, DIOLAN_OUT_EP),
113 dev->obuffer, dev->olen, &actual,
114 DIOLAN_USB_TIMEOUT);
115 if (!ret) {
116 for (i = 0; i < dev->ocount; i++) {
117 int tmpret;
118
119 tmpret = usb_bulk_msg(dev->usb_dev,
120 usb_rcvbulkpipe(dev->usb_dev,
121 DIOLAN_IN_EP),
122 dev->ibuffer,
123 sizeof(dev->ibuffer), &actual,
124 DIOLAN_USB_TIMEOUT);
125 /*
126 * Stop command processing if a previous command
127 * returned an error.
128 * Note that we still need to retrieve all messages.
129 */
130 if (ret < 0)
131 continue;
132 ret = tmpret;
133 if (ret == 0 && actual > 0) {
134 switch (dev->ibuffer[actual - 1]) {
135 case RESP_NACK:
136 /*
137 * Return ENXIO if NACK was received as
138 * response to the address phase,
139 * EIO otherwise
140 */
141 ret = i == 1 ? -ENXIO : -EIO;
142 break;
143 case RESP_TIMEOUT:
144 ret = -ETIMEDOUT;
145 break;
146 case RESP_OK:
147 /* strip off return code */
148 ret = actual - 1;
149 break;
150 default:
151 ret = -EIO;
152 break;
153 }
154 }
155 }
156 }
157 dev->olen = 0;
158 dev->ocount = 0;
159 return ret;
160}
161
162static int diolan_write_cmd(struct i2c_diolan_u2c *dev, bool flush)
163{
164 if (flush || dev->olen >= DIOLAN_FLUSH_LEN)
165 return diolan_usb_transfer(dev);
166 return 0;
167}
168
169/* Send command (no data) */
170static int diolan_usb_cmd(struct i2c_diolan_u2c *dev, u8 command, bool flush)
171{
172 dev->obuffer[dev->olen++] = command;
173 dev->ocount++;
174 return diolan_write_cmd(dev, flush);
175}
176
177/* Send command with one byte of data */
178static int diolan_usb_cmd_data(struct i2c_diolan_u2c *dev, u8 command, u8 data,
179 bool flush)
180{
181 dev->obuffer[dev->olen++] = command;
182 dev->obuffer[dev->olen++] = data;
183 dev->ocount++;
184 return diolan_write_cmd(dev, flush);
185}
186
187/* Send command with two bytes of data */
188static int diolan_usb_cmd_data2(struct i2c_diolan_u2c *dev, u8 command, u8 d1,
189 u8 d2, bool flush)
190{
191 dev->obuffer[dev->olen++] = command;
192 dev->obuffer[dev->olen++] = d1;
193 dev->obuffer[dev->olen++] = d2;
194 dev->ocount++;
195 return diolan_write_cmd(dev, flush);
196}
197
198/*
199 * Flush input queue.
200 * If we don't do this at startup and the controller has queued up
201 * messages which were not retrieved, it will stop responding
202 * at some point.
203 */
204static void diolan_flush_input(struct i2c_diolan_u2c *dev)
205{
206 int i;
207
208 for (i = 0; i < 10; i++) {
209 int actual = 0;
210 int ret;
211
212 ret = usb_bulk_msg(dev->usb_dev,
213 usb_rcvbulkpipe(dev->usb_dev, DIOLAN_IN_EP),
214 dev->ibuffer, sizeof(dev->ibuffer), &actual,
215 DIOLAN_USB_TIMEOUT);
216 if (ret < 0 || actual == 0)
217 break;
218 }
219 if (i == 10)
220 dev_err(&dev->interface->dev, "Failed to flush input buffer\n");
221}
222
223static int diolan_i2c_start(struct i2c_diolan_u2c *dev)
224{
225 return diolan_usb_cmd(dev, CMD_I2C_START, false);
226}
227
228static int diolan_i2c_repeated_start(struct i2c_diolan_u2c *dev)
229{
230 return diolan_usb_cmd(dev, CMD_I2C_REPEATED_START, false);
231}
232
233static int diolan_i2c_stop(struct i2c_diolan_u2c *dev)
234{
235 return diolan_usb_cmd(dev, CMD_I2C_STOP, true);
236}
237
238static int diolan_i2c_get_byte_ack(struct i2c_diolan_u2c *dev, bool ack,
239 u8 *byte)
240{
241 int ret;
242
243 ret = diolan_usb_cmd_data(dev, CMD_I2C_GET_BYTE_ACK, ack, true);
244 if (ret > 0)
245 *byte = dev->ibuffer[0];
246 else if (ret == 0)
247 ret = -EIO;
248
249 return ret;
250}
251
252static int diolan_i2c_put_byte_ack(struct i2c_diolan_u2c *dev, u8 byte)
253{
254 return diolan_usb_cmd_data(dev, CMD_I2C_PUT_BYTE_ACK, byte, false);
255}
256
/* Set bus speed; 'speed' is a U2C_I2C_SPEED_* code, not a frequency */
static int diolan_set_speed(struct i2c_diolan_u2c *dev, u8 speed)
{
	return diolan_usb_cmd_data(dev, CMD_I2C_SET_SPEED, speed, true);
}
261
/* Enable or disable clock synchronization (stretching) */
static int diolan_set_clock_synch(struct i2c_diolan_u2c *dev, bool enable)
{
	return diolan_usb_cmd_data(dev, CMD_I2C_SET_CLK_SYNC, enable, true);
}
267
268/* Set clock synchronization timeout in ms */
269static int diolan_set_clock_synch_timeout(struct i2c_diolan_u2c *dev, int ms)
270{
271 int to_val = ms * 10;
272
273 return diolan_usb_cmd_data2(dev, CMD_I2C_SET_CLK_SYNC_TO,
274 to_val & 0xff, (to_val >> 8) & 0xff, true);
275}
276
/* Query and log the firmware version (two reply bytes: major, minor) */
static void diolan_fw_version(struct i2c_diolan_u2c *dev)
{
	int ret;

	ret = diolan_usb_cmd(dev, CMD_GET_FW_VERSION, true);
	if (ret >= 2)	/* need at least two bytes in the reply */
		dev_info(&dev->interface->dev,
			 "Diolan U2C firmware version %u.%u\n",
			 (unsigned int)dev->ibuffer[0],
			 (unsigned int)dev->ibuffer[1]);
}
288
289static void diolan_get_serial(struct i2c_diolan_u2c *dev)
290{
291 int ret;
292 u32 serial;
293
294 ret = diolan_usb_cmd(dev, CMD_GET_SERIAL, true);
295 if (ret >= 4) {
296 serial = le32_to_cpu(*(u32 *)dev->ibuffer);
297 dev_info(&dev->interface->dev,
298 "Diolan U2C serial number %u\n", serial);
299 }
300}
301
/*
 * Initialize the adapter: normalize the requested bus frequency, flush
 * stale input, log firmware version and serial number, then program bus
 * speed and clock stretching.
 *
 * Note: rewrites the module-wide 'frequency' variable (defined earlier in
 * this file) to the frequency actually in effect.
 */
static int diolan_init(struct i2c_diolan_u2c *dev)
{
	int speed, ret;

	/* Map requested frequency to a device speed code, clamped at 2 kHz */
	if (frequency >= 200000) {
		speed = U2C_I2C_SPEED_FAST;
		frequency = U2C_I2C_FREQ_FAST;
	} else if (frequency >= 100000 || frequency == 0) {
		speed = U2C_I2C_SPEED_STD;
		frequency = U2C_I2C_FREQ_STD;
	} else {
		speed = U2C_I2C_SPEED(frequency);
		if (speed > U2C_I2C_SPEED_2KHZ)
			speed = U2C_I2C_SPEED_2KHZ;
		frequency = U2C_I2C_FREQ(speed);
	}

	dev_info(&dev->interface->dev,
		 "Diolan U2C at USB bus %03d address %03d speed %d Hz\n",
		 dev->usb_dev->bus->busnum, dev->usb_dev->devnum, frequency);

	/* Drain any queued input first; see diolan_flush_input() */
	diolan_flush_input(dev);
	diolan_fw_version(dev);
	diolan_get_serial(dev);

	/* Set I2C speed */
	ret = diolan_set_speed(dev, speed);
	if (ret < 0)
		return ret;

	/* Configure I2C clock synchronization (off in fast mode) */
	ret = diolan_set_clock_synch(dev, speed != U2C_I2C_SPEED_FAST);
	if (ret < 0)
		return ret;

	/* Timeout only applies when clock synchronization is enabled */
	if (speed != U2C_I2C_SPEED_FAST)
		ret = diolan_set_clock_synch_timeout(dev, DIOLAN_SYNC_TIMEOUT);

	return ret;
}
342
343/* i2c layer */
344
345static int diolan_usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
346 int num)
347{
348 struct i2c_diolan_u2c *dev = i2c_get_adapdata(adapter);
349 struct i2c_msg *pmsg;
350 int i, j;
351 int ret, sret;
352
353 ret = diolan_i2c_start(dev);
354 if (ret < 0)
355 return ret;
356
357 for (i = 0; i < num; i++) {
358 pmsg = &msgs[i];
359 if (i) {
360 ret = diolan_i2c_repeated_start(dev);
361 if (ret < 0)
362 goto abort;
363 }
364 if (pmsg->flags & I2C_M_RD) {
365 ret =
366 diolan_i2c_put_byte_ack(dev, (pmsg->addr << 1) | 1);
367 if (ret < 0)
368 goto abort;
369 for (j = 0; j < pmsg->len; j++) {
370 u8 byte;
371 bool ack = j < pmsg->len - 1;
372
373 /*
374 * Don't send NACK if this is the first byte
375 * of a SMBUS_BLOCK message.
376 */
377 if (j == 0 && (pmsg->flags & I2C_M_RECV_LEN))
378 ack = true;
379
380 ret = diolan_i2c_get_byte_ack(dev, ack, &byte);
381 if (ret < 0)
382 goto abort;
383 /*
384 * Adjust count if first received byte is length
385 */
386 if (j == 0 && (pmsg->flags & I2C_M_RECV_LEN)) {
387 if (byte == 0
388 || byte > I2C_SMBUS_BLOCK_MAX) {
389 ret = -EPROTO;
390 goto abort;
391 }
392 pmsg->len += byte;
393 }
394 pmsg->buf[j] = byte;
395 }
396 } else {
397 ret = diolan_i2c_put_byte_ack(dev, pmsg->addr << 1);
398 if (ret < 0)
399 goto abort;
400 for (j = 0; j < pmsg->len; j++) {
401 ret = diolan_i2c_put_byte_ack(dev,
402 pmsg->buf[j]);
403 if (ret < 0)
404 goto abort;
405 }
406 }
407 }
408abort:
409 sret = diolan_i2c_stop(dev);
410 if (sret < 0 && ret >= 0)
411 ret = sret;
412 return ret;
413}
414
415/*
416 * Return list of supported functionality.
417 */
418static u32 diolan_usb_func(struct i2c_adapter *a)
419{
420 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
421 I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL;
422}
423
/* Algorithm hooks registered with the i2c core */
static const struct i2c_algorithm diolan_usb_algorithm = {
	.master_xfer = diolan_usb_xfer,
	.functionality = diolan_usb_func,
};
428
429/* device layer */
430
/* USB devices this driver binds to */
static const struct usb_device_id diolan_u2c_table[] = {
	{ USB_DEVICE(USB_VENDOR_ID_DIOLAN, USB_DEVICE_ID_DIOLAN_U2C) },
	{ }			/* terminating entry */
};

MODULE_DEVICE_TABLE(usb, diolan_u2c_table);
437
/* Drop the USB device reference taken in probe and free the device state */
static void diolan_u2c_free(struct i2c_diolan_u2c *dev)
{
	usb_put_dev(dev->usb_dev);
	kfree(dev);
}
443
/*
 * Bind to a Diolan U2C interface: allocate state, initialize the hardware,
 * and register the I2C adapter.  On failure everything allocated here is
 * released via the error labels (in reverse order of acquisition).
 */
static int diolan_u2c_probe(struct usb_interface *interface,
			    const struct usb_device_id *id)
{
	struct i2c_diolan_u2c *dev;
	int ret;

	/* allocate memory for our device state and initialize it */
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (dev == NULL) {
		dev_err(&interface->dev, "no memory for device state\n");
		ret = -ENOMEM;
		goto error;
	}

	/* reference on the USB device is dropped in diolan_u2c_free() */
	dev->usb_dev = usb_get_dev(interface_to_usbdev(interface));
	dev->interface = interface;

	/* save our data pointer in this interface device */
	usb_set_intfdata(interface, dev);

	/* setup i2c adapter description */
	dev->adapter.owner = THIS_MODULE;
	dev->adapter.class = I2C_CLASS_HWMON;
	dev->adapter.algo = &diolan_usb_algorithm;
	i2c_set_adapdata(&dev->adapter, dev);
	snprintf(dev->adapter.name, sizeof(dev->adapter.name),
		 DRIVER_NAME " at bus %03d device %03d",
		 dev->usb_dev->bus->busnum, dev->usb_dev->devnum);

	dev->adapter.dev.parent = &dev->interface->dev;

	/* initialize diolan i2c interface */
	ret = diolan_init(dev);
	if (ret < 0) {
		dev_err(&interface->dev, "failed to initialize adapter\n");
		goto error_free;
	}

	/* and finally attach to i2c layer */
	ret = i2c_add_adapter(&dev->adapter);
	if (ret < 0) {
		dev_err(&interface->dev, "failed to add I2C adapter\n");
		goto error_free;
	}

	dev_dbg(&interface->dev, "connected " DRIVER_NAME "\n");

	return 0;

error_free:
	usb_set_intfdata(interface, NULL);
	diolan_u2c_free(dev);
error:
	return ret;
}
499
/* Called on unplug or driver unbind; undoes everything probe set up */
static void diolan_u2c_disconnect(struct usb_interface *interface)
{
	struct i2c_diolan_u2c *dev = usb_get_intfdata(interface);

	/* remove the I2C adapter first so no new transfers can start */
	i2c_del_adapter(&dev->adapter);
	usb_set_intfdata(interface, NULL);
	diolan_u2c_free(dev);

	dev_dbg(&interface->dev, "disconnected\n");
}
510
/* USB driver glue; registered in diolan_u2c_init() */
static struct usb_driver diolan_u2c_driver = {
	.name = DRIVER_NAME,
	.probe = diolan_u2c_probe,
	.disconnect = diolan_u2c_disconnect,
	.id_table = diolan_u2c_table,
};
517
/* Module entry point */
static int __init diolan_u2c_init(void)
{
	/* register this driver with the USB subsystem */
	return usb_register(&diolan_u2c_driver);
}

/* Module exit point */
static void __exit diolan_u2c_exit(void)
{
	/* deregister this driver with the USB subsystem */
	usb_deregister(&diolan_u2c_driver);
}

module_init(diolan_u2c_init);
module_exit(diolan_u2c_exit);
532
533MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
534MODULE_DESCRIPTION(DRIVER_NAME " driver");
535MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 50ea1f43bdc1..878a12026af2 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -132,6 +132,13 @@
132#define pch_pci_dbg(pdev, fmt, arg...) \ 132#define pch_pci_dbg(pdev, fmt, arg...) \
133 dev_dbg(&pdev->dev, "%s :" fmt, __func__, ##arg) 133 dev_dbg(&pdev->dev, "%s :" fmt, __func__, ##arg)
134 134
135/*
136Set the number of I2C instance max
137Intel EG20T PCH : 1ch
138OKI SEMICONDUCTOR ML7213 IOH : 2ch
139*/
140#define PCH_I2C_MAX_DEV 2
141
135/** 142/**
136 * struct i2c_algo_pch_data - for I2C driver functionalities 143 * struct i2c_algo_pch_data - for I2C driver functionalities
137 * @pch_adapter: stores the reference to i2c_adapter structure 144 * @pch_adapter: stores the reference to i2c_adapter structure
@@ -156,12 +163,14 @@ struct i2c_algo_pch_data {
156 * @pch_data: stores a list of i2c_algo_pch_data 163 * @pch_data: stores a list of i2c_algo_pch_data
157 * @pch_i2c_suspended: specifies whether the system is suspended or not 164 * @pch_i2c_suspended: specifies whether the system is suspended or not
158 * perhaps with more lines and words. 165 * perhaps with more lines and words.
166 * @ch_num: specifies the number of i2c instance
159 * 167 *
160 * pch_data has as many elements as maximum I2C channels 168 * pch_data has as many elements as maximum I2C channels
161 */ 169 */
162struct adapter_info { 170struct adapter_info {
163 struct i2c_algo_pch_data pch_data; 171 struct i2c_algo_pch_data pch_data[PCH_I2C_MAX_DEV];
164 bool pch_i2c_suspended; 172 bool pch_i2c_suspended;
173 int ch_num;
165}; 174};
166 175
167 176
@@ -170,8 +179,13 @@ static int pch_clk = 50000; /* specifies I2C clock speed in KHz */
170static wait_queue_head_t pch_event; 179static wait_queue_head_t pch_event;
171static DEFINE_MUTEX(pch_mutex); 180static DEFINE_MUTEX(pch_mutex);
172 181
182/* Definition for ML7213 by OKI SEMICONDUCTOR */
183#define PCI_VENDOR_ID_ROHM 0x10DB
184#define PCI_DEVICE_ID_ML7213_I2C 0x802D
185
173static struct pci_device_id __devinitdata pch_pcidev_id[] = { 186static struct pci_device_id __devinitdata pch_pcidev_id[] = {
174 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PCH_I2C)}, 187 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_I2C), 1, },
188 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_I2C), 2, },
175 {0,} 189 {0,}
176}; 190};
177 191
@@ -212,8 +226,7 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
212 /* Initialize I2C registers */ 226 /* Initialize I2C registers */
213 iowrite32(0x21, p + PCH_I2CNF); 227 iowrite32(0x21, p + PCH_I2CNF);
214 228
215 pch_setbit(adap->pch_base_address, PCH_I2CCTL, 229 pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_I2CCTL_I2CMEN);
216 PCH_I2CCTL_I2CMEN);
217 230
218 if (pch_i2c_speed != 400) 231 if (pch_i2c_speed != 400)
219 pch_i2c_speed = 100; 232 pch_i2c_speed = 100;
@@ -255,7 +268,7 @@ static inline bool ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
255 * @timeout: waiting time counter (us). 268 * @timeout: waiting time counter (us).
256 */ 269 */
257static s32 pch_i2c_wait_for_bus_idle(struct i2c_algo_pch_data *adap, 270static s32 pch_i2c_wait_for_bus_idle(struct i2c_algo_pch_data *adap,
258 s32 timeout) 271 s32 timeout)
259{ 272{
260 void __iomem *p = adap->pch_base_address; 273 void __iomem *p = adap->pch_base_address;
261 274
@@ -475,8 +488,8 @@ static void pch_i2c_sendnack(struct i2c_algo_pch_data *adap)
475 * @last: specifies whether last message or not. 488 * @last: specifies whether last message or not.
476 * @first: specifies whether first message or not. 489 * @first: specifies whether first message or not.
477 */ 490 */
478s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, 491static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
479 u32 last, u32 first) 492 u32 last, u32 first)
480{ 493{
481 struct i2c_algo_pch_data *adap = i2c_adap->algo_data; 494 struct i2c_algo_pch_data *adap = i2c_adap->algo_data;
482 495
@@ -569,10 +582,10 @@ s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
569} 582}
570 583
571/** 584/**
572 * pch_i2c_cb_ch0() - Interrupt handler Call back function 585 * pch_i2c_cb() - Interrupt handler Call back function
573 * @adap: Pointer to struct i2c_algo_pch_data. 586 * @adap: Pointer to struct i2c_algo_pch_data.
574 */ 587 */
575static void pch_i2c_cb_ch0(struct i2c_algo_pch_data *adap) 588static void pch_i2c_cb(struct i2c_algo_pch_data *adap)
576{ 589{
577 u32 sts; 590 u32 sts;
578 void __iomem *p = adap->pch_base_address; 591 void __iomem *p = adap->pch_base_address;
@@ -600,24 +613,30 @@ static void pch_i2c_cb_ch0(struct i2c_algo_pch_data *adap)
600 */ 613 */
601static irqreturn_t pch_i2c_handler(int irq, void *pData) 614static irqreturn_t pch_i2c_handler(int irq, void *pData)
602{ 615{
603 s32 reg_val; 616 u32 reg_val;
604 617 int flag;
605 struct i2c_algo_pch_data *adap_data = (struct i2c_algo_pch_data *)pData; 618 int i;
606 void __iomem *p = adap_data->pch_base_address; 619 struct adapter_info *adap_info = pData;
607 u32 mode = ioread32(p + PCH_I2CMOD) & (BUFFER_MODE | EEPROM_SR_MODE); 620 void __iomem *p;
608 621 u32 mode;
609 if (mode != NORMAL_MODE) { 622
610 pch_err(adap_data, "I2C mode is not supported\n"); 623 for (i = 0, flag = 0; i < adap_info->ch_num; i++) {
611 return IRQ_NONE; 624 p = adap_info->pch_data[i].pch_base_address;
625 mode = ioread32(p + PCH_I2CMOD);
626 mode &= BUFFER_MODE | EEPROM_SR_MODE;
627 if (mode != NORMAL_MODE) {
628 pch_err(adap_info->pch_data,
629 "I2C-%d mode(%d) is not supported\n", mode, i);
630 continue;
631 }
632 reg_val = ioread32(p + PCH_I2CSR);
633 if (reg_val & (I2CMAL_BIT | I2CMCF_BIT | I2CMIF_BIT)) {
634 pch_i2c_cb(&adap_info->pch_data[i]);
635 flag = 1;
636 }
612 } 637 }
613 638
614 reg_val = ioread32(p + PCH_I2CSR); 639 return flag ? IRQ_HANDLED : IRQ_NONE;
615 if (reg_val & (I2CMAL_BIT | I2CMCF_BIT | I2CMIF_BIT))
616 pch_i2c_cb_ch0(adap_data);
617 else
618 return IRQ_NONE;
619
620 return IRQ_HANDLED;
621} 640}
622 641
623/** 642/**
@@ -627,7 +646,7 @@ static irqreturn_t pch_i2c_handler(int irq, void *pData)
627 * @num: number of messages. 646 * @num: number of messages.
628 */ 647 */
629static s32 pch_i2c_xfer(struct i2c_adapter *i2c_adap, 648static s32 pch_i2c_xfer(struct i2c_adapter *i2c_adap,
630 struct i2c_msg *msgs, s32 num) 649 struct i2c_msg *msgs, s32 num)
631{ 650{
632 struct i2c_msg *pmsg; 651 struct i2c_msg *pmsg;
633 u32 i = 0; 652 u32 i = 0;
@@ -710,11 +729,13 @@ static void pch_i2c_disbl_int(struct i2c_algo_pch_data *adap)
710} 729}
711 730
712static int __devinit pch_i2c_probe(struct pci_dev *pdev, 731static int __devinit pch_i2c_probe(struct pci_dev *pdev,
713 const struct pci_device_id *id) 732 const struct pci_device_id *id)
714{ 733{
715 void __iomem *base_addr; 734 void __iomem *base_addr;
716 s32 ret; 735 int ret;
736 int i, j;
717 struct adapter_info *adap_info; 737 struct adapter_info *adap_info;
738 struct i2c_adapter *pch_adap;
718 739
719 pch_pci_dbg(pdev, "Entered.\n"); 740 pch_pci_dbg(pdev, "Entered.\n");
720 741
@@ -744,44 +765,48 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
744 goto err_pci_iomap; 765 goto err_pci_iomap;
745 } 766 }
746 767
747 adap_info->pch_i2c_suspended = false; 768 /* Set the number of I2C channel instance */
769 adap_info->ch_num = id->driver_data;
748 770
749 adap_info->pch_data.p_adapter_info = adap_info; 771 for (i = 0; i < adap_info->ch_num; i++) {
772 pch_adap = &adap_info->pch_data[i].pch_adapter;
773 adap_info->pch_i2c_suspended = false;
750 774
751 adap_info->pch_data.pch_adapter.owner = THIS_MODULE; 775 adap_info->pch_data[i].p_adapter_info = adap_info;
752 adap_info->pch_data.pch_adapter.class = I2C_CLASS_HWMON;
753 strcpy(adap_info->pch_data.pch_adapter.name, KBUILD_MODNAME);
754 adap_info->pch_data.pch_adapter.algo = &pch_algorithm;
755 adap_info->pch_data.pch_adapter.algo_data =
756 &adap_info->pch_data;
757 776
758 /* (i * 0x80) + base_addr; */ 777 pch_adap->owner = THIS_MODULE;
759 adap_info->pch_data.pch_base_address = base_addr; 778 pch_adap->class = I2C_CLASS_HWMON;
779 strcpy(pch_adap->name, KBUILD_MODNAME);
780 pch_adap->algo = &pch_algorithm;
781 pch_adap->algo_data = &adap_info->pch_data[i];
760 782
761 adap_info->pch_data.pch_adapter.dev.parent = &pdev->dev; 783 /* base_addr + offset; */
784 adap_info->pch_data[i].pch_base_address = base_addr + 0x100 * i;
762 785
763 ret = i2c_add_adapter(&(adap_info->pch_data.pch_adapter)); 786 pch_adap->dev.parent = &pdev->dev;
764 787
765 if (ret) { 788 ret = i2c_add_adapter(pch_adap);
766 pch_pci_err(pdev, "i2c_add_adapter FAILED\n"); 789 if (ret) {
767 goto err_i2c_add_adapter; 790 pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
768 } 791 goto err_i2c_add_adapter;
792 }
769 793
770 pch_i2c_init(&adap_info->pch_data); 794 pch_i2c_init(&adap_info->pch_data[i]);
795 }
771 ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED, 796 ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
772 KBUILD_MODNAME, &adap_info->pch_data); 797 KBUILD_MODNAME, adap_info);
773 if (ret) { 798 if (ret) {
774 pch_pci_err(pdev, "request_irq FAILED\n"); 799 pch_pci_err(pdev, "request_irq FAILED\n");
775 goto err_request_irq; 800 goto err_i2c_add_adapter;
776 } 801 }
777 802
778 pci_set_drvdata(pdev, adap_info); 803 pci_set_drvdata(pdev, adap_info);
779 pch_pci_dbg(pdev, "returns %d.\n", ret); 804 pch_pci_dbg(pdev, "returns %d.\n", ret);
780 return 0; 805 return 0;
781 806
782err_request_irq:
783 i2c_del_adapter(&(adap_info->pch_data.pch_adapter));
784err_i2c_add_adapter: 807err_i2c_add_adapter:
808 for (j = 0; j < i; j++)
809 i2c_del_adapter(&adap_info->pch_data[j].pch_adapter);
785 pci_iounmap(pdev, base_addr); 810 pci_iounmap(pdev, base_addr);
786err_pci_iomap: 811err_pci_iomap:
787 pci_release_regions(pdev); 812 pci_release_regions(pdev);
@@ -794,17 +819,22 @@ err_pci_enable:
794 819
795static void __devexit pch_i2c_remove(struct pci_dev *pdev) 820static void __devexit pch_i2c_remove(struct pci_dev *pdev)
796{ 821{
822 int i;
797 struct adapter_info *adap_info = pci_get_drvdata(pdev); 823 struct adapter_info *adap_info = pci_get_drvdata(pdev);
798 824
799 pch_i2c_disbl_int(&adap_info->pch_data); 825 free_irq(pdev->irq, adap_info);
800 free_irq(pdev->irq, &adap_info->pch_data);
801 i2c_del_adapter(&(adap_info->pch_data.pch_adapter));
802 826
803 if (adap_info->pch_data.pch_base_address) { 827 for (i = 0; i < adap_info->ch_num; i++) {
804 pci_iounmap(pdev, adap_info->pch_data.pch_base_address); 828 pch_i2c_disbl_int(&adap_info->pch_data[i]);
805 adap_info->pch_data.pch_base_address = 0; 829 i2c_del_adapter(&adap_info->pch_data[i].pch_adapter);
806 } 830 }
807 831
832 if (adap_info->pch_data[0].pch_base_address)
833 pci_iounmap(pdev, adap_info->pch_data[0].pch_base_address);
834
835 for (i = 0; i < adap_info->ch_num; i++)
836 adap_info->pch_data[i].pch_base_address = 0;
837
808 pci_set_drvdata(pdev, NULL); 838 pci_set_drvdata(pdev, NULL);
809 839
810 pci_release_regions(pdev); 840 pci_release_regions(pdev);
@@ -817,17 +847,22 @@ static void __devexit pch_i2c_remove(struct pci_dev *pdev)
817static int pch_i2c_suspend(struct pci_dev *pdev, pm_message_t state) 847static int pch_i2c_suspend(struct pci_dev *pdev, pm_message_t state)
818{ 848{
819 int ret; 849 int ret;
850 int i;
820 struct adapter_info *adap_info = pci_get_drvdata(pdev); 851 struct adapter_info *adap_info = pci_get_drvdata(pdev);
821 void __iomem *p = adap_info->pch_data.pch_base_address; 852 void __iomem *p = adap_info->pch_data[0].pch_base_address;
822 853
823 adap_info->pch_i2c_suspended = true; 854 adap_info->pch_i2c_suspended = true;
824 855
825 while ((adap_info->pch_data.pch_i2c_xfer_in_progress)) { 856 for (i = 0; i < adap_info->ch_num; i++) {
826 /* Wait until all channel transfers are completed */ 857 while ((adap_info->pch_data[i].pch_i2c_xfer_in_progress)) {
827 msleep(20); 858 /* Wait until all channel transfers are completed */
859 msleep(20);
860 }
828 } 861 }
862
829 /* Disable the i2c interrupts */ 863 /* Disable the i2c interrupts */
830 pch_i2c_disbl_int(&adap_info->pch_data); 864 for (i = 0; i < adap_info->ch_num; i++)
865 pch_i2c_disbl_int(&adap_info->pch_data[i]);
831 866
832 pch_pci_dbg(pdev, "I2CSR = %x I2CBUFSTA = %x I2CESRSTA = %x " 867 pch_pci_dbg(pdev, "I2CSR = %x I2CBUFSTA = %x I2CESRSTA = %x "
833 "invoked function pch_i2c_disbl_int successfully\n", 868 "invoked function pch_i2c_disbl_int successfully\n",
@@ -850,6 +885,7 @@ static int pch_i2c_suspend(struct pci_dev *pdev, pm_message_t state)
850 885
851static int pch_i2c_resume(struct pci_dev *pdev) 886static int pch_i2c_resume(struct pci_dev *pdev)
852{ 887{
888 int i;
853 struct adapter_info *adap_info = pci_get_drvdata(pdev); 889 struct adapter_info *adap_info = pci_get_drvdata(pdev);
854 890
855 pci_set_power_state(pdev, PCI_D0); 891 pci_set_power_state(pdev, PCI_D0);
@@ -862,7 +898,8 @@ static int pch_i2c_resume(struct pci_dev *pdev)
862 898
863 pci_enable_wake(pdev, PCI_D3hot, 0); 899 pci_enable_wake(pdev, PCI_D3hot, 0);
864 900
865 pch_i2c_init(&adap_info->pch_data); 901 for (i = 0; i < adap_info->ch_num; i++)
902 pch_i2c_init(&adap_info->pch_data[i]);
866 903
867 adap_info->pch_i2c_suspended = false; 904 adap_info->pch_i2c_suspended = false;
868 905
@@ -894,7 +931,7 @@ static void __exit pch_pci_exit(void)
894} 931}
895module_exit(pch_pci_exit); 932module_exit(pch_pci_exit);
896 933
897MODULE_DESCRIPTION("PCH I2C PCI Driver"); 934MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH I2C Driver");
898MODULE_LICENSE("GPL"); 935MODULE_LICENSE("GPL");
899MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.okisemi.com>"); 936MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.okisemi.com>");
900module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR)); 937module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR));
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 8022e2390a5a..caf96dc8ca1b 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -118,6 +118,8 @@ static void mxs_i2c_reset(struct mxs_i2c_dev *i2c)
118{ 118{
119 mxs_reset_block(i2c->regs); 119 mxs_reset_block(i2c->regs);
120 writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET); 120 writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
121 writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE,
122 i2c->regs + MXS_I2C_QUEUECTRL_SET);
121} 123}
122 124
123static void mxs_i2c_pioq_setup_read(struct mxs_i2c_dev *i2c, u8 addr, int len, 125static void mxs_i2c_pioq_setup_read(struct mxs_i2c_dev *i2c, u8 addr, int len,
@@ -347,8 +349,6 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
347 349
348 /* Do reset to enforce correct startup after pinmuxing */ 350 /* Do reset to enforce correct startup after pinmuxing */
349 mxs_i2c_reset(i2c); 351 mxs_i2c_reset(i2c);
350 writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE,
351 i2c->regs + MXS_I2C_QUEUECTRL_SET);
352 352
353 adap = &i2c->adapter; 353 adap = &i2c->adapter;
354 strlcpy(adap->name, "MXS I2C adapter", sizeof(adap->name)); 354 strlcpy(adap->name, "MXS I2C adapter", sizeof(adap->name));
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
new file mode 100644
index 000000000000..6659d269b841
--- /dev/null
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -0,0 +1,176 @@
1/*
2 * The CE4100's I2C device is more or less the same one as found on PXA.
3 * It does not support slave mode, the register slightly moved. This PCI
4 * device provides three bars, every contains a single I2C controller.
5 */
6#include <linux/pci.h>
7#include <linux/platform_device.h>
8#include <linux/i2c/pxa-i2c.h>
9#include <linux/of.h>
10#include <linux/of_device.h>
11#include <linux/of_address.h>
12
13#define CE4100_PCI_I2C_DEVS 3
14
15struct ce4100_devices {
16 struct platform_device *pdev[CE4100_PCI_I2C_DEVS];
17};
18
19static struct platform_device *add_i2c_device(struct pci_dev *dev, int bar)
20{
21 struct platform_device *pdev;
22 struct i2c_pxa_platform_data pdata;
23 struct resource res[2];
24 struct device_node *child;
25 static int devnum;
26 int ret;
27
28 memset(&pdata, 0, sizeof(struct i2c_pxa_platform_data));
29 memset(&res, 0, sizeof(res));
30
31 res[0].flags = IORESOURCE_MEM;
32 res[0].start = pci_resource_start(dev, bar);
33 res[0].end = pci_resource_end(dev, bar);
34
35 res[1].flags = IORESOURCE_IRQ;
36 res[1].start = dev->irq;
37 res[1].end = dev->irq;
38
39 for_each_child_of_node(dev->dev.of_node, child) {
40 const void *prop;
41 struct resource r;
42 int ret;
43
44 ret = of_address_to_resource(child, 0, &r);
45 if (ret < 0)
46 continue;
47 if (r.start != res[0].start)
48 continue;
49 if (r.end != res[0].end)
50 continue;
51 if (r.flags != res[0].flags)
52 continue;
53
54 prop = of_get_property(child, "fast-mode", NULL);
55 if (prop)
56 pdata.fast_mode = 1;
57
58 break;
59 }
60
61 if (!child) {
62 dev_err(&dev->dev, "failed to match a DT node for bar %d.\n",
63 bar);
64 ret = -EINVAL;
65 goto out;
66 }
67
68 pdev = platform_device_alloc("ce4100-i2c", devnum);
69 if (!pdev) {
70 of_node_put(child);
71 ret = -ENOMEM;
72 goto out;
73 }
74 pdev->dev.parent = &dev->dev;
75 pdev->dev.of_node = child;
76
77 ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
78 if (ret)
79 goto err;
80
81 ret = platform_device_add_data(pdev, &pdata, sizeof(pdata));
82 if (ret)
83 goto err;
84
85 ret = platform_device_add(pdev);
86 if (ret)
87 goto err;
88 devnum++;
89 return pdev;
90err:
91 platform_device_put(pdev);
92out:
93 return ERR_PTR(ret);
94}
95
96static int __devinit ce4100_i2c_probe(struct pci_dev *dev,
97 const struct pci_device_id *ent)
98{
99 int ret;
100 int i;
101 struct ce4100_devices *sds;
102
103 ret = pci_enable_device_mem(dev);
104 if (ret)
105 return ret;
106
107 if (!dev->dev.of_node) {
108 dev_err(&dev->dev, "Missing device tree node.\n");
109 return -EINVAL;
110 }
111 sds = kzalloc(sizeof(*sds), GFP_KERNEL);
112 if (!sds)
113 goto err_mem;
114
115 for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {
116 sds->pdev[i] = add_i2c_device(dev, i);
117 if (IS_ERR(sds->pdev[i])) {
118 while (--i >= 0)
119 platform_device_unregister(sds->pdev[i]);
120 goto err_dev_add;
121 }
122 }
123 pci_set_drvdata(dev, sds);
124 return 0;
125
126err_dev_add:
127 pci_set_drvdata(dev, NULL);
128 kfree(sds);
129err_mem:
130 pci_disable_device(dev);
131 return ret;
132}
133
134static void __devexit ce4100_i2c_remove(struct pci_dev *dev)
135{
136 struct ce4100_devices *sds;
137 unsigned int i;
138
139 sds = pci_get_drvdata(dev);
140 pci_set_drvdata(dev, NULL);
141
142 for (i = 0; i < ARRAY_SIZE(sds->pdev); i++)
143 platform_device_unregister(sds->pdev[i]);
144
145 pci_disable_device(dev);
146 kfree(sds);
147}
148
149static struct pci_device_id ce4100_i2c_devices[] __devinitdata = {
150 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e68)},
151 { },
152};
153MODULE_DEVICE_TABLE(pci, ce4100_i2c_devices);
154
155static struct pci_driver ce4100_i2c_driver = {
156 .name = "ce4100_i2c",
157 .id_table = ce4100_i2c_devices,
158 .probe = ce4100_i2c_probe,
159 .remove = __devexit_p(ce4100_i2c_remove),
160};
161
162static int __init ce4100_i2c_init(void)
163{
164 return pci_register_driver(&ce4100_i2c_driver);
165}
166module_init(ce4100_i2c_init);
167
168static void __exit ce4100_i2c_exit(void)
169{
170 pci_unregister_driver(&ce4100_i2c_driver);
171}
172module_exit(ce4100_i2c_exit);
173
174MODULE_DESCRIPTION("CE4100 PCI-I2C glue code for PXA's driver");
175MODULE_LICENSE("GPL v2");
176MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index f4c19a97e0b3..f59224a5c761 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -29,38 +29,75 @@
29#include <linux/errno.h> 29#include <linux/errno.h>
30#include <linux/interrupt.h> 30#include <linux/interrupt.h>
31#include <linux/i2c-pxa.h> 31#include <linux/i2c-pxa.h>
32#include <linux/of_i2c.h>
32#include <linux/platform_device.h> 33#include <linux/platform_device.h>
33#include <linux/err.h> 34#include <linux/err.h>
34#include <linux/clk.h> 35#include <linux/clk.h>
35#include <linux/slab.h> 36#include <linux/slab.h>
36#include <linux/io.h> 37#include <linux/io.h>
38#include <linux/i2c/pxa-i2c.h>
37 39
38#include <asm/irq.h> 40#include <asm/irq.h>
39#include <plat/i2c.h> 41
42#ifndef CONFIG_HAVE_CLK
43#define clk_get(dev, id) NULL
44#define clk_put(clk) do { } while (0)
45#define clk_disable(clk) do { } while (0)
46#define clk_enable(clk) do { } while (0)
47#endif
48
49struct pxa_reg_layout {
50 u32 ibmr;
51 u32 idbr;
52 u32 icr;
53 u32 isr;
54 u32 isar;
55};
56
57enum pxa_i2c_types {
58 REGS_PXA2XX,
59 REGS_PXA3XX,
60 REGS_CE4100,
61};
40 62
41/* 63/*
42 * I2C register offsets will be shifted 0 or 1 bit left, depending on 64 * I2C registers definitions
43 * different SoCs
44 */ 65 */
45#define REG_SHIFT_0 (0 << 0) 66static struct pxa_reg_layout pxa_reg_layout[] = {
46#define REG_SHIFT_1 (1 << 0) 67 [REGS_PXA2XX] = {
47#define REG_SHIFT(d) ((d) & 0x1) 68 .ibmr = 0x00,
69 .idbr = 0x08,
70 .icr = 0x10,
71 .isr = 0x18,
72 .isar = 0x20,
73 },
74 [REGS_PXA3XX] = {
75 .ibmr = 0x00,
76 .idbr = 0x04,
77 .icr = 0x08,
78 .isr = 0x0c,
79 .isar = 0x10,
80 },
81 [REGS_CE4100] = {
82 .ibmr = 0x14,
83 .idbr = 0x0c,
84 .icr = 0x00,
85 .isr = 0x04,
86 /* no isar register */
87 },
88};
48 89
49static const struct platform_device_id i2c_pxa_id_table[] = { 90static const struct platform_device_id i2c_pxa_id_table[] = {
50 { "pxa2xx-i2c", REG_SHIFT_1 }, 91 { "pxa2xx-i2c", REGS_PXA2XX },
51 { "pxa3xx-pwri2c", REG_SHIFT_0 }, 92 { "pxa3xx-pwri2c", REGS_PXA3XX },
93 { "ce4100-i2c", REGS_CE4100 },
52 { }, 94 { },
53}; 95};
54MODULE_DEVICE_TABLE(platform, i2c_pxa_id_table); 96MODULE_DEVICE_TABLE(platform, i2c_pxa_id_table);
55 97
56/* 98/*
57 * I2C registers and bit definitions 99 * I2C bit definitions
58 */ 100 */
59#define IBMR (0x00)
60#define IDBR (0x08)
61#define ICR (0x10)
62#define ISR (0x18)
63#define ISAR (0x20)
64 101
65#define ICR_START (1 << 0) /* start bit */ 102#define ICR_START (1 << 0) /* start bit */
66#define ICR_STOP (1 << 1) /* stop bit */ 103#define ICR_STOP (1 << 1) /* stop bit */
@@ -111,7 +148,11 @@ struct pxa_i2c {
111 u32 icrlog[32]; 148 u32 icrlog[32];
112 149
113 void __iomem *reg_base; 150 void __iomem *reg_base;
114 unsigned int reg_shift; 151 void __iomem *reg_ibmr;
152 void __iomem *reg_idbr;
153 void __iomem *reg_icr;
154 void __iomem *reg_isr;
155 void __iomem *reg_isar;
115 156
116 unsigned long iobase; 157 unsigned long iobase;
117 unsigned long iosize; 158 unsigned long iosize;
@@ -121,11 +162,11 @@ struct pxa_i2c {
121 unsigned int fast_mode :1; 162 unsigned int fast_mode :1;
122}; 163};
123 164
124#define _IBMR(i2c) ((i2c)->reg_base + (0x0 << (i2c)->reg_shift)) 165#define _IBMR(i2c) ((i2c)->reg_ibmr)
125#define _IDBR(i2c) ((i2c)->reg_base + (0x4 << (i2c)->reg_shift)) 166#define _IDBR(i2c) ((i2c)->reg_idbr)
126#define _ICR(i2c) ((i2c)->reg_base + (0x8 << (i2c)->reg_shift)) 167#define _ICR(i2c) ((i2c)->reg_icr)
127#define _ISR(i2c) ((i2c)->reg_base + (0xc << (i2c)->reg_shift)) 168#define _ISR(i2c) ((i2c)->reg_isr)
128#define _ISAR(i2c) ((i2c)->reg_base + (0x10 << (i2c)->reg_shift)) 169#define _ISAR(i2c) ((i2c)->reg_isar)
129 170
130/* 171/*
131 * I2C Slave mode address 172 * I2C Slave mode address
@@ -418,7 +459,8 @@ static void i2c_pxa_reset(struct pxa_i2c *i2c)
418 writel(I2C_ISR_INIT, _ISR(i2c)); 459 writel(I2C_ISR_INIT, _ISR(i2c));
419 writel(readl(_ICR(i2c)) & ~ICR_UR, _ICR(i2c)); 460 writel(readl(_ICR(i2c)) & ~ICR_UR, _ICR(i2c));
420 461
421 writel(i2c->slave_addr, _ISAR(i2c)); 462 if (i2c->reg_isar)
463 writel(i2c->slave_addr, _ISAR(i2c));
422 464
423 /* set control register values */ 465 /* set control register values */
424 writel(I2C_ICR_INIT | (i2c->fast_mode ? ICR_FM : 0), _ICR(i2c)); 466 writel(I2C_ICR_INIT | (i2c->fast_mode ? ICR_FM : 0), _ICR(i2c));
@@ -729,8 +771,10 @@ static int i2c_pxa_do_xfer(struct pxa_i2c *i2c, struct i2c_msg *msg, int num)
729 */ 771 */
730 ret = i2c->msg_idx; 772 ret = i2c->msg_idx;
731 773
732 if (timeout == 0) 774 if (!timeout && i2c->msg_num) {
733 i2c_pxa_scream_blue_murder(i2c, "timeout"); 775 i2c_pxa_scream_blue_murder(i2c, "timeout");
776 ret = I2C_RETRY;
777 }
734 778
735 out: 779 out:
736 return ret; 780 return ret;
@@ -915,11 +959,16 @@ static void i2c_pxa_irq_rxfull(struct pxa_i2c *i2c, u32 isr)
915 writel(icr, _ICR(i2c)); 959 writel(icr, _ICR(i2c));
916} 960}
917 961
962#define VALID_INT_SOURCE (ISR_SSD | ISR_ALD | ISR_ITE | ISR_IRF | \
963 ISR_SAD | ISR_BED)
918static irqreturn_t i2c_pxa_handler(int this_irq, void *dev_id) 964static irqreturn_t i2c_pxa_handler(int this_irq, void *dev_id)
919{ 965{
920 struct pxa_i2c *i2c = dev_id; 966 struct pxa_i2c *i2c = dev_id;
921 u32 isr = readl(_ISR(i2c)); 967 u32 isr = readl(_ISR(i2c));
922 968
969 if (!(isr & VALID_INT_SOURCE))
970 return IRQ_NONE;
971
923 if (i2c_debug > 2 && 0) { 972 if (i2c_debug > 2 && 0) {
924 dev_dbg(&i2c->adap.dev, "%s: ISR=%08x, ICR=%08x, IBMR=%02x\n", 973 dev_dbg(&i2c->adap.dev, "%s: ISR=%08x, ICR=%08x, IBMR=%02x\n",
925 __func__, isr, readl(_ICR(i2c)), readl(_IBMR(i2c))); 974 __func__, isr, readl(_ICR(i2c)), readl(_IBMR(i2c)));
@@ -934,7 +983,7 @@ static irqreturn_t i2c_pxa_handler(int this_irq, void *dev_id)
934 /* 983 /*
935 * Always clear all pending IRQs. 984 * Always clear all pending IRQs.
936 */ 985 */
937 writel(isr & (ISR_SSD|ISR_ALD|ISR_ITE|ISR_IRF|ISR_SAD|ISR_BED), _ISR(i2c)); 986 writel(isr & VALID_INT_SOURCE, _ISR(i2c));
938 987
939 if (isr & ISR_SAD) 988 if (isr & ISR_SAD)
940 i2c_pxa_slave_start(i2c, isr); 989 i2c_pxa_slave_start(i2c, isr);
@@ -1001,6 +1050,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
1001 struct resource *res; 1050 struct resource *res;
1002 struct i2c_pxa_platform_data *plat = dev->dev.platform_data; 1051 struct i2c_pxa_platform_data *plat = dev->dev.platform_data;
1003 const struct platform_device_id *id = platform_get_device_id(dev); 1052 const struct platform_device_id *id = platform_get_device_id(dev);
1053 enum pxa_i2c_types i2c_type = id->driver_data;
1004 int ret; 1054 int ret;
1005 int irq; 1055 int irq;
1006 1056
@@ -1044,7 +1094,13 @@ static int i2c_pxa_probe(struct platform_device *dev)
1044 ret = -EIO; 1094 ret = -EIO;
1045 goto eremap; 1095 goto eremap;
1046 } 1096 }
1047 i2c->reg_shift = REG_SHIFT(id->driver_data); 1097
1098 i2c->reg_ibmr = i2c->reg_base + pxa_reg_layout[i2c_type].ibmr;
1099 i2c->reg_idbr = i2c->reg_base + pxa_reg_layout[i2c_type].idbr;
1100 i2c->reg_icr = i2c->reg_base + pxa_reg_layout[i2c_type].icr;
1101 i2c->reg_isr = i2c->reg_base + pxa_reg_layout[i2c_type].isr;
1102 if (i2c_type != REGS_CE4100)
1103 i2c->reg_isar = i2c->reg_base + pxa_reg_layout[i2c_type].isar;
1048 1104
1049 i2c->iobase = res->start; 1105 i2c->iobase = res->start;
1050 i2c->iosize = resource_size(res); 1106 i2c->iosize = resource_size(res);
@@ -1072,7 +1128,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
1072 i2c->adap.algo = &i2c_pxa_pio_algorithm; 1128 i2c->adap.algo = &i2c_pxa_pio_algorithm;
1073 } else { 1129 } else {
1074 i2c->adap.algo = &i2c_pxa_algorithm; 1130 i2c->adap.algo = &i2c_pxa_algorithm;
1075 ret = request_irq(irq, i2c_pxa_handler, IRQF_DISABLED, 1131 ret = request_irq(irq, i2c_pxa_handler, IRQF_SHARED,
1076 i2c->adap.name, i2c); 1132 i2c->adap.name, i2c);
1077 if (ret) 1133 if (ret)
1078 goto ereqirq; 1134 goto ereqirq;
@@ -1082,12 +1138,19 @@ static int i2c_pxa_probe(struct platform_device *dev)
1082 1138
1083 i2c->adap.algo_data = i2c; 1139 i2c->adap.algo_data = i2c;
1084 i2c->adap.dev.parent = &dev->dev; 1140 i2c->adap.dev.parent = &dev->dev;
1141#ifdef CONFIG_OF
1142 i2c->adap.dev.of_node = dev->dev.of_node;
1143#endif
1085 1144
1086 ret = i2c_add_numbered_adapter(&i2c->adap); 1145 if (i2c_type == REGS_CE4100)
1146 ret = i2c_add_adapter(&i2c->adap);
1147 else
1148 ret = i2c_add_numbered_adapter(&i2c->adap);
1087 if (ret < 0) { 1149 if (ret < 0) {
1088 printk(KERN_INFO "I2C: Failed to add bus\n"); 1150 printk(KERN_INFO "I2C: Failed to add bus\n");
1089 goto eadapt; 1151 goto eadapt;
1090 } 1152 }
1153 of_i2c_register_devices(&i2c->adap);
1091 1154
1092 platform_set_drvdata(dev, i2c); 1155 platform_set_drvdata(dev, i2c);
1093 1156
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 6f190f4cdbc0..9bec8699b8a3 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -34,6 +34,16 @@ config LEDS_ATMEL_PWM
34 This option enables support for LEDs driven using outputs 34 This option enables support for LEDs driven using outputs
35 of the dedicated PWM controller found on newer Atmel SOCs. 35 of the dedicated PWM controller found on newer Atmel SOCs.
36 36
37config LEDS_LM3530
38 tristate "LCD Backlight driver for LM3530"
39 depends on LEDS_CLASS
40 depends on I2C
41 help
42 This option enables support for the LCD backlight using
43 LM3530 ambient light sensor chip. This ALS chip can be
44 controlled manually or using PWM input or using ambient
45 light automatically.
46
37config LEDS_LOCOMO 47config LEDS_LOCOMO
38 tristate "LED Support for Locomo device" 48 tristate "LED Support for Locomo device"
39 depends on LEDS_CLASS 49 depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index aae6989ff6b6..39c80fca84d2 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_LEDS_88PM860X) += leds-88pm860x.o
9obj-$(CONFIG_LEDS_ATMEL_PWM) += leds-atmel-pwm.o 9obj-$(CONFIG_LEDS_ATMEL_PWM) += leds-atmel-pwm.o
10obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o 10obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
11obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o 11obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
12obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o
12obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o 13obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o
13obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o 14obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
14obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o 15obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 19dc4b61a105..3ebe3824662d 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -19,7 +19,7 @@
19#include <linux/leds.h> 19#include <linux/leds.h>
20#include <linux/leds-bd2802.h> 20#include <linux/leds-bd2802.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22 22#include <linux/pm.h>
23 23
24#define LED_CTL(rgb2en, rgb1en) ((rgb2en) << 4 | ((rgb1en) << 0)) 24#define LED_CTL(rgb2en, rgb1en) ((rgb2en) << 4 | ((rgb1en) << 0))
25 25
@@ -319,20 +319,6 @@ static void bd2802_turn_off(struct bd2802_led *led, enum led_ids id,
319 bd2802_update_state(led, id, color, BD2802_OFF); 319 bd2802_update_state(led, id, color, BD2802_OFF);
320} 320}
321 321
322static void bd2802_restore_state(struct bd2802_led *led)
323{
324 int i;
325
326 for (i = 0; i < LED_NUM; i++) {
327 if (led->led[i].r)
328 bd2802_turn_on(led, i, RED, led->led[i].r);
329 if (led->led[i].g)
330 bd2802_turn_on(led, i, GREEN, led->led[i].g);
331 if (led->led[i].b)
332 bd2802_turn_on(led, i, BLUE, led->led[i].b);
333 }
334}
335
336#define BD2802_SET_REGISTER(reg_addr, reg_name) \ 322#define BD2802_SET_REGISTER(reg_addr, reg_name) \
337static ssize_t bd2802_store_reg##reg_addr(struct device *dev, \ 323static ssize_t bd2802_store_reg##reg_addr(struct device *dev, \
338 struct device_attribute *attr, const char *buf, size_t count) \ 324 struct device_attribute *attr, const char *buf, size_t count) \
@@ -761,8 +747,25 @@ static int __exit bd2802_remove(struct i2c_client *client)
761 return 0; 747 return 0;
762} 748}
763 749
764static int bd2802_suspend(struct i2c_client *client, pm_message_t mesg) 750#ifdef CONFIG_PM
751
752static void bd2802_restore_state(struct bd2802_led *led)
765{ 753{
754 int i;
755
756 for (i = 0; i < LED_NUM; i++) {
757 if (led->led[i].r)
758 bd2802_turn_on(led, i, RED, led->led[i].r);
759 if (led->led[i].g)
760 bd2802_turn_on(led, i, GREEN, led->led[i].g);
761 if (led->led[i].b)
762 bd2802_turn_on(led, i, BLUE, led->led[i].b);
763 }
764}
765
766static int bd2802_suspend(struct device *dev)
767{
768 struct i2c_client *client = to_i2c_client(dev);
766 struct bd2802_led *led = i2c_get_clientdata(client); 769 struct bd2802_led *led = i2c_get_clientdata(client);
767 770
768 gpio_set_value(led->pdata->reset_gpio, 0); 771 gpio_set_value(led->pdata->reset_gpio, 0);
@@ -770,8 +773,9 @@ static int bd2802_suspend(struct i2c_client *client, pm_message_t mesg)
770 return 0; 773 return 0;
771} 774}
772 775
773static int bd2802_resume(struct i2c_client *client) 776static int bd2802_resume(struct device *dev)
774{ 777{
778 struct i2c_client *client = to_i2c_client(dev);
775 struct bd2802_led *led = i2c_get_clientdata(client); 779 struct bd2802_led *led = i2c_get_clientdata(client);
776 780
777 if (!bd2802_is_all_off(led) || led->adf_on) { 781 if (!bd2802_is_all_off(led) || led->adf_on) {
@@ -782,6 +786,12 @@ static int bd2802_resume(struct i2c_client *client)
782 return 0; 786 return 0;
783} 787}
784 788
789static SIMPLE_DEV_PM_OPS(bd2802_pm, bd2802_suspend, bd2802_resume);
790#define BD2802_PM (&bd2802_pm)
791#else /* CONFIG_PM */
792#define BD2802_PM NULL
793#endif
794
785static const struct i2c_device_id bd2802_id[] = { 795static const struct i2c_device_id bd2802_id[] = {
786 { "BD2802", 0 }, 796 { "BD2802", 0 },
787 { } 797 { }
@@ -791,11 +801,10 @@ MODULE_DEVICE_TABLE(i2c, bd2802_id);
791static struct i2c_driver bd2802_i2c_driver = { 801static struct i2c_driver bd2802_i2c_driver = {
792 .driver = { 802 .driver = {
793 .name = "BD2802", 803 .name = "BD2802",
804 .pm = BD2802_PM,
794 }, 805 },
795 .probe = bd2802_probe, 806 .probe = bd2802_probe,
796 .remove = __exit_p(bd2802_remove), 807 .remove = __exit_p(bd2802_remove),
797 .suspend = bd2802_suspend,
798 .resume = bd2802_resume,
799 .id_table = bd2802_id, 808 .id_table = bd2802_id,
800}; 809};
801 810
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
new file mode 100644
index 000000000000..e7089a1f6cb6
--- /dev/null
+++ b/drivers/leds/leds-lm3530.c
@@ -0,0 +1,378 @@
1/*
2 * Copyright (C) 2011 ST-Ericsson SA.
3 * Copyright (C) 2009 Motorola, Inc.
4 *
5 * License Terms: GNU General Public License v2
6 *
7 * Simple driver for National Semiconductor LM3530 Backlight driver chip
8 *
9 * Author: Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>
10 * based on leds-lm3530.c by Dan Murphy <D.Murphy@motorola.com>
11 */
12
13#include <linux/i2c.h>
14#include <linux/leds.h>
15#include <linux/slab.h>
16#include <linux/platform_device.h>
17#include <linux/input.h>
18#include <linux/led-lm3530.h>
19#include <linux/types.h>
20
21#define LM3530_LED_DEV "lcd-backlight"
22#define LM3530_NAME "lm3530-led"
23
24#define LM3530_GEN_CONFIG 0x10
25#define LM3530_ALS_CONFIG 0x20
26#define LM3530_BRT_RAMP_RATE 0x30
27#define LM3530_ALS_ZONE_REG 0x40
28#define LM3530_ALS_IMP_SELECT 0x41
29#define LM3530_BRT_CTRL_REG 0xA0
30#define LM3530_ALS_ZB0_REG 0x60
31#define LM3530_ALS_ZB1_REG 0x61
32#define LM3530_ALS_ZB2_REG 0x62
33#define LM3530_ALS_ZB3_REG 0x63
34#define LM3530_ALS_Z0T_REG 0x70
35#define LM3530_ALS_Z1T_REG 0x71
36#define LM3530_ALS_Z2T_REG 0x72
37#define LM3530_ALS_Z3T_REG 0x73
38#define LM3530_ALS_Z4T_REG 0x74
39#define LM3530_REG_MAX 15
40
41/* General Control Register */
42#define LM3530_EN_I2C_SHIFT (0)
43#define LM3530_RAMP_LAW_SHIFT (1)
44#define LM3530_MAX_CURR_SHIFT (2)
45#define LM3530_EN_PWM_SHIFT (5)
46#define LM3530_PWM_POL_SHIFT (6)
47#define LM3530_EN_PWM_SIMPLE_SHIFT (7)
48
49#define LM3530_ENABLE_I2C (1 << LM3530_EN_I2C_SHIFT)
50#define LM3530_ENABLE_PWM (1 << LM3530_EN_PWM_SHIFT)
51#define LM3530_POL_LOW (1 << LM3530_PWM_POL_SHIFT)
52#define LM3530_ENABLE_PWM_SIMPLE (1 << LM3530_EN_PWM_SIMPLE_SHIFT)
53
54/* ALS Config Register Options */
55#define LM3530_ALS_AVG_TIME_SHIFT (0)
56#define LM3530_EN_ALS_SHIFT (3)
57#define LM3530_ALS_SEL_SHIFT (5)
58
59#define LM3530_ENABLE_ALS (3 << LM3530_EN_ALS_SHIFT)
60
61/* Brightness Ramp Rate Register */
62#define LM3530_BRT_RAMP_FALL_SHIFT (0)
63#define LM3530_BRT_RAMP_RISE_SHIFT (3)
64
65/* ALS Resistor Select */
66#define LM3530_ALS1_IMP_SHIFT (0)
67#define LM3530_ALS2_IMP_SHIFT (4)
68
69/* Zone Boundary Register defaults */
70#define LM3530_DEF_ZB_0 (0x33)
71#define LM3530_DEF_ZB_1 (0x66)
72#define LM3530_DEF_ZB_2 (0x99)
73#define LM3530_DEF_ZB_3 (0xCC)
74
75/* Zone Target Register defaults */
76#define LM3530_DEF_ZT_0 (0x19)
77#define LM3530_DEF_ZT_1 (0x33)
78#define LM3530_DEF_ZT_2 (0x4C)
79#define LM3530_DEF_ZT_3 (0x66)
80#define LM3530_DEF_ZT_4 (0x7F)
81
82struct lm3530_mode_map {
83 const char *mode;
84 enum lm3530_mode mode_val;
85};
86
87static struct lm3530_mode_map mode_map[] = {
88 { "man", LM3530_BL_MODE_MANUAL },
89 { "als", LM3530_BL_MODE_ALS },
90 { "pwm", LM3530_BL_MODE_PWM },
91};
92
93/**
94 * struct lm3530_data
95 * @led_dev: led class device
96 * @client: i2c client
97 * @pdata: LM3530 platform data
98 * @mode: mode of operation - manual, ALS, PWM
99 */
100struct lm3530_data {
101 struct led_classdev led_dev;
102 struct i2c_client *client;
103 struct lm3530_platform_data *pdata;
104 enum lm3530_mode mode;
105};
106
107static const u8 lm3530_reg[LM3530_REG_MAX] = {
108 LM3530_GEN_CONFIG,
109 LM3530_ALS_CONFIG,
110 LM3530_BRT_RAMP_RATE,
111 LM3530_ALS_ZONE_REG,
112 LM3530_ALS_IMP_SELECT,
113 LM3530_BRT_CTRL_REG,
114 LM3530_ALS_ZB0_REG,
115 LM3530_ALS_ZB1_REG,
116 LM3530_ALS_ZB2_REG,
117 LM3530_ALS_ZB3_REG,
118 LM3530_ALS_Z0T_REG,
119 LM3530_ALS_Z1T_REG,
120 LM3530_ALS_Z2T_REG,
121 LM3530_ALS_Z3T_REG,
122 LM3530_ALS_Z4T_REG,
123};
124
125static int lm3530_get_mode_from_str(const char *str)
126{
127 int i;
128
129 for (i = 0; i < ARRAY_SIZE(mode_map); i++)
130 if (sysfs_streq(str, mode_map[i].mode))
131 return mode_map[i].mode_val;
132
133 return -1;
134}
135
136static int lm3530_init_registers(struct lm3530_data *drvdata)
137{
138 int ret = 0;
139 int i;
140 u8 gen_config;
141 u8 als_config = 0;
142 u8 brt_ramp;
143 u8 als_imp_sel = 0;
144 u8 brightness;
145 u8 reg_val[LM3530_REG_MAX];
146 struct lm3530_platform_data *pltfm = drvdata->pdata;
147 struct i2c_client *client = drvdata->client;
148
149 gen_config = (pltfm->brt_ramp_law << LM3530_RAMP_LAW_SHIFT) |
150 ((pltfm->max_current & 7) << LM3530_MAX_CURR_SHIFT);
151
152 if (drvdata->mode == LM3530_BL_MODE_MANUAL ||
153 drvdata->mode == LM3530_BL_MODE_ALS)
154 gen_config |= (LM3530_ENABLE_I2C);
155
156 if (drvdata->mode == LM3530_BL_MODE_ALS) {
157 als_config =
158 (pltfm->als_avrg_time << LM3530_ALS_AVG_TIME_SHIFT) |
159 (LM3530_ENABLE_ALS) |
160 (pltfm->als_input_mode << LM3530_ALS_SEL_SHIFT);
161
162 als_imp_sel =
163 (pltfm->als1_resistor_sel << LM3530_ALS1_IMP_SHIFT) |
164 (pltfm->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT);
165 }
166
167 if (drvdata->mode == LM3530_BL_MODE_PWM)
168 gen_config |= (LM3530_ENABLE_PWM) |
169 (pltfm->pwm_pol_hi << LM3530_PWM_POL_SHIFT) |
170 (LM3530_ENABLE_PWM_SIMPLE);
171
172 brt_ramp = (pltfm->brt_ramp_fall << LM3530_BRT_RAMP_FALL_SHIFT) |
173 (pltfm->brt_ramp_rise << LM3530_BRT_RAMP_RISE_SHIFT);
174
175 brightness = pltfm->brt_val;
176
177 reg_val[0] = gen_config; /* LM3530_GEN_CONFIG */
178 reg_val[1] = als_config; /* LM3530_ALS_CONFIG */
179 reg_val[2] = brt_ramp; /* LM3530_BRT_RAMP_RATE */
180 reg_val[3] = 0x00; /* LM3530_ALS_ZONE_REG */
181 reg_val[4] = als_imp_sel; /* LM3530_ALS_IMP_SELECT */
182 reg_val[5] = brightness; /* LM3530_BRT_CTRL_REG */
183 reg_val[6] = LM3530_DEF_ZB_0; /* LM3530_ALS_ZB0_REG */
184 reg_val[7] = LM3530_DEF_ZB_1; /* LM3530_ALS_ZB1_REG */
185 reg_val[8] = LM3530_DEF_ZB_2; /* LM3530_ALS_ZB2_REG */
186 reg_val[9] = LM3530_DEF_ZB_3; /* LM3530_ALS_ZB3_REG */
187 reg_val[10] = LM3530_DEF_ZT_0; /* LM3530_ALS_Z0T_REG */
188 reg_val[11] = LM3530_DEF_ZT_1; /* LM3530_ALS_Z1T_REG */
189 reg_val[12] = LM3530_DEF_ZT_2; /* LM3530_ALS_Z2T_REG */
190 reg_val[13] = LM3530_DEF_ZT_3; /* LM3530_ALS_Z3T_REG */
191 reg_val[14] = LM3530_DEF_ZT_4; /* LM3530_ALS_Z4T_REG */
192
193 for (i = 0; i < LM3530_REG_MAX; i++) {
194 ret = i2c_smbus_write_byte_data(client,
195 lm3530_reg[i], reg_val[i]);
196 if (ret)
197 break;
198 }
199
200 return ret;
201}
202
203static void lm3530_brightness_set(struct led_classdev *led_cdev,
204 enum led_brightness brt_val)
205{
206 int err;
207 struct lm3530_data *drvdata =
208 container_of(led_cdev, struct lm3530_data, led_dev);
209
210 switch (drvdata->mode) {
211 case LM3530_BL_MODE_MANUAL:
212
213 /* set the brightness in brightness control register*/
214 err = i2c_smbus_write_byte_data(drvdata->client,
215 LM3530_BRT_CTRL_REG, brt_val / 2);
216 if (err)
217 dev_err(&drvdata->client->dev,
218 "Unable to set brightness: %d\n", err);
219 break;
220 case LM3530_BL_MODE_ALS:
221 break;
222 case LM3530_BL_MODE_PWM:
223 break;
224 default:
225 break;
226 }
227}
228
229
230static ssize_t lm3530_mode_set(struct device *dev, struct device_attribute
231 *attr, const char *buf, size_t size)
232{
233 int err;
234 struct i2c_client *client = container_of(
235 dev->parent, struct i2c_client, dev);
236 struct lm3530_data *drvdata = i2c_get_clientdata(client);
237 int mode;
238
239 mode = lm3530_get_mode_from_str(buf);
240 if (mode < 0) {
241 dev_err(dev, "Invalid mode\n");
242 return -EINVAL;
243 }
244
245 if (mode == LM3530_BL_MODE_MANUAL)
246 drvdata->mode = LM3530_BL_MODE_MANUAL;
247 else if (mode == LM3530_BL_MODE_ALS)
248 drvdata->mode = LM3530_BL_MODE_ALS;
249 else if (mode == LM3530_BL_MODE_PWM) {
250 dev_err(dev, "PWM mode not supported\n");
251 return -EINVAL;
252 }
253
254 err = lm3530_init_registers(drvdata);
255 if (err) {
256 dev_err(dev, "Setting %s Mode failed :%d\n", buf, err);
257 return err;
258 }
259
260 return sizeof(drvdata->mode);
261}
262
263static DEVICE_ATTR(mode, 0644, NULL, lm3530_mode_set);
264
265static int __devinit lm3530_probe(struct i2c_client *client,
266 const struct i2c_device_id *id)
267{
268 struct lm3530_platform_data *pdata = client->dev.platform_data;
269 struct lm3530_data *drvdata;
270 int err = 0;
271
272 if (pdata == NULL) {
273 dev_err(&client->dev, "platform data required\n");
274 err = -ENODEV;
275 goto err_out;
276 }
277
278 /* BL mode */
279 if (pdata->mode > LM3530_BL_MODE_PWM) {
280 dev_err(&client->dev, "Illegal Mode request\n");
281 err = -EINVAL;
282 goto err_out;
283 }
284
285 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
286 dev_err(&client->dev, "I2C_FUNC_I2C not supported\n");
287 err = -EIO;
288 goto err_out;
289 }
290
291 drvdata = kzalloc(sizeof(struct lm3530_data), GFP_KERNEL);
292 if (drvdata == NULL) {
293 err = -ENOMEM;
294 goto err_out;
295 }
296
297 drvdata->mode = pdata->mode;
298 drvdata->client = client;
299 drvdata->pdata = pdata;
300 drvdata->led_dev.name = LM3530_LED_DEV;
301 drvdata->led_dev.brightness_set = lm3530_brightness_set;
302
303 i2c_set_clientdata(client, drvdata);
304
305 err = lm3530_init_registers(drvdata);
306 if (err < 0) {
307 dev_err(&client->dev, "Register Init failed: %d\n", err);
308 err = -ENODEV;
309 goto err_reg_init;
310 }
311
312 err = led_classdev_register((struct device *)
313 &client->dev, &drvdata->led_dev);
314 if (err < 0) {
315 dev_err(&client->dev, "Register led class failed: %d\n", err);
316 err = -ENODEV;
317 goto err_class_register;
318 }
319
320 err = device_create_file(drvdata->led_dev.dev, &dev_attr_mode);
321 if (err < 0) {
322 dev_err(&client->dev, "File device creation failed: %d\n", err);
323 err = -ENODEV;
324 goto err_create_file;
325 }
326
327 return 0;
328
329err_create_file:
330 led_classdev_unregister(&drvdata->led_dev);
331err_class_register:
332err_reg_init:
333 kfree(drvdata);
334err_out:
335 return err;
336}
337
338static int __devexit lm3530_remove(struct i2c_client *client)
339{
340 struct lm3530_data *drvdata = i2c_get_clientdata(client);
341
342 device_remove_file(drvdata->led_dev.dev, &dev_attr_mode);
343 led_classdev_unregister(&drvdata->led_dev);
344 kfree(drvdata);
345 return 0;
346}
347
348static const struct i2c_device_id lm3530_id[] = {
349 {LM3530_NAME, 0},
350 {}
351};
352
353static struct i2c_driver lm3530_i2c_driver = {
354 .probe = lm3530_probe,
355 .remove = lm3530_remove,
356 .id_table = lm3530_id,
357 .driver = {
358 .name = LM3530_NAME,
359 .owner = THIS_MODULE,
360 },
361};
362
363static int __init lm3530_init(void)
364{
365 return i2c_add_driver(&lm3530_i2c_driver);
366}
367
368static void __exit lm3530_exit(void)
369{
370 i2c_del_driver(&lm3530_i2c_driver);
371}
372
373module_init(lm3530_init);
374module_exit(lm3530_exit);
375
376MODULE_DESCRIPTION("Back Light driver for LM3530");
377MODULE_LICENSE("GPL v2");
378MODULE_AUTHOR("Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>");
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 80a3ae3c00b9..c0cff64a1ae6 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -534,7 +534,7 @@ static ssize_t lp5521_selftest(struct device *dev,
534} 534}
535 535
536/* led class device attributes */ 536/* led class device attributes */
537static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current); 537static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, show_current, store_current);
538static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL); 538static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL);
539 539
540static struct attribute *lp5521_led_attributes[] = { 540static struct attribute *lp5521_led_attributes[] = {
@@ -548,15 +548,15 @@ static struct attribute_group lp5521_led_attribute_group = {
548}; 548};
549 549
550/* device attributes */ 550/* device attributes */
551static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO, 551static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUSR,
552 show_engine1_mode, store_engine1_mode); 552 show_engine1_mode, store_engine1_mode);
553static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO, 553static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUSR,
554 show_engine2_mode, store_engine2_mode); 554 show_engine2_mode, store_engine2_mode);
555static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO, 555static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUSR,
556 show_engine3_mode, store_engine3_mode); 556 show_engine3_mode, store_engine3_mode);
557static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load); 557static DEVICE_ATTR(engine1_load, S_IWUSR, NULL, store_engine1_load);
558static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load); 558static DEVICE_ATTR(engine2_load, S_IWUSR, NULL, store_engine2_load);
559static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load); 559static DEVICE_ATTR(engine3_load, S_IWUSR, NULL, store_engine3_load);
560static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL); 560static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL);
561 561
562static struct attribute *lp5521_attributes[] = { 562static struct attribute *lp5521_attributes[] = {
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index d0c4068ecddd..e19fed25f137 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -713,7 +713,7 @@ static ssize_t store_current(struct device *dev,
713} 713}
714 714
715/* led class device attributes */ 715/* led class device attributes */
716static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current); 716static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, show_current, store_current);
717static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL); 717static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL);
718 718
719static struct attribute *lp5523_led_attributes[] = { 719static struct attribute *lp5523_led_attributes[] = {
@@ -727,21 +727,21 @@ static struct attribute_group lp5523_led_attribute_group = {
727}; 727};
728 728
729/* device attributes */ 729/* device attributes */
730static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO, 730static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUSR,
731 show_engine1_mode, store_engine1_mode); 731 show_engine1_mode, store_engine1_mode);
732static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO, 732static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUSR,
733 show_engine2_mode, store_engine2_mode); 733 show_engine2_mode, store_engine2_mode);
734static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO, 734static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUSR,
735 show_engine3_mode, store_engine3_mode); 735 show_engine3_mode, store_engine3_mode);
736static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUGO, 736static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUSR,
737 show_engine1_leds, store_engine1_leds); 737 show_engine1_leds, store_engine1_leds);
738static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUGO, 738static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUSR,
739 show_engine2_leds, store_engine2_leds); 739 show_engine2_leds, store_engine2_leds);
740static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUGO, 740static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUSR,
741 show_engine3_leds, store_engine3_leds); 741 show_engine3_leds, store_engine3_leds);
742static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load); 742static DEVICE_ATTR(engine1_load, S_IWUSR, NULL, store_engine1_load);
743static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load); 743static DEVICE_ATTR(engine2_load, S_IWUSR, NULL, store_engine2_load);
744static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load); 744static DEVICE_ATTR(engine3_load, S_IWUSR, NULL, store_engine3_load);
745static DEVICE_ATTR(selftest, S_IRUGO, lp5523_selftest, NULL); 745static DEVICE_ATTR(selftest, S_IRUGO, lp5523_selftest, NULL);
746 746
747static struct attribute *lp5523_attributes[] = { 747static struct attribute *lp5523_attributes[] = {
diff --git a/drivers/leds/leds-net5501.c b/drivers/leds/leds-net5501.c
index 1739557a9038..7e764b8365e6 100644
--- a/drivers/leds/leds-net5501.c
+++ b/drivers/leds/leds-net5501.c
@@ -19,7 +19,7 @@
19 19
20#include <asm/geode.h> 20#include <asm/geode.h>
21 21
22static struct gpio_led net5501_leds[] = { 22static const struct gpio_led net5501_leds[] = {
23 { 23 {
24 .name = "error", 24 .name = "error",
25 .gpio = 6, 25 .gpio = 6,
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
index ade1e656bfb2..b1d91170ded0 100644
--- a/drivers/macintosh/via-pmu-backlight.c
+++ b/drivers/macintosh/via-pmu-backlight.c
@@ -163,6 +163,7 @@ void __init pmu_backlight_init()
163 snprintf(name, sizeof(name), "pmubl"); 163 snprintf(name, sizeof(name), "pmubl");
164 164
165 memset(&props, 0, sizeof(struct backlight_properties)); 165 memset(&props, 0, sizeof(struct backlight_properties));
166 props.type = BACKLIGHT_PLATFORM;
166 props.max_brightness = FB_BACKLIGHT_LEVELS - 1; 167 props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
167 bd = backlight_device_register(name, NULL, NULL, &pmu_backlight_data, 168 bd = backlight_device_register(name, NULL, NULL, &pmu_backlight_data,
168 &props); 169 &props);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 203500d9b848..4e007c6a4b44 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -402,6 +402,16 @@ config DS1682
402 This driver can also be built as a module. If so, the module 402 This driver can also be built as a module. If so, the module
403 will be called ds1682. 403 will be called ds1682.
404 404
405config SPEAR13XX_PCIE_GADGET
406 bool "PCIe gadget support for SPEAr13XX platform"
407 depends on ARCH_SPEAR13XX
408 default n
409 help
410 This option enables gadget support for PCIe controller. If
411 board file defines any controller as PCIe endpoint then a sysfs
412 entry will be created for that controller. User can use these
413 sysfs node to configure PCIe EP as per his requirements.
414
405config TI_DAC7512 415config TI_DAC7512
406 tristate "Texas Instruments DAC7512" 416 tristate "Texas Instruments DAC7512"
407 depends on SPI && SYSFS 417 depends on SPI && SYSFS
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 804f421bc079..f5468602961f 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
37obj-$(CONFIG_HMC6352) += hmc6352.o 37obj-$(CONFIG_HMC6352) += hmc6352.o
38obj-y += eeprom/ 38obj-y += eeprom/
39obj-y += cb710/ 39obj-y += cb710/
40obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o
40obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o 41obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
41obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o 42obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
42obj-$(CONFIG_PCH_PHUB) += pch_phub.o 43obj-$(CONFIG_PCH_PHUB) += pch_phub.o
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c
index 644d4cd071cc..81db7811cf68 100644
--- a/drivers/misc/apds9802als.c
+++ b/drivers/misc/apds9802als.c
@@ -245,9 +245,8 @@ static int apds9802als_probe(struct i2c_client *client,
245 als_set_default_config(client); 245 als_set_default_config(client);
246 mutex_init(&data->mutex); 246 mutex_init(&data->mutex);
247 247
248 pm_runtime_set_active(&client->dev);
248 pm_runtime_enable(&client->dev); 249 pm_runtime_enable(&client->dev);
249 pm_runtime_get(&client->dev);
250 pm_runtime_put(&client->dev);
251 250
252 return res; 251 return res;
253als_error1: 252als_error1:
@@ -255,12 +254,19 @@ als_error1:
255 return res; 254 return res;
256} 255}
257 256
258static int apds9802als_remove(struct i2c_client *client) 257static int __devexit apds9802als_remove(struct i2c_client *client)
259{ 258{
260 struct als_data *data = i2c_get_clientdata(client); 259 struct als_data *data = i2c_get_clientdata(client);
261 260
261 pm_runtime_get_sync(&client->dev);
262
262 als_set_power_state(client, false); 263 als_set_power_state(client, false);
263 sysfs_remove_group(&client->dev.kobj, &m_als_gr); 264 sysfs_remove_group(&client->dev.kobj, &m_als_gr);
265
266 pm_runtime_disable(&client->dev);
267 pm_runtime_set_suspended(&client->dev);
268 pm_runtime_put_noidle(&client->dev);
269
264 kfree(data); 270 kfree(data);
265 return 0; 271 return 0;
266} 272}
@@ -275,9 +281,6 @@ static int apds9802als_suspend(struct i2c_client *client, pm_message_t mesg)
275static int apds9802als_resume(struct i2c_client *client) 281static int apds9802als_resume(struct i2c_client *client)
276{ 282{
277 als_set_default_config(client); 283 als_set_default_config(client);
278
279 pm_runtime_get(&client->dev);
280 pm_runtime_put(&client->dev);
281 return 0; 284 return 0;
282} 285}
283 286
@@ -323,7 +326,7 @@ static struct i2c_driver apds9802als_driver = {
323 .pm = APDS9802ALS_PM_OPS, 326 .pm = APDS9802ALS_PM_OPS,
324 }, 327 },
325 .probe = apds9802als_probe, 328 .probe = apds9802als_probe,
326 .remove = apds9802als_remove, 329 .remove = __devexit_p(apds9802als_remove),
327 .suspend = apds9802als_suspend, 330 .suspend = apds9802als_suspend,
328 .resume = apds9802als_resume, 331 .resume = apds9802als_resume,
329 .id_table = apds9802als_id, 332 .id_table = apds9802als_id,
diff --git a/drivers/misc/atmel_tclib.c b/drivers/misc/atmel_tclib.c
index 3891124001f2..a844810b50f6 100644
--- a/drivers/misc/atmel_tclib.c
+++ b/drivers/misc/atmel_tclib.c
@@ -75,7 +75,7 @@ out:
75 return tc; 75 return tc;
76 76
77fail_ioremap: 77fail_ioremap:
78 release_resource(r); 78 release_mem_region(r->start, ATMEL_TC_IOMEM_SIZE);
79fail: 79fail:
80 tc = NULL; 80 tc = NULL;
81 goto out; 81 goto out;
@@ -95,7 +95,7 @@ void atmel_tc_free(struct atmel_tc *tc)
95 spin_lock(&tc_list_lock); 95 spin_lock(&tc_list_lock);
96 if (tc->regs) { 96 if (tc->regs) {
97 iounmap(tc->regs); 97 iounmap(tc->regs);
98 release_resource(tc->iomem); 98 release_mem_region(tc->iomem->start, ATMEL_TC_IOMEM_SIZE);
99 tc->regs = NULL; 99 tc->regs = NULL;
100 tc->iomem = NULL; 100 tc->iomem = NULL;
101 } 101 }
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
index d5f3a3fd2319..d07cd67c951c 100644
--- a/drivers/misc/bh1780gli.c
+++ b/drivers/misc/bh1780gli.c
@@ -196,10 +196,11 @@ static int __devexit bh1780_remove(struct i2c_client *client)
196} 196}
197 197
198#ifdef CONFIG_PM 198#ifdef CONFIG_PM
199static int bh1780_suspend(struct i2c_client *client, pm_message_t mesg) 199static int bh1780_suspend(struct device *dev)
200{ 200{
201 struct bh1780_data *ddata; 201 struct bh1780_data *ddata;
202 int state, ret; 202 int state, ret;
203 struct i2c_client *client = to_i2c_client(dev);
203 204
204 ddata = i2c_get_clientdata(client); 205 ddata = i2c_get_clientdata(client);
205 state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL"); 206 state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
@@ -217,14 +218,14 @@ static int bh1780_suspend(struct i2c_client *client, pm_message_t mesg)
217 return 0; 218 return 0;
218} 219}
219 220
220static int bh1780_resume(struct i2c_client *client) 221static int bh1780_resume(struct device *dev)
221{ 222{
222 struct bh1780_data *ddata; 223 struct bh1780_data *ddata;
223 int state, ret; 224 int state, ret;
225 struct i2c_client *client = to_i2c_client(dev);
224 226
225 ddata = i2c_get_clientdata(client); 227 ddata = i2c_get_clientdata(client);
226 state = ddata->power_state; 228 state = ddata->power_state;
227
228 ret = bh1780_write(ddata, BH1780_REG_CONTROL, state, 229 ret = bh1780_write(ddata, BH1780_REG_CONTROL, state,
229 "CONTROL"); 230 "CONTROL");
230 231
@@ -233,9 +234,10 @@ static int bh1780_resume(struct i2c_client *client)
233 234
234 return 0; 235 return 0;
235} 236}
237static SIMPLE_DEV_PM_OPS(bh1780_pm, bh1780_suspend, bh1780_resume);
238#define BH1780_PMOPS (&bh1780_pm)
236#else 239#else
237#define bh1780_suspend NULL 240#define BH1780_PMOPS NULL
238#define bh1780_resume NULL
239#endif /* CONFIG_PM */ 241#endif /* CONFIG_PM */
240 242
241static const struct i2c_device_id bh1780_id[] = { 243static const struct i2c_device_id bh1780_id[] = {
@@ -247,11 +249,10 @@ static struct i2c_driver bh1780_driver = {
247 .probe = bh1780_probe, 249 .probe = bh1780_probe,
248 .remove = bh1780_remove, 250 .remove = bh1780_remove,
249 .id_table = bh1780_id, 251 .id_table = bh1780_id,
250 .suspend = bh1780_suspend,
251 .resume = bh1780_resume,
252 .driver = { 252 .driver = {
253 .name = "bh1780" 253 .name = "bh1780",
254 }, 254 .pm = BH1780_PMOPS,
255},
255}; 256};
256 257
257static int __init bh1780_init(void) 258static int __init bh1780_init(void)
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
index b6e1c9a6679e..ecd276ad6b19 100644
--- a/drivers/misc/bmp085.c
+++ b/drivers/misc/bmp085.c
@@ -402,7 +402,7 @@ exit:
402 return status; 402 return status;
403} 403}
404 404
405static int bmp085_probe(struct i2c_client *client, 405static int __devinit bmp085_probe(struct i2c_client *client,
406 const struct i2c_device_id *id) 406 const struct i2c_device_id *id)
407{ 407{
408 struct bmp085_data *data; 408 struct bmp085_data *data;
@@ -438,7 +438,7 @@ exit:
438 return err; 438 return err;
439} 439}
440 440
441static int bmp085_remove(struct i2c_client *client) 441static int __devexit bmp085_remove(struct i2c_client *client)
442{ 442{
443 sysfs_remove_group(&client->dev.kobj, &bmp085_attr_group); 443 sysfs_remove_group(&client->dev.kobj, &bmp085_attr_group);
444 kfree(i2c_get_clientdata(client)); 444 kfree(i2c_get_clientdata(client));
@@ -458,7 +458,7 @@ static struct i2c_driver bmp085_driver = {
458 }, 458 },
459 .id_table = bmp085_id, 459 .id_table = bmp085_id,
460 .probe = bmp085_probe, 460 .probe = bmp085_probe,
461 .remove = bmp085_remove, 461 .remove = __devexit_p(bmp085_remove),
462 462
463 .detect = bmp085_detect, 463 .detect = bmp085_detect,
464 .address_list = normal_i2c 464 .address_list = normal_i2c
diff --git a/drivers/misc/ep93xx_pwm.c b/drivers/misc/ep93xx_pwm.c
index 46b3439673e9..16d7179e2f9b 100644
--- a/drivers/misc/ep93xx_pwm.c
+++ b/drivers/misc/ep93xx_pwm.c
@@ -249,11 +249,11 @@ static ssize_t ep93xx_pwm_set_invert(struct device *dev,
249 249
250static DEVICE_ATTR(min_freq, S_IRUGO, ep93xx_pwm_get_min_freq, NULL); 250static DEVICE_ATTR(min_freq, S_IRUGO, ep93xx_pwm_get_min_freq, NULL);
251static DEVICE_ATTR(max_freq, S_IRUGO, ep93xx_pwm_get_max_freq, NULL); 251static DEVICE_ATTR(max_freq, S_IRUGO, ep93xx_pwm_get_max_freq, NULL);
252static DEVICE_ATTR(freq, S_IWUGO | S_IRUGO, 252static DEVICE_ATTR(freq, S_IWUSR | S_IRUGO,
253 ep93xx_pwm_get_freq, ep93xx_pwm_set_freq); 253 ep93xx_pwm_get_freq, ep93xx_pwm_set_freq);
254static DEVICE_ATTR(duty_percent, S_IWUGO | S_IRUGO, 254static DEVICE_ATTR(duty_percent, S_IWUSR | S_IRUGO,
255 ep93xx_pwm_get_duty_percent, ep93xx_pwm_set_duty_percent); 255 ep93xx_pwm_get_duty_percent, ep93xx_pwm_set_duty_percent);
256static DEVICE_ATTR(invert, S_IWUGO | S_IRUGO, 256static DEVICE_ATTR(invert, S_IWUSR | S_IRUGO,
257 ep93xx_pwm_get_invert, ep93xx_pwm_set_invert); 257 ep93xx_pwm_get_invert, ep93xx_pwm_set_invert);
258 258
259static struct attribute *ep93xx_pwm_attrs[] = { 259static struct attribute *ep93xx_pwm_attrs[] = {
diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c
index 234bfcaf2099..ca938fc8a8d6 100644
--- a/drivers/misc/hmc6352.c
+++ b/drivers/misc/hmc6352.c
@@ -75,7 +75,7 @@ static ssize_t compass_heading_data_show(struct device *dev,
75{ 75{
76 struct i2c_client *client = to_i2c_client(dev); 76 struct i2c_client *client = to_i2c_client(dev);
77 unsigned char i2c_data[2]; 77 unsigned char i2c_data[2];
78 unsigned int ret; 78 int ret;
79 79
80 mutex_lock(&compass_mutex); 80 mutex_lock(&compass_mutex);
81 ret = compass_command(client, 'A'); 81 ret = compass_command(client, 'A');
@@ -86,7 +86,7 @@ static ssize_t compass_heading_data_show(struct device *dev,
86 msleep(10); /* sending 'A' cmd we need to wait for 7-10 millisecs */ 86 msleep(10); /* sending 'A' cmd we need to wait for 7-10 millisecs */
87 ret = i2c_master_recv(client, i2c_data, 2); 87 ret = i2c_master_recv(client, i2c_data, 2);
88 mutex_unlock(&compass_mutex); 88 mutex_unlock(&compass_mutex);
89 if (ret != 1) { 89 if (ret < 0) {
90 dev_warn(dev, "i2c read data cmd failed\n"); 90 dev_warn(dev, "i2c read data cmd failed\n");
91 return ret; 91 return ret;
92 } 92 }
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index 380ba806495d..a19cb710a246 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -735,6 +735,7 @@ static struct pci_device_id pch_phub_pcidev_id[] = {
735 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2, }, 735 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2, },
736 { } 736 { }
737}; 737};
738MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id);
738 739
739static struct pci_driver pch_phub_driver = { 740static struct pci_driver pch_phub_driver = {
740 .name = "pch_phub", 741 .name = "pch_phub",
diff --git a/drivers/misc/spear13xx_pcie_gadget.c b/drivers/misc/spear13xx_pcie_gadget.c
new file mode 100644
index 000000000000..ec3b8c911833
--- /dev/null
+++ b/drivers/misc/spear13xx_pcie_gadget.c
@@ -0,0 +1,908 @@
1/*
2 * drivers/misc/spear13xx_pcie_gadget.c
3 *
4 * Copyright (C) 2010 ST Microelectronics
5 * Pratyush Anand<pratyush.anand@st.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/clk.h>
13#include <linux/slab.h>
14#include <linux/delay.h>
15#include <linux/io.h>
16#include <linux/interrupt.h>
17#include <linux/irq.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/platform_device.h>
21#include <linux/pci_regs.h>
22#include <linux/configfs.h>
23#include <mach/pcie.h>
24#include <mach/misc_regs.h>
25
26#define IN0_MEM_SIZE (200 * 1024 * 1024 - 1)
27/* In current implementation address translation is done using IN0 only.
28 * So IN1 start address and IN0 end address has been kept same
29*/
30#define IN1_MEM_SIZE (0 * 1024 * 1024 - 1)
31#define IN_IO_SIZE (20 * 1024 * 1024 - 1)
32#define IN_CFG0_SIZE (12 * 1024 * 1024 - 1)
33#define IN_CFG1_SIZE (12 * 1024 * 1024 - 1)
34#define IN_MSG_SIZE (12 * 1024 * 1024 - 1)
35/* Keep default BAR size as 4K*/
36/* AORAM would be mapped by default*/
37#define INBOUND_ADDR_MASK (SPEAR13XX_SYSRAM1_SIZE - 1)
38
39#define INT_TYPE_NO_INT 0
40#define INT_TYPE_INTX 1
41#define INT_TYPE_MSI 2
42struct spear_pcie_gadget_config {
43 void __iomem *base;
44 void __iomem *va_app_base;
45 void __iomem *va_dbi_base;
46 char int_type[10];
47 ulong requested_msi;
48 ulong configured_msi;
49 ulong bar0_size;
50 ulong bar0_rw_offset;
51 void __iomem *va_bar0_address;
52};
53
54struct pcie_gadget_target {
55 struct configfs_subsystem subsys;
56 struct spear_pcie_gadget_config config;
57};
58
59struct pcie_gadget_target_attr {
60 struct configfs_attribute attr;
61 ssize_t (*show)(struct spear_pcie_gadget_config *config,
62 char *buf);
63 ssize_t (*store)(struct spear_pcie_gadget_config *config,
64 const char *buf,
65 size_t count);
66};
67
68static void enable_dbi_access(struct pcie_app_reg __iomem *app_reg)
69{
70 /* Enable DBI access */
71 writel(readl(&app_reg->slv_armisc) | (1 << AXI_OP_DBI_ACCESS_ID),
72 &app_reg->slv_armisc);
73 writel(readl(&app_reg->slv_awmisc) | (1 << AXI_OP_DBI_ACCESS_ID),
74 &app_reg->slv_awmisc);
75
76}
77
78static void disable_dbi_access(struct pcie_app_reg __iomem *app_reg)
79{
80 /* disable DBI access */
81 writel(readl(&app_reg->slv_armisc) & ~(1 << AXI_OP_DBI_ACCESS_ID),
82 &app_reg->slv_armisc);
83 writel(readl(&app_reg->slv_awmisc) & ~(1 << AXI_OP_DBI_ACCESS_ID),
84 &app_reg->slv_awmisc);
85
86}
87
88static void spear_dbi_read_reg(struct spear_pcie_gadget_config *config,
89 int where, int size, u32 *val)
90{
91 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
92 ulong va_address;
93
94 /* Enable DBI access */
95 enable_dbi_access(app_reg);
96
97 va_address = (ulong)config->va_dbi_base + (where & ~0x3);
98
99 *val = readl(va_address);
100
101 if (size == 1)
102 *val = (*val >> (8 * (where & 3))) & 0xff;
103 else if (size == 2)
104 *val = (*val >> (8 * (where & 3))) & 0xffff;
105
106 /* Disable DBI access */
107 disable_dbi_access(app_reg);
108}
109
110static void spear_dbi_write_reg(struct spear_pcie_gadget_config *config,
111 int where, int size, u32 val)
112{
113 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
114 ulong va_address;
115
116 /* Enable DBI access */
117 enable_dbi_access(app_reg);
118
119 va_address = (ulong)config->va_dbi_base + (where & ~0x3);
120
121 if (size == 4)
122 writel(val, va_address);
123 else if (size == 2)
124 writew(val, va_address + (where & 2));
125 else if (size == 1)
126 writeb(val, va_address + (where & 3));
127
128 /* Disable DBI access */
129 disable_dbi_access(app_reg);
130}
131
132#define PCI_FIND_CAP_TTL 48
133
134static int pci_find_own_next_cap_ttl(struct spear_pcie_gadget_config *config,
135 u32 pos, int cap, int *ttl)
136{
137 u32 id;
138
139 while ((*ttl)--) {
140 spear_dbi_read_reg(config, pos, 1, &pos);
141 if (pos < 0x40)
142 break;
143 pos &= ~3;
144 spear_dbi_read_reg(config, pos + PCI_CAP_LIST_ID, 1, &id);
145 if (id == 0xff)
146 break;
147 if (id == cap)
148 return pos;
149 pos += PCI_CAP_LIST_NEXT;
150 }
151 return 0;
152}
153
154static int pci_find_own_next_cap(struct spear_pcie_gadget_config *config,
155 u32 pos, int cap)
156{
157 int ttl = PCI_FIND_CAP_TTL;
158
159 return pci_find_own_next_cap_ttl(config, pos, cap, &ttl);
160}
161
162static int pci_find_own_cap_start(struct spear_pcie_gadget_config *config,
163 u8 hdr_type)
164{
165 u32 status;
166
167 spear_dbi_read_reg(config, PCI_STATUS, 2, &status);
168 if (!(status & PCI_STATUS_CAP_LIST))
169 return 0;
170
171 switch (hdr_type) {
172 case PCI_HEADER_TYPE_NORMAL:
173 case PCI_HEADER_TYPE_BRIDGE:
174 return PCI_CAPABILITY_LIST;
175 case PCI_HEADER_TYPE_CARDBUS:
176 return PCI_CB_CAPABILITY_LIST;
177 default:
178 return 0;
179 }
180
181 return 0;
182}
183
184/*
185 * Tell if a device supports a given PCI capability.
186 * Returns the address of the requested capability structure within the
187 * device's PCI configuration space or 0 in case the device does not
188 * support it. Possible values for @cap:
189 *
190 * %PCI_CAP_ID_PM Power Management
191 * %PCI_CAP_ID_AGP Accelerated Graphics Port
192 * %PCI_CAP_ID_VPD Vital Product Data
193 * %PCI_CAP_ID_SLOTID Slot Identification
194 * %PCI_CAP_ID_MSI Message Signalled Interrupts
195 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
196 * %PCI_CAP_ID_PCIX PCI-X
197 * %PCI_CAP_ID_EXP PCI Express
198 */
199static int pci_find_own_capability(struct spear_pcie_gadget_config *config,
200 int cap)
201{
202 u32 pos;
203 u32 hdr_type;
204
205 spear_dbi_read_reg(config, PCI_HEADER_TYPE, 1, &hdr_type);
206
207 pos = pci_find_own_cap_start(config, hdr_type);
208 if (pos)
209 pos = pci_find_own_next_cap(config, pos, cap);
210
211 return pos;
212}
213
214static irqreturn_t spear_pcie_gadget_irq(int irq, void *dev_id)
215{
216 return 0;
217}
218
219/*
220 * configfs interfaces show/store functions
221 */
222static ssize_t pcie_gadget_show_link(
223 struct spear_pcie_gadget_config *config,
224 char *buf)
225{
226 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
227
228 if (readl(&app_reg->app_status_1) & ((u32)1 << XMLH_LINK_UP_ID))
229 return sprintf(buf, "UP");
230 else
231 return sprintf(buf, "DOWN");
232}
233
234static ssize_t pcie_gadget_store_link(
235 struct spear_pcie_gadget_config *config,
236 const char *buf, size_t count)
237{
238 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
239
240 if (sysfs_streq(buf, "UP"))
241 writel(readl(&app_reg->app_ctrl_0) | (1 << APP_LTSSM_ENABLE_ID),
242 &app_reg->app_ctrl_0);
243 else if (sysfs_streq(buf, "DOWN"))
244 writel(readl(&app_reg->app_ctrl_0)
245 & ~(1 << APP_LTSSM_ENABLE_ID),
246 &app_reg->app_ctrl_0);
247 else
248 return -EINVAL;
249 return count;
250}
251
252static ssize_t pcie_gadget_show_int_type(
253 struct spear_pcie_gadget_config *config,
254 char *buf)
255{
256 return sprintf(buf, "%s", config->int_type);
257}
258
259static ssize_t pcie_gadget_store_int_type(
260 struct spear_pcie_gadget_config *config,
261 const char *buf, size_t count)
262{
263 u32 cap, vec, flags;
264 ulong vector;
265
266 if (sysfs_streq(buf, "INTA"))
267 spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
268
269 else if (sysfs_streq(buf, "MSI")) {
270 vector = config->requested_msi;
271 vec = 0;
272 while (vector > 1) {
273 vector /= 2;
274 vec++;
275 }
276 spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 0);
277 cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
278 spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
279 flags &= ~PCI_MSI_FLAGS_QMASK;
280 flags |= vec << 1;
281 spear_dbi_write_reg(config, cap + PCI_MSI_FLAGS, 1, flags);
282 } else
283 return -EINVAL;
284
285 strcpy(config->int_type, buf);
286
287 return count;
288}
289
290static ssize_t pcie_gadget_show_no_of_msi(
291 struct spear_pcie_gadget_config *config,
292 char *buf)
293{
294 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
295 u32 cap, vec, flags;
296 ulong vector;
297
298 if ((readl(&app_reg->msg_status) & (1 << CFG_MSI_EN_ID))
299 != (1 << CFG_MSI_EN_ID))
300 vector = 0;
301 else {
302 cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
303 spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
304 flags &= ~PCI_MSI_FLAGS_QSIZE;
305 vec = flags >> 4;
306 vector = 1;
307 while (vec--)
308 vector *= 2;
309 }
310 config->configured_msi = vector;
311
312 return sprintf(buf, "%lu", vector);
313}
314
315static ssize_t pcie_gadget_store_no_of_msi(
316 struct spear_pcie_gadget_config *config,
317 const char *buf, size_t count)
318{
319 if (strict_strtoul(buf, 0, &config->requested_msi))
320 return -EINVAL;
321 if (config->requested_msi > 32)
322 config->requested_msi = 32;
323
324 return count;
325}
326
327static ssize_t pcie_gadget_store_inta(
328 struct spear_pcie_gadget_config *config,
329 const char *buf, size_t count)
330{
331 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
332 ulong en;
333
334 if (strict_strtoul(buf, 0, &en))
335 return -EINVAL;
336
337 if (en)
338 writel(readl(&app_reg->app_ctrl_0) | (1 << SYS_INT_ID),
339 &app_reg->app_ctrl_0);
340 else
341 writel(readl(&app_reg->app_ctrl_0) & ~(1 << SYS_INT_ID),
342 &app_reg->app_ctrl_0);
343
344 return count;
345}
346
347static ssize_t pcie_gadget_store_send_msi(
348 struct spear_pcie_gadget_config *config,
349 const char *buf, size_t count)
350{
351 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
352 ulong vector;
353 u32 ven_msi;
354
355 if (strict_strtoul(buf, 0, &vector))
356 return -EINVAL;
357
358 if (!config->configured_msi)
359 return -EINVAL;
360
361 if (vector >= config->configured_msi)
362 return -EINVAL;
363
364 ven_msi = readl(&app_reg->ven_msi_1);
365 ven_msi &= ~VEN_MSI_FUN_NUM_MASK;
366 ven_msi |= 0 << VEN_MSI_FUN_NUM_ID;
367 ven_msi &= ~VEN_MSI_TC_MASK;
368 ven_msi |= 0 << VEN_MSI_TC_ID;
369 ven_msi &= ~VEN_MSI_VECTOR_MASK;
370 ven_msi |= vector << VEN_MSI_VECTOR_ID;
371
372 /* generating interrupt for msi vector */
373 ven_msi |= VEN_MSI_REQ_EN;
374 writel(ven_msi, &app_reg->ven_msi_1);
375 udelay(1);
376 ven_msi &= ~VEN_MSI_REQ_EN;
377 writel(ven_msi, &app_reg->ven_msi_1);
378
379 return count;
380}
381
382static ssize_t pcie_gadget_show_vendor_id(
383 struct spear_pcie_gadget_config *config,
384 char *buf)
385{
386 u32 id;
387
388 spear_dbi_read_reg(config, PCI_VENDOR_ID, 2, &id);
389
390 return sprintf(buf, "%x", id);
391}
392
393static ssize_t pcie_gadget_store_vendor_id(
394 struct spear_pcie_gadget_config *config,
395 const char *buf, size_t count)
396{
397 ulong id;
398
399 if (strict_strtoul(buf, 0, &id))
400 return -EINVAL;
401
402 spear_dbi_write_reg(config, PCI_VENDOR_ID, 2, id);
403
404 return count;
405}
406
407static ssize_t pcie_gadget_show_device_id(
408 struct spear_pcie_gadget_config *config,
409 char *buf)
410{
411 u32 id;
412
413 spear_dbi_read_reg(config, PCI_DEVICE_ID, 2, &id);
414
415 return sprintf(buf, "%x", id);
416}
417
418static ssize_t pcie_gadget_store_device_id(
419 struct spear_pcie_gadget_config *config,
420 const char *buf, size_t count)
421{
422 ulong id;
423
424 if (strict_strtoul(buf, 0, &id))
425 return -EINVAL;
426
427 spear_dbi_write_reg(config, PCI_DEVICE_ID, 2, id);
428
429 return count;
430}
431
432static ssize_t pcie_gadget_show_bar0_size(
433 struct spear_pcie_gadget_config *config,
434 char *buf)
435{
436 return sprintf(buf, "%lx", config->bar0_size);
437}
438
439static ssize_t pcie_gadget_store_bar0_size(
440 struct spear_pcie_gadget_config *config,
441 const char *buf, size_t count)
442{
443 ulong size;
444 u32 pos, pos1;
445 u32 no_of_bit = 0;
446
447 if (strict_strtoul(buf, 0, &size))
448 return -EINVAL;
449 /* min bar size is 256 */
450 if (size <= 0x100)
451 size = 0x100;
452 /* max bar size is 1MB*/
453 else if (size >= 0x100000)
454 size = 0x100000;
455 else {
456 pos = 0;
457 pos1 = 0;
458 while (pos < 21) {
459 pos = find_next_bit((ulong *)&size, 21, pos);
460 if (pos != 21)
461 pos1 = pos + 1;
462 pos++;
463 no_of_bit++;
464 }
465 if (no_of_bit == 2)
466 pos1--;
467
468 size = 1 << pos1;
469 }
470 config->bar0_size = size;
471 spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, size - 1);
472
473 return count;
474}
475
476static ssize_t pcie_gadget_show_bar0_address(
477 struct spear_pcie_gadget_config *config,
478 char *buf)
479{
480 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
481
482 u32 address = readl(&app_reg->pim0_mem_addr_start);
483
484 return sprintf(buf, "%x", address);
485}
486
487static ssize_t pcie_gadget_store_bar0_address(
488 struct spear_pcie_gadget_config *config,
489 const char *buf, size_t count)
490{
491 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
492 ulong address;
493
494 if (strict_strtoul(buf, 0, &address))
495 return -EINVAL;
496
497 address &= ~(config->bar0_size - 1);
498 if (config->va_bar0_address)
499 iounmap(config->va_bar0_address);
500 config->va_bar0_address = ioremap(address, config->bar0_size);
501 if (!config->va_bar0_address)
502 return -ENOMEM;
503
504 writel(address, &app_reg->pim0_mem_addr_start);
505
506 return count;
507}
508
509static ssize_t pcie_gadget_show_bar0_rw_offset(
510 struct spear_pcie_gadget_config *config,
511 char *buf)
512{
513 return sprintf(buf, "%lx", config->bar0_rw_offset);
514}
515
516static ssize_t pcie_gadget_store_bar0_rw_offset(
517 struct spear_pcie_gadget_config *config,
518 const char *buf, size_t count)
519{
520 ulong offset;
521
522 if (strict_strtoul(buf, 0, &offset))
523 return -EINVAL;
524
525 if (offset % 4)
526 return -EINVAL;
527
528 config->bar0_rw_offset = offset;
529
530 return count;
531}
532
533static ssize_t pcie_gadget_show_bar0_data(
534 struct spear_pcie_gadget_config *config,
535 char *buf)
536{
537 ulong data;
538
539 if (!config->va_bar0_address)
540 return -ENOMEM;
541
542 data = readl((ulong)config->va_bar0_address + config->bar0_rw_offset);
543
544 return sprintf(buf, "%lx", data);
545}
546
547static ssize_t pcie_gadget_store_bar0_data(
548 struct spear_pcie_gadget_config *config,
549 const char *buf, size_t count)
550{
551 ulong data;
552
553 if (strict_strtoul(buf, 0, &data))
554 return -EINVAL;
555
556 if (!config->va_bar0_address)
557 return -ENOMEM;
558
559 writel(data, (ulong)config->va_bar0_address + config->bar0_rw_offset);
560
561 return count;
562}
563
564/*
565 * Attribute definitions.
566 */
567
568#define PCIE_GADGET_TARGET_ATTR_RO(_name) \
569static struct pcie_gadget_target_attr pcie_gadget_target_##_name = \
570 __CONFIGFS_ATTR(_name, S_IRUGO, pcie_gadget_show_##_name, NULL)
571
572#define PCIE_GADGET_TARGET_ATTR_WO(_name) \
573static struct pcie_gadget_target_attr pcie_gadget_target_##_name = \
574 __CONFIGFS_ATTR(_name, S_IWUSR, NULL, pcie_gadget_store_##_name)
575
576#define PCIE_GADGET_TARGET_ATTR_RW(_name) \
577static struct pcie_gadget_target_attr pcie_gadget_target_##_name = \
578 __CONFIGFS_ATTR(_name, S_IRUGO | S_IWUSR, pcie_gadget_show_##_name, \
579 pcie_gadget_store_##_name)
580PCIE_GADGET_TARGET_ATTR_RW(link);
581PCIE_GADGET_TARGET_ATTR_RW(int_type);
582PCIE_GADGET_TARGET_ATTR_RW(no_of_msi);
583PCIE_GADGET_TARGET_ATTR_WO(inta);
584PCIE_GADGET_TARGET_ATTR_WO(send_msi);
585PCIE_GADGET_TARGET_ATTR_RW(vendor_id);
586PCIE_GADGET_TARGET_ATTR_RW(device_id);
587PCIE_GADGET_TARGET_ATTR_RW(bar0_size);
588PCIE_GADGET_TARGET_ATTR_RW(bar0_address);
589PCIE_GADGET_TARGET_ATTR_RW(bar0_rw_offset);
590PCIE_GADGET_TARGET_ATTR_RW(bar0_data);
591
592static struct configfs_attribute *pcie_gadget_target_attrs[] = {
593 &pcie_gadget_target_link.attr,
594 &pcie_gadget_target_int_type.attr,
595 &pcie_gadget_target_no_of_msi.attr,
596 &pcie_gadget_target_inta.attr,
597 &pcie_gadget_target_send_msi.attr,
598 &pcie_gadget_target_vendor_id.attr,
599 &pcie_gadget_target_device_id.attr,
600 &pcie_gadget_target_bar0_size.attr,
601 &pcie_gadget_target_bar0_address.attr,
602 &pcie_gadget_target_bar0_rw_offset.attr,
603 &pcie_gadget_target_bar0_data.attr,
604 NULL,
605};
606
607static struct pcie_gadget_target *to_target(struct config_item *item)
608{
609 return item ?
610 container_of(to_configfs_subsystem(to_config_group(item)),
611 struct pcie_gadget_target, subsys) : NULL;
612}
613
614/*
615 * Item operations and type for pcie_gadget_target.
616 */
617
618static ssize_t pcie_gadget_target_attr_show(struct config_item *item,
619 struct configfs_attribute *attr,
620 char *buf)
621{
622 ssize_t ret = -EINVAL;
623 struct pcie_gadget_target *target = to_target(item);
624 struct pcie_gadget_target_attr *t_attr =
625 container_of(attr, struct pcie_gadget_target_attr, attr);
626
627 if (t_attr->show)
628 ret = t_attr->show(&target->config, buf);
629 return ret;
630}
631
632static ssize_t pcie_gadget_target_attr_store(struct config_item *item,
633 struct configfs_attribute *attr,
634 const char *buf,
635 size_t count)
636{
637 ssize_t ret = -EINVAL;
638 struct pcie_gadget_target *target = to_target(item);
639 struct pcie_gadget_target_attr *t_attr =
640 container_of(attr, struct pcie_gadget_target_attr, attr);
641
642 if (t_attr->store)
643 ret = t_attr->store(&target->config, buf, count);
644 return ret;
645}
646
647static struct configfs_item_operations pcie_gadget_target_item_ops = {
648 .show_attribute = pcie_gadget_target_attr_show,
649 .store_attribute = pcie_gadget_target_attr_store,
650};
651
652static struct config_item_type pcie_gadget_target_type = {
653 .ct_attrs = pcie_gadget_target_attrs,
654 .ct_item_ops = &pcie_gadget_target_item_ops,
655 .ct_owner = THIS_MODULE,
656};
657
658static void spear13xx_pcie_device_init(struct spear_pcie_gadget_config *config)
659{
660 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
661
662 /*setup registers for outbound translation */
663
664 writel(config->base, &app_reg->in0_mem_addr_start);
665 writel(app_reg->in0_mem_addr_start + IN0_MEM_SIZE,
666 &app_reg->in0_mem_addr_limit);
667 writel(app_reg->in0_mem_addr_limit + 1, &app_reg->in1_mem_addr_start);
668 writel(app_reg->in1_mem_addr_start + IN1_MEM_SIZE,
669 &app_reg->in1_mem_addr_limit);
670 writel(app_reg->in1_mem_addr_limit + 1, &app_reg->in_io_addr_start);
671 writel(app_reg->in_io_addr_start + IN_IO_SIZE,
672 &app_reg->in_io_addr_limit);
673 writel(app_reg->in_io_addr_limit + 1, &app_reg->in_cfg0_addr_start);
674 writel(app_reg->in_cfg0_addr_start + IN_CFG0_SIZE,
675 &app_reg->in_cfg0_addr_limit);
676 writel(app_reg->in_cfg0_addr_limit + 1, &app_reg->in_cfg1_addr_start);
677 writel(app_reg->in_cfg1_addr_start + IN_CFG1_SIZE,
678 &app_reg->in_cfg1_addr_limit);
679 writel(app_reg->in_cfg1_addr_limit + 1, &app_reg->in_msg_addr_start);
680 writel(app_reg->in_msg_addr_start + IN_MSG_SIZE,
681 &app_reg->in_msg_addr_limit);
682
683 writel(app_reg->in0_mem_addr_start, &app_reg->pom0_mem_addr_start);
684 writel(app_reg->in1_mem_addr_start, &app_reg->pom1_mem_addr_start);
685 writel(app_reg->in_io_addr_start, &app_reg->pom_io_addr_start);
686
687 /*setup registers for inbound translation */
688
689 /* Keep AORAM mapped at BAR0 as default */
690 config->bar0_size = INBOUND_ADDR_MASK + 1;
691 spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, INBOUND_ADDR_MASK);
692 spear_dbi_write_reg(config, PCI_BASE_ADDRESS_0, 4, 0xC);
693 config->va_bar0_address = ioremap(SPEAR13XX_SYSRAM1_BASE,
694 config->bar0_size);
695
696 writel(SPEAR13XX_SYSRAM1_BASE, &app_reg->pim0_mem_addr_start);
697 writel(0, &app_reg->pim1_mem_addr_start);
698 writel(INBOUND_ADDR_MASK + 1, &app_reg->mem0_addr_offset_limit);
699
700 writel(0x0, &app_reg->pim_io_addr_start);
701 writel(0x0, &app_reg->pim_io_addr_start);
702 writel(0x0, &app_reg->pim_rom_addr_start);
703
704 writel(DEVICE_TYPE_EP | (1 << MISCTRL_EN_ID)
705 | ((u32)1 << REG_TRANSLATION_ENABLE),
706 &app_reg->app_ctrl_0);
707 /* disable all rx interrupts */
708 writel(0, &app_reg->int_mask);
709
710 /* Select INTA as default*/
711 spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
712}
713
714static int __devinit spear_pcie_gadget_probe(struct platform_device *pdev)
715{
716 struct resource *res0, *res1;
717 unsigned int status = 0;
718 int irq;
719 struct clk *clk;
720 static struct pcie_gadget_target *target;
721 struct spear_pcie_gadget_config *config;
722 struct config_item *cg_item;
723 struct configfs_subsystem *subsys;
724
725 /* get resource for application registers*/
726
727 res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
728 if (!res0) {
729 dev_err(&pdev->dev, "no resource defined\n");
730 return -EBUSY;
731 }
732 if (!request_mem_region(res0->start, resource_size(res0),
733 pdev->name)) {
734 dev_err(&pdev->dev, "pcie gadget region already claimed\n");
735 return -EBUSY;
736 }
737 /* get resource for dbi registers*/
738
739 res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
740 if (!res1) {
741 dev_err(&pdev->dev, "no resource defined\n");
742 goto err_rel_res0;
743 }
744 if (!request_mem_region(res1->start, resource_size(res1),
745 pdev->name)) {
746 dev_err(&pdev->dev, "pcie gadget region already claimed\n");
747 goto err_rel_res0;
748 }
749
750 target = kzalloc(sizeof(*target), GFP_KERNEL);
751 if (!target) {
752 dev_err(&pdev->dev, "out of memory\n");
753 status = -ENOMEM;
754 goto err_rel_res;
755 }
756
757 cg_item = &target->subsys.su_group.cg_item;
758 sprintf(cg_item->ci_namebuf, "pcie_gadget.%d", pdev->id);
759 cg_item->ci_type = &pcie_gadget_target_type;
760 config = &target->config;
761 config->va_app_base = (void __iomem *)ioremap(res0->start,
762 resource_size(res0));
763 if (!config->va_app_base) {
764 dev_err(&pdev->dev, "ioremap fail\n");
765 status = -ENOMEM;
766 goto err_kzalloc;
767 }
768
769 config->base = (void __iomem *)res1->start;
770
771 config->va_dbi_base = (void __iomem *)ioremap(res1->start,
772 resource_size(res1));
773 if (!config->va_dbi_base) {
774 dev_err(&pdev->dev, "ioremap fail\n");
775 status = -ENOMEM;
776 goto err_iounmap_app;
777 }
778
779 dev_set_drvdata(&pdev->dev, target);
780
781 irq = platform_get_irq(pdev, 0);
782 if (irq < 0) {
783 dev_err(&pdev->dev, "no update irq?\n");
784 status = irq;
785 goto err_iounmap;
786 }
787
788 status = request_irq(irq, spear_pcie_gadget_irq, 0, pdev->name, NULL);
789 if (status) {
790 dev_err(&pdev->dev, "pcie gadget interrupt IRQ%d already \
791 claimed\n", irq);
792 goto err_iounmap;
793 }
794
795 /* Register configfs hooks */
796 subsys = &target->subsys;
797 config_group_init(&subsys->su_group);
798 mutex_init(&subsys->su_mutex);
799 status = configfs_register_subsystem(subsys);
800 if (status)
801 goto err_irq;
802
803 /*
804 * init basic pcie application registers
805 * do not enable clock if it is PCIE0.Ideally , all controller should
806 * have been independent from others with respect to clock. But PCIE1
807 * and 2 depends on PCIE0.So PCIE0 clk is provided during board init.
808 */
809 if (pdev->id == 1) {
810 /*
811 * Ideally CFG Clock should have been also enabled here. But
812 * it is done currently during board init routne
813 */
814 clk = clk_get_sys("pcie1", NULL);
815 if (IS_ERR(clk)) {
816 pr_err("%s:couldn't get clk for pcie1\n", __func__);
817 goto err_irq;
818 }
819 if (clk_enable(clk)) {
820 pr_err("%s:couldn't enable clk for pcie1\n", __func__);
821 goto err_irq;
822 }
823 } else if (pdev->id == 2) {
824 /*
825 * Ideally CFG Clock should have been also enabled here. But
826 * it is done currently during board init routne
827 */
828 clk = clk_get_sys("pcie2", NULL);
829 if (IS_ERR(clk)) {
830 pr_err("%s:couldn't get clk for pcie2\n", __func__);
831 goto err_irq;
832 }
833 if (clk_enable(clk)) {
834 pr_err("%s:couldn't enable clk for pcie2\n", __func__);
835 goto err_irq;
836 }
837 }
838 spear13xx_pcie_device_init(config);
839
840 return 0;
841err_irq:
842 free_irq(irq, NULL);
843err_iounmap:
844 iounmap(config->va_dbi_base);
845err_iounmap_app:
846 iounmap(config->va_app_base);
847err_kzalloc:
848 kfree(config);
849err_rel_res:
850 release_mem_region(res1->start, resource_size(res1));
851err_rel_res0:
852 release_mem_region(res0->start, resource_size(res0));
853 return status;
854}
855
856static int __devexit spear_pcie_gadget_remove(struct platform_device *pdev)
857{
858 struct resource *res0, *res1;
859 static struct pcie_gadget_target *target;
860 struct spear_pcie_gadget_config *config;
861 int irq;
862
863 res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
864 res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
865 irq = platform_get_irq(pdev, 0);
866 target = dev_get_drvdata(&pdev->dev);
867 config = &target->config;
868
869 free_irq(irq, NULL);
870 iounmap(config->va_dbi_base);
871 iounmap(config->va_app_base);
872 release_mem_region(res1->start, resource_size(res1));
873 release_mem_region(res0->start, resource_size(res0));
874 configfs_unregister_subsystem(&target->subsys);
875 kfree(target);
876
877 return 0;
878}
879
880static void spear_pcie_gadget_shutdown(struct platform_device *pdev)
881{
882}
883
884static struct platform_driver spear_pcie_gadget_driver = {
885 .probe = spear_pcie_gadget_probe,
886 .remove = spear_pcie_gadget_remove,
887 .shutdown = spear_pcie_gadget_shutdown,
888 .driver = {
889 .name = "pcie-gadget-spear",
890 .bus = &platform_bus_type
891 },
892};
893
894static int __init spear_pcie_gadget_init(void)
895{
896 return platform_driver_register(&spear_pcie_gadget_driver);
897}
898module_init(spear_pcie_gadget_init);
899
900static void __exit spear_pcie_gadget_exit(void)
901{
902 platform_driver_unregister(&spear_pcie_gadget_driver);
903}
904module_exit(spear_pcie_gadget_exit);
905
906MODULE_ALIAS("pcie-gadget-spear");
907MODULE_AUTHOR("Pratyush Anand");
908MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 379d2ffe4c87..2e032f0e8cf4 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1417,7 +1417,7 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1417 if (res == NULL || irq < 0) 1417 if (res == NULL || irq < 0)
1418 return -ENXIO; 1418 return -ENXIO;
1419 1419
1420 res = request_mem_region(res->start, res->end - res->start + 1, 1420 res = request_mem_region(res->start, resource_size(res),
1421 pdev->name); 1421 pdev->name);
1422 if (res == NULL) 1422 if (res == NULL)
1423 return -EBUSY; 1423 return -EBUSY;
@@ -1457,7 +1457,7 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1457 1457
1458 host->irq = irq; 1458 host->irq = irq;
1459 host->phys_base = host->mem_res->start; 1459 host->phys_base = host->mem_res->start;
1460 host->virt_base = ioremap(res->start, res->end - res->start + 1); 1460 host->virt_base = ioremap(res->start, resource_size(res));
1461 if (!host->virt_base) 1461 if (!host->virt_base)
1462 goto err_ioremap; 1462 goto err_ioremap;
1463 1463
@@ -1514,7 +1514,7 @@ err_free_mmc_host:
1514err_ioremap: 1514err_ioremap:
1515 kfree(host); 1515 kfree(host);
1516err_free_mem_region: 1516err_free_mem_region:
1517 release_mem_region(res->start, res->end - res->start + 1); 1517 release_mem_region(res->start, resource_size(res));
1518 return ret; 1518 return ret;
1519} 1519}
1520 1520
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 158c0ee53b2c..259ece047afc 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2047,8 +2047,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2047 2047
2048 res->start += pdata->reg_offset; 2048 res->start += pdata->reg_offset;
2049 res->end += pdata->reg_offset; 2049 res->end += pdata->reg_offset;
2050 res = request_mem_region(res->start, res->end - res->start + 1, 2050 res = request_mem_region(res->start, resource_size(res), pdev->name);
2051 pdev->name);
2052 if (res == NULL) 2051 if (res == NULL)
2053 return -EBUSY; 2052 return -EBUSY;
2054 2053
@@ -2287,7 +2286,7 @@ err1:
2287err_alloc: 2286err_alloc:
2288 omap_hsmmc_gpio_free(pdata); 2287 omap_hsmmc_gpio_free(pdata);
2289err: 2288err:
2290 release_mem_region(res->start, res->end - res->start + 1); 2289 release_mem_region(res->start, resource_size(res));
2291 return ret; 2290 return ret;
2292} 2291}
2293 2292
@@ -2324,7 +2323,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
2324 2323
2325 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2324 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2326 if (res) 2325 if (res)
2327 release_mem_region(res->start, res->end - res->start + 1); 2326 release_mem_region(res->start, resource_size(res));
2328 platform_set_drvdata(pdev, NULL); 2327 platform_set_drvdata(pdev, NULL);
2329 2328
2330 return 0; 2329 return 0;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 1a6e9eb7af43..338bea147c64 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2130,7 +2130,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
2130} 2130}
2131 2131
2132/* 2132/*
2133* First release a slave and than destroy the bond if no more slaves are left. 2133* First release a slave and then destroy the bond if no more slaves are left.
2134* Must be under rtnl_lock when this function is called. 2134* Must be under rtnl_lock when this function is called.
2135*/ 2135*/
2136static int bond_release_and_destroy(struct net_device *bond_dev, 2136static int bond_release_and_destroy(struct net_device *bond_dev,
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 14050786218a..110eda01843c 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -633,9 +633,6 @@ static void c_can_start(struct net_device *dev)
633{ 633{
634 struct c_can_priv *priv = netdev_priv(dev); 634 struct c_can_priv *priv = netdev_priv(dev);
635 635
636 /* enable status change, error and module interrupts */
637 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
638
639 /* basic c_can configuration */ 636 /* basic c_can configuration */
640 c_can_chip_config(dev); 637 c_can_chip_config(dev);
641 638
@@ -643,6 +640,9 @@ static void c_can_start(struct net_device *dev)
643 640
644 /* reset tx helper pointers */ 641 /* reset tx helper pointers */
645 priv->tx_next = priv->tx_echo = 0; 642 priv->tx_next = priv->tx_echo = 0;
643
644 /* enable status change, error and module interrupts */
645 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
646} 646}
647 647
648static void c_can_stop(struct net_device *dev) 648static void c_can_stop(struct net_device *dev)
diff --git a/drivers/net/ftmac100.c b/drivers/net/ftmac100.c
index 1d6f4b8d393a..a31661948c42 100644
--- a/drivers/net/ftmac100.c
+++ b/drivers/net/ftmac100.c
@@ -1102,7 +1102,7 @@ static int ftmac100_probe(struct platform_device *pdev)
1102 goto err_req_mem; 1102 goto err_req_mem;
1103 } 1103 }
1104 1104
1105 priv->base = ioremap(res->start, res->end - res->start); 1105 priv->base = ioremap(res->start, resource_size(res));
1106 if (!priv->base) { 1106 if (!priv->base) {
1107 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n"); 1107 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
1108 err = -EIO; 1108 err = -EIO;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index ccb231c4d933..2a0ad9a501bb 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -949,6 +949,11 @@ static void gfar_detect_errata(struct gfar_private *priv)
949 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) 949 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
950 priv->errata |= GFAR_ERRATA_A002; 950 priv->errata |= GFAR_ERRATA_A002;
951 951
952 /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
953 if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
954 (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
955 priv->errata |= GFAR_ERRATA_12;
956
952 if (priv->errata) 957 if (priv->errata)
953 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", 958 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
954 priv->errata); 959 priv->errata);
@@ -2154,8 +2159,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2154 /* Set up checksumming */ 2159 /* Set up checksumming */
2155 if (CHECKSUM_PARTIAL == skb->ip_summed) { 2160 if (CHECKSUM_PARTIAL == skb->ip_summed) {
2156 fcb = gfar_add_fcb(skb); 2161 fcb = gfar_add_fcb(skb);
2157 lstatus |= BD_LFLAG(TXBD_TOE); 2162 /* as specified by errata */
2158 gfar_tx_checksum(skb, fcb); 2163 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12)
2164 && ((unsigned long)fcb % 0x20) > 0x18)) {
2165 __skb_pull(skb, GMAC_FCB_LEN);
2166 skb_checksum_help(skb);
2167 } else {
2168 lstatus |= BD_LFLAG(TXBD_TOE);
2169 gfar_tx_checksum(skb, fcb);
2170 }
2159 } 2171 }
2160 2172
2161 if (vlan_tx_tag_present(skb)) { 2173 if (vlan_tx_tag_present(skb)) {
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 54de4135e932..ec5d595ce2e2 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -1039,6 +1039,7 @@ enum gfar_errata {
1039 GFAR_ERRATA_74 = 0x01, 1039 GFAR_ERRATA_74 = 0x01,
1040 GFAR_ERRATA_76 = 0x02, 1040 GFAR_ERRATA_76 = 0x02,
1041 GFAR_ERRATA_A002 = 0x04, 1041 GFAR_ERRATA_A002 = 0x04,
1042 GFAR_ERRATA_12 = 0x08, /* a.k.a errata eTSEC49 */
1042}; 1043};
1043 1044
1044/* Struct stolen almost completely (and shamelessly) from the FCC enet source 1045/* Struct stolen almost completely (and shamelessly) from the FCC enet source
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 5b37d3c191e4..78e34e9e4f00 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -39,8 +39,11 @@ struct macvlan_port {
39 struct list_head vlans; 39 struct list_head vlans;
40 struct rcu_head rcu; 40 struct rcu_head rcu;
41 bool passthru; 41 bool passthru;
42 int count;
42}; 43};
43 44
45static void macvlan_port_destroy(struct net_device *dev);
46
44#define macvlan_port_get_rcu(dev) \ 47#define macvlan_port_get_rcu(dev) \
45 ((struct macvlan_port *) rcu_dereference(dev->rx_handler_data)) 48 ((struct macvlan_port *) rcu_dereference(dev->rx_handler_data))
46#define macvlan_port_get(dev) ((struct macvlan_port *) dev->rx_handler_data) 49#define macvlan_port_get(dev) ((struct macvlan_port *) dev->rx_handler_data)
@@ -457,8 +460,13 @@ static int macvlan_init(struct net_device *dev)
457static void macvlan_uninit(struct net_device *dev) 460static void macvlan_uninit(struct net_device *dev)
458{ 461{
459 struct macvlan_dev *vlan = netdev_priv(dev); 462 struct macvlan_dev *vlan = netdev_priv(dev);
463 struct macvlan_port *port = vlan->port;
460 464
461 free_percpu(vlan->pcpu_stats); 465 free_percpu(vlan->pcpu_stats);
466
467 port->count -= 1;
468 if (!port->count)
469 macvlan_port_destroy(port->dev);
462} 470}
463 471
464static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev, 472static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
@@ -691,12 +699,13 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
691 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); 699 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
692 700
693 if (vlan->mode == MACVLAN_MODE_PASSTHRU) { 701 if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
694 if (!list_empty(&port->vlans)) 702 if (port->count)
695 return -EINVAL; 703 return -EINVAL;
696 port->passthru = true; 704 port->passthru = true;
697 memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN); 705 memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN);
698 } 706 }
699 707
708 port->count += 1;
700 err = register_netdevice(dev); 709 err = register_netdevice(dev);
701 if (err < 0) 710 if (err < 0)
702 goto destroy_port; 711 goto destroy_port;
@@ -707,7 +716,8 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
707 return 0; 716 return 0;
708 717
709destroy_port: 718destroy_port:
710 if (list_empty(&port->vlans)) 719 port->count -= 1;
720 if (!port->count)
711 macvlan_port_destroy(lowerdev); 721 macvlan_port_destroy(lowerdev);
712 722
713 return err; 723 return err;
@@ -725,13 +735,9 @@ static int macvlan_newlink(struct net *src_net, struct net_device *dev,
725void macvlan_dellink(struct net_device *dev, struct list_head *head) 735void macvlan_dellink(struct net_device *dev, struct list_head *head)
726{ 736{
727 struct macvlan_dev *vlan = netdev_priv(dev); 737 struct macvlan_dev *vlan = netdev_priv(dev);
728 struct macvlan_port *port = vlan->port;
729 738
730 list_del(&vlan->list); 739 list_del(&vlan->list);
731 unregister_netdevice_queue(dev, head); 740 unregister_netdevice_queue(dev, head);
732
733 if (list_empty(&port->vlans))
734 macvlan_port_destroy(port->dev);
735} 741}
736EXPORT_SYMBOL_GPL(macvlan_dellink); 742EXPORT_SYMBOL_GPL(macvlan_dellink);
737 743
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 40fa59e2fd5c..32678b6c6b39 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -9501,7 +9501,7 @@ static struct niu_parent * __devinit niu_new_parent(struct niu *np,
9501 struct niu_parent *p; 9501 struct niu_parent *p;
9502 int i; 9502 int i;
9503 9503
9504 plat_dev = platform_device_register_simple("niu", niu_parent_index, 9504 plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
9505 NULL, 0); 9505 NULL, 0);
9506 if (IS_ERR(plat_dev)) 9506 if (IS_ERR(plat_dev))
9507 return NULL; 9507 return NULL;
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index 43583309a65d..31e9407a0739 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -129,7 +129,7 @@ static void *z_comp_alloc(unsigned char *options, int opt_len)
129 129
130 state->strm.next_in = NULL; 130 state->strm.next_in = NULL;
131 state->w_size = w_size; 131 state->w_size = w_size;
132 state->strm.workspace = vmalloc(zlib_deflate_workspacesize()); 132 state->strm.workspace = vmalloc(zlib_deflate_workspacesize(-w_size, 8));
133 if (state->strm.workspace == NULL) 133 if (state->strm.workspace == NULL)
134 goto out_free; 134 goto out_free;
135 135
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 105d7f0630cc..2de9b90c5f8f 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -171,7 +171,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
171 if (skb->ip_summed == CHECKSUM_NONE) 171 if (skb->ip_summed == CHECKSUM_NONE)
172 skb->ip_summed = rcv_priv->ip_summed; 172 skb->ip_summed = rcv_priv->ip_summed;
173 173
174 length = skb->len + ETH_HLEN; 174 length = skb->len;
175 if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS) 175 if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
176 goto rx_drop; 176 goto rx_drop;
177 177
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index ad3d099bf5c1..c9784705f6ac 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -1031,6 +1031,7 @@ static int __devinit acer_backlight_init(struct device *dev)
1031 struct backlight_device *bd; 1031 struct backlight_device *bd;
1032 1032
1033 memset(&props, 0, sizeof(struct backlight_properties)); 1033 memset(&props, 0, sizeof(struct backlight_properties));
1034 props.type = BACKLIGHT_PLATFORM;
1034 props.max_brightness = max_brightness; 1035 props.max_brightness = max_brightness;
1035 bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops, 1036 bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops,
1036 &props); 1037 &props);
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index f3aa6a7fdab6..5a6f7d7575d6 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -667,6 +667,7 @@ static int asus_backlight_init(struct asus_laptop *asus)
667 667
668 memset(&props, 0, sizeof(struct backlight_properties)); 668 memset(&props, 0, sizeof(struct backlight_properties));
669 props.max_brightness = 15; 669 props.max_brightness = 15;
670 props.type = BACKLIGHT_PLATFORM;
670 671
671 bd = backlight_device_register(ASUS_LAPTOP_FILE, 672 bd = backlight_device_register(ASUS_LAPTOP_FILE,
672 &asus->platform_device->dev, asus, 673 &asus->platform_device->dev, asus,
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index fe495939c307..f503607c0645 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1507,6 +1507,7 @@ static int __init asus_acpi_init(void)
1507 } 1507 }
1508 1508
1509 memset(&props, 0, sizeof(struct backlight_properties)); 1509 memset(&props, 0, sizeof(struct backlight_properties));
1510 props.type = BACKLIGHT_PLATFORM;
1510 props.max_brightness = 15; 1511 props.max_brightness = 15;
1511 asus_backlight_device = backlight_device_register("asus", NULL, NULL, 1512 asus_backlight_device = backlight_device_register("asus", NULL, NULL,
1512 &asus_backlight_data, 1513 &asus_backlight_data,
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 911135425224..94f93b621d7b 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -564,6 +564,7 @@ static int cmpc_ipml_add(struct acpi_device *acpi)
564 return -ENOMEM; 564 return -ENOMEM;
565 565
566 memset(&props, 0, sizeof(struct backlight_properties)); 566 memset(&props, 0, sizeof(struct backlight_properties));
567 props.type = BACKLIGHT_PLATFORM;
567 props.max_brightness = 7; 568 props.max_brightness = 7;
568 ipml->bd = backlight_device_register("cmpc_bl", &acpi->dev, 569 ipml->bd = backlight_device_register("cmpc_bl", &acpi->dev,
569 acpi->handle, &cmpc_bl_ops, 570 acpi->handle, &cmpc_bl_ops,
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 034572b980c9..eb95878fa583 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -970,6 +970,7 @@ static int __init compal_init(void)
970 if (!acpi_video_backlight_support()) { 970 if (!acpi_video_backlight_support()) {
971 struct backlight_properties props; 971 struct backlight_properties props;
972 memset(&props, 0, sizeof(struct backlight_properties)); 972 memset(&props, 0, sizeof(struct backlight_properties));
973 props.type = BACKLIGHT_PLATFORM;
973 props.max_brightness = BACKLIGHT_LEVEL_MAX; 974 props.max_brightness = BACKLIGHT_LEVEL_MAX;
974 compalbl_device = backlight_device_register(DRIVER_NAME, 975 compalbl_device = backlight_device_register(DRIVER_NAME,
975 NULL, NULL, 976 NULL, NULL,
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index ad24ef36f9f7..de301aa8e5c3 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -671,6 +671,7 @@ static int __init dell_init(void)
671 if (max_intensity) { 671 if (max_intensity) {
672 struct backlight_properties props; 672 struct backlight_properties props;
673 memset(&props, 0, sizeof(struct backlight_properties)); 673 memset(&props, 0, sizeof(struct backlight_properties));
674 props.type = BACKLIGHT_PLATFORM;
674 props.max_brightness = max_intensity; 675 props.max_brightness = max_intensity;
675 dell_backlight_device = backlight_device_register("dell_backlight", 676 dell_backlight_device = backlight_device_register("dell_backlight",
676 &platform_device->dev, 677 &platform_device->dev,
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 49d9ad708f89..6605beac0d0e 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -1147,6 +1147,7 @@ static int eeepc_backlight_init(struct eeepc_laptop *eeepc)
1147 struct backlight_device *bd; 1147 struct backlight_device *bd;
1148 1148
1149 memset(&props, 0, sizeof(struct backlight_properties)); 1149 memset(&props, 0, sizeof(struct backlight_properties));
1150 props.type = BACKLIGHT_PLATFORM;
1150 props.max_brightness = 15; 1151 props.max_brightness = 15;
1151 bd = backlight_device_register(EEEPC_LAPTOP_FILE, 1152 bd = backlight_device_register(EEEPC_LAPTOP_FILE,
1152 &eeepc->platform_device->dev, eeepc, 1153 &eeepc->platform_device->dev, eeepc,
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 95e3b0948e9c..493054c2dbe1 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -1128,6 +1128,7 @@ static int __init fujitsu_init(void)
1128 1128
1129 memset(&props, 0, sizeof(struct backlight_properties)); 1129 memset(&props, 0, sizeof(struct backlight_properties));
1130 max_brightness = fujitsu->max_brightness; 1130 max_brightness = fujitsu->max_brightness;
1131 props.type = BACKLIGHT_PLATFORM;
1131 props.max_brightness = max_brightness - 1; 1132 props.max_brightness = max_brightness - 1;
1132 fujitsu->bl_device = backlight_device_register("fujitsu-laptop", 1133 fujitsu->bl_device = backlight_device_register("fujitsu-laptop",
1133 NULL, NULL, 1134 NULL, NULL,
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 7e9bb6df9d39..142d38579314 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -804,6 +804,7 @@ static int __init msi_init(void)
804 } else { 804 } else {
805 struct backlight_properties props; 805 struct backlight_properties props;
806 memset(&props, 0, sizeof(struct backlight_properties)); 806 memset(&props, 0, sizeof(struct backlight_properties));
807 props.type = BACKLIGHT_PLATFORM;
807 props.max_brightness = MSI_LCD_LEVEL_MAX - 1; 808 props.max_brightness = MSI_LCD_LEVEL_MAX - 1;
808 msibl_device = backlight_device_register("msi-laptop-bl", NULL, 809 msibl_device = backlight_device_register("msi-laptop-bl", NULL,
809 NULL, &msibl_ops, 810 NULL, &msibl_ops,
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index 35278ad7e628..d5419c9ec07a 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -254,6 +254,7 @@ static int __init msi_wmi_init(void)
254 if (!acpi_video_backlight_support()) { 254 if (!acpi_video_backlight_support()) {
255 struct backlight_properties props; 255 struct backlight_properties props;
256 memset(&props, 0, sizeof(struct backlight_properties)); 256 memset(&props, 0, sizeof(struct backlight_properties));
257 props.type = BACKLIGHT_PLATFORM;
257 props.max_brightness = ARRAY_SIZE(backlight_map) - 1; 258 props.max_brightness = ARRAY_SIZE(backlight_map) - 1;
258 backlight = backlight_device_register(DRV_NAME, NULL, NULL, 259 backlight = backlight_device_register(DRV_NAME, NULL, NULL,
259 &msi_backlight_ops, 260 &msi_backlight_ops,
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index cc1e0ba104d7..05be30ee158b 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -602,6 +602,7 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
602 } 602 }
603 /* initialize backlight */ 603 /* initialize backlight */
604 memset(&props, 0, sizeof(struct backlight_properties)); 604 memset(&props, 0, sizeof(struct backlight_properties));
605 props.type = BACKLIGHT_PLATFORM;
605 props.max_brightness = pcc->sinf[SINF_AC_MAX_BRIGHT]; 606 props.max_brightness = pcc->sinf[SINF_AC_MAX_BRIGHT];
606 pcc->backlight = backlight_device_register("panasonic", NULL, pcc, 607 pcc->backlight = backlight_device_register("panasonic", NULL, pcc,
607 &pcc_backlight_ops, &props); 608 &pcc_backlight_ops, &props);
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 5e83370b0812..13d8d63bcca9 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1305,8 +1305,9 @@ static int sony_nc_add(struct acpi_device *device)
1305 "controlled by ACPI video driver\n"); 1305 "controlled by ACPI video driver\n");
1306 } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT", 1306 } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
1307 &handle))) { 1307 &handle))) {
1308 struct backlight_properties props; 1308 struct backlight_properties props;
1309 memset(&props, 0, sizeof(struct backlight_properties)); 1309 memset(&props, 0, sizeof(struct backlight_properties));
1310 props.type = BACKLIGHT_PLATFORM;
1310 props.max_brightness = SONY_MAX_BRIGHTNESS - 1; 1311 props.max_brightness = SONY_MAX_BRIGHTNESS - 1;
1311 sony_backlight_device = backlight_device_register("sony", NULL, 1312 sony_backlight_device = backlight_device_register("sony", NULL,
1312 NULL, 1313 NULL,
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index eb9922385ef8..947bdcaa0ce9 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -6307,6 +6307,7 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
6307 return 1; 6307 return 1;
6308 6308
6309 memset(&props, 0, sizeof(struct backlight_properties)); 6309 memset(&props, 0, sizeof(struct backlight_properties));
6310 props.type = BACKLIGHT_PLATFORM;
6310 props.max_brightness = bright_maxlvl; 6311 props.max_brightness = bright_maxlvl;
6311 props.brightness = b & TP_EC_BACKLIGHT_LVLMSK; 6312 props.brightness = b & TP_EC_BACKLIGHT_LVLMSK;
6312 ibm_backlight_device = backlight_device_register(TPACPI_BACKLIGHT_DEV_NAME, 6313 ibm_backlight_device = backlight_device_register(TPACPI_BACKLIGHT_DEV_NAME,
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 209cced786c6..63f42a22e102 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1018,6 +1018,7 @@ static int __init toshiba_acpi_init(void)
1018 create_toshiba_proc_entries(); 1018 create_toshiba_proc_entries();
1019 } 1019 }
1020 1020
1021 props.type = BACKLIGHT_PLATFORM;
1021 props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1; 1022 props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
1022 toshiba_backlight_device = backlight_device_register("toshiba", 1023 toshiba_backlight_device = backlight_device_register("toshiba",
1023 &toshiba_acpi.p_dev->dev, 1024 &toshiba_acpi.p_dev->dev,
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
index 19bc73695475..fa4e0a5db3f8 100644
--- a/drivers/pnp/base.h
+++ b/drivers/pnp/base.h
@@ -142,7 +142,9 @@ void __pnp_remove_device(struct pnp_dev *dev);
142int pnp_check_port(struct pnp_dev *dev, struct resource *res); 142int pnp_check_port(struct pnp_dev *dev, struct resource *res);
143int pnp_check_mem(struct pnp_dev *dev, struct resource *res); 143int pnp_check_mem(struct pnp_dev *dev, struct resource *res);
144int pnp_check_irq(struct pnp_dev *dev, struct resource *res); 144int pnp_check_irq(struct pnp_dev *dev, struct resource *res);
145#ifdef CONFIG_ISA_DMA_API
145int pnp_check_dma(struct pnp_dev *dev, struct resource *res); 146int pnp_check_dma(struct pnp_dev *dev, struct resource *res);
147#endif
146 148
147char *pnp_resource_type_name(struct resource *res); 149char *pnp_resource_type_name(struct resource *res);
148void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc); 150void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc);
diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c
index 0a15664eef1c..ed9ce507149a 100644
--- a/drivers/pnp/manager.c
+++ b/drivers/pnp/manager.c
@@ -171,6 +171,7 @@ __add:
171 return 0; 171 return 0;
172} 172}
173 173
174#ifdef CONFIG_ISA_DMA_API
174static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx) 175static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
175{ 176{
176 struct resource *res, local_res; 177 struct resource *res, local_res;
@@ -210,6 +211,7 @@ __add:
210 pnp_add_dma_resource(dev, res->start, res->flags); 211 pnp_add_dma_resource(dev, res->start, res->flags);
211 return 0; 212 return 0;
212} 213}
214#endif /* CONFIG_ISA_DMA_API */
213 215
214void pnp_init_resources(struct pnp_dev *dev) 216void pnp_init_resources(struct pnp_dev *dev)
215{ 217{
@@ -234,7 +236,8 @@ static void pnp_clean_resource_table(struct pnp_dev *dev)
234static int pnp_assign_resources(struct pnp_dev *dev, int set) 236static int pnp_assign_resources(struct pnp_dev *dev, int set)
235{ 237{
236 struct pnp_option *option; 238 struct pnp_option *option;
237 int nport = 0, nmem = 0, nirq = 0, ndma = 0; 239 int nport = 0, nmem = 0, nirq = 0;
240 int ndma __maybe_unused = 0;
238 int ret = 0; 241 int ret = 0;
239 242
240 pnp_dbg(&dev->dev, "pnp_assign_resources, try dependent set %d\n", set); 243 pnp_dbg(&dev->dev, "pnp_assign_resources, try dependent set %d\n", set);
@@ -256,9 +259,11 @@ static int pnp_assign_resources(struct pnp_dev *dev, int set)
256 case IORESOURCE_IRQ: 259 case IORESOURCE_IRQ:
257 ret = pnp_assign_irq(dev, &option->u.irq, nirq++); 260 ret = pnp_assign_irq(dev, &option->u.irq, nirq++);
258 break; 261 break;
262#ifdef CONFIG_ISA_DMA_API
259 case IORESOURCE_DMA: 263 case IORESOURCE_DMA:
260 ret = pnp_assign_dma(dev, &option->u.dma, ndma++); 264 ret = pnp_assign_dma(dev, &option->u.dma, ndma++);
261 break; 265 break;
266#endif
262 default: 267 default:
263 ret = -EINVAL; 268 ret = -EINVAL;
264 break; 269 break;
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index a925e6b63d72..b0ecacbe53b1 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -409,9 +409,9 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
409 return 1; 409 return 1;
410} 410}
411 411
412#ifdef CONFIG_ISA_DMA_API
412int pnp_check_dma(struct pnp_dev *dev, struct resource *res) 413int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
413{ 414{
414#ifndef CONFIG_IA64
415 int i; 415 int i;
416 struct pnp_dev *tdev; 416 struct pnp_dev *tdev;
417 struct resource *tres; 417 struct resource *tres;
@@ -466,11 +466,8 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
466 } 466 }
467 467
468 return 1; 468 return 1;
469#else
470 /* IA64 does not have legacy DMA */
471 return 0;
472#endif
473} 469}
470#endif /* CONFIG_ISA_DMA_API */
474 471
475unsigned long pnp_resource_type(struct resource *res) 472unsigned long pnp_resource_type(struct resource *res)
476{ 473{
diff --git a/drivers/pps/generators/pps_gen_parport.c b/drivers/pps/generators/pps_gen_parport.c
index b93af3ebb5ba..dcd39fba6ddd 100644
--- a/drivers/pps/generators/pps_gen_parport.c
+++ b/drivers/pps/generators/pps_gen_parport.c
@@ -216,11 +216,6 @@ static void parport_attach(struct parport *port)
216 216
217 hrtimer_init(&device.timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); 217 hrtimer_init(&device.timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
218 device.timer.function = hrtimer_event; 218 device.timer.function = hrtimer_event;
219#ifdef CONFIG_PREEMPT_RT
220 /* hrtimer interrupt will run in the interrupt context with this */
221 device.timer.irqsafe = 1;
222#endif
223
224 hrtimer_start(&device.timer, next_intr_time(&device), HRTIMER_MODE_ABS); 219 hrtimer_start(&device.timer, next_intr_time(&device), HRTIMER_MODE_ABS);
225 220
226 return; 221 return;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4941cade319f..e1878877399c 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -985,4 +985,14 @@ config RTC_DRV_LPC32XX
985 This driver can also be buillt as a module. If so, the module 985 This driver can also be buillt as a module. If so, the module
986 will be called rtc-lpc32xx. 986 will be called rtc-lpc32xx.
987 987
988config RTC_DRV_TEGRA
989 tristate "NVIDIA Tegra Internal RTC driver"
990 depends on RTC_CLASS && ARCH_TEGRA
991 help
992 If you say yes here you get support for the
993 Tegra 200 series internal RTC module.
994
995 This drive can also be built as a module. If so, the module
996 will be called rtc-tegra.
997
988endif # RTC_CLASS 998endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 5f6c3838dcf6..ca91c3c42e98 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -91,6 +91,7 @@ obj-$(CONFIG_RTC_DRV_STARFIRE) += rtc-starfire.o
91obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o 91obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o
92obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o 92obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o
93obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o 93obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o
94obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o
94obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o 95obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
95obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o 96obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o
96obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o 97obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index d834a63ec4b0..e6e71deb188f 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -25,6 +25,7 @@
25#include <linux/bcd.h> 25#include <linux/bcd.h>
26#include <linux/workqueue.h> 26#include <linux/workqueue.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/pm.h>
28 29
29#define DS1374_REG_TOD0 0x00 /* Time of Day */ 30#define DS1374_REG_TOD0 0x00 /* Time of Day */
30#define DS1374_REG_TOD1 0x01 31#define DS1374_REG_TOD1 0x01
@@ -409,32 +410,38 @@ static int __devexit ds1374_remove(struct i2c_client *client)
409} 410}
410 411
411#ifdef CONFIG_PM 412#ifdef CONFIG_PM
412static int ds1374_suspend(struct i2c_client *client, pm_message_t state) 413static int ds1374_suspend(struct device *dev)
413{ 414{
415 struct i2c_client *client = to_i2c_client(dev);
416
414 if (client->irq >= 0 && device_may_wakeup(&client->dev)) 417 if (client->irq >= 0 && device_may_wakeup(&client->dev))
415 enable_irq_wake(client->irq); 418 enable_irq_wake(client->irq);
416 return 0; 419 return 0;
417} 420}
418 421
419static int ds1374_resume(struct i2c_client *client) 422static int ds1374_resume(struct device *dev)
420{ 423{
424 struct i2c_client *client = to_i2c_client(dev);
425
421 if (client->irq >= 0 && device_may_wakeup(&client->dev)) 426 if (client->irq >= 0 && device_may_wakeup(&client->dev))
422 disable_irq_wake(client->irq); 427 disable_irq_wake(client->irq);
423 return 0; 428 return 0;
424} 429}
430
431static SIMPLE_DEV_PM_OPS(ds1374_pm, ds1374_suspend, ds1374_resume);
432
433#define DS1374_PM (&ds1374_pm)
425#else 434#else
426#define ds1374_suspend NULL 435#define DS1374_PM NULL
427#define ds1374_resume NULL
428#endif 436#endif
429 437
430static struct i2c_driver ds1374_driver = { 438static struct i2c_driver ds1374_driver = {
431 .driver = { 439 .driver = {
432 .name = "rtc-ds1374", 440 .name = "rtc-ds1374",
433 .owner = THIS_MODULE, 441 .owner = THIS_MODULE,
442 .pm = DS1374_PM,
434 }, 443 },
435 .probe = ds1374_probe, 444 .probe = ds1374_probe,
436 .suspend = ds1374_suspend,
437 .resume = ds1374_resume,
438 .remove = __devexit_p(ds1374_remove), 445 .remove = __devexit_p(ds1374_remove),
439 .id_table = ds1374_id, 446 .id_table = ds1374_id,
440}; 447};
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 3fffd708711f..fbabc773dded 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -468,7 +468,7 @@ ds1511_nvram_write(struct file *filp, struct kobject *kobj,
468static struct bin_attribute ds1511_nvram_attr = { 468static struct bin_attribute ds1511_nvram_attr = {
469 .attr = { 469 .attr = {
470 .name = "nvram", 470 .name = "nvram",
471 .mode = S_IRUGO | S_IWUGO, 471 .mode = S_IRUGO | S_IWUSR,
472 }, 472 },
473 .size = DS1511_RAM_MAX, 473 .size = DS1511_RAM_MAX,
474 .read = ds1511_nvram_read, 474 .read = ds1511_nvram_read,
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index 468200c38ecb..da8beb8cae51 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -39,6 +39,8 @@
39#define ISL1208_REG_SR_BAT (1<<1) /* battery */ 39#define ISL1208_REG_SR_BAT (1<<1) /* battery */
40#define ISL1208_REG_SR_RTCF (1<<0) /* rtc fail */ 40#define ISL1208_REG_SR_RTCF (1<<0) /* rtc fail */
41#define ISL1208_REG_INT 0x08 41#define ISL1208_REG_INT 0x08
42#define ISL1208_REG_INT_ALME (1<<6) /* alarm enable */
43#define ISL1208_REG_INT_IM (1<<7) /* interrupt/alarm mode */
42#define ISL1208_REG_09 0x09 /* reserved */ 44#define ISL1208_REG_09 0x09 /* reserved */
43#define ISL1208_REG_ATR 0x0a 45#define ISL1208_REG_ATR 0x0a
44#define ISL1208_REG_DTR 0x0b 46#define ISL1208_REG_DTR 0x0b
@@ -202,6 +204,30 @@ isl1208_i2c_set_usr(struct i2c_client *client, u16 usr)
202} 204}
203 205
204static int 206static int
207isl1208_rtc_toggle_alarm(struct i2c_client *client, int enable)
208{
209 int icr = i2c_smbus_read_byte_data(client, ISL1208_REG_INT);
210
211 if (icr < 0) {
212 dev_err(&client->dev, "%s: reading INT failed\n", __func__);
213 return icr;
214 }
215
216 if (enable)
217 icr |= ISL1208_REG_INT_ALME | ISL1208_REG_INT_IM;
218 else
219 icr &= ~(ISL1208_REG_INT_ALME | ISL1208_REG_INT_IM);
220
221 icr = i2c_smbus_write_byte_data(client, ISL1208_REG_INT, icr);
222 if (icr < 0) {
223 dev_err(&client->dev, "%s: writing INT failed\n", __func__);
224 return icr;
225 }
226
227 return 0;
228}
229
230static int
205isl1208_rtc_proc(struct device *dev, struct seq_file *seq) 231isl1208_rtc_proc(struct device *dev, struct seq_file *seq)
206{ 232{
207 struct i2c_client *const client = to_i2c_client(dev); 233 struct i2c_client *const client = to_i2c_client(dev);
@@ -288,9 +314,8 @@ isl1208_i2c_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm)
288{ 314{
289 struct rtc_time *const tm = &alarm->time; 315 struct rtc_time *const tm = &alarm->time;
290 u8 regs[ISL1208_ALARM_SECTION_LEN] = { 0, }; 316 u8 regs[ISL1208_ALARM_SECTION_LEN] = { 0, };
291 int sr; 317 int icr, yr, sr = isl1208_i2c_get_sr(client);
292 318
293 sr = isl1208_i2c_get_sr(client);
294 if (sr < 0) { 319 if (sr < 0) {
295 dev_err(&client->dev, "%s: reading SR failed\n", __func__); 320 dev_err(&client->dev, "%s: reading SR failed\n", __func__);
296 return sr; 321 return sr;
@@ -313,6 +338,73 @@ isl1208_i2c_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm)
313 bcd2bin(regs[ISL1208_REG_MOA - ISL1208_REG_SCA] & 0x1f) - 1; 338 bcd2bin(regs[ISL1208_REG_MOA - ISL1208_REG_SCA] & 0x1f) - 1;
314 tm->tm_wday = bcd2bin(regs[ISL1208_REG_DWA - ISL1208_REG_SCA] & 0x03); 339 tm->tm_wday = bcd2bin(regs[ISL1208_REG_DWA - ISL1208_REG_SCA] & 0x03);
315 340
341 /* The alarm doesn't store the year so get it from the rtc section */
342 yr = i2c_smbus_read_byte_data(client, ISL1208_REG_YR);
343 if (yr < 0) {
344 dev_err(&client->dev, "%s: reading RTC YR failed\n", __func__);
345 return yr;
346 }
347 tm->tm_year = bcd2bin(yr) + 100;
348
349 icr = i2c_smbus_read_byte_data(client, ISL1208_REG_INT);
350 if (icr < 0) {
351 dev_err(&client->dev, "%s: reading INT failed\n", __func__);
352 return icr;
353 }
354 alarm->enabled = !!(icr & ISL1208_REG_INT_ALME);
355
356 return 0;
357}
358
359static int
360isl1208_i2c_set_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm)
361{
362 struct rtc_time *alarm_tm = &alarm->time;
363 u8 regs[ISL1208_ALARM_SECTION_LEN] = { 0, };
364 const int offs = ISL1208_REG_SCA;
365 unsigned long rtc_secs, alarm_secs;
366 struct rtc_time rtc_tm;
367 int err, enable;
368
369 err = isl1208_i2c_read_time(client, &rtc_tm);
370 if (err)
371 return err;
372 err = rtc_tm_to_time(&rtc_tm, &rtc_secs);
373 if (err)
374 return err;
375 err = rtc_tm_to_time(alarm_tm, &alarm_secs);
376 if (err)
377 return err;
378
379 /* If the alarm time is before the current time disable the alarm */
380 if (!alarm->enabled || alarm_secs <= rtc_secs)
381 enable = 0x00;
382 else
383 enable = 0x80;
384
385 /* Program the alarm and enable it for each setting */
386 regs[ISL1208_REG_SCA - offs] = bin2bcd(alarm_tm->tm_sec) | enable;
387 regs[ISL1208_REG_MNA - offs] = bin2bcd(alarm_tm->tm_min) | enable;
388 regs[ISL1208_REG_HRA - offs] = bin2bcd(alarm_tm->tm_hour) |
389 ISL1208_REG_HR_MIL | enable;
390
391 regs[ISL1208_REG_DTA - offs] = bin2bcd(alarm_tm->tm_mday) | enable;
392 regs[ISL1208_REG_MOA - offs] = bin2bcd(alarm_tm->tm_mon + 1) | enable;
393 regs[ISL1208_REG_DWA - offs] = bin2bcd(alarm_tm->tm_wday & 7) | enable;
394
395 /* write ALARM registers */
396 err = isl1208_i2c_set_regs(client, offs, regs,
397 ISL1208_ALARM_SECTION_LEN);
398 if (err < 0) {
399 dev_err(&client->dev, "%s: writing ALARM section failed\n",
400 __func__);
401 return err;
402 }
403
404 err = isl1208_rtc_toggle_alarm(client, enable);
405 if (err)
406 return err;
407
316 return 0; 408 return 0;
317} 409}
318 410
@@ -391,12 +483,63 @@ isl1208_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
391 return isl1208_i2c_read_alarm(to_i2c_client(dev), alarm); 483 return isl1208_i2c_read_alarm(to_i2c_client(dev), alarm);
392} 484}
393 485
486static int
487isl1208_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
488{
489 return isl1208_i2c_set_alarm(to_i2c_client(dev), alarm);
490}
491
492static irqreturn_t
493isl1208_rtc_interrupt(int irq, void *data)
494{
495 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
496 struct i2c_client *client = data;
497 int handled = 0, sr, err;
498
499 /*
500 * I2C reads get NAK'ed if we read straight away after an interrupt?
501 * Using a mdelay/msleep didn't seem to help either, so we work around
502 * this by continually trying to read the register for a short time.
503 */
504 while (1) {
505 sr = isl1208_i2c_get_sr(client);
506 if (sr >= 0)
507 break;
508
509 if (time_after(jiffies, timeout)) {
510 dev_err(&client->dev, "%s: reading SR failed\n",
511 __func__);
512 return sr;
513 }
514 }
515
516 if (sr & ISL1208_REG_SR_ALM) {
517 dev_dbg(&client->dev, "alarm!\n");
518
519 /* Clear the alarm */
520 sr &= ~ISL1208_REG_SR_ALM;
521 sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr);
522 if (sr < 0)
523 dev_err(&client->dev, "%s: writing SR failed\n",
524 __func__);
525 else
526 handled = 1;
527
528 /* Disable the alarm */
529 err = isl1208_rtc_toggle_alarm(client, 0);
530 if (err)
531 return err;
532 }
533
534 return handled ? IRQ_HANDLED : IRQ_NONE;
535}
536
394static const struct rtc_class_ops isl1208_rtc_ops = { 537static const struct rtc_class_ops isl1208_rtc_ops = {
395 .proc = isl1208_rtc_proc, 538 .proc = isl1208_rtc_proc,
396 .read_time = isl1208_rtc_read_time, 539 .read_time = isl1208_rtc_read_time,
397 .set_time = isl1208_rtc_set_time, 540 .set_time = isl1208_rtc_set_time,
398 .read_alarm = isl1208_rtc_read_alarm, 541 .read_alarm = isl1208_rtc_read_alarm,
399 /*.set_alarm = isl1208_rtc_set_alarm, */ 542 .set_alarm = isl1208_rtc_set_alarm,
400}; 543};
401 544
402/* sysfs interface */ 545/* sysfs interface */
@@ -488,11 +631,29 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
488 dev_info(&client->dev, 631 dev_info(&client->dev,
489 "chip found, driver version " DRV_VERSION "\n"); 632 "chip found, driver version " DRV_VERSION "\n");
490 633
634 if (client->irq > 0) {
635 rc = request_threaded_irq(client->irq, NULL,
636 isl1208_rtc_interrupt,
637 IRQF_SHARED,
638 isl1208_driver.driver.name, client);
639 if (!rc) {
640 device_init_wakeup(&client->dev, 1);
641 enable_irq_wake(client->irq);
642 } else {
643 dev_err(&client->dev,
644 "Unable to request irq %d, no alarm support\n",
645 client->irq);
646 client->irq = 0;
647 }
648 }
649
491 rtc = rtc_device_register(isl1208_driver.driver.name, 650 rtc = rtc_device_register(isl1208_driver.driver.name,
492 &client->dev, &isl1208_rtc_ops, 651 &client->dev, &isl1208_rtc_ops,
493 THIS_MODULE); 652 THIS_MODULE);
494 if (IS_ERR(rtc)) 653 if (IS_ERR(rtc)) {
495 return PTR_ERR(rtc); 654 rc = PTR_ERR(rtc);
655 goto exit_free_irq;
656 }
496 657
497 i2c_set_clientdata(client, rtc); 658 i2c_set_clientdata(client, rtc);
498 659
@@ -514,6 +675,9 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
514 675
515exit_unregister: 676exit_unregister:
516 rtc_device_unregister(rtc); 677 rtc_device_unregister(rtc);
678exit_free_irq:
679 if (client->irq)
680 free_irq(client->irq, client);
517 681
518 return rc; 682 return rc;
519} 683}
@@ -525,6 +689,8 @@ isl1208_remove(struct i2c_client *client)
525 689
526 sysfs_remove_group(&client->dev.kobj, &isl1208_rtc_sysfs_files); 690 sysfs_remove_group(&client->dev.kobj, &isl1208_rtc_sysfs_files);
527 rtc_device_unregister(rtc); 691 rtc_device_unregister(rtc);
692 if (client->irq)
693 free_irq(client->irq, client);
528 694
529 return 0; 695 return 0;
530} 696}
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
new file mode 100644
index 000000000000..2fc31aac3f4e
--- /dev/null
+++ b/drivers/rtc/rtc-tegra.c
@@ -0,0 +1,488 @@
1/*
2 * An RTC driver for the NVIDIA Tegra 200 series internal RTC.
3 *
4 * Copyright (c) 2010, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
20#include <linux/kernel.h>
21#include <linux/init.h>
22#include <linux/module.h>
23#include <linux/slab.h>
24#include <linux/irq.h>
25#include <linux/io.h>
26#include <linux/delay.h>
27#include <linux/rtc.h>
28#include <linux/platform_device.h>
29
30/* set to 1 = busy every eight 32kHz clocks during copy of sec+msec to AHB */
31#define TEGRA_RTC_REG_BUSY 0x004
32#define TEGRA_RTC_REG_SECONDS 0x008
33/* when msec is read, the seconds are buffered into shadow seconds. */
34#define TEGRA_RTC_REG_SHADOW_SECONDS 0x00c
35#define TEGRA_RTC_REG_MILLI_SECONDS 0x010
36#define TEGRA_RTC_REG_SECONDS_ALARM0 0x014
37#define TEGRA_RTC_REG_SECONDS_ALARM1 0x018
38#define TEGRA_RTC_REG_MILLI_SECONDS_ALARM0 0x01c
39#define TEGRA_RTC_REG_INTR_MASK 0x028
40/* write 1 bits to clear status bits */
41#define TEGRA_RTC_REG_INTR_STATUS 0x02c
42
43/* bits in INTR_MASK */
44#define TEGRA_RTC_INTR_MASK_MSEC_CDN_ALARM (1<<4)
45#define TEGRA_RTC_INTR_MASK_SEC_CDN_ALARM (1<<3)
46#define TEGRA_RTC_INTR_MASK_MSEC_ALARM (1<<2)
47#define TEGRA_RTC_INTR_MASK_SEC_ALARM1 (1<<1)
48#define TEGRA_RTC_INTR_MASK_SEC_ALARM0 (1<<0)
49
50/* bits in INTR_STATUS */
51#define TEGRA_RTC_INTR_STATUS_MSEC_CDN_ALARM (1<<4)
52#define TEGRA_RTC_INTR_STATUS_SEC_CDN_ALARM (1<<3)
53#define TEGRA_RTC_INTR_STATUS_MSEC_ALARM (1<<2)
54#define TEGRA_RTC_INTR_STATUS_SEC_ALARM1 (1<<1)
55#define TEGRA_RTC_INTR_STATUS_SEC_ALARM0 (1<<0)
56
57struct tegra_rtc_info {
58 struct platform_device *pdev;
59 struct rtc_device *rtc_dev;
60 void __iomem *rtc_base; /* NULL if not initialized. */
61 int tegra_rtc_irq; /* alarm and periodic irq */
62 spinlock_t tegra_rtc_lock;
63};
64
65/* RTC hardware is busy when it is updating its values over AHB once
66 * every eight 32kHz clocks (~250uS).
67 * outside of these updates the CPU is free to write.
68 * CPU is always free to read.
69 */
70static inline u32 tegra_rtc_check_busy(struct tegra_rtc_info *info)
71{
72 return readl(info->rtc_base + TEGRA_RTC_REG_BUSY) & 1;
73}
74
75/* Wait for hardware to be ready for writing.
76 * This function tries to maximize the amount of time before the next update.
77 * It does this by waiting for the RTC to become busy with its periodic update,
78 * then returning once the RTC first becomes not busy.
79 * This periodic update (where the seconds and milliseconds are copied to the
80 * AHB side) occurs every eight 32kHz clocks (~250uS).
81 * The behavior of this function allows us to make some assumptions without
82 * introducing a race, because 250uS is plenty of time to read/write a value.
83 */
84static int tegra_rtc_wait_while_busy(struct device *dev)
85{
86 struct tegra_rtc_info *info = dev_get_drvdata(dev);
87
88 int retries = 500; /* ~490 us is the worst case, ~250 us is best. */
89
90 /* first wait for the RTC to become busy. this is when it
91 * posts its updated seconds+msec registers to AHB side. */
92 while (tegra_rtc_check_busy(info)) {
93 if (!retries--)
94 goto retry_failed;
95 udelay(1);
96 }
97
98 /* now we have about 250 us to manipulate registers */
99 return 0;
100
101retry_failed:
102 dev_err(dev, "write failed:retry count exceeded.\n");
103 return -ETIMEDOUT;
104}
105
106static int tegra_rtc_read_time(struct device *dev, struct rtc_time *tm)
107{
108 struct tegra_rtc_info *info = dev_get_drvdata(dev);
109 unsigned long sec, msec;
110 unsigned long sl_irq_flags;
111
112 /* RTC hardware copies seconds to shadow seconds when a read
113 * of milliseconds occurs. use a lock to keep other threads out. */
114 spin_lock_irqsave(&info->tegra_rtc_lock, sl_irq_flags);
115
116 msec = readl(info->rtc_base + TEGRA_RTC_REG_MILLI_SECONDS);
117 sec = readl(info->rtc_base + TEGRA_RTC_REG_SHADOW_SECONDS);
118
119 spin_unlock_irqrestore(&info->tegra_rtc_lock, sl_irq_flags);
120
121 rtc_time_to_tm(sec, tm);
122
123 dev_vdbg(dev, "time read as %lu. %d/%d/%d %d:%02u:%02u\n",
124 sec,
125 tm->tm_mon + 1,
126 tm->tm_mday,
127 tm->tm_year + 1900,
128 tm->tm_hour,
129 tm->tm_min,
130 tm->tm_sec
131 );
132
133 return 0;
134}
135
136static int tegra_rtc_set_time(struct device *dev, struct rtc_time *tm)
137{
138 struct tegra_rtc_info *info = dev_get_drvdata(dev);
139 unsigned long sec;
140 int ret;
141
142 /* convert tm to seconds. */
143 ret = rtc_valid_tm(tm);
144 if (ret)
145 return ret;
146
147 rtc_tm_to_time(tm, &sec);
148
149 dev_vdbg(dev, "time set to %lu. %d/%d/%d %d:%02u:%02u\n",
150 sec,
151 tm->tm_mon+1,
152 tm->tm_mday,
153 tm->tm_year+1900,
154 tm->tm_hour,
155 tm->tm_min,
156 tm->tm_sec
157 );
158
159 /* seconds only written if wait succeeded. */
160 ret = tegra_rtc_wait_while_busy(dev);
161 if (!ret)
162 writel(sec, info->rtc_base + TEGRA_RTC_REG_SECONDS);
163
164 dev_vdbg(dev, "time read back as %d\n",
165 readl(info->rtc_base + TEGRA_RTC_REG_SECONDS));
166
167 return ret;
168}
169
170static int tegra_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
171{
172 struct tegra_rtc_info *info = dev_get_drvdata(dev);
173 unsigned long sec;
174 unsigned tmp;
175
176 sec = readl(info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0);
177
178 if (sec == 0) {
179 /* alarm is disabled. */
180 alarm->enabled = 0;
181 alarm->time.tm_mon = -1;
182 alarm->time.tm_mday = -1;
183 alarm->time.tm_year = -1;
184 alarm->time.tm_hour = -1;
185 alarm->time.tm_min = -1;
186 alarm->time.tm_sec = -1;
187 } else {
188 /* alarm is enabled. */
189 alarm->enabled = 1;
190 rtc_time_to_tm(sec, &alarm->time);
191 }
192
193 tmp = readl(info->rtc_base + TEGRA_RTC_REG_INTR_STATUS);
194 alarm->pending = (tmp & TEGRA_RTC_INTR_STATUS_SEC_ALARM0) != 0;
195
196 return 0;
197}
198
199static int tegra_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
200{
201 struct tegra_rtc_info *info = dev_get_drvdata(dev);
202 unsigned status;
203 unsigned long sl_irq_flags;
204
205 tegra_rtc_wait_while_busy(dev);
206 spin_lock_irqsave(&info->tegra_rtc_lock, sl_irq_flags);
207
208 /* read the original value, and OR in the flag. */
209 status = readl(info->rtc_base + TEGRA_RTC_REG_INTR_MASK);
210 if (enabled)
211 status |= TEGRA_RTC_INTR_MASK_SEC_ALARM0; /* set it */
212 else
213 status &= ~TEGRA_RTC_INTR_MASK_SEC_ALARM0; /* clear it */
214
215 writel(status, info->rtc_base + TEGRA_RTC_REG_INTR_MASK);
216
217 spin_unlock_irqrestore(&info->tegra_rtc_lock, sl_irq_flags);
218
219 return 0;
220}
221
222static int tegra_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
223{
224 struct tegra_rtc_info *info = dev_get_drvdata(dev);
225 unsigned long sec;
226
227 if (alarm->enabled)
228 rtc_tm_to_time(&alarm->time, &sec);
229 else
230 sec = 0;
231
232 tegra_rtc_wait_while_busy(dev);
233 writel(sec, info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0);
234 dev_vdbg(dev, "alarm read back as %d\n",
235 readl(info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0));
236
237 /* if successfully written and alarm is enabled ... */
238 if (sec) {
239 tegra_rtc_alarm_irq_enable(dev, 1);
240
241 dev_vdbg(dev, "alarm set as %lu. %d/%d/%d %d:%02u:%02u\n",
242 sec,
243 alarm->time.tm_mon+1,
244 alarm->time.tm_mday,
245 alarm->time.tm_year+1900,
246 alarm->time.tm_hour,
247 alarm->time.tm_min,
248 alarm->time.tm_sec);
249 } else {
250 /* disable alarm if 0 or write error. */
251 dev_vdbg(dev, "alarm disabled\n");
252 tegra_rtc_alarm_irq_enable(dev, 0);
253 }
254
255 return 0;
256}
257
258static int tegra_rtc_proc(struct device *dev, struct seq_file *seq)
259{
260 if (!dev || !dev->driver)
261 return 0;
262
263 return seq_printf(seq, "name\t\t: %s\n", dev_name(dev));
264}
265
266static irqreturn_t tegra_rtc_irq_handler(int irq, void *data)
267{
268 struct device *dev = data;
269 struct tegra_rtc_info *info = dev_get_drvdata(dev);
270 unsigned long events = 0;
271 unsigned status;
272 unsigned long sl_irq_flags;
273
274 status = readl(info->rtc_base + TEGRA_RTC_REG_INTR_STATUS);
275 if (status) {
276 /* clear the interrupt masks and status on any irq. */
277 tegra_rtc_wait_while_busy(dev);
278 spin_lock_irqsave(&info->tegra_rtc_lock, sl_irq_flags);
279 writel(0, info->rtc_base + TEGRA_RTC_REG_INTR_MASK);
280 writel(status, info->rtc_base + TEGRA_RTC_REG_INTR_STATUS);
281 spin_unlock_irqrestore(&info->tegra_rtc_lock, sl_irq_flags);
282 }
283
284 /* check if Alarm */
285 if ((status & TEGRA_RTC_INTR_STATUS_SEC_ALARM0))
286 events |= RTC_IRQF | RTC_AF;
287
288 /* check if Periodic */
289 if ((status & TEGRA_RTC_INTR_STATUS_SEC_CDN_ALARM))
290 events |= RTC_IRQF | RTC_PF;
291
292 rtc_update_irq(info->rtc_dev, 1, events);
293
294 return IRQ_HANDLED;
295}
296
297static struct rtc_class_ops tegra_rtc_ops = {
298 .read_time = tegra_rtc_read_time,
299 .set_time = tegra_rtc_set_time,
300 .read_alarm = tegra_rtc_read_alarm,
301 .set_alarm = tegra_rtc_set_alarm,
302 .proc = tegra_rtc_proc,
303 .alarm_irq_enable = tegra_rtc_alarm_irq_enable,
304};
305
306static int __devinit tegra_rtc_probe(struct platform_device *pdev)
307{
308 struct tegra_rtc_info *info;
309 struct resource *res;
310 int ret;
311
312 info = kzalloc(sizeof(struct tegra_rtc_info), GFP_KERNEL);
313 if (!info)
314 return -ENOMEM;
315
316 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
317 if (!res) {
318 dev_err(&pdev->dev,
319 "Unable to allocate resources for device.\n");
320 ret = -EBUSY;
321 goto err_free_info;
322 }
323
324 if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
325 dev_err(&pdev->dev,
326 "Unable to request mem region for device.\n");
327 ret = -EBUSY;
328 goto err_free_info;
329 }
330
331 info->tegra_rtc_irq = platform_get_irq(pdev, 0);
332 if (info->tegra_rtc_irq <= 0) {
333 ret = -EBUSY;
334 goto err_release_mem_region;
335 }
336
337 info->rtc_base = ioremap_nocache(res->start, resource_size(res));
338 if (!info->rtc_base) {
339 dev_err(&pdev->dev, "Unable to grab IOs for device.\n");
340 ret = -EBUSY;
341 goto err_release_mem_region;
342 }
343
344 /* set context info. */
345 info->pdev = pdev;
346 info->tegra_rtc_lock = __SPIN_LOCK_UNLOCKED(info->tegra_rtc_lock);
347
348 platform_set_drvdata(pdev, info);
349
350 /* clear out the hardware. */
351 writel(0, info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0);
352 writel(0xffffffff, info->rtc_base + TEGRA_RTC_REG_INTR_STATUS);
353 writel(0, info->rtc_base + TEGRA_RTC_REG_INTR_MASK);
354
355 device_init_wakeup(&pdev->dev, 1);
356
357 info->rtc_dev = rtc_device_register(
358 pdev->name, &pdev->dev, &tegra_rtc_ops, THIS_MODULE);
359 if (IS_ERR(info->rtc_dev)) {
360 ret = PTR_ERR(info->rtc_dev);
361 info->rtc_dev = NULL;
362 dev_err(&pdev->dev,
363 "Unable to register device (err=%d).\n",
364 ret);
365 goto err_iounmap;
366 }
367
368 ret = request_irq(info->tegra_rtc_irq, tegra_rtc_irq_handler,
369 IRQF_TRIGGER_HIGH, "rtc alarm", &pdev->dev);
370 if (ret) {
371 dev_err(&pdev->dev,
372 "Unable to request interrupt for device (err=%d).\n",
373 ret);
374 goto err_dev_unreg;
375 }
376
377 dev_notice(&pdev->dev, "Tegra internal Real Time Clock\n");
378
379 return 0;
380
381err_dev_unreg:
382 rtc_device_unregister(info->rtc_dev);
383err_iounmap:
384 iounmap(info->rtc_base);
385err_release_mem_region:
386 release_mem_region(res->start, resource_size(res));
387err_free_info:
388 kfree(info);
389
390 return ret;
391}
392
393static int __devexit tegra_rtc_remove(struct platform_device *pdev)
394{
395 struct tegra_rtc_info *info = platform_get_drvdata(pdev);
396 struct resource *res;
397
398 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
399 if (!res)
400 return -EBUSY;
401
402 free_irq(info->tegra_rtc_irq, &pdev->dev);
403 rtc_device_unregister(info->rtc_dev);
404 iounmap(info->rtc_base);
405 release_mem_region(res->start, resource_size(res));
406 kfree(info);
407
408 platform_set_drvdata(pdev, NULL);
409
410 return 0;
411}
412
413#ifdef CONFIG_PM
414static int tegra_rtc_suspend(struct platform_device *pdev, pm_message_t state)
415{
416 struct device *dev = &pdev->dev;
417 struct tegra_rtc_info *info = platform_get_drvdata(pdev);
418
419 tegra_rtc_wait_while_busy(dev);
420
421 /* only use ALARM0 as a wake source. */
422 writel(0xffffffff, info->rtc_base + TEGRA_RTC_REG_INTR_STATUS);
423 writel(TEGRA_RTC_INTR_STATUS_SEC_ALARM0,
424 info->rtc_base + TEGRA_RTC_REG_INTR_MASK);
425
426 dev_vdbg(dev, "alarm sec = %d\n",
427 readl(info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0));
428
429 dev_vdbg(dev, "Suspend (device_may_wakeup=%d) irq:%d\n",
430 device_may_wakeup(dev), info->tegra_rtc_irq);
431
432 /* leave the alarms on as a wake source. */
433 if (device_may_wakeup(dev))
434 enable_irq_wake(info->tegra_rtc_irq);
435
436 return 0;
437}
438
439static int tegra_rtc_resume(struct platform_device *pdev)
440{
441 struct device *dev = &pdev->dev;
442 struct tegra_rtc_info *info = platform_get_drvdata(pdev);
443
444 dev_vdbg(dev, "Resume (device_may_wakeup=%d)\n",
445 device_may_wakeup(dev));
446 /* alarms were left on as a wake source, turn them off. */
447 if (device_may_wakeup(dev))
448 disable_irq_wake(info->tegra_rtc_irq);
449
450 return 0;
451}
452#endif
453
454static void tegra_rtc_shutdown(struct platform_device *pdev)
455{
456 dev_vdbg(&pdev->dev, "disabling interrupts.\n");
457 tegra_rtc_alarm_irq_enable(&pdev->dev, 0);
458}
459
460MODULE_ALIAS("platform:tegra_rtc");
461static struct platform_driver tegra_rtc_driver = {
462 .remove = __devexit_p(tegra_rtc_remove),
463 .shutdown = tegra_rtc_shutdown,
464 .driver = {
465 .name = "tegra_rtc",
466 .owner = THIS_MODULE,
467 },
468#ifdef CONFIG_PM
469 .suspend = tegra_rtc_suspend,
470 .resume = tegra_rtc_resume,
471#endif
472};
473
474static int __init tegra_rtc_init(void)
475{
476 return platform_driver_probe(&tegra_rtc_driver, tegra_rtc_probe);
477}
478module_init(tegra_rtc_init);
479
480static void __exit tegra_rtc_exit(void)
481{
482 platform_driver_unregister(&tegra_rtc_driver);
483}
484module_exit(tegra_rtc_exit);
485
486MODULE_AUTHOR("Jon Mayo <jmayo@nvidia.com>");
487MODULE_DESCRIPTION("driver for Tegra internal RTC");
488MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3be5db5d6343..7ff61d76b4c5 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -597,6 +597,7 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
597 break; 597 break;
598 598
599 default: 599 default:
600 ret = BLKPREP_KILL;
600 goto out; 601 goto out;
601 } 602 }
602 603
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index b90c2cf3e247..750fe5045efa 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -574,6 +574,7 @@ static const struct backlight_ops dcon_bl_ops = {
574 574
575static struct backlight_properties dcon_bl_props = { 575static struct backlight_properties dcon_bl_props = {
576 .max_brightness = 15, 576 .max_brightness = 15,
577 .type = BACKLIGHT_RAW,
577 .power = FB_BLANK_UNBLANK, 578 .power = FB_BLANK_UNBLANK,
578}; 579};
579 580
diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c
index 6607a89ccb4b..25294462b8b6 100644
--- a/drivers/staging/samsung-laptop/samsung-laptop.c
+++ b/drivers/staging/samsung-laptop/samsung-laptop.c
@@ -781,6 +781,7 @@ static int __init samsung_init(void)
781 781
782 /* create a backlight device to talk to this one */ 782 /* create a backlight device to talk to this one */
783 memset(&props, 0, sizeof(struct backlight_properties)); 783 memset(&props, 0, sizeof(struct backlight_properties));
784 props.type = BACKLIGHT_PLATFORM;
784 props.max_brightness = sabi_config->max_brightness; 785 props.max_brightness = sabi_config->max_brightness;
785 backlight_device = backlight_device_register("samsung", &sdev->dev, 786 backlight_device = backlight_device_register("samsung", &sdev->dev,
786 NULL, &backlight_ops, 787 NULL, &backlight_ops,
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index d8210ca00720..b9451219528b 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -322,7 +322,7 @@ void tty_schedule_flip(struct tty_struct *tty)
322 if (tty->buf.tail != NULL) 322 if (tty->buf.tail != NULL)
323 tty->buf.tail->commit = tty->buf.tail->used; 323 tty->buf.tail->commit = tty->buf.tail->used;
324 spin_unlock_irqrestore(&tty->buf.lock, flags); 324 spin_unlock_irqrestore(&tty->buf.lock, flags);
325 schedule_delayed_work(&tty->buf.work, 1); 325 schedule_work(&tty->buf.work);
326} 326}
327EXPORT_SYMBOL(tty_schedule_flip); 327EXPORT_SYMBOL(tty_schedule_flip);
328 328
@@ -402,7 +402,7 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
402static void flush_to_ldisc(struct work_struct *work) 402static void flush_to_ldisc(struct work_struct *work)
403{ 403{
404 struct tty_struct *tty = 404 struct tty_struct *tty =
405 container_of(work, struct tty_struct, buf.work.work); 405 container_of(work, struct tty_struct, buf.work);
406 unsigned long flags; 406 unsigned long flags;
407 struct tty_ldisc *disc; 407 struct tty_ldisc *disc;
408 408
@@ -443,7 +443,7 @@ static void flush_to_ldisc(struct work_struct *work)
443 if (test_bit(TTY_FLUSHPENDING, &tty->flags)) 443 if (test_bit(TTY_FLUSHPENDING, &tty->flags))
444 break; 444 break;
445 if (!tty->receive_room || seen_tail) { 445 if (!tty->receive_room || seen_tail) {
446 schedule_delayed_work(&tty->buf.work, 1); 446 schedule_work(&tty->buf.work);
447 break; 447 break;
448 } 448 }
449 if (count > tty->receive_room) 449 if (count > tty->receive_room)
@@ -481,7 +481,7 @@ static void flush_to_ldisc(struct work_struct *work)
481 */ 481 */
482void tty_flush_to_ldisc(struct tty_struct *tty) 482void tty_flush_to_ldisc(struct tty_struct *tty)
483{ 483{
484 flush_delayed_work(&tty->buf.work); 484 flush_work(&tty->buf.work);
485} 485}
486 486
487/** 487/**
@@ -506,9 +506,9 @@ void tty_flip_buffer_push(struct tty_struct *tty)
506 spin_unlock_irqrestore(&tty->buf.lock, flags); 506 spin_unlock_irqrestore(&tty->buf.lock, flags);
507 507
508 if (tty->low_latency) 508 if (tty->low_latency)
509 flush_to_ldisc(&tty->buf.work.work); 509 flush_to_ldisc(&tty->buf.work);
510 else 510 else
511 schedule_delayed_work(&tty->buf.work, 1); 511 schedule_work(&tty->buf.work);
512} 512}
513EXPORT_SYMBOL(tty_flip_buffer_push); 513EXPORT_SYMBOL(tty_flip_buffer_push);
514 514
@@ -529,6 +529,6 @@ void tty_buffer_init(struct tty_struct *tty)
529 tty->buf.tail = NULL; 529 tty->buf.tail = NULL;
530 tty->buf.free = NULL; 530 tty->buf.free = NULL;
531 tty->buf.memory_used = 0; 531 tty->buf.memory_used = 0;
532 INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc); 532 INIT_WORK(&tty->buf.work, flush_to_ldisc);
533} 533}
534 534
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 0fc564a97706..e19e13647116 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -529,7 +529,7 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
529static int tty_ldisc_halt(struct tty_struct *tty) 529static int tty_ldisc_halt(struct tty_struct *tty)
530{ 530{
531 clear_bit(TTY_LDISC, &tty->flags); 531 clear_bit(TTY_LDISC, &tty->flags);
532 return cancel_delayed_work_sync(&tty->buf.work); 532 return cancel_work_sync(&tty->buf.work);
533} 533}
534 534
535/** 535/**
@@ -542,7 +542,7 @@ static void tty_ldisc_flush_works(struct tty_struct *tty)
542{ 542{
543 flush_work_sync(&tty->hangup_work); 543 flush_work_sync(&tty->hangup_work);
544 flush_work_sync(&tty->SAK_work); 544 flush_work_sync(&tty->SAK_work);
545 flush_delayed_work_sync(&tty->buf.work); 545 flush_work_sync(&tty->buf.work);
546} 546}
547 547
548/** 548/**
@@ -722,9 +722,9 @@ enable:
722 /* Restart the work queue in case no characters kick it off. Safe if 722 /* Restart the work queue in case no characters kick it off. Safe if
723 already running */ 723 already running */
724 if (work) 724 if (work)
725 schedule_delayed_work(&tty->buf.work, 1); 725 schedule_work(&tty->buf.work);
726 if (o_work) 726 if (o_work)
727 schedule_delayed_work(&o_tty->buf.work, 1); 727 schedule_work(&o_tty->buf.work);
728 mutex_unlock(&tty->ldisc_mutex); 728 mutex_unlock(&tty->ldisc_mutex);
729 tty_unlock(); 729 tty_unlock();
730 return retval; 730 return retval;
@@ -830,12 +830,12 @@ void tty_ldisc_hangup(struct tty_struct *tty)
830 830
831 /* 831 /*
832 * this is like tty_ldisc_halt, but we need to give up 832 * this is like tty_ldisc_halt, but we need to give up
833 * the BTM before calling cancel_delayed_work_sync, 833 * the BTM before calling cancel_work_sync, which may
834 * which may need to wait for another function taking the BTM 834 * need to wait for another function taking the BTM
835 */ 835 */
836 clear_bit(TTY_LDISC, &tty->flags); 836 clear_bit(TTY_LDISC, &tty->flags);
837 tty_unlock(); 837 tty_unlock();
838 cancel_delayed_work_sync(&tty->buf.work); 838 cancel_work_sync(&tty->buf.work);
839 mutex_unlock(&tty->ldisc_mutex); 839 mutex_unlock(&tty->ldisc_mutex);
840 840
841 tty_lock(); 841 tty_lock();
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 1fa6ce3e4a23..68ab460a735c 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -282,6 +282,7 @@ static int appledisplay_probe(struct usb_interface *iface,
282 snprintf(bl_name, sizeof(bl_name), "appledisplay%d", 282 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
283 atomic_inc_return(&count_displays) - 1); 283 atomic_inc_return(&count_displays) - 1);
284 memset(&props, 0, sizeof(struct backlight_properties)); 284 memset(&props, 0, sizeof(struct backlight_properties));
285 props.type = BACKLIGHT_RAW;
285 props.max_brightness = 0xff; 286 props.max_brightness = 0xff;
286 pdata->bd = backlight_device_register(bl_name, NULL, pdata, 287 pdata->bd = backlight_device_register(bl_name, NULL, pdata,
287 &appledisplay_bl_data, &props); 288 &appledisplay_bl_data, &props);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f616cefc95ba..2f7c76a85e53 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -60,6 +60,7 @@ static int move_iovec_hdr(struct iovec *from, struct iovec *to,
60{ 60{
61 int seg = 0; 61 int seg = 0;
62 size_t size; 62 size_t size;
63
63 while (len && seg < iov_count) { 64 while (len && seg < iov_count) {
64 size = min(from->iov_len, len); 65 size = min(from->iov_len, len);
65 to->iov_base = from->iov_base; 66 to->iov_base = from->iov_base;
@@ -79,6 +80,7 @@ static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
79{ 80{
80 int seg = 0; 81 int seg = 0;
81 size_t size; 82 size_t size;
83
82 while (len && seg < iovcount) { 84 while (len && seg < iovcount) {
83 size = min(from->iov_len, len); 85 size = min(from->iov_len, len);
84 to->iov_base = from->iov_base; 86 to->iov_base = from->iov_base;
@@ -211,12 +213,13 @@ static int peek_head_len(struct sock *sk)
211{ 213{
212 struct sk_buff *head; 214 struct sk_buff *head;
213 int len = 0; 215 int len = 0;
216 unsigned long flags;
214 217
215 lock_sock(sk); 218 spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
216 head = skb_peek(&sk->sk_receive_queue); 219 head = skb_peek(&sk->sk_receive_queue);
217 if (head) 220 if (likely(head))
218 len = head->len; 221 len = head->len;
219 release_sock(sk); 222 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
220 return len; 223 return len;
221} 224}
222 225
@@ -227,6 +230,7 @@ static int peek_head_len(struct sock *sk)
227 * @iovcount - returned count of io vectors we fill 230 * @iovcount - returned count of io vectors we fill
228 * @log - vhost log 231 * @log - vhost log
229 * @log_num - log offset 232 * @log_num - log offset
233 * @quota - headcount quota, 1 for big buffer
230 * returns number of buffer heads allocated, negative on error 234 * returns number of buffer heads allocated, negative on error
231 */ 235 */
232static int get_rx_bufs(struct vhost_virtqueue *vq, 236static int get_rx_bufs(struct vhost_virtqueue *vq,
@@ -234,7 +238,8 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
234 int datalen, 238 int datalen,
235 unsigned *iovcount, 239 unsigned *iovcount,
236 struct vhost_log *log, 240 struct vhost_log *log,
237 unsigned *log_num) 241 unsigned *log_num,
242 unsigned int quota)
238{ 243{
239 unsigned int out, in; 244 unsigned int out, in;
240 int seg = 0; 245 int seg = 0;
@@ -242,7 +247,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
242 unsigned d; 247 unsigned d;
243 int r, nlogs = 0; 248 int r, nlogs = 0;
244 249
245 while (datalen > 0) { 250 while (datalen > 0 && headcount < quota) {
246 if (unlikely(seg >= UIO_MAXIOV)) { 251 if (unlikely(seg >= UIO_MAXIOV)) {
247 r = -ENOBUFS; 252 r = -ENOBUFS;
248 goto err; 253 goto err;
@@ -282,117 +287,7 @@ err:
282 287
283/* Expects to be always run from workqueue - which acts as 288/* Expects to be always run from workqueue - which acts as
284 * read-size critical section for our kind of RCU. */ 289 * read-size critical section for our kind of RCU. */
285static void handle_rx_big(struct vhost_net *net) 290static void handle_rx(struct vhost_net *net)
286{
287 struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
288 unsigned out, in, log, s;
289 int head;
290 struct vhost_log *vq_log;
291 struct msghdr msg = {
292 .msg_name = NULL,
293 .msg_namelen = 0,
294 .msg_control = NULL, /* FIXME: get and handle RX aux data. */
295 .msg_controllen = 0,
296 .msg_iov = vq->iov,
297 .msg_flags = MSG_DONTWAIT,
298 };
299
300 struct virtio_net_hdr hdr = {
301 .flags = 0,
302 .gso_type = VIRTIO_NET_HDR_GSO_NONE
303 };
304
305 size_t len, total_len = 0;
306 int err;
307 size_t hdr_size;
308 /* TODO: check that we are running from vhost_worker? */
309 struct socket *sock = rcu_dereference_check(vq->private_data, 1);
310 if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
311 return;
312
313 mutex_lock(&vq->mutex);
314 vhost_disable_notify(vq);
315 hdr_size = vq->vhost_hlen;
316
317 vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
318 vq->log : NULL;
319
320 for (;;) {
321 head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
322 ARRAY_SIZE(vq->iov),
323 &out, &in,
324 vq_log, &log);
325 /* On error, stop handling until the next kick. */
326 if (unlikely(head < 0))
327 break;
328 /* OK, now we need to know about added descriptors. */
329 if (head == vq->num) {
330 if (unlikely(vhost_enable_notify(vq))) {
331 /* They have slipped one in as we were
332 * doing that: check again. */
333 vhost_disable_notify(vq);
334 continue;
335 }
336 /* Nothing new? Wait for eventfd to tell us
337 * they refilled. */
338 break;
339 }
340 /* We don't need to be notified again. */
341 if (out) {
342 vq_err(vq, "Unexpected descriptor format for RX: "
343 "out %d, int %d\n",
344 out, in);
345 break;
346 }
347 /* Skip header. TODO: support TSO/mergeable rx buffers. */
348 s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, in);
349 msg.msg_iovlen = in;
350 len = iov_length(vq->iov, in);
351 /* Sanity check */
352 if (!len) {
353 vq_err(vq, "Unexpected header len for RX: "
354 "%zd expected %zd\n",
355 iov_length(vq->hdr, s), hdr_size);
356 break;
357 }
358 err = sock->ops->recvmsg(NULL, sock, &msg,
359 len, MSG_DONTWAIT | MSG_TRUNC);
360 /* TODO: Check specific error and bomb out unless EAGAIN? */
361 if (err < 0) {
362 vhost_discard_vq_desc(vq, 1);
363 break;
364 }
365 /* TODO: Should check and handle checksum. */
366 if (err > len) {
367 pr_debug("Discarded truncated rx packet: "
368 " len %d > %zd\n", err, len);
369 vhost_discard_vq_desc(vq, 1);
370 continue;
371 }
372 len = err;
373 err = memcpy_toiovec(vq->hdr, (unsigned char *)&hdr, hdr_size);
374 if (err) {
375 vq_err(vq, "Unable to write vnet_hdr at addr %p: %d\n",
376 vq->iov->iov_base, err);
377 break;
378 }
379 len += hdr_size;
380 vhost_add_used_and_signal(&net->dev, vq, head, len);
381 if (unlikely(vq_log))
382 vhost_log_write(vq, vq_log, log, len);
383 total_len += len;
384 if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
385 vhost_poll_queue(&vq->poll);
386 break;
387 }
388 }
389
390 mutex_unlock(&vq->mutex);
391}
392
393/* Expects to be always run from workqueue - which acts as
394 * read-size critical section for our kind of RCU. */
395static void handle_rx_mergeable(struct vhost_net *net)
396{ 291{
397 struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX]; 292 struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
398 unsigned uninitialized_var(in), log; 293 unsigned uninitialized_var(in), log;
@@ -405,19 +300,18 @@ static void handle_rx_mergeable(struct vhost_net *net)
405 .msg_iov = vq->iov, 300 .msg_iov = vq->iov,
406 .msg_flags = MSG_DONTWAIT, 301 .msg_flags = MSG_DONTWAIT,
407 }; 302 };
408
409 struct virtio_net_hdr_mrg_rxbuf hdr = { 303 struct virtio_net_hdr_mrg_rxbuf hdr = {
410 .hdr.flags = 0, 304 .hdr.flags = 0,
411 .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE 305 .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
412 }; 306 };
413
414 size_t total_len = 0; 307 size_t total_len = 0;
415 int err, headcount; 308 int err, headcount, mergeable;
416 size_t vhost_hlen, sock_hlen; 309 size_t vhost_hlen, sock_hlen;
417 size_t vhost_len, sock_len; 310 size_t vhost_len, sock_len;
418 /* TODO: check that we are running from vhost_worker? */ 311 /* TODO: check that we are running from vhost_worker? */
419 struct socket *sock = rcu_dereference_check(vq->private_data, 1); 312 struct socket *sock = rcu_dereference_check(vq->private_data, 1);
420 if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue)) 313
314 if (!sock)
421 return; 315 return;
422 316
423 mutex_lock(&vq->mutex); 317 mutex_lock(&vq->mutex);
@@ -427,12 +321,14 @@ static void handle_rx_mergeable(struct vhost_net *net)
427 321
428 vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ? 322 vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
429 vq->log : NULL; 323 vq->log : NULL;
324 mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);
430 325
431 while ((sock_len = peek_head_len(sock->sk))) { 326 while ((sock_len = peek_head_len(sock->sk))) {
432 sock_len += sock_hlen; 327 sock_len += sock_hlen;
433 vhost_len = sock_len + vhost_hlen; 328 vhost_len = sock_len + vhost_hlen;
434 headcount = get_rx_bufs(vq, vq->heads, vhost_len, 329 headcount = get_rx_bufs(vq, vq->heads, vhost_len,
435 &in, vq_log, &log); 330 &in, vq_log, &log,
331 likely(mergeable) ? UIO_MAXIOV : 1);
436 /* On error, stop handling until the next kick. */ 332 /* On error, stop handling until the next kick. */
437 if (unlikely(headcount < 0)) 333 if (unlikely(headcount < 0))
438 break; 334 break;
@@ -476,7 +372,7 @@ static void handle_rx_mergeable(struct vhost_net *net)
476 break; 372 break;
477 } 373 }
478 /* TODO: Should check and handle checksum. */ 374 /* TODO: Should check and handle checksum. */
479 if (vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF) && 375 if (likely(mergeable) &&
480 memcpy_toiovecend(vq->hdr, (unsigned char *)&headcount, 376 memcpy_toiovecend(vq->hdr, (unsigned char *)&headcount,
481 offsetof(typeof(hdr), num_buffers), 377 offsetof(typeof(hdr), num_buffers),
482 sizeof hdr.num_buffers)) { 378 sizeof hdr.num_buffers)) {
@@ -498,14 +394,6 @@ static void handle_rx_mergeable(struct vhost_net *net)
498 mutex_unlock(&vq->mutex); 394 mutex_unlock(&vq->mutex);
499} 395}
500 396
501static void handle_rx(struct vhost_net *net)
502{
503 if (vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF))
504 handle_rx_mergeable(net);
505 else
506 handle_rx_big(net);
507}
508
509static void handle_tx_kick(struct vhost_work *work) 397static void handle_tx_kick(struct vhost_work *work)
510{ 398{
511 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, 399 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
@@ -654,6 +542,7 @@ static struct socket *get_raw_socket(int fd)
654 } uaddr; 542 } uaddr;
655 int uaddr_len = sizeof uaddr, r; 543 int uaddr_len = sizeof uaddr, r;
656 struct socket *sock = sockfd_lookup(fd, &r); 544 struct socket *sock = sockfd_lookup(fd, &r);
545
657 if (!sock) 546 if (!sock)
658 return ERR_PTR(-ENOTSOCK); 547 return ERR_PTR(-ENOTSOCK);
659 548
@@ -682,6 +571,7 @@ static struct socket *get_tap_socket(int fd)
682{ 571{
683 struct file *file = fget(fd); 572 struct file *file = fget(fd);
684 struct socket *sock; 573 struct socket *sock;
574
685 if (!file) 575 if (!file)
686 return ERR_PTR(-EBADF); 576 return ERR_PTR(-EBADF);
687 sock = tun_get_socket(file); 577 sock = tun_get_socket(file);
@@ -696,6 +586,7 @@ static struct socket *get_tap_socket(int fd)
696static struct socket *get_socket(int fd) 586static struct socket *get_socket(int fd)
697{ 587{
698 struct socket *sock; 588 struct socket *sock;
589
699 /* special case to disable backend */ 590 /* special case to disable backend */
700 if (fd == -1) 591 if (fd == -1)
701 return NULL; 592 return NULL;
@@ -741,9 +632,9 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
741 oldsock = rcu_dereference_protected(vq->private_data, 632 oldsock = rcu_dereference_protected(vq->private_data,
742 lockdep_is_held(&vq->mutex)); 633 lockdep_is_held(&vq->mutex));
743 if (sock != oldsock) { 634 if (sock != oldsock) {
744 vhost_net_disable_vq(n, vq); 635 vhost_net_disable_vq(n, vq);
745 rcu_assign_pointer(vq->private_data, sock); 636 rcu_assign_pointer(vq->private_data, sock);
746 vhost_net_enable_vq(n, vq); 637 vhost_net_enable_vq(n, vq);
747 } 638 }
748 639
749 mutex_unlock(&vq->mutex); 640 mutex_unlock(&vq->mutex);
@@ -768,6 +659,7 @@ static long vhost_net_reset_owner(struct vhost_net *n)
768 struct socket *tx_sock = NULL; 659 struct socket *tx_sock = NULL;
769 struct socket *rx_sock = NULL; 660 struct socket *rx_sock = NULL;
770 long err; 661 long err;
662
771 mutex_lock(&n->dev.mutex); 663 mutex_lock(&n->dev.mutex);
772 err = vhost_dev_check_owner(&n->dev); 664 err = vhost_dev_check_owner(&n->dev);
773 if (err) 665 if (err)
@@ -829,6 +721,7 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
829 struct vhost_vring_file backend; 721 struct vhost_vring_file backend;
830 u64 features; 722 u64 features;
831 int r; 723 int r;
724
832 switch (ioctl) { 725 switch (ioctl) {
833 case VHOST_NET_SET_BACKEND: 726 case VHOST_NET_SET_BACKEND:
834 if (copy_from_user(&backend, argp, sizeof backend)) 727 if (copy_from_user(&backend, argp, sizeof backend))
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index ade0568c07a4..2ab291241635 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -41,8 +41,8 @@ static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
41 poll_table *pt) 41 poll_table *pt)
42{ 42{
43 struct vhost_poll *poll; 43 struct vhost_poll *poll;
44 poll = container_of(pt, struct vhost_poll, table);
45 44
45 poll = container_of(pt, struct vhost_poll, table);
46 poll->wqh = wqh; 46 poll->wqh = wqh;
47 add_wait_queue(wqh, &poll->wait); 47 add_wait_queue(wqh, &poll->wait);
48} 48}
@@ -85,6 +85,7 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
85void vhost_poll_start(struct vhost_poll *poll, struct file *file) 85void vhost_poll_start(struct vhost_poll *poll, struct file *file)
86{ 86{
87 unsigned long mask; 87 unsigned long mask;
88
88 mask = file->f_op->poll(file, &poll->table); 89 mask = file->f_op->poll(file, &poll->table);
89 if (mask) 90 if (mask)
90 vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); 91 vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
@@ -101,6 +102,7 @@ static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
101 unsigned seq) 102 unsigned seq)
102{ 103{
103 int left; 104 int left;
105
104 spin_lock_irq(&dev->work_lock); 106 spin_lock_irq(&dev->work_lock);
105 left = seq - work->done_seq; 107 left = seq - work->done_seq;
106 spin_unlock_irq(&dev->work_lock); 108 spin_unlock_irq(&dev->work_lock);
@@ -222,6 +224,7 @@ static int vhost_worker(void *data)
222static long vhost_dev_alloc_iovecs(struct vhost_dev *dev) 224static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
223{ 225{
224 int i; 226 int i;
227
225 for (i = 0; i < dev->nvqs; ++i) { 228 for (i = 0; i < dev->nvqs; ++i) {
226 dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect * 229 dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect *
227 UIO_MAXIOV, GFP_KERNEL); 230 UIO_MAXIOV, GFP_KERNEL);
@@ -235,6 +238,7 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
235 goto err_nomem; 238 goto err_nomem;
236 } 239 }
237 return 0; 240 return 0;
241
238err_nomem: 242err_nomem:
239 for (; i >= 0; --i) { 243 for (; i >= 0; --i) {
240 kfree(dev->vqs[i].indirect); 244 kfree(dev->vqs[i].indirect);
@@ -247,6 +251,7 @@ err_nomem:
247static void vhost_dev_free_iovecs(struct vhost_dev *dev) 251static void vhost_dev_free_iovecs(struct vhost_dev *dev)
248{ 252{
249 int i; 253 int i;
254
250 for (i = 0; i < dev->nvqs; ++i) { 255 for (i = 0; i < dev->nvqs; ++i) {
251 kfree(dev->vqs[i].indirect); 256 kfree(dev->vqs[i].indirect);
252 dev->vqs[i].indirect = NULL; 257 dev->vqs[i].indirect = NULL;
@@ -296,26 +301,28 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
296} 301}
297 302
298struct vhost_attach_cgroups_struct { 303struct vhost_attach_cgroups_struct {
299 struct vhost_work work; 304 struct vhost_work work;
300 struct task_struct *owner; 305 struct task_struct *owner;
301 int ret; 306 int ret;
302}; 307};
303 308
304static void vhost_attach_cgroups_work(struct vhost_work *work) 309static void vhost_attach_cgroups_work(struct vhost_work *work)
305{ 310{
306 struct vhost_attach_cgroups_struct *s; 311 struct vhost_attach_cgroups_struct *s;
307 s = container_of(work, struct vhost_attach_cgroups_struct, work); 312
308 s->ret = cgroup_attach_task_all(s->owner, current); 313 s = container_of(work, struct vhost_attach_cgroups_struct, work);
314 s->ret = cgroup_attach_task_all(s->owner, current);
309} 315}
310 316
311static int vhost_attach_cgroups(struct vhost_dev *dev) 317static int vhost_attach_cgroups(struct vhost_dev *dev)
312{ 318{
313 struct vhost_attach_cgroups_struct attach; 319 struct vhost_attach_cgroups_struct attach;
314 attach.owner = current; 320
315 vhost_work_init(&attach.work, vhost_attach_cgroups_work); 321 attach.owner = current;
316 vhost_work_queue(dev, &attach.work); 322 vhost_work_init(&attach.work, vhost_attach_cgroups_work);
317 vhost_work_flush(dev, &attach.work); 323 vhost_work_queue(dev, &attach.work);
318 return attach.ret; 324 vhost_work_flush(dev, &attach.work);
325 return attach.ret;
319} 326}
320 327
321/* Caller should have device mutex */ 328/* Caller should have device mutex */
@@ -323,11 +330,13 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
323{ 330{
324 struct task_struct *worker; 331 struct task_struct *worker;
325 int err; 332 int err;
333
326 /* Is there an owner already? */ 334 /* Is there an owner already? */
327 if (dev->mm) { 335 if (dev->mm) {
328 err = -EBUSY; 336 err = -EBUSY;
329 goto err_mm; 337 goto err_mm;
330 } 338 }
339
331 /* No owner, become one */ 340 /* No owner, become one */
332 dev->mm = get_task_mm(current); 341 dev->mm = get_task_mm(current);
333 worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid); 342 worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
@@ -380,6 +389,7 @@ long vhost_dev_reset_owner(struct vhost_dev *dev)
380void vhost_dev_cleanup(struct vhost_dev *dev) 389void vhost_dev_cleanup(struct vhost_dev *dev)
381{ 390{
382 int i; 391 int i;
392
383 for (i = 0; i < dev->nvqs; ++i) { 393 for (i = 0; i < dev->nvqs; ++i) {
384 if (dev->vqs[i].kick && dev->vqs[i].handle_kick) { 394 if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
385 vhost_poll_stop(&dev->vqs[i].poll); 395 vhost_poll_stop(&dev->vqs[i].poll);
@@ -421,6 +431,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
421static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz) 431static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
422{ 432{
423 u64 a = addr / VHOST_PAGE_SIZE / 8; 433 u64 a = addr / VHOST_PAGE_SIZE / 8;
434
424 /* Make sure 64 bit math will not overflow. */ 435 /* Make sure 64 bit math will not overflow. */
425 if (a > ULONG_MAX - (unsigned long)log_base || 436 if (a > ULONG_MAX - (unsigned long)log_base ||
426 a + (unsigned long)log_base > ULONG_MAX) 437 a + (unsigned long)log_base > ULONG_MAX)
@@ -461,6 +472,7 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
461 int log_all) 472 int log_all)
462{ 473{
463 int i; 474 int i;
475
464 for (i = 0; i < d->nvqs; ++i) { 476 for (i = 0; i < d->nvqs; ++i) {
465 int ok; 477 int ok;
466 mutex_lock(&d->vqs[i].mutex); 478 mutex_lock(&d->vqs[i].mutex);
@@ -527,6 +539,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
527{ 539{
528 struct vhost_memory mem, *newmem, *oldmem; 540 struct vhost_memory mem, *newmem, *oldmem;
529 unsigned long size = offsetof(struct vhost_memory, regions); 541 unsigned long size = offsetof(struct vhost_memory, regions);
542
530 if (copy_from_user(&mem, m, size)) 543 if (copy_from_user(&mem, m, size))
531 return -EFAULT; 544 return -EFAULT;
532 if (mem.padding) 545 if (mem.padding)
@@ -544,7 +557,8 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
544 return -EFAULT; 557 return -EFAULT;
545 } 558 }
546 559
547 if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) { 560 if (!memory_access_ok(d, newmem,
561 vhost_has_feature(d, VHOST_F_LOG_ALL))) {
548 kfree(newmem); 562 kfree(newmem);
549 return -EFAULT; 563 return -EFAULT;
550 } 564 }
@@ -560,6 +574,7 @@ static int init_used(struct vhost_virtqueue *vq,
560 struct vring_used __user *used) 574 struct vring_used __user *used)
561{ 575{
562 int r = put_user(vq->used_flags, &used->flags); 576 int r = put_user(vq->used_flags, &used->flags);
577
563 if (r) 578 if (r)
564 return r; 579 return r;
565 return get_user(vq->last_used_idx, &used->idx); 580 return get_user(vq->last_used_idx, &used->idx);
@@ -849,6 +864,7 @@ static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
849{ 864{
850 struct vhost_memory_region *reg; 865 struct vhost_memory_region *reg;
851 int i; 866 int i;
867
852 /* linear search is not brilliant, but we really have on the order of 6 868 /* linear search is not brilliant, but we really have on the order of 6
853 * regions in practice */ 869 * regions in practice */
854 for (i = 0; i < mem->nregions; ++i) { 870 for (i = 0; i < mem->nregions; ++i) {
@@ -871,6 +887,7 @@ static int set_bit_to_user(int nr, void __user *addr)
871 void *base; 887 void *base;
872 int bit = nr + (log % PAGE_SIZE) * 8; 888 int bit = nr + (log % PAGE_SIZE) * 8;
873 int r; 889 int r;
890
874 r = get_user_pages_fast(log, 1, 1, &page); 891 r = get_user_pages_fast(log, 1, 1, &page);
875 if (r < 0) 892 if (r < 0)
876 return r; 893 return r;
@@ -888,6 +905,7 @@ static int log_write(void __user *log_base,
888{ 905{
889 u64 write_page = write_address / VHOST_PAGE_SIZE; 906 u64 write_page = write_address / VHOST_PAGE_SIZE;
890 int r; 907 int r;
908
891 if (!write_length) 909 if (!write_length)
892 return 0; 910 return 0;
893 write_length += write_address % VHOST_PAGE_SIZE; 911 write_length += write_address % VHOST_PAGE_SIZE;
@@ -1037,8 +1055,8 @@ static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
1037 i, count); 1055 i, count);
1038 return -EINVAL; 1056 return -EINVAL;
1039 } 1057 }
1040 if (unlikely(memcpy_fromiovec((unsigned char *)&desc, vq->indirect, 1058 if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
1041 sizeof desc))) { 1059 vq->indirect, sizeof desc))) {
1042 vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n", 1060 vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
1043 i, (size_t)indirect->addr + i * sizeof desc); 1061 i, (size_t)indirect->addr + i * sizeof desc);
1044 return -EINVAL; 1062 return -EINVAL;
@@ -1153,7 +1171,7 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
1153 i, vq->num, head); 1171 i, vq->num, head);
1154 return -EINVAL; 1172 return -EINVAL;
1155 } 1173 }
1156 ret = copy_from_user(&desc, vq->desc + i, sizeof desc); 1174 ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
1157 if (unlikely(ret)) { 1175 if (unlikely(ret)) {
1158 vq_err(vq, "Failed to get descriptor: idx %d addr %p\n", 1176 vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
1159 i, vq->desc + i); 1177 i, vq->desc + i);
@@ -1317,6 +1335,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
1317void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) 1335void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1318{ 1336{
1319 __u16 flags; 1337 __u16 flags;
1338
1320 /* Flush out used index updates. This is paired 1339 /* Flush out used index updates. This is paired
1321 * with the barrier that the Guest executes when enabling 1340 * with the barrier that the Guest executes when enabling
1322 * interrupts. */ 1341 * interrupts. */
@@ -1361,6 +1380,7 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
1361{ 1380{
1362 u16 avail_idx; 1381 u16 avail_idx;
1363 int r; 1382 int r;
1383
1364 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) 1384 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
1365 return false; 1385 return false;
1366 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY; 1386 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
@@ -1387,6 +1407,7 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
1387void vhost_disable_notify(struct vhost_virtqueue *vq) 1407void vhost_disable_notify(struct vhost_virtqueue *vq)
1388{ 1408{
1389 int r; 1409 int r;
1410
1390 if (vq->used_flags & VRING_USED_F_NO_NOTIFY) 1411 if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
1391 return; 1412 return;
1392 vq->used_flags |= VRING_USED_F_NO_NOTIFY; 1413 vq->used_flags |= VRING_USED_F_NO_NOTIFY;
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index bac163450216..4b4e8dadd6b2 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -127,6 +127,7 @@ static void init_backlight(struct atmel_lcdfb_info *sinfo)
127 return; 127 return;
128 128
129 memset(&props, 0, sizeof(struct backlight_properties)); 129 memset(&props, 0, sizeof(struct backlight_properties));
130 props.type = BACKLIGHT_RAW;
130 props.max_brightness = 0xff; 131 props.max_brightness = 0xff;
131 bl = backlight_device_register("backlight", &sinfo->pdev->dev, sinfo, 132 bl = backlight_device_register("backlight", &sinfo->pdev->dev, sinfo,
132 &atmel_lcdc_bl_ops, &props); 133 &atmel_lcdc_bl_ops, &props);
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 4cb6a576c567..b0b2ac335347 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -1818,6 +1818,7 @@ static void aty128_bl_init(struct aty128fb_par *par)
1818 snprintf(name, sizeof(name), "aty128bl%d", info->node); 1818 snprintf(name, sizeof(name), "aty128bl%d", info->node);
1819 1819
1820 memset(&props, 0, sizeof(struct backlight_properties)); 1820 memset(&props, 0, sizeof(struct backlight_properties));
1821 props.type = BACKLIGHT_RAW;
1821 props.max_brightness = FB_BACKLIGHT_LEVELS - 1; 1822 props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
1822 bd = backlight_device_register(name, info->dev, par, &aty128_bl_data, 1823 bd = backlight_device_register(name, info->dev, par, &aty128_bl_data,
1823 &props); 1824 &props);
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 94e293fce1d2..d437b3daf1f5 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -2241,6 +2241,7 @@ static void aty_bl_init(struct atyfb_par *par)
2241 snprintf(name, sizeof(name), "atybl%d", info->node); 2241 snprintf(name, sizeof(name), "atybl%d", info->node);
2242 2242
2243 memset(&props, 0, sizeof(struct backlight_properties)); 2243 memset(&props, 0, sizeof(struct backlight_properties));
2244 props.type = BACKLIGHT_RAW;
2244 props.max_brightness = FB_BACKLIGHT_LEVELS - 1; 2245 props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
2245 bd = backlight_device_register(name, info->dev, par, &aty_bl_data, 2246 bd = backlight_device_register(name, info->dev, par, &aty_bl_data,
2246 &props); 2247 &props);
diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
index 9b811ddbce83..db572df7e1ef 100644
--- a/drivers/video/aty/radeon_backlight.c
+++ b/drivers/video/aty/radeon_backlight.c
@@ -158,6 +158,7 @@ void radeonfb_bl_init(struct radeonfb_info *rinfo)
158 snprintf(name, sizeof(name), "radeonbl%d", rinfo->info->node); 158 snprintf(name, sizeof(name), "radeonbl%d", rinfo->info->node);
159 159
160 memset(&props, 0, sizeof(struct backlight_properties)); 160 memset(&props, 0, sizeof(struct backlight_properties));
161 props.type = BACKLIGHT_RAW;
161 props.max_brightness = FB_BACKLIGHT_LEVELS - 1; 162 props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
162 bd = backlight_device_register(name, rinfo->info->dev, pdata, 163 bd = backlight_device_register(name, rinfo->info->dev, pdata,
163 &radeon_bl_data, &props); 164 &radeon_bl_data, &props);
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index b224396b86d5..e59623a15f3f 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -227,6 +227,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
227 } 227 }
228 228
229 memset(&props, 0, sizeof(struct backlight_properties)); 229 memset(&props, 0, sizeof(struct backlight_properties));
230 props.type = BACKLIGHT_RAW;
230 props.max_brightness = MAX_BRIGHTNESS; 231 props.max_brightness = MAX_BRIGHTNESS;
231 bl = backlight_device_register(name, &pdev->dev, data, 232 bl = backlight_device_register(name, &pdev->dev, data,
232 &pm860x_backlight_ops, &props); 233 &pm860x_backlight_ops, &props);
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index e54a337227ea..0c9373bedd1f 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -109,6 +109,14 @@ config LCD_S6E63M0
109 If you have an S6E63M0 LCD Panel, say Y to enable its 109 If you have an S6E63M0 LCD Panel, say Y to enable its
110 LCD control driver. 110 LCD control driver.
111 111
112config LCD_LD9040
113 tristate "LD9040 AMOLED LCD Driver"
114 depends on SPI && BACKLIGHT_CLASS_DEVICE
115 default n
116 help
117 If you have an LD9040 Panel, say Y to enable its
118 control driver.
119
112endif # LCD_CLASS_DEVICE 120endif # LCD_CLASS_DEVICE
113 121
114# 122#
@@ -236,12 +244,12 @@ config BACKLIGHT_MAX8925
236 If you have a LCD backlight connected to the WLED output of MAX8925 244 If you have a LCD backlight connected to the WLED output of MAX8925
237 WLED output, say Y here to enable this driver. 245 WLED output, say Y here to enable this driver.
238 246
239config BACKLIGHT_MBP_NVIDIA 247config BACKLIGHT_APPLE
240 tristate "MacBook Pro Nvidia Backlight Driver" 248 tristate "Apple Backlight Driver"
241 depends on X86 249 depends on X86 && ACPI
242 help 250 help
243 If you have an Apple Macbook Pro with Nvidia graphics hardware say Y 251 If you have an Intel-based Apple say Y to enable a driver for its
244 to enable a driver for its backlight 252 backlight.
245 253
246config BACKLIGHT_TOSA 254config BACKLIGHT_TOSA
247 tristate "Sharp SL-6000 Backlight Driver" 255 tristate "Sharp SL-6000 Backlight Driver"
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 44c0f81ad85d..b9ca8490df87 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_LCD_VGG2432A4) += vgg2432a4.o
12obj-$(CONFIG_LCD_TDO24M) += tdo24m.o 12obj-$(CONFIG_LCD_TDO24M) += tdo24m.o
13obj-$(CONFIG_LCD_TOSA) += tosa_lcd.o 13obj-$(CONFIG_LCD_TOSA) += tosa_lcd.o
14obj-$(CONFIG_LCD_S6E63M0) += s6e63m0.o 14obj-$(CONFIG_LCD_S6E63M0) += s6e63m0.o
15obj-$(CONFIG_LCD_LD9040) += ld9040.o
15 16
16obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o 17obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
17obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o 18obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o
@@ -26,7 +27,7 @@ obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o
26obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o 27obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o
27obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o 28obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o
28obj-$(CONFIG_BACKLIGHT_MAX8925) += max8925_bl.o 29obj-$(CONFIG_BACKLIGHT_MAX8925) += max8925_bl.o
29obj-$(CONFIG_BACKLIGHT_MBP_NVIDIA) += mbp_nvidia_bl.o 30obj-$(CONFIG_BACKLIGHT_APPLE) += apple_bl.o
30obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o 31obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o
31obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o 32obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
32obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o 33obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index 9f436e014f85..af3119707dbf 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -303,6 +303,7 @@ static int __devinit adp5520_bl_probe(struct platform_device *pdev)
303 mutex_init(&data->lock); 303 mutex_init(&data->lock);
304 304
305 memset(&props, 0, sizeof(struct backlight_properties)); 305 memset(&props, 0, sizeof(struct backlight_properties));
306 props.type = BACKLIGHT_RAW;
306 props.max_brightness = ADP5020_MAX_BRIGHTNESS; 307 props.max_brightness = ADP5020_MAX_BRIGHTNESS;
307 bl = backlight_device_register(pdev->name, data->master, data, 308 bl = backlight_device_register(pdev->name, data->master, data,
308 &adp5520_bl_ops, &props); 309 &adp5520_bl_ops, &props);
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 734c650a47c4..d2a96a421ffd 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -709,6 +709,7 @@ static int __devinit adp8860_probe(struct i2c_client *client,
709 i2c_set_clientdata(client, data); 709 i2c_set_clientdata(client, data);
710 710
711 memset(&props, 0, sizeof(props)); 711 memset(&props, 0, sizeof(props));
712 props.type = BACKLIGHT_RAW;
712 props.max_brightness = ADP8860_MAX_BRIGHTNESS; 713 props.max_brightness = ADP8860_MAX_BRIGHTNESS;
713 714
714 mutex_init(&data->lock); 715 mutex_init(&data->lock);
diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
index fe9af129c5dd..c861c41af442 100644
--- a/drivers/video/backlight/adx_bl.c
+++ b/drivers/video/backlight/adx_bl.c
@@ -104,6 +104,7 @@ static int __devinit adx_backlight_probe(struct platform_device *pdev)
104 } 104 }
105 105
106 memset(&props, 0, sizeof(struct backlight_properties)); 106 memset(&props, 0, sizeof(struct backlight_properties));
107 props.type = BACKLIGHT_RAW;
107 props.max_brightness = 0xff; 108 props.max_brightness = 0xff;
108 bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, 109 bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev,
109 bl, &adx_backlight_ops, &props); 110 bl, &adx_backlight_ops, &props);
diff --git a/drivers/video/backlight/apple_bl.c b/drivers/video/backlight/apple_bl.c
new file mode 100644
index 000000000000..be98d152b7fd
--- /dev/null
+++ b/drivers/video/backlight/apple_bl.c
@@ -0,0 +1,241 @@
1/*
2 * Backlight Driver for Intel-based Apples
3 *
4 * Copyright (c) Red Hat <mjg@redhat.com>
5 * Based on code from Pommed:
6 * Copyright (C) 2006 Nicolas Boichat <nicolas @boichat.ch>
7 * Copyright (C) 2006 Felipe Alfaro Solana <felipe_alfaro @linuxmail.org>
8 * Copyright (C) 2007 Julien BLACHE <jb@jblache.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This driver triggers SMIs which cause the firmware to change the
15 * backlight brightness. This is icky in many ways, but it's impractical to
16 * get at the firmware code in order to figure out what it's actually doing.
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/init.h>
22#include <linux/backlight.h>
23#include <linux/err.h>
24#include <linux/io.h>
25#include <linux/pci.h>
26#include <linux/acpi.h>
27
/* The single backlight device this driver registers (one per machine). */
static struct backlight_device *apple_backlight_device;

/* Per-chipset description of the SMI backlight interface. */
struct hw_data {
	/* I/O resource to allocate. */
	unsigned long iostart;
	unsigned long iolen;
	/* Backlight operations structure. */
	const struct backlight_ops backlight_ops;
	/* Raw brightness poke, used for the probe-time liveness check. */
	void (*set_brightness)(int);
};

/* Selected in apple_bl_add(); NULL until a supported chipset is found. */
static const struct hw_data *hw_data;

#define DRIVER "apple_backlight: "

/* Module parameters. */
static int debug;
module_param_named(debug, debug, int, 0644);
MODULE_PARM_DESC(debug, "Set to one to enable debugging messages.");
47
48/*
49 * Implementation for machines with Intel chipset.
50 */
/*
 * Set brightness via the Intel SMI ports: data to 0xb3, then the 0xbf
 * trigger to 0xb2.  The write order is part of the firmware protocol -
 * do not reorder these two outb()s.
 */
static void intel_chipset_set_brightness(int intensity)
{
	outb(0x04 | (intensity << 4), 0xb3);
	outb(0xbf, 0xb2);
}
56
57static int intel_chipset_send_intensity(struct backlight_device *bd)
58{
59 int intensity = bd->props.brightness;
60
61 if (debug)
62 printk(KERN_DEBUG DRIVER "setting brightness to %d\n",
63 intensity);
64
65 intel_chipset_set_brightness(intensity);
66 return 0;
67}
68
/*
 * get_brightness hook: issue read command 0x03, trigger with 0xbf, then
 * read the level back from port 0xb3.  Port order is protocol-critical.
 * @bd is unused and may be NULL - apple_bl_add() calls this before a
 * backlight device exists.
 */
static int intel_chipset_get_intensity(struct backlight_device *bd)
{
	int intensity;

	outb(0x03, 0xb3);
	outb(0xbf, 0xb2);
	intensity = inb(0xb3) >> 4;

	if (debug)
		printk(KERN_DEBUG DRIVER "read brightness of %d\n",
			intensity);

	return intensity;
}
83
/* Intel host bridge: SMI command port 0xb2, data port 0xb3. */
static const struct hw_data intel_chipset_data = {
	.iostart = 0xb2,
	.iolen = 2,
	.backlight_ops	= {
		.options = BL_CORE_SUSPENDRESUME,
		.get_brightness = intel_chipset_get_intensity,
		.update_status	= intel_chipset_send_intensity,
	},
	.set_brightness = intel_chipset_set_brightness,
};
94
95/*
96 * Implementation for machines with Nvidia chipset.
97 */
/*
 * Set brightness via the Nvidia SMI ports: data to 0x52f, then the 0xbf
 * trigger to 0x52e.  Same protocol as the Intel variant, different ports;
 * do not reorder the writes.
 */
static void nvidia_chipset_set_brightness(int intensity)
{
	outb(0x04 | (intensity << 4), 0x52f);
	outb(0xbf, 0x52e);
}
103
104static int nvidia_chipset_send_intensity(struct backlight_device *bd)
105{
106 int intensity = bd->props.brightness;
107
108 if (debug)
109 printk(KERN_DEBUG DRIVER "setting brightness to %d\n",
110 intensity);
111
112 nvidia_chipset_set_brightness(intensity);
113 return 0;
114}
115
/*
 * get_brightness hook for the Nvidia chipset: command 0x03, 0xbf trigger,
 * then read the level from 0x52f.  Port order is protocol-critical.
 * @bd is unused and may be NULL (called from apple_bl_add() pre-register).
 */
static int nvidia_chipset_get_intensity(struct backlight_device *bd)
{
	int intensity;

	outb(0x03, 0x52f);
	outb(0xbf, 0x52e);
	intensity = inb(0x52f) >> 4;

	if (debug)
		printk(KERN_DEBUG DRIVER "read brightness of %d\n",
			intensity);

	return intensity;
}
130
/* Nvidia host bridge: SMI command port 0x52e, data port 0x52f. */
static const struct hw_data nvidia_chipset_data = {
	.iostart = 0x52e,
	.iolen = 2,
	.backlight_ops	= {
		.options = BL_CORE_SUSPENDRESUME,
		.get_brightness = nvidia_chipset_get_intensity,
		.update_status	= nvidia_chipset_send_intensity
	},
	.set_brightness = nvidia_chipset_set_brightness,
};
141
142static int __devinit apple_bl_add(struct acpi_device *dev)
143{
144 struct backlight_properties props;
145 struct pci_dev *host;
146 int intensity;
147
148 host = pci_get_bus_and_slot(0, 0);
149
150 if (!host) {
151 printk(KERN_ERR DRIVER "unable to find PCI host\n");
152 return -ENODEV;
153 }
154
155 if (host->vendor == PCI_VENDOR_ID_INTEL)
156 hw_data = &intel_chipset_data;
157 else if (host->vendor == PCI_VENDOR_ID_NVIDIA)
158 hw_data = &nvidia_chipset_data;
159
160 pci_dev_put(host);
161
162 if (!hw_data) {
163 printk(KERN_ERR DRIVER "unknown hardware\n");
164 return -ENODEV;
165 }
166
167 /* Check that the hardware responds - this may not work under EFI */
168
169 intensity = hw_data->backlight_ops.get_brightness(NULL);
170
171 if (!intensity) {
172 hw_data->set_brightness(1);
173 if (!hw_data->backlight_ops.get_brightness(NULL))
174 return -ENODEV;
175
176 hw_data->set_brightness(0);
177 }
178
179 if (!request_region(hw_data->iostart, hw_data->iolen,
180 "Apple backlight"))
181 return -ENXIO;
182
183 memset(&props, 0, sizeof(struct backlight_properties));
184 props.type = BACKLIGHT_PLATFORM;
185 props.max_brightness = 15;
186 apple_backlight_device = backlight_device_register("apple_backlight",
187 NULL, NULL, &hw_data->backlight_ops, &props);
188
189 if (IS_ERR(apple_backlight_device)) {
190 release_region(hw_data->iostart, hw_data->iolen);
191 return PTR_ERR(apple_backlight_device);
192 }
193
194 apple_backlight_device->props.brightness =
195 hw_data->backlight_ops.get_brightness(apple_backlight_device);
196 backlight_update_status(apple_backlight_device);
197
198 return 0;
199}
200
/*
 * ACPI remove callback.  Teardown order matters: unregister the backlight
 * device first (so its ops can no longer run), then release the I/O region
 * those ops use, then clear the chipset selection.
 */
static int __devexit apple_bl_remove(struct acpi_device *dev, int type)
{
	backlight_device_unregister(apple_backlight_device);

	release_region(hw_data->iostart, hw_data->iolen);
	hw_data = NULL;
	return 0;
}
209
/* ACPI device IDs this driver binds to. */
static const struct acpi_device_id apple_bl_ids[] = {
	{"APP0002", 0},
	{"", 0},
};

static struct acpi_driver apple_bl_driver = {
	.name = "Apple backlight",
	.ids = apple_bl_ids,
	.ops = {
		.add = apple_bl_add,
		.remove = apple_bl_remove,
	},
};

/* Module init/exit: register/unregister the ACPI driver. */
static int __init apple_bl_init(void)
{
	return acpi_bus_register_driver(&apple_bl_driver);
}

static void __exit apple_bl_exit(void)
{
	acpi_bus_unregister_driver(&apple_bl_driver);
}

module_init(apple_bl_init);
module_exit(apple_bl_exit);

MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
MODULE_DESCRIPTION("Apple Backlight Driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(acpi, apple_bl_ids);
/* Keep the old mbp_nvidia_bl module name loading this driver instead. */
MODULE_ALIAS("mbp_nvidia_bl");
diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
index e6a66dab088c..0443a4f71858 100644
--- a/drivers/video/backlight/atmel-pwm-bl.c
+++ b/drivers/video/backlight/atmel-pwm-bl.c
@@ -168,6 +168,7 @@ static int atmel_pwm_bl_probe(struct platform_device *pdev)
168 } 168 }
169 169
170 memset(&props, 0, sizeof(struct backlight_properties)); 170 memset(&props, 0, sizeof(struct backlight_properties));
171 props.type = BACKLIGHT_RAW;
171 props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min; 172 props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min;
172 bldev = backlight_device_register("atmel-pwm-bl", &pdev->dev, pwmbl, 173 bldev = backlight_device_register("atmel-pwm-bl", &pdev->dev, pwmbl,
173 &atmel_pwm_bl_ops, &props); 174 &atmel_pwm_bl_ops, &props);
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 08703299ef61..80d292fb92d8 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -19,6 +19,12 @@
19#include <asm/backlight.h> 19#include <asm/backlight.h>
20#endif 20#endif
21 21
22static const char const *backlight_types[] = {
23 [BACKLIGHT_RAW] = "raw",
24 [BACKLIGHT_PLATFORM] = "platform",
25 [BACKLIGHT_FIRMWARE] = "firmware",
26};
27
22#if defined(CONFIG_FB) || (defined(CONFIG_FB_MODULE) && \ 28#if defined(CONFIG_FB) || (defined(CONFIG_FB_MODULE) && \
23 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)) 29 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE))
24/* This callback gets called when something important happens inside a 30/* This callback gets called when something important happens inside a
@@ -169,6 +175,14 @@ static ssize_t backlight_store_brightness(struct device *dev,
169 return rc; 175 return rc;
170} 176}
171 177
/* sysfs "type" attribute: print this device's backlight_types[] name. */
static ssize_t backlight_show_type(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct backlight_device *bd = to_backlight_device(dev);

	return sprintf(buf, "%s\n", backlight_types[bd->props.type]);
}
185
172static ssize_t backlight_show_max_brightness(struct device *dev, 186static ssize_t backlight_show_max_brightness(struct device *dev,
173 struct device_attribute *attr, char *buf) 187 struct device_attribute *attr, char *buf)
174{ 188{
@@ -234,6 +248,7 @@ static struct device_attribute bl_device_attributes[] = {
234 __ATTR(actual_brightness, 0444, backlight_show_actual_brightness, 248 __ATTR(actual_brightness, 0444, backlight_show_actual_brightness,
235 NULL), 249 NULL),
236 __ATTR(max_brightness, 0444, backlight_show_max_brightness, NULL), 250 __ATTR(max_brightness, 0444, backlight_show_max_brightness, NULL),
251 __ATTR(type, 0444, backlight_show_type, NULL),
237 __ATTR_NULL, 252 __ATTR_NULL,
238}; 253};
239 254
@@ -292,9 +307,16 @@ struct backlight_device *backlight_device_register(const char *name,
292 dev_set_drvdata(&new_bd->dev, devdata); 307 dev_set_drvdata(&new_bd->dev, devdata);
293 308
294 /* Set default properties */ 309 /* Set default properties */
295 if (props) 310 if (props) {
296 memcpy(&new_bd->props, props, 311 memcpy(&new_bd->props, props,
297 sizeof(struct backlight_properties)); 312 sizeof(struct backlight_properties));
313 if (props->type <= 0 || props->type >= BACKLIGHT_TYPE_MAX) {
314 WARN(1, "%s: invalid backlight type", name);
315 new_bd->props.type = BACKLIGHT_RAW;
316 }
317 } else {
318 new_bd->props.type = BACKLIGHT_RAW;
319 }
298 320
299 rc = device_register(&new_bd->dev); 321 rc = device_register(&new_bd->dev);
300 if (rc) { 322 if (rc) {
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 1e71c35083bb..af6098396fe6 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -562,6 +562,7 @@ static int __devinit corgi_lcd_probe(struct spi_device *spi)
562 lcd->mode = (pdata) ? pdata->init_mode : CORGI_LCD_MODE_VGA; 562 lcd->mode = (pdata) ? pdata->init_mode : CORGI_LCD_MODE_VGA;
563 563
564 memset(&props, 0, sizeof(struct backlight_properties)); 564 memset(&props, 0, sizeof(struct backlight_properties));
565 props.type = BACKLIGHT_RAW;
565 props.max_brightness = pdata->max_intensity; 566 props.max_brightness = pdata->max_intensity;
566 lcd->bl_dev = backlight_device_register("corgi_bl", &spi->dev, lcd, 567 lcd->bl_dev = backlight_device_register("corgi_bl", &spi->dev, lcd,
567 &corgi_bl_ops, &props); 568 &corgi_bl_ops, &props);
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index 397d15eb1ea8..6c8c54041fae 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -193,6 +193,7 @@ static int cr_backlight_probe(struct platform_device *pdev)
193 } 193 }
194 194
195 memset(&props, 0, sizeof(struct backlight_properties)); 195 memset(&props, 0, sizeof(struct backlight_properties));
196 props.type = BACKLIGHT_RAW;
196 bdp = backlight_device_register("cr-backlight", &pdev->dev, NULL, 197 bdp = backlight_device_register("cr-backlight", &pdev->dev, NULL,
197 &cr_backlight_ops, &props); 198 &cr_backlight_ops, &props);
198 if (IS_ERR(bdp)) { 199 if (IS_ERR(bdp)) {
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 87659ed79bd7..62043f12a5a4 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -136,6 +136,7 @@ static int da903x_backlight_probe(struct platform_device *pdev)
136 da903x_write(data->da903x_dev, DA9034_WLED_CONTROL2, 136 da903x_write(data->da903x_dev, DA9034_WLED_CONTROL2,
137 DA9034_WLED_ISET(pdata->output_current)); 137 DA9034_WLED_ISET(pdata->output_current));
138 138
139 props.type = BACKLIGHT_RAW;
139 props.max_brightness = max_brightness; 140 props.max_brightness = max_brightness;
140 bl = backlight_device_register(pdev->name, data->da903x_dev, data, 141 bl = backlight_device_register(pdev->name, data->da903x_dev, data,
141 &da903x_backlight_ops, &props); 142 &da903x_backlight_ops, &props);
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c
index b0cc49184803..9f1e389d51d2 100644
--- a/drivers/video/backlight/ep93xx_bl.c
+++ b/drivers/video/backlight/ep93xx_bl.c
@@ -87,6 +87,7 @@ static int __init ep93xxbl_probe(struct platform_device *dev)
87 ep93xxbl->mmio = EP93XX_RASTER_BRIGHTNESS; 87 ep93xxbl->mmio = EP93XX_RASTER_BRIGHTNESS;
88 88
89 memset(&props, 0, sizeof(struct backlight_properties)); 89 memset(&props, 0, sizeof(struct backlight_properties));
90 props.type = BACKLIGHT_RAW;
90 props.max_brightness = EP93XX_MAX_BRIGHT; 91 props.max_brightness = EP93XX_MAX_BRIGHT;
91 bl = backlight_device_register(dev->name, &dev->dev, ep93xxbl, 92 bl = backlight_device_register(dev->name, &dev->dev, ep93xxbl,
92 &ep93xxbl_ops, &props); 93 &ep93xxbl_ops, &props);
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index 312ca619735d..8c6befd65a33 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -91,6 +91,7 @@ static int genericbl_probe(struct platform_device *pdev)
91 name = machinfo->name; 91 name = machinfo->name;
92 92
93 memset(&props, 0, sizeof(struct backlight_properties)); 93 memset(&props, 0, sizeof(struct backlight_properties));
94 props.type = BACKLIGHT_RAW;
94 props.max_brightness = machinfo->max_intensity; 95 props.max_brightness = machinfo->max_intensity;
95 bd = backlight_device_register(name, &pdev->dev, NULL, &genericbl_ops, 96 bd = backlight_device_register(name, &pdev->dev, NULL, &genericbl_ops,
96 &props); 97 &props);
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index 267d23f8d645..38aa00272141 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -109,6 +109,7 @@ static int __devinit hp680bl_probe(struct platform_device *pdev)
109 struct backlight_device *bd; 109 struct backlight_device *bd;
110 110
111 memset(&props, 0, sizeof(struct backlight_properties)); 111 memset(&props, 0, sizeof(struct backlight_properties));
112 props.type = BACKLIGHT_RAW;
112 props.max_brightness = HP680_MAX_INTENSITY; 113 props.max_brightness = HP680_MAX_INTENSITY;
113 bd = backlight_device_register("hp680-bl", &pdev->dev, NULL, 114 bd = backlight_device_register("hp680-bl", &pdev->dev, NULL,
114 &hp680bl_ops, &props); 115 &hp680bl_ops, &props);
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index 2f177b3a4885..de65d80159be 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -106,6 +106,7 @@ static int jornada_bl_probe(struct platform_device *pdev)
106 struct backlight_device *bd; 106 struct backlight_device *bd;
107 107
108 memset(&props, 0, sizeof(struct backlight_properties)); 108 memset(&props, 0, sizeof(struct backlight_properties));
109 props.type = BACKLIGHT_RAW;
109 props.max_brightness = BL_MAX_BRIGHT; 110 props.max_brightness = BL_MAX_BRIGHT;
110 bd = backlight_device_register(S1D_DEVICENAME, &pdev->dev, NULL, 111 bd = backlight_device_register(S1D_DEVICENAME, &pdev->dev, NULL,
111 &jornada_bl_ops, &props); 112 &jornada_bl_ops, &props);
@@ -146,12 +147,12 @@ static struct platform_driver jornada_bl_driver = {
146 }, 147 },
147}; 148};
148 149
149int __init jornada_bl_init(void) 150static int __init jornada_bl_init(void)
150{ 151{
151 return platform_driver_register(&jornada_bl_driver); 152 return platform_driver_register(&jornada_bl_driver);
152} 153}
153 154
154void __exit jornada_bl_exit(void) 155static void __exit jornada_bl_exit(void)
155{ 156{
156 platform_driver_unregister(&jornada_bl_driver); 157 platform_driver_unregister(&jornada_bl_driver);
157} 158}
diff --git a/drivers/video/backlight/jornada720_lcd.c b/drivers/video/backlight/jornada720_lcd.c
index cbbb167fd268..d2ff658b4144 100644
--- a/drivers/video/backlight/jornada720_lcd.c
+++ b/drivers/video/backlight/jornada720_lcd.c
@@ -135,12 +135,12 @@ static struct platform_driver jornada_lcd_driver = {
135 }, 135 },
136}; 136};
137 137
138int __init jornada_lcd_init(void) 138static int __init jornada_lcd_init(void)
139{ 139{
140 return platform_driver_register(&jornada_lcd_driver); 140 return platform_driver_register(&jornada_lcd_driver);
141} 141}
142 142
143void __exit jornada_lcd_exit(void) 143static void __exit jornada_lcd_exit(void)
144{ 144{
145 platform_driver_unregister(&jornada_lcd_driver); 145 platform_driver_unregister(&jornada_lcd_driver);
146} 146}
diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
index f439a8632287..72dd5556a35b 100644
--- a/drivers/video/backlight/kb3886_bl.c
+++ b/drivers/video/backlight/kb3886_bl.c
@@ -149,6 +149,7 @@ static int kb3886bl_probe(struct platform_device *pdev)
149 machinfo->limit_mask = -1; 149 machinfo->limit_mask = -1;
150 150
151 memset(&props, 0, sizeof(struct backlight_properties)); 151 memset(&props, 0, sizeof(struct backlight_properties));
152 props.type = BACKLIGHT_RAW;
152 props.max_brightness = machinfo->max_intensity; 153 props.max_brightness = machinfo->max_intensity;
153 kb3886_backlight_device = backlight_device_register("kb3886-bl", 154 kb3886_backlight_device = backlight_device_register("kb3886-bl",
154 &pdev->dev, NULL, 155 &pdev->dev, NULL,
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
new file mode 100644
index 000000000000..7281b2506a67
--- /dev/null
+++ b/drivers/video/backlight/ld9040.c
@@ -0,0 +1,819 @@
1/*
2 * ld9040 AMOLED LCD panel driver.
3 *
4 * Copyright (c) 2011 Samsung Electronics
5 * Author: Donghwa Lee <dh09.lee@samsung.com>
6 * Derived from drivers/video/backlight/s6e63m0.c
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 */
22
23#include <linux/wait.h>
24#include <linux/fb.h>
25#include <linux/delay.h>
26#include <linux/gpio.h>
27#include <linux/spi/spi.h>
28#include <linux/irq.h>
29#include <linux/interrupt.h>
30#include <linux/kernel.h>
31#include <linux/lcd.h>
32#include <linux/backlight.h>
33
34#include "ld9040_gamma.h"
35
36#define SLEEPMSEC 0x1000
37#define ENDDEF 0x2000
38#define DEFMASK 0xFF00
39#define COMMAND_ONLY 0xFE
40#define DATA_ONLY 0xFF
41
42#define MIN_BRIGHTNESS 0
43#define MAX_BRIGHTNESS 24
44#define power_is_on(pwr) ((pwr) <= FB_BLANK_NORMAL)
45
/* Driver state for one ld9040 panel. */
struct ld9040 {
	struct device *dev;
	struct spi_device *spi;		/* 3-wire 9-bit SPI link to the panel */
	/* current FB_BLANK_* power state, tracked by ld9040_power() */
	unsigned int power;
	unsigned int current_brightness;

	struct lcd_device *ld;		/* lcd class device */
	struct backlight_device *bd;	/* backlight class device */
	struct lcd_platform_data *lcd_pd;	/* board callbacks and delays */
};
56
57static const unsigned short seq_swreset[] = {
58 0x01, COMMAND_ONLY,
59 ENDDEF, 0x00
60};
61
62static const unsigned short seq_user_setting[] = {
63 0xF0, 0x5A,
64
65 DATA_ONLY, 0x5A,
66 ENDDEF, 0x00
67};
68
69static const unsigned short seq_elvss_on[] = {
70 0xB1, 0x0D,
71
72 DATA_ONLY, 0x00,
73 DATA_ONLY, 0x16,
74 ENDDEF, 0x00
75};
76
77static const unsigned short seq_gtcon[] = {
78 0xF7, 0x09,
79
80 DATA_ONLY, 0x00,
81 DATA_ONLY, 0x00,
82 ENDDEF, 0x00
83};
84
85static const unsigned short seq_panel_condition[] = {
86 0xF8, 0x05,
87
88 DATA_ONLY, 0x65,
89 DATA_ONLY, 0x96,
90 DATA_ONLY, 0x71,
91 DATA_ONLY, 0x7D,
92 DATA_ONLY, 0x19,
93 DATA_ONLY, 0x3B,
94 DATA_ONLY, 0x0D,
95 DATA_ONLY, 0x19,
96 DATA_ONLY, 0x7E,
97 DATA_ONLY, 0x0D,
98 DATA_ONLY, 0xE2,
99 DATA_ONLY, 0x00,
100 DATA_ONLY, 0x00,
101 DATA_ONLY, 0x7E,
102 DATA_ONLY, 0x7D,
103 DATA_ONLY, 0x07,
104 DATA_ONLY, 0x07,
105 DATA_ONLY, 0x20,
106 DATA_ONLY, 0x20,
107 DATA_ONLY, 0x20,
108 DATA_ONLY, 0x02,
109 DATA_ONLY, 0x02,
110 ENDDEF, 0x00
111};
112
113static const unsigned short seq_gamma_set1[] = {
114 0xF9, 0x00,
115
116 DATA_ONLY, 0xA7,
117 DATA_ONLY, 0xB4,
118 DATA_ONLY, 0xAE,
119 DATA_ONLY, 0xBF,
120 DATA_ONLY, 0x00,
121 DATA_ONLY, 0x91,
122 DATA_ONLY, 0x00,
123 DATA_ONLY, 0xB2,
124 DATA_ONLY, 0xB4,
125 DATA_ONLY, 0xAA,
126 DATA_ONLY, 0xBB,
127 DATA_ONLY, 0x00,
128 DATA_ONLY, 0xAC,
129 DATA_ONLY, 0x00,
130 DATA_ONLY, 0xB3,
131 DATA_ONLY, 0xB1,
132 DATA_ONLY, 0xAA,
133 DATA_ONLY, 0xBC,
134 DATA_ONLY, 0x00,
135 DATA_ONLY, 0xB3,
136 ENDDEF, 0x00
137};
138
139static const unsigned short seq_gamma_ctrl[] = {
140 0xFB, 0x02,
141
142 DATA_ONLY, 0x5A,
143 ENDDEF, 0x00
144};
145
146static const unsigned short seq_gamma_start[] = {
147 0xF9, COMMAND_ONLY,
148
149 ENDDEF, 0x00
150};
151
152static const unsigned short seq_apon[] = {
153 0xF3, 0x00,
154
155 DATA_ONLY, 0x00,
156 DATA_ONLY, 0x00,
157 DATA_ONLY, 0x0A,
158 DATA_ONLY, 0x02,
159 ENDDEF, 0x00
160};
161
162static const unsigned short seq_display_ctrl[] = {
163 0xF2, 0x02,
164
165 DATA_ONLY, 0x08,
166 DATA_ONLY, 0x08,
167 DATA_ONLY, 0x10,
168 DATA_ONLY, 0x10,
169 ENDDEF, 0x00
170};
171
172static const unsigned short seq_manual_pwr[] = {
173 0xB0, 0x04,
174 ENDDEF, 0x00
175};
176
177static const unsigned short seq_pwr_ctrl[] = {
178 0xF4, 0x0A,
179
180 DATA_ONLY, 0x87,
181 DATA_ONLY, 0x25,
182 DATA_ONLY, 0x6A,
183 DATA_ONLY, 0x44,
184 DATA_ONLY, 0x02,
185 DATA_ONLY, 0x88,
186 ENDDEF, 0x00
187};
188
189static const unsigned short seq_sleep_out[] = {
190 0x11, COMMAND_ONLY,
191 ENDDEF, 0x00
192};
193
194static const unsigned short seq_sleep_in[] = {
195 0x10, COMMAND_ONLY,
196 ENDDEF, 0x00
197};
198
199static const unsigned short seq_display_on[] = {
200 0x29, COMMAND_ONLY,
201 ENDDEF, 0x00
202};
203
204static const unsigned short seq_display_off[] = {
205 0x28, COMMAND_ONLY,
206 ENDDEF, 0x00
207};
208
209static const unsigned short seq_vci1_1st_en[] = {
210 0xF3, 0x10,
211
212 DATA_ONLY, 0x00,
213 DATA_ONLY, 0x00,
214 DATA_ONLY, 0x00,
215 DATA_ONLY, 0x02,
216 ENDDEF, 0x00
217};
218
219static const unsigned short seq_vl1_en[] = {
220 0xF3, 0x11,
221
222 DATA_ONLY, 0x00,
223 DATA_ONLY, 0x00,
224 DATA_ONLY, 0x00,
225 DATA_ONLY, 0x02,
226 ENDDEF, 0x00
227};
228
229static const unsigned short seq_vl2_en[] = {
230 0xF3, 0x13,
231
232 DATA_ONLY, 0x00,
233 DATA_ONLY, 0x00,
234 DATA_ONLY, 0x00,
235 DATA_ONLY, 0x02,
236 ENDDEF, 0x00
237};
238
239static const unsigned short seq_vci1_2nd_en[] = {
240 0xF3, 0x33,
241
242 DATA_ONLY, 0x00,
243 DATA_ONLY, 0x00,
244 DATA_ONLY, 0x00,
245 DATA_ONLY, 0x02,
246 ENDDEF, 0x00
247};
248
249static const unsigned short seq_vl3_en[] = {
250 0xF3, 0x37,
251
252 DATA_ONLY, 0x00,
253 DATA_ONLY, 0x00,
254 DATA_ONLY, 0x00,
255 DATA_ONLY, 0x02,
256 ENDDEF, 0x00
257};
258
259static const unsigned short seq_vreg1_amp_en[] = {
260 0xF3, 0x37,
261
262 DATA_ONLY, 0x01,
263 DATA_ONLY, 0x00,
264 DATA_ONLY, 0x00,
265 DATA_ONLY, 0x02,
266 ENDDEF, 0x00
267};
268
269static const unsigned short seq_vgh_amp_en[] = {
270 0xF3, 0x37,
271
272 DATA_ONLY, 0x11,
273 DATA_ONLY, 0x00,
274 DATA_ONLY, 0x00,
275 DATA_ONLY, 0x02,
276 ENDDEF, 0x00
277};
278
279static const unsigned short seq_vgl_amp_en[] = {
280 0xF3, 0x37,
281
282 DATA_ONLY, 0x31,
283 DATA_ONLY, 0x00,
284 DATA_ONLY, 0x00,
285 DATA_ONLY, 0x02,
286 ENDDEF, 0x00
287};
288
289static const unsigned short seq_vmos_amp_en[] = {
290 0xF3, 0x37,
291
292 DATA_ONLY, 0xB1,
293 DATA_ONLY, 0x00,
294 DATA_ONLY, 0x00,
295 DATA_ONLY, 0x03,
296 ENDDEF, 0x00
297};
298
299static const unsigned short seq_vint_amp_en[] = {
300 0xF3, 0x37,
301
302 DATA_ONLY, 0xF1,
303 /* DATA_ONLY, 0x71, VMOS/VBL/VBH not used */
304 DATA_ONLY, 0x00,
305 DATA_ONLY, 0x00,
306 DATA_ONLY, 0x03,
307 /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
308 ENDDEF, 0x00
309};
310
311static const unsigned short seq_vbh_amp_en[] = {
312 0xF3, 0x37,
313
314 DATA_ONLY, 0xF9,
315 DATA_ONLY, 0x00,
316 DATA_ONLY, 0x00,
317 DATA_ONLY, 0x03,
318 ENDDEF, 0x00
319};
320
321static const unsigned short seq_vbl_amp_en[] = {
322 0xF3, 0x37,
323
324 DATA_ONLY, 0xFD,
325 DATA_ONLY, 0x00,
326 DATA_ONLY, 0x00,
327 DATA_ONLY, 0x03,
328 ENDDEF, 0x00
329};
330
331static const unsigned short seq_gam_amp_en[] = {
332 0xF3, 0x37,
333
334 DATA_ONLY, 0xFF,
335 /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
336 DATA_ONLY, 0x00,
337 DATA_ONLY, 0x00,
338 DATA_ONLY, 0x03,
339 /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
340 ENDDEF, 0x00
341};
342
343static const unsigned short seq_sd_amp_en[] = {
344 0xF3, 0x37,
345
346 DATA_ONLY, 0xFF,
347 /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
348 DATA_ONLY, 0x80,
349 DATA_ONLY, 0x00,
350 DATA_ONLY, 0x03,
351 /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
352 ENDDEF, 0x00
353};
354
355static const unsigned short seq_gls_en[] = {
356 0xF3, 0x37,
357
358 DATA_ONLY, 0xFF,
359 /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
360 DATA_ONLY, 0x81,
361 DATA_ONLY, 0x00,
362 DATA_ONLY, 0x03,
363 /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
364 ENDDEF, 0x00
365};
366
367static const unsigned short seq_els_en[] = {
368 0xF3, 0x37,
369
370 DATA_ONLY, 0xFF,
371 /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
372 DATA_ONLY, 0x83,
373 DATA_ONLY, 0x00,
374 DATA_ONLY, 0x03,
375 /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
376 ENDDEF, 0x00
377};
378
379static const unsigned short seq_el_on[] = {
380 0xF3, 0x37,
381
382 DATA_ONLY, 0xFF,
383 /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
384 DATA_ONLY, 0x87,
385 DATA_ONLY, 0x00,
386 DATA_ONLY, 0x03,
387 /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
388 ENDDEF, 0x00
389};
390
/*
 * Transfer one 9-bit word on the SPI bus (bits_per_word = 9 is set up in
 * probe).  @addr supplies the leading bit - 0x0 for a command byte, 0x1
 * for a parameter byte (see ld9040_spi_write()) - and @data the payload.
 * Returns the spi_sync() status.
 */
static int ld9040_spi_write_byte(struct ld9040 *lcd, int addr, int data)
{
	u16 buf[1];
	struct spi_message msg;

	struct spi_transfer xfer = {
		.len		= 2,
		.tx_buf		= buf,
	};

	/* pack: address/DC bit in bit 8, payload in bits 7:0 */
	buf[0] = (addr << 8) | data;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	return spi_sync(lcd->spi, &msg);
}
408
409static int ld9040_spi_write(struct ld9040 *lcd, unsigned char address,
410 unsigned char command)
411{
412 int ret = 0;
413
414 if (address != DATA_ONLY)
415 ret = ld9040_spi_write_byte(lcd, 0x0, address);
416 if (command != COMMAND_ONLY)
417 ret = ld9040_spi_write_byte(lcd, 0x1, command);
418
419 return ret;
420}
421
422static int ld9040_panel_send_sequence(struct ld9040 *lcd,
423 const unsigned short *wbuf)
424{
425 int ret = 0, i = 0;
426
427 while ((wbuf[i] & DEFMASK) != ENDDEF) {
428 if ((wbuf[i] & DEFMASK) != SLEEPMSEC) {
429 ret = ld9040_spi_write(lcd, wbuf[i], wbuf[i+1]);
430 if (ret)
431 break;
432 } else
433 udelay(wbuf[i+1]*1000);
434 i += 2;
435 }
436
437 return ret;
438}
439
440static int _ld9040_gamma_ctl(struct ld9040 *lcd, const unsigned int *gamma)
441{
442 unsigned int i = 0;
443 int ret = 0;
444
445 /* start gamma table updating. */
446 ret = ld9040_panel_send_sequence(lcd, seq_gamma_start);
447 if (ret) {
448 dev_err(lcd->dev, "failed to disable gamma table updating.\n");
449 goto gamma_err;
450 }
451
452 for (i = 0 ; i < GAMMA_TABLE_COUNT; i++) {
453 ret = ld9040_spi_write(lcd, DATA_ONLY, gamma[i]);
454 if (ret) {
455 dev_err(lcd->dev, "failed to set gamma table.\n");
456 goto gamma_err;
457 }
458 }
459
460 /* update gamma table. */
461 ret = ld9040_panel_send_sequence(lcd, seq_gamma_ctrl);
462 if (ret)
463 dev_err(lcd->dev, "failed to update gamma table.\n");
464
465gamma_err:
466 return ret;
467}
468
469static int ld9040_gamma_ctl(struct ld9040 *lcd, int gamma)
470{
471 int ret = 0;
472
473 ret = _ld9040_gamma_ctl(lcd, gamma_table.gamma_22_table[gamma]);
474
475 return ret;
476}
477
478
/*
 * Run the panel's power-on initialisation sequences in order, finishing
 * with SLEEP_OUT.  Stops at the first SPI failure and returns its status
 * (0 on full success).
 */
static int ld9040_ldi_init(struct ld9040 *lcd)
{
	int ret, i;
	static const unsigned short *init_seq[] = {
		seq_user_setting,
		seq_panel_condition,
		seq_display_ctrl,
		seq_manual_pwr,
		seq_elvss_on,
		seq_gtcon,
		seq_gamma_set1,
		seq_gamma_ctrl,
		seq_sleep_out,
	};

	for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
		ret = ld9040_panel_send_sequence(lcd, init_seq[i]);
		/* workaround: minimum delay time for transferring CMD */
		udelay(300);
		if (ret)
			break;
	}

	return ret;
}
504
505static int ld9040_ldi_enable(struct ld9040 *lcd)
506{
507 int ret = 0;
508
509 ret = ld9040_panel_send_sequence(lcd, seq_display_on);
510
511 return ret;
512}
513
/*
 * Blank the panel: DISPLAY_OFF followed by SLEEP_IN.
 * NOTE(review): only the status of the second (sleep-in) sequence is
 * returned; a DISPLAY_OFF failure is silently overwritten - confirm
 * this best-effort behaviour is intended.
 */
static int ld9040_ldi_disable(struct ld9040 *lcd)
{
	int ret;

	ret = ld9040_panel_send_sequence(lcd, seq_display_off);
	ret = ld9040_panel_send_sequence(lcd, seq_sleep_in);

	return ret;
}
523
/*
 * Full power-up sequence: board power_on callback, panel reset, LDI
 * initialisation, then display enable.  The inter-step delays come from
 * the board's lcd_platform_data.  Step order and timing are part of the
 * panel bring-up protocol - do not reorder.
 * Returns 0 on success, -EFAULT if required platform callbacks are
 * missing, or the first LDI error.
 */
static int ld9040_power_on(struct ld9040 *lcd)
{
	int ret = 0;
	struct lcd_platform_data *pd = NULL;
	pd = lcd->lcd_pd;
	if (!pd) {
		dev_err(lcd->dev, "platform data is NULL.\n");
		return -EFAULT;
	}

	if (!pd->power_on) {
		dev_err(lcd->dev, "power_on is NULL.\n");
		return -EFAULT;
	} else {
		pd->power_on(lcd->ld, 1);
		mdelay(pd->power_on_delay);
	}

	if (!pd->reset) {
		dev_err(lcd->dev, "reset is NULL.\n");
		return -EFAULT;
	} else {
		pd->reset(lcd->ld);
		mdelay(pd->reset_delay);
	}

	ret = ld9040_ldi_init(lcd);
	if (ret) {
		dev_err(lcd->dev, "failed to initialize ldi.\n");
		return ret;
	}

	ret = ld9040_ldi_enable(lcd);
	if (ret) {
		dev_err(lcd->dev, "failed to enable ldi.\n");
		return ret;
	}

	return 0;
}
564
/*
 * Power-down sequence: blank the LDI, wait the board-specified delay,
 * then cut power through the board's power_on callback.
 * Returns 0 on success, -EFAULT for missing platform data/callbacks,
 * -EIO if the LDI refused the blanking commands.
 */
static int ld9040_power_off(struct ld9040 *lcd)
{
	int ret = 0;
	struct lcd_platform_data *pd = NULL;

	pd = lcd->lcd_pd;
	if (!pd) {
		dev_err(lcd->dev, "platform data is NULL.\n");
		return -EFAULT;
	}

	ret = ld9040_ldi_disable(lcd);
	if (ret) {
		dev_err(lcd->dev, "lcd setting failed.\n");
		return -EIO;
	}

	mdelay(pd->power_off_delay);

	/* power_on(ld, 0) doubles as the power-off switch */
	if (!pd->power_on) {
		dev_err(lcd->dev, "power_on is NULL.\n");
		return -EFAULT;
	} else
		pd->power_on(lcd->ld, 0);

	return 0;
}
592
593static int ld9040_power(struct ld9040 *lcd, int power)
594{
595 int ret = 0;
596
597 if (power_is_on(power) && !power_is_on(lcd->power))
598 ret = ld9040_power_on(lcd);
599 else if (!power_is_on(power) && power_is_on(lcd->power))
600 ret = ld9040_power_off(lcd);
601
602 if (!ret)
603 lcd->power = power;
604
605 return ret;
606}
607
608static int ld9040_set_power(struct lcd_device *ld, int power)
609{
610 struct ld9040 *lcd = lcd_get_data(ld);
611
612 if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN &&
613 power != FB_BLANK_NORMAL) {
614 dev_err(lcd->dev, "power value should be 0, 1 or 4.\n");
615 return -EINVAL;
616 }
617
618 return ld9040_power(lcd, power);
619}
620
621static int ld9040_get_power(struct lcd_device *ld)
622{
623 struct ld9040 *lcd = lcd_get_data(ld);
624
625 return lcd->power;
626}
627
628static int ld9040_get_brightness(struct backlight_device *bd)
629{
630 return bd->props.brightness;
631}
632
633static int ld9040_set_brightness(struct backlight_device *bd)
634{
635 int ret = 0, brightness = bd->props.brightness;
636 struct ld9040 *lcd = bl_get_data(bd);
637
638 if (brightness < MIN_BRIGHTNESS ||
639 brightness > bd->props.max_brightness) {
640 dev_err(&bd->dev, "lcd brightness should be %d to %d.\n",
641 MIN_BRIGHTNESS, MAX_BRIGHTNESS);
642 return -EINVAL;
643 }
644
645 ret = ld9040_gamma_ctl(lcd, bd->props.brightness);
646 if (ret) {
647 dev_err(&bd->dev, "lcd brightness setting failed.\n");
648 return -EIO;
649 }
650
651 return ret;
652}
653
/* LCD class hooks: blank/unblank requests routed through ld9040_power(). */
static struct lcd_ops ld9040_lcd_ops = {
	.set_power = ld9040_set_power,
	.get_power = ld9040_get_power,
};

/* Backlight class hooks: brightness changes programmed via the gamma table. */
static const struct backlight_ops ld9040_backlight_ops = {
	.get_brightness = ld9040_get_brightness,
	.update_status = ld9040_set_brightness,
};
663
664
665static int ld9040_probe(struct spi_device *spi)
666{
667 int ret = 0;
668 struct ld9040 *lcd = NULL;
669 struct lcd_device *ld = NULL;
670 struct backlight_device *bd = NULL;
671
672 lcd = kzalloc(sizeof(struct ld9040), GFP_KERNEL);
673 if (!lcd)
674 return -ENOMEM;
675
676 /* ld9040 lcd panel uses 3-wire 9bits SPI Mode. */
677 spi->bits_per_word = 9;
678
679 ret = spi_setup(spi);
680 if (ret < 0) {
681 dev_err(&spi->dev, "spi setup failed.\n");
682 goto out_free_lcd;
683 }
684
685 lcd->spi = spi;
686 lcd->dev = &spi->dev;
687
688 lcd->lcd_pd = spi->dev.platform_data;
689 if (!lcd->lcd_pd) {
690 dev_err(&spi->dev, "platform data is NULL.\n");
691 goto out_free_lcd;
692 }
693
694 ld = lcd_device_register("ld9040", &spi->dev, lcd, &ld9040_lcd_ops);
695 if (IS_ERR(ld)) {
696 ret = PTR_ERR(ld);
697 goto out_free_lcd;
698 }
699
700 lcd->ld = ld;
701
702 bd = backlight_device_register("ld9040-bl", &spi->dev,
703 lcd, &ld9040_backlight_ops, NULL);
704 if (IS_ERR(ld)) {
705 ret = PTR_ERR(ld);
706 goto out_free_lcd;
707 }
708
709 bd->props.max_brightness = MAX_BRIGHTNESS;
710 bd->props.brightness = MAX_BRIGHTNESS;
711 lcd->bd = bd;
712
713 /*
714 * if lcd panel was on from bootloader like u-boot then
715 * do not lcd on.
716 */
717 if (!lcd->lcd_pd->lcd_enabled) {
718 /*
719 * if lcd panel was off from bootloader then
720 * current lcd status is powerdown and then
721 * it enables lcd panel.
722 */
723 lcd->power = FB_BLANK_POWERDOWN;
724
725 ld9040_power(lcd, FB_BLANK_UNBLANK);
726 } else
727 lcd->power = FB_BLANK_UNBLANK;
728
729 dev_set_drvdata(&spi->dev, lcd);
730
731 dev_info(&spi->dev, "ld9040 panel driver has been probed.\n");
732 return 0;
733
734out_free_lcd:
735 kfree(lcd);
736 return ret;
737}
738
739static int __devexit ld9040_remove(struct spi_device *spi)
740{
741 struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
742
743 ld9040_power(lcd, FB_BLANK_POWERDOWN);
744 lcd_device_unregister(lcd->ld);
745 kfree(lcd);
746
747 return 0;
748}
749
750#if defined(CONFIG_PM)
751static int ld9040_suspend(struct spi_device *spi, pm_message_t mesg)
752{
753 int ret = 0;
754 struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
755
756 dev_dbg(&spi->dev, "lcd->power = %d\n", lcd->power);
757
758 /*
759 * when lcd panel is suspend, lcd panel becomes off
760 * regardless of status.
761 */
762 ret = ld9040_power(lcd, FB_BLANK_POWERDOWN);
763
764 return ret;
765}
766
767static int ld9040_resume(struct spi_device *spi)
768{
769 int ret = 0;
770 struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
771
772 lcd->power = FB_BLANK_POWERDOWN;
773
774 ret = ld9040_power(lcd, FB_BLANK_UNBLANK);
775
776 return ret;
777}
778#else
779#define ld9040_suspend NULL
780#define ld9040_resume NULL
781#endif
782
/* Power down all displays on reboot, poweroff or halt. */
static void ld9040_shutdown(struct spi_device *spi)
{
	struct ld9040 *lcd = dev_get_drvdata(&spi->dev);

	/* Best effort: return value intentionally ignored at shutdown. */
	ld9040_power(lcd, FB_BLANK_POWERDOWN);
}
790
/* SPI driver glue binding probe/remove/PM hooks to the "ld9040" device name. */
static struct spi_driver ld9040_driver = {
	.driver = {
		.name = "ld9040",
		.bus = &spi_bus_type,
		.owner = THIS_MODULE,
	},
	.probe = ld9040_probe,
	.remove = __devexit_p(ld9040_remove),
	.shutdown = ld9040_shutdown,
	.suspend = ld9040_suspend,
	.resume = ld9040_resume,
};
803
/* Module init: register the SPI driver with the SPI core. */
static int __init ld9040_init(void)
{
	return spi_register_driver(&ld9040_driver);
}

/* Module exit: unregister the SPI driver. */
static void __exit ld9040_exit(void)
{
	spi_unregister_driver(&ld9040_driver);
}

module_init(ld9040_init);
module_exit(ld9040_exit);
816
817MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
818MODULE_DESCRIPTION("ld9040 LCD Driver");
819MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/ld9040_gamma.h b/drivers/video/backlight/ld9040_gamma.h
new file mode 100644
index 000000000000..038d9c86ec03
--- /dev/null
+++ b/drivers/video/backlight/ld9040_gamma.h
@@ -0,0 +1,200 @@
1/*
2 * Gamma level definitions.
3 *
4 * Copyright (c) 2011 Samsung Electronics
5 * InKi Dae <inki.dae@samsung.com>
6 * Donghwa Lee <dh09.lee@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef _LD9040_BRIGHTNESS_H
14#define _LD9040_BRIGHTNESS_H
15
16#define MAX_GAMMA_LEVEL 25
17#define GAMMA_TABLE_COUNT 21
18
19/* gamma value: 2.2 */
20static const unsigned int ld9040_22_300[] = {
21 0x00, 0xa7, 0xb4, 0xae, 0xbf, 0x00, 0x91,
22 0x00, 0xb2, 0xb4, 0xaa, 0xbb, 0x00, 0xac,
23 0x00, 0xb3, 0xb1, 0xaa, 0xbc, 0x00, 0xb3
24};
25
26static const unsigned int ld9040_22_290[] = {
27 0x00, 0xa9, 0xb7, 0xae, 0xbd, 0x00, 0x89,
28 0x00, 0xb7, 0xb6, 0xa8, 0xba, 0x00, 0xa4,
29 0x00, 0xb1, 0xb4, 0xaa, 0xbb, 0x00, 0xaa
30};
31
32static const unsigned int ld9040_22_280[] = {
33 0x00, 0xa9, 0xb6, 0xad, 0xbf, 0x00, 0x86,
34 0x00, 0xb8, 0xb5, 0xa8, 0xbc, 0x00, 0xa0,
35 0x00, 0xb3, 0xb3, 0xa9, 0xbc, 0x00, 0xa7
36};
37
38static const unsigned int ld9040_22_270[] = {
39 0x00, 0xa8, 0xb8, 0xae, 0xbe, 0x00, 0x84,
40 0x00, 0xb9, 0xb7, 0xa8, 0xbc, 0x00, 0x9d,
41 0x00, 0xb2, 0xb5, 0xaa, 0xbc, 0x00, 0xa4
42
43};
44static const unsigned int ld9040_22_260[] = {
45 0x00, 0xa4, 0xb8, 0xb0, 0xbf, 0x00, 0x80,
46 0x00, 0xb8, 0xb6, 0xaa, 0xbc, 0x00, 0x9a,
47 0x00, 0xb0, 0xb5, 0xab, 0xbd, 0x00, 0xa0
48};
49
50static const unsigned int ld9040_22_250[] = {
51 0x00, 0xa4, 0xb9, 0xaf, 0xc1, 0x00, 0x7d,
52 0x00, 0xb9, 0xb6, 0xaa, 0xbb, 0x00, 0x97,
53 0x00, 0xb1, 0xb5, 0xaa, 0xbf, 0x00, 0x9d
54};
55
56static const unsigned int ld9040_22_240[] = {
57 0x00, 0xa2, 0xb9, 0xaf, 0xc2, 0x00, 0x7a,
58 0x00, 0xb9, 0xb7, 0xaa, 0xbd, 0x00, 0x94,
59 0x00, 0xb0, 0xb5, 0xab, 0xbf, 0x00, 0x9a
60};
61
62static const unsigned int ld9040_22_230[] = {
63 0x00, 0xa0, 0xb9, 0xaf, 0xc3, 0x00, 0x77,
64 0x00, 0xb9, 0xb7, 0xab, 0xbe, 0x00, 0x90,
65 0x00, 0xb0, 0xb6, 0xab, 0xbf, 0x00, 0x97
66};
67
68static const unsigned int ld9040_22_220[] = {
69 0x00, 0x9e, 0xba, 0xb0, 0xc2, 0x00, 0x75,
70 0x00, 0xb9, 0xb8, 0xab, 0xbe, 0x00, 0x8e,
71 0x00, 0xb0, 0xb6, 0xac, 0xbf, 0x00, 0x94
72};
73
74static const unsigned int ld9040_22_210[] = {
75 0x00, 0x9c, 0xb9, 0xb0, 0xc4, 0x00, 0x72,
76 0x00, 0xb8, 0xb8, 0xac, 0xbf, 0x00, 0x8a,
77 0x00, 0xb0, 0xb6, 0xac, 0xc0, 0x00, 0x91
78};
79
80static const unsigned int ld9040_22_200[] = {
81 0x00, 0x9a, 0xba, 0xb1, 0xc4, 0x00, 0x6f,
82 0x00, 0xb8, 0xb8, 0xad, 0xc0, 0x00, 0x86,
83 0x00, 0xb0, 0xb7, 0xad, 0xc0, 0x00, 0x8d
84};
85
86static const unsigned int ld9040_22_190[] = {
87 0x00, 0x97, 0xba, 0xb2, 0xc5, 0x00, 0x6c,
88 0x00, 0xb8, 0xb8, 0xae, 0xc1, 0x00, 0x82,
89 0x00, 0xb0, 0xb6, 0xae, 0xc2, 0x00, 0x89
90};
91
92static const unsigned int ld9040_22_180[] = {
93 0x00, 0x93, 0xba, 0xb3, 0xc5, 0x00, 0x69,
94 0x00, 0xb8, 0xb9, 0xae, 0xc1, 0x00, 0x7f,
95 0x00, 0xb0, 0xb6, 0xae, 0xc3, 0x00, 0x85
96};
97
98static const unsigned int ld9040_22_170[] = {
99 0x00, 0x8b, 0xb9, 0xb3, 0xc7, 0x00, 0x65,
100 0x00, 0xb7, 0xb8, 0xaf, 0xc3, 0x00, 0x7a,
101 0x00, 0x80, 0xb6, 0xae, 0xc4, 0x00, 0x81
102};
103
104static const unsigned int ld9040_22_160[] = {
105 0x00, 0x89, 0xba, 0xb3, 0xc8, 0x00, 0x62,
106 0x00, 0xb6, 0xba, 0xaf, 0xc3, 0x00, 0x76,
107 0x00, 0xaf, 0xb7, 0xae, 0xc4, 0x00, 0x7e
108};
109
110static const unsigned int ld9040_22_150[] = {
111 0x00, 0x82, 0xba, 0xb4, 0xc7, 0x00, 0x5f,
112 0x00, 0xb5, 0xba, 0xb0, 0xc3, 0x00, 0x72,
113 0x00, 0xae, 0xb8, 0xb0, 0xc3, 0x00, 0x7a
114};
115
116static const unsigned int ld9040_22_140[] = {
117 0x00, 0x7b, 0xbb, 0xb4, 0xc8, 0x00, 0x5b,
118 0x00, 0xb5, 0xba, 0xb1, 0xc4, 0x00, 0x6e,
119 0x00, 0xae, 0xb9, 0xb0, 0xc5, 0x00, 0x75
120};
121
122static const unsigned int ld9040_22_130[] = {
123 0x00, 0x71, 0xbb, 0xb5, 0xc8, 0x00, 0x57,
124 0x00, 0xb5, 0xbb, 0xb0, 0xc5, 0x00, 0x6a,
125 0x00, 0xae, 0xb9, 0xb1, 0xc6, 0x00, 0x70
126};
127
128static const unsigned int ld9040_22_120[] = {
129 0x00, 0x47, 0xba, 0xb6, 0xca, 0x00, 0x53,
130 0x00, 0xb5, 0xbb, 0xb3, 0xc6, 0x00, 0x65,
131 0x00, 0xae, 0xb8, 0xb3, 0xc7, 0x00, 0x6c
132};
133
134static const unsigned int ld9040_22_110[] = {
135 0x00, 0x13, 0xbb, 0xb7, 0xca, 0x00, 0x4f,
136 0x00, 0xb4, 0xbb, 0xb3, 0xc7, 0x00, 0x60,
137 0x00, 0xad, 0xb8, 0xb4, 0xc7, 0x00, 0x67
138};
139
140static const unsigned int ld9040_22_100[] = {
141 0x00, 0x13, 0xba, 0xb8, 0xcb, 0x00, 0x4b,
142 0x00, 0xb3, 0xbc, 0xb4, 0xc7, 0x00, 0x5c,
143 0x00, 0xac, 0xb8, 0xb4, 0xc8, 0x00, 0x62
144};
145
146static const unsigned int ld9040_22_90[] = {
147 0x00, 0x13, 0xb9, 0xb8, 0xcd, 0x00, 0x46,
148 0x00, 0xb1, 0xbc, 0xb5, 0xc8, 0x00, 0x56,
149 0x00, 0xaa, 0xb8, 0xb4, 0xc9, 0x00, 0x5d
150};
151
152static const unsigned int ld9040_22_80[] = {
153 0x00, 0x13, 0xba, 0xb9, 0xcd, 0x00, 0x41,
154 0x00, 0xb0, 0xbe, 0xb5, 0xc9, 0x00, 0x51,
155 0x00, 0xa9, 0xb9, 0xb5, 0xca, 0x00, 0x57
156};
157
158static const unsigned int ld9040_22_70[] = {
159 0x00, 0x13, 0xb9, 0xb9, 0xd0, 0x00, 0x3c,
160 0x00, 0xaf, 0xbf, 0xb6, 0xcb, 0x00, 0x4b,
161 0x00, 0xa8, 0xb9, 0xb5, 0xcc, 0x00, 0x52
162};
163
164static const unsigned int ld9040_22_50[] = {
165 0x00, 0x13, 0xb2, 0xba, 0xd2, 0x00, 0x30,
166 0x00, 0xaf, 0xc0, 0xb8, 0xcd, 0x00, 0x3d,
167 0x00, 0xa8, 0xb8, 0xb7, 0xcd, 0x00, 0x44
168};
169
170struct ld9040_gamma {
171 unsigned int *gamma_22_table[MAX_GAMMA_LEVEL];
172} gamma_table = {
173 .gamma_22_table[0] = (unsigned int *)&ld9040_22_50,
174 .gamma_22_table[1] = (unsigned int *)&ld9040_22_70,
175 .gamma_22_table[2] = (unsigned int *)&ld9040_22_80,
176 .gamma_22_table[3] = (unsigned int *)&ld9040_22_90,
177 .gamma_22_table[4] = (unsigned int *)&ld9040_22_100,
178 .gamma_22_table[5] = (unsigned int *)&ld9040_22_110,
179 .gamma_22_table[6] = (unsigned int *)&ld9040_22_120,
180 .gamma_22_table[7] = (unsigned int *)&ld9040_22_130,
181 .gamma_22_table[8] = (unsigned int *)&ld9040_22_140,
182 .gamma_22_table[9] = (unsigned int *)&ld9040_22_150,
183 .gamma_22_table[10] = (unsigned int *)&ld9040_22_160,
184 .gamma_22_table[11] = (unsigned int *)&ld9040_22_170,
185 .gamma_22_table[12] = (unsigned int *)&ld9040_22_180,
186 .gamma_22_table[13] = (unsigned int *)&ld9040_22_190,
187 .gamma_22_table[14] = (unsigned int *)&ld9040_22_200,
188 .gamma_22_table[15] = (unsigned int *)&ld9040_22_210,
189 .gamma_22_table[16] = (unsigned int *)&ld9040_22_220,
190 .gamma_22_table[17] = (unsigned int *)&ld9040_22_230,
191 .gamma_22_table[18] = (unsigned int *)&ld9040_22_240,
192 .gamma_22_table[19] = (unsigned int *)&ld9040_22_250,
193 .gamma_22_table[20] = (unsigned int *)&ld9040_22_260,
194 .gamma_22_table[21] = (unsigned int *)&ld9040_22_270,
195 .gamma_22_table[22] = (unsigned int *)&ld9040_22_280,
196 .gamma_22_table[23] = (unsigned int *)&ld9040_22_290,
197 .gamma_22_table[24] = (unsigned int *)&ld9040_22_300,
198};
199
200#endif
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
index d2f59015d517..bbca3127071e 100644
--- a/drivers/video/backlight/locomolcd.c
+++ b/drivers/video/backlight/locomolcd.c
@@ -184,6 +184,7 @@ static int locomolcd_probe(struct locomo_dev *ldev)
184 local_irq_restore(flags); 184 local_irq_restore(flags);
185 185
186 memset(&props, 0, sizeof(struct backlight_properties)); 186 memset(&props, 0, sizeof(struct backlight_properties));
187 props.type = BACKLIGHT_RAW;
187 props.max_brightness = 4; 188 props.max_brightness = 4;
188 locomolcd_bl_device = backlight_device_register("locomo-bl", 189 locomolcd_bl_device = backlight_device_register("locomo-bl",
189 &ldev->dev, NULL, 190 &ldev->dev, NULL,
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index 209acc105cbc..07e8e273ced0 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -136,6 +136,7 @@ static int __devinit max8925_backlight_probe(struct platform_device *pdev)
136 data->current_brightness = 0; 136 data->current_brightness = 0;
137 137
138 memset(&props, 0, sizeof(struct backlight_properties)); 138 memset(&props, 0, sizeof(struct backlight_properties));
139 props.type = BACKLIGHT_RAW;
139 props.max_brightness = MAX_BRIGHTNESS; 140 props.max_brightness = MAX_BRIGHTNESS;
140 bl = backlight_device_register(name, &pdev->dev, data, 141 bl = backlight_device_register(name, &pdev->dev, data,
141 &max8925_backlight_ops, &props); 142 &max8925_backlight_ops, &props);
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
deleted file mode 100644
index 1485f7345f49..000000000000
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ /dev/null
@@ -1,400 +0,0 @@
1/*
2 * Backlight Driver for Nvidia 8600 in Macbook Pro
3 *
4 * Copyright (c) Red Hat <mjg@redhat.com>
5 * Based on code from Pommed:
6 * Copyright (C) 2006 Nicolas Boichat <nicolas @boichat.ch>
7 * Copyright (C) 2006 Felipe Alfaro Solana <felipe_alfaro @linuxmail.org>
8 * Copyright (C) 2007 Julien BLACHE <jb@jblache.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This driver triggers SMIs which cause the firmware to change the
15 * backlight brightness. This is icky in many ways, but it's impractical to
16 * get at the firmware code in order to figure out what it's actually doing.
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/init.h>
22#include <linux/platform_device.h>
23#include <linux/backlight.h>
24#include <linux/err.h>
25#include <linux/dmi.h>
26#include <linux/io.h>
27
28static struct backlight_device *mbp_backlight_device;
29
30/* Structure to be passed to the DMI_MATCH function. */
31struct dmi_match_data {
32 /* I/O resource to allocate. */
33 unsigned long iostart;
34 unsigned long iolen;
35 /* Backlight operations structure. */
36 const struct backlight_ops backlight_ops;
37};
38
39/* Module parameters. */
40static int debug;
41module_param_named(debug, debug, int, 0644);
42MODULE_PARM_DESC(debug, "Set to one to enable debugging messages.");
43
44/*
45 * Implementation for MacBooks with Intel chipset.
46 */
47static int intel_chipset_send_intensity(struct backlight_device *bd)
48{
49 int intensity = bd->props.brightness;
50
51 if (debug)
52 printk(KERN_DEBUG "mbp_nvidia_bl: setting brightness to %d\n",
53 intensity);
54
55 outb(0x04 | (intensity << 4), 0xb3);
56 outb(0xbf, 0xb2);
57 return 0;
58}
59
60static int intel_chipset_get_intensity(struct backlight_device *bd)
61{
62 int intensity;
63
64 outb(0x03, 0xb3);
65 outb(0xbf, 0xb2);
66 intensity = inb(0xb3) >> 4;
67
68 if (debug)
69 printk(KERN_DEBUG "mbp_nvidia_bl: read brightness of %d\n",
70 intensity);
71
72 return intensity;
73}
74
75static const struct dmi_match_data intel_chipset_data = {
76 .iostart = 0xb2,
77 .iolen = 2,
78 .backlight_ops = {
79 .options = BL_CORE_SUSPENDRESUME,
80 .get_brightness = intel_chipset_get_intensity,
81 .update_status = intel_chipset_send_intensity,
82 }
83};
84
85/*
86 * Implementation for MacBooks with Nvidia chipset.
87 */
88static int nvidia_chipset_send_intensity(struct backlight_device *bd)
89{
90 int intensity = bd->props.brightness;
91
92 if (debug)
93 printk(KERN_DEBUG "mbp_nvidia_bl: setting brightness to %d\n",
94 intensity);
95
96 outb(0x04 | (intensity << 4), 0x52f);
97 outb(0xbf, 0x52e);
98 return 0;
99}
100
101static int nvidia_chipset_get_intensity(struct backlight_device *bd)
102{
103 int intensity;
104
105 outb(0x03, 0x52f);
106 outb(0xbf, 0x52e);
107 intensity = inb(0x52f) >> 4;
108
109 if (debug)
110 printk(KERN_DEBUG "mbp_nvidia_bl: read brightness of %d\n",
111 intensity);
112
113 return intensity;
114}
115
116static const struct dmi_match_data nvidia_chipset_data = {
117 .iostart = 0x52e,
118 .iolen = 2,
119 .backlight_ops = {
120 .options = BL_CORE_SUSPENDRESUME,
121 .get_brightness = nvidia_chipset_get_intensity,
122 .update_status = nvidia_chipset_send_intensity
123 }
124};
125
126/*
127 * DMI matching.
128 */
129static /* const */ struct dmi_match_data *driver_data;
130
131static int mbp_dmi_match(const struct dmi_system_id *id)
132{
133 driver_data = id->driver_data;
134
135 printk(KERN_INFO "mbp_nvidia_bl: %s detected\n", id->ident);
136 return 1;
137}
138
139static const struct dmi_system_id __initdata mbp_device_table[] = {
140 {
141 .callback = mbp_dmi_match,
142 .ident = "MacBook 1,1",
143 .matches = {
144 DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
145 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
146 },
147 .driver_data = (void *)&intel_chipset_data,
148 },
149 {
150 .callback = mbp_dmi_match,
151 .ident = "MacBook 2,1",
152 .matches = {
153 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
154 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook2,1"),
155 },
156 .driver_data = (void *)&intel_chipset_data,
157 },
158 {
159 .callback = mbp_dmi_match,
160 .ident = "MacBook 3,1",
161 .matches = {
162 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
163 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook3,1"),
164 },
165 .driver_data = (void *)&intel_chipset_data,
166 },
167 {
168 .callback = mbp_dmi_match,
169 .ident = "MacBook 4,1",
170 .matches = {
171 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
172 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,1"),
173 },
174 .driver_data = (void *)&intel_chipset_data,
175 },
176 {
177 .callback = mbp_dmi_match,
178 .ident = "MacBook 4,2",
179 .matches = {
180 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
181 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,2"),
182 },
183 .driver_data = (void *)&intel_chipset_data,
184 },
185 {
186 .callback = mbp_dmi_match,
187 .ident = "MacBookPro 1,1",
188 .matches = {
189 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
190 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro1,1"),
191 },
192 .driver_data = (void *)&intel_chipset_data,
193 },
194 {
195 .callback = mbp_dmi_match,
196 .ident = "MacBookPro 1,2",
197 .matches = {
198 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
199 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro1,2"),
200 },
201 .driver_data = (void *)&intel_chipset_data,
202 },
203 {
204 .callback = mbp_dmi_match,
205 .ident = "MacBookPro 2,1",
206 .matches = {
207 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
208 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,1"),
209 },
210 .driver_data = (void *)&intel_chipset_data,
211 },
212 {
213 .callback = mbp_dmi_match,
214 .ident = "MacBookPro 2,2",
215 .matches = {
216 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
217 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2"),
218 },
219 .driver_data = (void *)&intel_chipset_data,
220 },
221 {
222 .callback = mbp_dmi_match,
223 .ident = "MacBookPro 3,1",
224 .matches = {
225 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
226 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3,1"),
227 },
228 .driver_data = (void *)&intel_chipset_data,
229 },
230 {
231 .callback = mbp_dmi_match,
232 .ident = "MacBookPro 3,2",
233 .matches = {
234 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
235 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3,2"),
236 },
237 .driver_data = (void *)&intel_chipset_data,
238 },
239 {
240 .callback = mbp_dmi_match,
241 .ident = "MacBookPro 4,1",
242 .matches = {
243 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
244 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro4,1"),
245 },
246 .driver_data = (void *)&intel_chipset_data,
247 },
248 {
249 .callback = mbp_dmi_match,
250 .ident = "MacBookAir 1,1",
251 .matches = {
252 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
253 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir1,1"),
254 },
255 .driver_data = (void *)&intel_chipset_data,
256 },
257 {
258 .callback = mbp_dmi_match,
259 .ident = "MacBook 5,1",
260 .matches = {
261 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
262 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,1"),
263 },
264 .driver_data = (void *)&nvidia_chipset_data,
265 },
266 {
267 .callback = mbp_dmi_match,
268 .ident = "MacBook 5,2",
269 .matches = {
270 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
271 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"),
272 },
273 .driver_data = (void *)&nvidia_chipset_data,
274 },
275 {
276 .callback = mbp_dmi_match,
277 .ident = "MacBook 6,1",
278 .matches = {
279 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
280 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"),
281 },
282 .driver_data = (void *)&nvidia_chipset_data,
283 },
284 {
285 .callback = mbp_dmi_match,
286 .ident = "MacBookAir 2,1",
287 .matches = {
288 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
289 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir2,1"),
290 },
291 .driver_data = (void *)&nvidia_chipset_data,
292 },
293 {
294 .callback = mbp_dmi_match,
295 .ident = "MacBookPro 5,1",
296 .matches = {
297 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
298 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,1"),
299 },
300 .driver_data = (void *)&nvidia_chipset_data,
301 },
302 {
303 .callback = mbp_dmi_match,
304 .ident = "MacBookPro 5,2",
305 .matches = {
306 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
307 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,2"),
308 },
309 .driver_data = (void *)&nvidia_chipset_data,
310 },
311 {
312 .callback = mbp_dmi_match,
313 .ident = "MacBookPro 5,3",
314 .matches = {
315 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
316 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,3"),
317 },
318 .driver_data = (void *)&nvidia_chipset_data,
319 },
320 {
321 .callback = mbp_dmi_match,
322 .ident = "MacBookPro 5,4",
323 .matches = {
324 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
325 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,4"),
326 },
327 .driver_data = (void *)&nvidia_chipset_data,
328 },
329 {
330 .callback = mbp_dmi_match,
331 .ident = "MacBookPro 5,5",
332 .matches = {
333 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
334 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,5"),
335 },
336 .driver_data = (void *)&nvidia_chipset_data,
337 },
338 {
339 .callback = mbp_dmi_match,
340 .ident = "MacBookAir 3,1",
341 .matches = {
342 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
343 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,1"),
344 },
345 .driver_data = (void *)&nvidia_chipset_data,
346 },
347 {
348 .callback = mbp_dmi_match,
349 .ident = "MacBookAir 3,2",
350 .matches = {
351 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
352 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,2"),
353 },
354 .driver_data = (void *)&nvidia_chipset_data,
355 },
356 { }
357};
358
359static int __init mbp_init(void)
360{
361 struct backlight_properties props;
362 if (!dmi_check_system(mbp_device_table))
363 return -ENODEV;
364
365 if (!request_region(driver_data->iostart, driver_data->iolen,
366 "Macbook Pro backlight"))
367 return -ENXIO;
368
369 memset(&props, 0, sizeof(struct backlight_properties));
370 props.max_brightness = 15;
371 mbp_backlight_device = backlight_device_register("mbp_backlight", NULL,
372 NULL,
373 &driver_data->backlight_ops,
374 &props);
375 if (IS_ERR(mbp_backlight_device)) {
376 release_region(driver_data->iostart, driver_data->iolen);
377 return PTR_ERR(mbp_backlight_device);
378 }
379
380 mbp_backlight_device->props.brightness =
381 driver_data->backlight_ops.get_brightness(mbp_backlight_device);
382 backlight_update_status(mbp_backlight_device);
383
384 return 0;
385}
386
387static void __exit mbp_exit(void)
388{
389 backlight_device_unregister(mbp_backlight_device);
390
391 release_region(driver_data->iostart, driver_data->iolen);
392}
393
394module_init(mbp_init);
395module_exit(mbp_exit);
396
397MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
398MODULE_DESCRIPTION("Nvidia-based Macbook Pro Backlight Driver");
399MODULE_LICENSE("GPL");
400MODULE_DEVICE_TABLE(dmi, mbp_device_table);
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index d3bc56296c8d..08d26a72394c 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -146,6 +146,7 @@ static int omapbl_probe(struct platform_device *pdev)
146 return -ENOMEM; 146 return -ENOMEM;
147 147
148 memset(&props, 0, sizeof(struct backlight_properties)); 148 memset(&props, 0, sizeof(struct backlight_properties));
149 props.type = BACKLIGHT_RAW;
149 props.max_brightness = OMAPBL_MAX_INTENSITY; 150 props.max_brightness = OMAPBL_MAX_INTENSITY;
150 dev = backlight_device_register("omap-bl", &pdev->dev, bl, &omapbl_ops, 151 dev = backlight_device_register("omap-bl", &pdev->dev, bl, &omapbl_ops,
151 &props); 152 &props);
diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c
index 3c424f7efdcc..ef5628d60563 100644
--- a/drivers/video/backlight/pcf50633-backlight.c
+++ b/drivers/video/backlight/pcf50633-backlight.c
@@ -112,6 +112,7 @@ static int __devinit pcf50633_bl_probe(struct platform_device *pdev)
112 if (!pcf_bl) 112 if (!pcf_bl)
113 return -ENOMEM; 113 return -ENOMEM;
114 114
115 bl_props.type = BACKLIGHT_RAW;
115 bl_props.max_brightness = 0x3f; 116 bl_props.max_brightness = 0x3f;
116 bl_props.power = FB_BLANK_UNBLANK; 117 bl_props.power = FB_BLANK_UNBLANK;
117 118
diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
index 809278c90738..6af183d6465e 100644
--- a/drivers/video/backlight/progear_bl.c
+++ b/drivers/video/backlight/progear_bl.c
@@ -84,6 +84,7 @@ static int progearbl_probe(struct platform_device *pdev)
84 pci_write_config_byte(sb_dev, SB_MPS1, temp | 0x20); 84 pci_write_config_byte(sb_dev, SB_MPS1, temp | 0x20);
85 85
86 memset(&props, 0, sizeof(struct backlight_properties)); 86 memset(&props, 0, sizeof(struct backlight_properties));
87 props.type = BACKLIGHT_RAW;
87 props.max_brightness = HW_LEVEL_MAX - HW_LEVEL_MIN; 88 props.max_brightness = HW_LEVEL_MAX - HW_LEVEL_MIN;
88 progear_backlight_device = backlight_device_register("progear-bl", 89 progear_backlight_device = backlight_device_register("progear-bl",
89 &pdev->dev, NULL, 90 &pdev->dev, NULL,
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 21866ec69656..b8f38ec6eb18 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -28,6 +28,7 @@ struct pwm_bl_data {
28 unsigned int lth_brightness; 28 unsigned int lth_brightness;
29 int (*notify)(struct device *, 29 int (*notify)(struct device *,
30 int brightness); 30 int brightness);
31 int (*check_fb)(struct device *, struct fb_info *);
31}; 32};
32 33
33static int pwm_backlight_update_status(struct backlight_device *bl) 34static int pwm_backlight_update_status(struct backlight_device *bl)
@@ -62,9 +63,18 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
62 return bl->props.brightness; 63 return bl->props.brightness;
63} 64}
64 65
66static int pwm_backlight_check_fb(struct backlight_device *bl,
67 struct fb_info *info)
68{
69 struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
70
71 return !pb->check_fb || pb->check_fb(pb->dev, info);
72}
73
65static const struct backlight_ops pwm_backlight_ops = { 74static const struct backlight_ops pwm_backlight_ops = {
66 .update_status = pwm_backlight_update_status, 75 .update_status = pwm_backlight_update_status,
67 .get_brightness = pwm_backlight_get_brightness, 76 .get_brightness = pwm_backlight_get_brightness,
77 .check_fb = pwm_backlight_check_fb,
68}; 78};
69 79
70static int pwm_backlight_probe(struct platform_device *pdev) 80static int pwm_backlight_probe(struct platform_device *pdev)
@@ -95,6 +105,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
95 105
96 pb->period = data->pwm_period_ns; 106 pb->period = data->pwm_period_ns;
97 pb->notify = data->notify; 107 pb->notify = data->notify;
108 pb->check_fb = data->check_fb;
98 pb->lth_brightness = data->lth_brightness * 109 pb->lth_brightness = data->lth_brightness *
99 (data->pwm_period_ns / data->max_brightness); 110 (data->pwm_period_ns / data->max_brightness);
100 pb->dev = &pdev->dev; 111 pb->dev = &pdev->dev;
@@ -108,6 +119,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
108 dev_dbg(&pdev->dev, "got pwm for backlight\n"); 119 dev_dbg(&pdev->dev, "got pwm for backlight\n");
109 120
110 memset(&props, 0, sizeof(struct backlight_properties)); 121 memset(&props, 0, sizeof(struct backlight_properties));
122 props.type = BACKLIGHT_RAW;
111 props.max_brightness = data->max_brightness; 123 props.max_brightness = data->max_brightness;
112 bl = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, pb, 124 bl = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, pb,
113 &pwm_backlight_ops, &props); 125 &pwm_backlight_ops, &props);
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index 5927db0da999..322040f686c2 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -778,6 +778,7 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
778 778
779 bd->props.max_brightness = MAX_BRIGHTNESS; 779 bd->props.max_brightness = MAX_BRIGHTNESS;
780 bd->props.brightness = MAX_BRIGHTNESS; 780 bd->props.brightness = MAX_BRIGHTNESS;
781 bd->props.type = BACKLIGHT_RAW;
781 lcd->bd = bd; 782 lcd->bd = bd;
782 783
783 /* 784 /*
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 2a04b382ec48..425a7365470b 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -102,6 +102,7 @@ static int __devinit tosa_bl_probe(struct i2c_client *client,
102 data->i2c = client; 102 data->i2c = client;
103 103
104 memset(&props, 0, sizeof(struct backlight_properties)); 104 memset(&props, 0, sizeof(struct backlight_properties));
105 props.type = BACKLIGHT_RAW;
105 props.max_brightness = 512 - 1; 106 props.max_brightness = 512 - 1;
106 data->bl = backlight_device_register("tosa-bl", &client->dev, data, 107 data->bl = backlight_device_register("tosa-bl", &client->dev, data,
107 &bl_ops, &props); 108 &bl_ops, &props);
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index 08fd87f3aecc..d4c6eb248ff9 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -193,6 +193,7 @@ static int wm831x_backlight_probe(struct platform_device *pdev)
193 data->current_brightness = 0; 193 data->current_brightness = 0;
194 data->isink_reg = isink_reg; 194 data->isink_reg = isink_reg;
195 195
196 props.type = BACKLIGHT_RAW;
196 props.max_brightness = max_isel; 197 props.max_brightness = max_isel;
197 bl = backlight_device_register("wm831x", &pdev->dev, data, 198 bl = backlight_device_register("wm831x", &pdev->dev, data,
198 &wm831x_backlight_ops, &props); 199 &wm831x_backlight_ops, &props);
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index e7d0f525041e..2464b910b590 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -649,6 +649,7 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
649 } 649 }
650#ifndef NO_BL_SUPPORT 650#ifndef NO_BL_SUPPORT
651 memset(&props, 0, sizeof(struct backlight_properties)); 651 memset(&props, 0, sizeof(struct backlight_properties));
652 props.type = BACKLIGHT_RAW;
652 props.max_brightness = 255; 653 props.max_brightness = 255;
653 bl_dev = backlight_device_register("bf54x-bl", NULL, NULL, 654 bl_dev = backlight_device_register("bf54x-bl", NULL, NULL,
654 &bfin_lq043fb_bl_ops, &props); 655 &bfin_lq043fb_bl_ops, &props);
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index 3cf77676947c..d8de29f0dd8d 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -545,6 +545,7 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
545 } 545 }
546#ifndef NO_BL_SUPPORT 546#ifndef NO_BL_SUPPORT
547 memset(&props, 0, sizeof(struct backlight_properties)); 547 memset(&props, 0, sizeof(struct backlight_properties));
548 props.type = BACKLIGHT_RAW;
548 props.max_brightness = 255; 549 props.max_brightness = 255;
549 bl_dev = backlight_device_register("bf52x-bl", NULL, NULL, 550 bl_dev = backlight_device_register("bf52x-bl", NULL, NULL,
550 &bfin_lq043fb_bl_ops, &props); 551 &bfin_lq043fb_bl_ops, &props);
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 69bd4a581d4a..ef72cb483834 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -499,6 +499,7 @@ static void imxfb_init_backlight(struct imxfb_info *fbi)
499 499
500 memset(&props, 0, sizeof(struct backlight_properties)); 500 memset(&props, 0, sizeof(struct backlight_properties));
501 props.max_brightness = 0xff; 501 props.max_brightness = 0xff;
502 props.type = BACKLIGHT_RAW;
502 writel(fbi->pwmr, fbi->regs + LCDC_PWMR); 503 writel(fbi->pwmr, fbi->regs + LCDC_PWMR);
503 504
504 bl = backlight_device_register("imxfb-bl", &fbi->pdev->dev, fbi, 505 bl = backlight_device_register("imxfb-bl", &fbi->pdev->dev, fbi,
diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
index 6aac6d1b937b..8471008aa6ff 100644
--- a/drivers/video/nvidia/nv_backlight.c
+++ b/drivers/video/nvidia/nv_backlight.c
@@ -111,6 +111,7 @@ void nvidia_bl_init(struct nvidia_par *par)
111 snprintf(name, sizeof(name), "nvidiabl%d", info->node); 111 snprintf(name, sizeof(name), "nvidiabl%d", info->node);
112 112
113 memset(&props, 0, sizeof(struct backlight_properties)); 113 memset(&props, 0, sizeof(struct backlight_properties));
114 props.type = BACKLIGHT_RAW;
114 props.max_brightness = FB_BACKLIGHT_LEVELS - 1; 115 props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
115 bd = backlight_device_register(name, info->dev, par, &nvidia_bl_ops, 116 bd = backlight_device_register(name, info->dev, par, &nvidia_bl_ops,
116 &props); 117 &props);
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
index e77310653207..7e04c921aa2a 100644
--- a/drivers/video/omap2/displays/panel-acx565akm.c
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -534,6 +534,7 @@ static int acx_panel_probe(struct omap_dss_device *dssdev)
534 534
535 props.fb_blank = FB_BLANK_UNBLANK; 535 props.fb_blank = FB_BLANK_UNBLANK;
536 props.power = FB_BLANK_UNBLANK; 536 props.power = FB_BLANK_UNBLANK;
537 props.type = BACKLIGHT_RAW;
537 538
538 bldev = backlight_device_register("acx565akm", &md->spi->dev, 539 bldev = backlight_device_register("acx565akm", &md->spi->dev,
539 md, &acx565akm_bl_ops, &props); 540 md, &acx565akm_bl_ops, &props);
diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
index 9a138f650e05..d2b35d2df2a6 100644
--- a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
@@ -99,6 +99,7 @@ static int sharp_ls_panel_probe(struct omap_dss_device *dssdev)
99 99
100 memset(&props, 0, sizeof(struct backlight_properties)); 100 memset(&props, 0, sizeof(struct backlight_properties));
101 props.max_brightness = dssdev->max_backlight_level; 101 props.max_brightness = dssdev->max_backlight_level;
102 props.type = BACKLIGHT_RAW;
102 103
103 bl = backlight_device_register("sharp-ls", &dssdev->dev, dssdev, 104 bl = backlight_device_register("sharp-ls", &dssdev->dev, dssdev,
104 &sharp_ls_bl_ops, &props); 105 &sharp_ls_bl_ops, &props);
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index 61026f96ad20..c74e8b778ba1 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -729,6 +729,8 @@ static int taal_probe(struct omap_dss_device *dssdev)
729 props.max_brightness = 255; 729 props.max_brightness = 255;
730 else 730 else
731 props.max_brightness = 127; 731 props.max_brightness = 127;
732
733 props.type = BACKLIGHT_RAW;
732 bldev = backlight_device_register("taal", &dssdev->dev, dssdev, 734 bldev = backlight_device_register("taal", &dssdev->dev, dssdev,
733 &taal_bl_ops, &props); 735 &taal_bl_ops, &props);
734 if (IS_ERR(bldev)) { 736 if (IS_ERR(bldev)) {
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index da388186d617..d8ab7be4fd6b 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -355,6 +355,7 @@ static void riva_bl_init(struct riva_par *par)
355 snprintf(name, sizeof(name), "rivabl%d", info->node); 355 snprintf(name, sizeof(name), "rivabl%d", info->node);
356 356
357 memset(&props, 0, sizeof(struct backlight_properties)); 357 memset(&props, 0, sizeof(struct backlight_properties));
358 props.type = BACKLIGHT_RAW;
358 props.max_brightness = FB_BACKLIGHT_LEVELS - 1; 359 props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
359 bd = backlight_device_register(name, info->dev, par, &riva_bl_ops, 360 bd = backlight_device_register(name, info->dev, par, &riva_bl_ops,
360 &props); 361 &props);
diff --git a/drivers/video/via/viafbdev.h b/drivers/video/via/viafbdev.h
index d66f963e930e..137996dc547e 100644
--- a/drivers/video/via/viafbdev.h
+++ b/drivers/video/via/viafbdev.h
@@ -94,9 +94,6 @@ extern int viafb_LCD_ON;
94extern int viafb_DVI_ON; 94extern int viafb_DVI_ON;
95extern int viafb_hotplug; 95extern int viafb_hotplug;
96 96
97extern int strict_strtoul(const char *cp, unsigned int base,
98 unsigned long *res);
99
100u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information 97u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information
101 *plvds_setting_info, struct lvds_chip_information 98 *plvds_setting_info, struct lvds_chip_information
102 *plvds_chip_info, u8 index); 99 *plvds_chip_info, u8 index);