From 5fd3a17ed456637a224cf4ca82b9ad9d005bc8d4 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 4 Mar 2009 00:57:25 -0700 Subject: md: fix deadlock when stopping arrays Resolve a deadlock when stopping redundant arrays, i.e. ones that require a call to sysfs_remove_group when shutdown. The deadlock is summarized below: Thread1 Thread2 ------- ------- read sysfs attribute stop array take mddev lock sysfs_remove_group sysfs_get_active wait for mddev lock wait for active Sysrq-w: -------- mdmon S 00000017 2212 4163 1 f1982ea8 00000046 2dcf6b85 00000017 c0b23100 f2f83ed0 c0b23100 f2f8413c c0b23100 c0b23100 c0b1fb98 f2f8413c 00000000 f2f8413c c0b23100 f2291ecc 00000002 c0b23100 00000000 00000017 f2f83ed0 f1982eac 00000046 c044d9dd Call Trace: [] ? debug_mutex_add_waiter+0x1d/0x58 [] __mutex_lock_common+0x1d9/0x338 [] ? __mutex_lock_common+0x1d9/0x338 [] mutex_lock_interruptible_nested+0x33/0x3a [] ? mddev_lock+0x14/0x16 [] mddev_lock+0x14/0x16 [] md_attr_show+0x2a/0x49 [] sysfs_read_file+0x93/0xf9 mdadm D 00000017 2812 4177 1 f0401d78 00000046 430456f8 00000017 f0401d58 f0401d20 c0b23100 f2da2c4c c0b23100 c0b23100 c0b1fb98 f2da2c4c 0a10fc36 00000000 c0b23100 f0401d70 00000003 c0b23100 00000000 00000017 f2da29e0 00000001 00000002 00000000 Call Trace: [] schedule_timeout+0x1b/0x95 [] ? schedule_timeout+0x1b/0x95 [] ? wait_for_common+0x34/0xdc [] ? trace_hardirqs_on_caller+0x18/0x145 [] ? trace_hardirqs_on+0xb/0xd [] wait_for_common+0xa0/0xdc [] ? default_wake_function+0x0/0x12 [] wait_for_completion+0x17/0x19 [] sysfs_addrm_finish+0x19f/0x1d1 [] sysfs_hash_and_remove+0x42/0x55 [] sysfs_remove_group+0x57/0x86 [] do_md_stop+0x13a/0x499 This has been there for a while, but is easier to trigger now that mdmon is closely watching sysfs. Cc: Reported-by: Jacek Danecki Signed-off-by: Dan Williams --- drivers/md/md.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/md/md.c b/drivers/md/md.c index 03b4cd0a6344..a307f87eb90e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -214,12 +214,7 @@ static inline mddev_t *mddev_get(mddev_t *mddev) return mddev; } -static void mddev_delayed_delete(struct work_struct *ws) -{ - mddev_t *mddev = container_of(ws, mddev_t, del_work); - kobject_del(&mddev->kobj); - kobject_put(&mddev->kobj); -} +static void mddev_delayed_delete(struct work_struct *ws); static void mddev_put(mddev_t *mddev) { @@ -3542,6 +3537,21 @@ static struct kobj_type md_ktype = { int mdp_major = 0; +static void mddev_delayed_delete(struct work_struct *ws) +{ + mddev_t *mddev = container_of(ws, mddev_t, del_work); + + if (mddev->private == &md_redundancy_group) { + sysfs_remove_group(&mddev->kobj, &md_redundancy_group); + if (mddev->sysfs_action) + sysfs_put(mddev->sysfs_action); + mddev->sysfs_action = NULL; + mddev->private = NULL; + } + kobject_del(&mddev->kobj); + kobject_put(&mddev->kobj); +} + static int md_alloc(dev_t dev, char *name) { static DEFINE_MUTEX(disks_mutex); @@ -4033,13 +4043,9 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) mddev->queue->merge_bvec_fn = NULL; mddev->queue->unplug_fn = NULL; mddev->queue->backing_dev_info.congested_fn = NULL; - if (mddev->pers->sync_request) { - sysfs_remove_group(&mddev->kobj, &md_redundancy_group); - if (mddev->sysfs_action) - sysfs_put(mddev->sysfs_action); - mddev->sysfs_action = NULL; - } module_put(mddev->pers->owner); + if (mddev->pers->sync_request) + mddev->private = &md_redundancy_group; mddev->pers = NULL; /* tell userspace to handle 
'inactive' */ sysfs_notify_dirent(mddev->sysfs_state); -- cgit v1.2.2 From c21986a76220919bb06e4c04a89a8a62aeac107e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 4 Mar 2009 00:18:11 -0800 Subject: jsflash: stop defining MAJOR_NR Ever since early 2.5 kernels block drivers don't need to define MAJOR_NR anymore, so use the JSFD_MAJOR defined directly and kill it. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- drivers/sbus/char/jsflash.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c index a9a9893a5f95..e6d1fc8c54f1 100644 --- a/drivers/sbus/char/jsflash.c +++ b/drivers/sbus/char/jsflash.c @@ -38,9 +38,6 @@ #include #include #include - -#define MAJOR_NR JSFD_MAJOR - #include #include #include -- cgit v1.2.2 From f4c13638185c91a269c63fcfb980d89180cf43a1 Mon Sep 17 00:00:00 2001 From: Roel Kluin Date: Wed, 4 Mar 2009 00:19:28 -0800 Subject: sparc64: wait_event_interruptible_timeout may return -ERESTARTSYS wait_event_interruptible_timeout may return -ERESTARTSYS. Signed-off-by: Roel Kluin Signed-off-by: David S. Miller --- drivers/sbus/char/bbc_i2c.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c index f08e169ba1b5..7e30e5f6e032 100644 --- a/drivers/sbus/char/bbc_i2c.c +++ b/drivers/sbus/char/bbc_i2c.c @@ -129,7 +129,7 @@ static int wait_for_pin(struct bbc_i2c_bus *bp, u8 *status) bp->waiting = 1; add_wait_queue(&bp->wq, &wait); while (limit-- > 0) { - unsigned long val; + long val; val = wait_event_interruptible_timeout( bp->wq, -- cgit v1.2.2 From 0796e75503adc6b0a119493ce2e599fb5fd8f96e Mon Sep 17 00:00:00 2001 From: Friedrich Oslage Date: Sun, 8 Mar 2009 20:13:42 -0700 Subject: sunhme: Fix qfe parent detection. Signed-off-by: Friedrich Oslage Signed-off-by: David S. Miller --- drivers/net/sunhme.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index cc4013be5e18..0ff837a9c804 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c @@ -2630,8 +2630,6 @@ static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe) int err = -ENODEV; sbus_dp = to_of_device(op->dev.parent)->node; - if (is_qfe) - sbus_dp = to_of_device(op->dev.parent->parent)->node; /* We can match PCI devices too, do not accept those here. */ if (strcmp(sbus_dp->name, "sbus")) -- cgit v1.2.2 From 467fc4988986865b5dbcc8cc6a86c9b650cb0c6f Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Tue, 10 Mar 2009 06:08:49 +0000 Subject: video: deferred io cleanup fix for sh_mobile_lcdcfb Fix deferred io cleanup patch in the sh_mobile_lcdcfb driver. If probe() fails early the sh_mobile_lcdc_stop() function will be called to clean up deferred io. This patch modifies the code to only call fb_deferred_io_cleanup() after deferred io has been initialized. With this patch applied we no longer hit BUG_ON() inside fb_deferred_io_cleanup(). Triggers on a Migo-R with the SYS QVGA panel board unmounted. 
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- drivers/video/sh_mobile_lcdcfb.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index 0e2b8fd24df1..2c5d069e5f06 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c @@ -446,7 +446,6 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv) { struct sh_mobile_lcdc_chan *ch; struct sh_mobile_lcdc_board_cfg *board_cfg; - unsigned long tmp; int k; /* tell the board code to disable the panel */ @@ -456,9 +455,8 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv) if (board_cfg->display_off) board_cfg->display_off(board_cfg->board_data); - /* cleanup deferred io if SYS bus */ - tmp = ch->cfg.sys_bus_cfg.deferred_io_msec; - if (ch->ldmt1r_value & (1 << 12) && tmp) { + /* cleanup deferred io if enabled */ + if (ch->info.fbdefio) { fb_deferred_io_cleanup(&ch->info); ch->info.fbdefio = NULL; } -- cgit v1.2.2 From b70d11da61d751ad968c6f686d83ac1b0ae41466 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Tue, 3 Mar 2009 14:45:57 -0500 Subject: drm: Return EINVAL on duplicate objects in execbuffer object list MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If userspace passes an object list with the same object appearing more than once, we end up hitting the BUG_ON() in i915_gem_object_set_to_gpu_domain() as it gets called a second time for the same object. Signed-off-by: Kristian Høgsberg Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_drv.h | 6 ++++++ drivers/gpu/drm/i915/i915_gem.c | 17 ++++++++++++++++- 2 files changed, 22 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 17fa40858d26..9186d43be011 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -457,6 +457,12 @@ struct drm_i915_gem_object { /** for phy allocated objects */ struct drm_i915_gem_phys_object *phys_obj; + + /** + * Used for checking the object doesn't appear more than once + * in an execbuffer object list. 
+ */ + int in_execbuffer; }; /** diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 85685bfd12da..7bdcc752f139 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2469,6 +2469,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_i915_gem_exec_object *exec_list = NULL; struct drm_gem_object **object_list = NULL; struct drm_gem_object *batch_obj; + struct drm_i915_gem_object *obj_priv; int ret, i, pinned = 0; uint64_t exec_offset; uint32_t seqno, flush_domains; @@ -2533,6 +2534,15 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, ret = -EBADF; goto err; } + + obj_priv = object_list[i]->driver_private; + if (obj_priv->in_execbuffer) { + DRM_ERROR("Object %p appears more than once in object list\n", + object_list[i]); + ret = -EBADF; + goto err; + } + obj_priv->in_execbuffer = true; } /* Pin and relocate */ @@ -2674,8 +2684,13 @@ err: for (i = 0; i < pinned; i++) i915_gem_object_unpin(object_list[i]); - for (i = 0; i < args->buffer_count; i++) + for (i = 0; i < args->buffer_count; i++) { + if (object_list[i]) { + obj_priv = object_list[i]->driver_private; + obj_priv->in_execbuffer = false; + } drm_gem_object_unreference(object_list[i]); + } mutex_unlock(&dev->struct_mutex); -- cgit v1.2.2 From 0fce81e3ccd093f4826de40fbd27fac9632f6170 Mon Sep 17 00:00:00 2001 From: Kyle McMartin Date: Sat, 28 Feb 2009 15:01:16 -0500 Subject: i915: add newline to i915_gem_object_pin failure msg Prevents formatting nasty as below: [drm:i915_gem_object_pin] *ERROR* Failure to bind: -12<3>[drm:i915_gem_evict_something] *ERROR* inactive empty 1 request empty 1 flushing empty 1 Signed-off-by: Kyle McMartin Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_gem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 7bdcc752f139..c94069b28a47 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2727,7 +2727,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) ret = i915_gem_object_bind_to_gtt(obj, alignment); if (ret != 0) { if (ret != -EBUSY && ret != -ERESTARTSYS) - DRM_ERROR("Failure to bind: %d", ret); + DRM_ERROR("Failure to bind: %d\n", ret); return ret; } /* -- cgit v1.2.2 From 66824bd7b5dc22da367595359bfcd1149c4ce92a Mon Sep 17 00:00:00 2001 From: Pierre Willenbrock Date: Wed, 25 Feb 2009 17:49:51 +0100 Subject: drm/i915: Don't restore palettes through VGA registers. The VGA registers just hit the pipe registers that we already set through MMIO. This fixes strange colors on resume. 
Signed-off-by: Pierre Willenbrock Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/i915_suspend.c | 11 ----------- 2 files changed, 12 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 9186d43be011..d6cc9861e0a1 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -279,7 +279,6 @@ typedef struct drm_i915_private { u8 saveAR_INDEX; u8 saveAR[21]; u8 saveDACMASK; - u8 saveDACDATA[256*3]; /* 256 3-byte colors */ u8 saveCR[37]; struct { diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 5d84027ee8f3..d669cc2b42c0 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -119,11 +119,6 @@ static void i915_save_vga(struct drm_device *dev) /* VGA color palette registers */ dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK); - /* DACCRX automatically increments during read */ - I915_WRITE8(VGA_DACRX, 0); - /* Read 3 bytes of color data from each index */ - for (i = 0; i < 256 * 3; i++) - dev_priv->saveDACDATA[i] = I915_READ8(VGA_DACDATA); /* MSR bits */ dev_priv->saveMSR = I915_READ8(VGA_MSR_READ); @@ -225,12 +220,6 @@ static void i915_restore_vga(struct drm_device *dev) /* VGA color palette registers */ I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK); - /* DACCRX automatically increments during read */ - I915_WRITE8(VGA_DACWX, 0); - /* Read 3 bytes of color data from each index */ - for (i = 0; i < 256 * 3; i++) - I915_WRITE8(VGA_DACDATA, dev_priv->saveDACDATA[i]); - } int i915_save_state(struct drm_device *dev) -- cgit v1.2.2 From 040aefa263aa9cd7bd5df50586c35e1e15e77f84 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Tue, 10 Mar 2009 12:31:12 -0700 Subject: drm/i915: Fix bad \n in MTRR failure notice. Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 6dab63bdc4c1..6d21b9e48b89 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1105,7 +1105,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) 1024 * 1024, MTRR_TYPE_WRCOMB, 1); if (dev_priv->mm.gtt_mtrr < 0) { - DRM_INFO("MTRR allocation failed\n. Graphics " + DRM_INFO("MTRR allocation failed. Graphics " "performance may suffer.\n"); } -- cgit v1.2.2 From 475049809977bf3975d78f2d2fd992e19ce2d59e Mon Sep 17 00:00:00 2001 From: Roel Kluin Date: Tue, 10 Mar 2009 12:55:45 -0700 Subject: mm: get_nid_for_pfn() returns int get_nid_for_pfn() returns int Presumably the (nid < 0) case has never happened. We do know that it is happening on one system while creating a symlink for a memory section so it should also happen on the same system if unregister_mem_sect_under_nodes() were called to remove the same symlink. The test was actually added in response to a problem with an earlier version reported by Yasunori Goto where one or more of the leading pages of a memory section on the 2nd node of one of his systems was uninitialized because I believe they coincided with a memory hole. That earlier version did not ignore uninitialized pages and determined the nid by considering only the 1st page of each memory section. This caused the symlink to the 1st memory section on the 2nd node to be incorrectly created in /sys/devices/system/node/node0 instead of /sys/devices/system/node/node1. 
The problem was fixed by adding the test to skip over uninitialized pages. I suspect we have not seen any reports of the non-removal of a symlink due to the incorrect declaration of the nid variable in unregister_mem_sect_under_nodes() because - systems where a memory section could have an uninitialized range of leading pages are probably rare. - memory remove is probably not done very frequently on the systems that are capable of demonstrating the problem. - lingering symlink(s) that should have been removed may have simply gone unnoticed. [garyhade@us.ibm.com: wrote changelog] Signed-off-by: Roel Kluin Cc: Gary Hade Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/base/node.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/base/node.c b/drivers/base/node.c index 43fa90b837ee..f8f578a71b25 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -303,7 +303,7 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk) sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index); sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1; for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { - unsigned int nid; + int nid; nid = get_nid_for_pfn(pfn); if (nid < 0) -- cgit v1.2.2 From c15ade65788b70797c947f7de3e049e6a23f407f Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Tue, 10 Mar 2009 12:55:47 -0700 Subject: lm85: fix the version check that broke adt7468 probing The verstep check in the lm85 driver fails because the upper nibble of the version register is 0x7, not 0x6, on the adt7468 chip. Probing of all adt7468s was broken by 69fc1feba2d5856ff74dedb6ae9d8c490210825c ("hwmon: (lm85) Rework the device detection"), and this patch fixes that. Also add in a missing i2c_device_id that accidentally got dropped from the original patch. Signed-off-by: Darrick J. Wong Cc: Jean Delvare Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/hwmon/lm85.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c index cfc1ee90f5a3..cf1422ef6ef0 100644 --- a/drivers/hwmon/lm85.c +++ b/drivers/hwmon/lm85.c @@ -72,6 +72,7 @@ I2C_CLIENT_INSMOD_7(lm85b, lm85c, adm1027, adt7463, adt7468, emc6d100, #define LM85_COMPANY_SMSC 0x5c #define LM85_VERSTEP_VMASK 0xf0 #define LM85_VERSTEP_GENERIC 0x60 +#define LM85_VERSTEP_GENERIC2 0x70 #define LM85_VERSTEP_LM85C 0x60 #define LM85_VERSTEP_LM85B 0x62 #define LM85_VERSTEP_ADM1027 0x60 @@ -334,6 +335,7 @@ static struct lm85_data *lm85_update_device(struct device *dev); static const struct i2c_device_id lm85_id[] = { { "adm1027", adm1027 }, { "adt7463", adt7463 }, + { "adt7468", adt7468 }, { "lm85", any_chip }, { "lm85b", lm85b }, { "lm85c", lm85c }, @@ -1153,7 +1155,8 @@ static int lm85_detect(struct i2c_client *client, int kind, address, company, verstep); /* All supported chips have the version in common */ - if ((verstep & LM85_VERSTEP_VMASK) != LM85_VERSTEP_GENERIC) { + if ((verstep & LM85_VERSTEP_VMASK) != LM85_VERSTEP_GENERIC && + (verstep & LM85_VERSTEP_VMASK) != LM85_VERSTEP_GENERIC2) { dev_dbg(&adapter->dev, "Autodetection failed: " "unsupported version\n"); return -ENODEV; -- cgit v1.2.2 From 8ef1f0291a5d126f678b2f0225843c1ab550559c Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Tue, 10 Mar 2009 12:55:48 -0700 Subject: lm85: add VRM10 support for adt7468 chip The adt7468 chip supports VRM10 sensors just like the adt7463; add a missing check for it. Signed-off-by: Darrick J. 
Wong Cc: Jean Delvare Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/hwmon/lm85.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c index cf1422ef6ef0..b251d8674b41 100644 --- a/drivers/hwmon/lm85.c +++ b/drivers/hwmon/lm85.c @@ -410,7 +410,8 @@ static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr, struct lm85_data *data = lm85_update_device(dev); int vid; - if (data->type == adt7463 && (data->vid & 0x80)) { + if ((data->type == adt7463 || data->type == adt7468) && + (data->vid & 0x80)) { /* 6-pin VID (VRM 10) */ vid = vid_from_reg(data->vid & 0x3f, data->vrm); } else { -- cgit v1.2.2 From 2f68891314b14e7e0ef07b4e77a8ea6e917fc74b Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Tue, 10 Mar 2009 12:55:50 -0700 Subject: x86/agp: tighten check to update amd nb aperture Impact: fix bug to make agp work with dri Jeffrey reported that dri does work with 64bit, but doesn't work with 32bit it turns out NB aperture is 32M, aperture on agp is 128M 64bit is using 64M for vaidation for 64 iommu/gart 32bit is only using 32M..., and will not update the nb aperture. So try to compare nb apterture and agp apterture before leaving not touch nb aperture. Reported-by: Jeffrey Trull Tested-by: Jeffrey Trull Signed-off-by: Yinghai Lu Acked-by: Dave Airlie Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/agp/amd64-agp.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 52f4361eb6e4..d765afda9c2a 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -271,15 +271,15 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, nb_order = (nb_order >> 1) & 7; pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base); nb_aper = nb_base << 25; - if (agp_aperture_valid(nb_aper, (32*1024*1024)<= order) { + if (agp_aperture_valid(nb_aper, (32*1024*1024)<dev, "aperture from AGP @ %Lx size %u MB\n", aper, 32 << order); if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)< Date: Tue, 10 Mar 2009 12:55:53 -0700 Subject: mtd_dataflash: fix probing of AT45DB321C chips. Commit 771999b65f79264acde4b855e5d35696eca5e80c ("[MTD] DataFlash: bugfix, binary page sizes now handled") broke support for probing AT45DB321C flash chips. These chips do not support the "page size" status bit, so if we match the JEDEC id return early. 
[akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Will Newton Cc: David Woodhouse Acked-by: David Brownell Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/mtd/devices/mtd_dataflash.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index d44f741ae229..6d9f810565c8 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c @@ -821,7 +821,8 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi) if (!(info->flags & IS_POW2PS)) return info; } - } + } else + return info; } } -- cgit v1.2.2 From 9c1e8a4ebcc04226cb6f3a1bf1d72f4cafd6b089 Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Tue, 10 Mar 2009 12:55:54 -0700 Subject: intel-agp: fix a panic with 1M of shared memory, no GTT entries When GTT size is equal to amount of video memory, the amount of GTT entries is computed lower than zero, which is invalid and leads to off-by-one error in intel_i915_configure() Originally posted here: http://bugzilla.kernel.org/show_bug.cgi?id=12539 http://bugzilla.redhat.com/show_bug.cgi?id=445592 Signed-off-by: Lubomir Rintel Cc: Lubomir Rintel Cc: Dave Airlie Reviewed-by: Eric Anholt Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/agp/intel-agp.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index c7714185f831..4373adb2119a 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -633,13 +633,15 @@ static void intel_i830_init_gtt_entries(void) break; } } - if (gtt_entries > 0) + if (gtt_entries > 0) { dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n", gtt_entries / KB(1), local ? "local" : "stolen"); - else + gtt_entries /= KB(4); + } else { dev_info(&agp_bridge->dev->dev, "no pre-allocated video memory detected\n"); - gtt_entries /= KB(4); + gtt_entries = 0; + } intel_private.gtt_entries = gtt_entries; } -- cgit v1.2.2 From d58ab5cf09679d8cb4824e22cae900c0eab5ab31 Mon Sep 17 00:00:00 2001 From: Atsushi Nemoto Date: Tue, 10 Mar 2009 12:55:55 -0700 Subject: mtd: physmap: fix NULL pointer dereference in error path commit e480814f138cd5d78a8efe397756ba6b6518fdb6 ("[MTD] [MAPS] physmap: fix wrong free and del_mtd_{partition,device}") introduces a NULL pointer dereference in physmap_flash_remove when called from the error path in physmap_flash_probe (if map_probe failed). Call del_mtd_{partition,device} only if info->cmtd was not NULL. Reported-by: pHilipp Zabel Signed-off-by: Atsushi Nemoto Cc: David Woodhouse Cc: "Rafael J. 
Wysocki" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/mtd/maps/physmap.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c index 4b122e7ab4b3..229718222db7 100644 --- a/drivers/mtd/maps/physmap.c +++ b/drivers/mtd/maps/physmap.c @@ -46,16 +46,19 @@ static int physmap_flash_remove(struct platform_device *dev) physmap_data = dev->dev.platform_data; + if (info->cmtd) { #ifdef CONFIG_MTD_PARTITIONS - if (info->nr_parts) { - del_mtd_partitions(info->cmtd); - kfree(info->parts); - } else if (physmap_data->nr_parts) - del_mtd_partitions(info->cmtd); - else - del_mtd_device(info->cmtd); + if (info->nr_parts || physmap_data->nr_parts) + del_mtd_partitions(info->cmtd); + else + del_mtd_device(info->cmtd); #else - del_mtd_device(info->cmtd); + del_mtd_device(info->cmtd); +#endif + } +#ifdef CONFIG_MTD_PARTITIONS + if (info->nr_parts) + kfree(info->parts); #endif #ifdef CONFIG_MTD_CONCAT -- cgit v1.2.2 From 16b71fdf97599f1b1b7f38418ee9922d9f117396 Mon Sep 17 00:00:00 2001 From: Samuel CUELLA Date: Tue, 10 Mar 2009 12:56:00 -0700 Subject: i810: fix kernel crash fix when struct fb_var_screeninfo is supplied Prevent the kernel from being crashed by a divide-by-zero operation when supplied an incorrectly filled 'struct fb_var_screeninfo' from userland. Previously i810_main.c:1005 (i810_check_params) was using the global 'yres' symbol previously defined at i810_main.c:145 as a module parameter value holder (i810_main.c:2174). If i810fb is compiled-in or if this param doesn't get a default value, this direct usage leads to a divide-by-zero at i810_main.c:1005 (i810_check_params). The patch simply replace the 'yres' global, perhaps undefined symbol usage by a given parameter structure lookup. This problem occurs with directfb, mplayer -vo fbdev, SDL library. 
It was also reported ( but non solved ) at: http://mail.directfb.org/pipermail/directfb-dev/2008-March/004050.html Signed-off-by: Samuel CUELLA Cc: Jiri Kosina Cc: Krzysztof Helt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/video/i810/i810_main.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c index a24e680d2b9c..2e940199fc89 100644 --- a/drivers/video/i810/i810_main.c +++ b/drivers/video/i810/i810_main.c @@ -993,6 +993,7 @@ static int i810_check_params(struct fb_var_screeninfo *var, struct i810fb_par *par = info->par; int line_length, vidmem, mode_valid = 0, retval = 0; u32 vyres = var->yres_virtual, vxres = var->xres_virtual; + /* * Memory limit */ @@ -1002,12 +1003,12 @@ static int i810_check_params(struct fb_var_screeninfo *var, if (vidmem > par->fb.size) { vyres = par->fb.size/line_length; if (vyres < var->yres) { - vyres = yres; + vyres = info->var.yres; vxres = par->fb.size/vyres; vxres /= var->bits_per_pixel >> 3; line_length = get_line_length(par, vxres, var->bits_per_pixel); - vidmem = line_length * yres; + vidmem = line_length * info->var.yres; if (vxres < var->xres) { printk("i810fb: required video memory, " "%d bytes, for %dx%d-%d (virtual) " -- cgit v1.2.2 From 187cfc439f7b1a7c91ff72d561b2a7c9c0b83431 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Mon, 9 Mar 2009 14:36:15 +0000 Subject: hvc_console: Remove tty->low_latency on pseries backends The hvcs and hvsi backends both set tty->low_latency to one, along with more or less scary comments regarding bugs or races that would happen if not doing so. However, they also both call tty_flip_buffer_push() in conexts where it's illegal to do so since some recent tty changes (or at least it may have been illegal always but it nows blows) when low_latency is set (ie, hard interrupt or with spinlock held and irqs disabled). This removes the setting for now to get them back to working condition, we'll have to address the races described in the comments separately if they are still an issue (some of this might have been fixed already). Signed-off-by: Benjamin Herrenschmidt --- drivers/char/hvcs.c | 9 --------- drivers/char/hvsi.c | 1 - 2 files changed, 10 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c index 6e6eb445d374..c76bccf5354d 100644 --- a/drivers/char/hvcs.c +++ b/drivers/char/hvcs.c @@ -1139,15 +1139,6 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp) hvcsd->tty = tty; tty->driver_data = hvcsd; - /* - * Set this driver to low latency so that we actually have a chance at - * catching a throttled TTY after we flip_buffer_push. Otherwise the - * flush_to_async may not execute until after the kernel_thread has - * yielded and resumed the next flip_buffer_push resulting in data - * loss. 
- */ - tty->low_latency = 1; - memset(&hvcsd->buffer[0], 0x00, HVCS_BUFF_LEN); /* diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c index 406f8742a260..2989056a9e39 100644 --- a/drivers/char/hvsi.c +++ b/drivers/char/hvsi.c @@ -810,7 +810,6 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp) hp = &hvsi_ports[line]; tty->driver_data = hp; - tty->low_latency = 1; /* avoid throttle/tty_flip_buffer_push race */ mb(); if (hp->state == HVSI_FSP_DIED) -- cgit v1.2.2 From d801cec70d69d2d4121e133edd5c3237fe0e0078 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 11 Mar 2009 10:45:17 +1100 Subject: radeonfb/aty128fb: Disable broken early resume hook for PowerBooks radeonfb and aty128fb have a special hook called by the PowerMac platform code very very early on resume from sleep to bring the screen back. This is useful for debugging wakup problems, but unfortunately, this also became a source of problems of its own. The hook is called extremely early, with interrupts still off, and the code path involved with that code nowadays rely on things like taking mutexes, GFP_KERNEL allocations, etc... In addition, the driver now relies on the PCI core to restore the standard config space before calling resume which doesn't happen with this early code path. I'm keeping the code in but commented out along with a fixup call to pci_restore_state(). The reason is that I still want to make it easy to re-enable temporarily to track wake up problems, and it's possible that I can revive it at some stage if we make sleeping things save to call in early resume using a system state. In the meantime, this should fix several reported regressions. Signed-off-by: Benjamin Herrenschmidt --- drivers/video/aty/aty128fb.c | 10 +++++++++- drivers/video/aty/radeon_pm.c | 10 +++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c index 2181ce4d7ebd..35e8eb02b9e9 100644 --- a/drivers/video/aty/aty128fb.c +++ b/drivers/video/aty/aty128fb.c @@ -1853,13 +1853,14 @@ static void aty128_bl_exit(struct backlight_device *bd) * Initialisation */ -#ifdef CONFIG_PPC_PMAC +#ifdef CONFIG_PPC_PMAC__disabled static void aty128_early_resume(void *data) { struct aty128fb_par *par = data; if (try_acquire_console_sem()) return; + pci_restore_state(par->pdev); aty128_do_resume(par->pdev); release_console_sem(); } @@ -1907,7 +1908,14 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i /* Indicate sleep capability */ if (par->chip_gen == rage_M3) { pmac_call_feature(PMAC_FTR_DEVICE_CAN_WAKE, NULL, 0, 1); +#if 0 /* Disable the early video resume hack for now as it's causing problems, among + * others we now rely on the PCI core restoring the config space for us, which + * isn't the case with that hack, and that code path causes various things to + * be called with interrupts off while they shouldn't. 
I'm leaving the code in + * as it can be useful for debugging purposes + */ pmac_set_early_video_resume(aty128_early_resume, par); +#endif } /* Find default mode */ diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c index ca5f0dc28546..81603f85e17e 100644 --- a/drivers/video/aty/radeon_pm.c +++ b/drivers/video/aty/radeon_pm.c @@ -2762,12 +2762,13 @@ int radeonfb_pci_resume(struct pci_dev *pdev) return rc; } -#ifdef CONFIG_PPC_OF +#ifdef CONFIG_PPC_OF__disabled static void radeonfb_early_resume(void *data) { struct radeonfb_info *rinfo = data; rinfo->no_schedule = 1; + pci_restore_state(rinfo->pdev); radeonfb_pci_resume(rinfo->pdev); rinfo->no_schedule = 0; } @@ -2834,7 +2835,14 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk, int ignore_devlis */ if (rinfo->pm_mode != radeon_pm_none) { pmac_call_feature(PMAC_FTR_DEVICE_CAN_WAKE, rinfo->of_node, 0, 1); +#if 0 /* Disable the early video resume hack for now as it's causing problems, among + * others we now rely on the PCI core restoring the config space for us, which + * isn't the case with that hack, and that code path causes various things to + * be called with interrupts off while they shouldn't. I'm leaving the code in + * as it can be useful for debugging purposes + */ pmac_set_early_video_resume(radeonfb_early_resume, rinfo); +#endif } #if 0 -- cgit v1.2.2 From 9b2412f9ad0a3913baa40c270d6e1fa3c6d50067 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 11 Feb 2009 14:26:44 +0000 Subject: drm/i915: First recheck for an empty fence register. If we wait upon a request and successfully unbind a buffer occupying a fence register, then that slot will be freed and cause a NULL derefrence upon rescanning. Signed-off-by: Chris Wilson Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_gem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c94069b28a47..6497c1ad02d3 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1580,6 +1580,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write) } /* First try to find a free reg */ +try_again: for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { reg = &dev_priv->fence_regs[i]; if (!reg->obj) @@ -1591,7 +1592,6 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write) struct drm_i915_gem_object *old_obj_priv = NULL; loff_t offset; -try_again: /* Could try to use LRU here instead... */ for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { -- cgit v1.2.2 From 22c344e9a03beeb7071a588a973012749f84a830 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 11 Feb 2009 14:26:45 +0000 Subject: drm/i915: Check fence status on every pin. As we may steal the fence register of an unpinned buffer for another, every time we repin the buffer we need to recheck whether it needs to be allocated a fence. 
Signed-off-by: Chris Wilson Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_gem.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6497c1ad02d3..12c7d78d9240 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2730,14 +2730,21 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) DRM_ERROR("Failure to bind: %d\n", ret); return ret; } - /* - * Pre-965 chips need a fence register set up in order to - * properly handle tiled surfaces. - */ - if (!IS_I965G(dev) && - obj_priv->fence_reg == I915_FENCE_REG_NONE && - obj_priv->tiling_mode != I915_TILING_NONE) - i915_gem_object_get_fence_reg(obj, true); + } + /* + * Pre-965 chips need a fence register set up in order to + * properly handle tiled surfaces. + */ + if (!IS_I965G(dev) && + obj_priv->fence_reg == I915_FENCE_REG_NONE && + obj_priv->tiling_mode != I915_TILING_NONE) { + ret = i915_gem_object_get_fence_reg(obj, true); + if (ret != 0) { + if (ret != -EBUSY && ret != -ERESTARTSYS) + DRM_ERROR("Failure to install fence: %d\n", + ret); + return ret; + } } obj_priv->pin_count++; -- cgit v1.2.2 From fc7170ba281c041852eeda52d4faf5db720c99ce Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 11 Feb 2009 14:26:46 +0000 Subject: drm/i915: Check to see if we've pinned all available fences We need to check and report if there are no available fences - or else we spin endlessly waiting for a buffer to magically unpin itself. Signed-off-by: Chris Wilson Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_gem.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 12c7d78d9240..2b5c7ad9e64a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1557,7 +1557,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = obj->driver_private; struct drm_i915_fence_reg *reg = NULL; - int i, ret; + struct drm_i915_gem_object *old_obj_priv = NULL; + int i, ret, avail; switch (obj_priv->tiling_mode) { case I915_TILING_NONE: @@ -1581,17 +1582,24 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write) /* First try to find a free reg */ try_again: + avail = 0; for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { reg = &dev_priv->fence_regs[i]; if (!reg->obj) break; + + old_obj_priv = reg->obj->driver_private; + if (!old_obj_priv->pin_count) + avail++; } /* None available, try to steal one or wait for a user to finish */ if (i == dev_priv->num_fence_regs) { - struct drm_i915_gem_object *old_obj_priv = NULL; loff_t offset; + if (avail == 0) + return -ENOMEM; + /* Could try to use LRU here instead... */ for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { -- cgit v1.2.2 From d7619c4b9c95cc9a2e7f0f4f7ae21165ab5cb1e7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 11 Feb 2009 14:26:47 +0000 Subject: drm/i915: Protect active fences on i915 The i915 also uses the fence registers for GPU access to tiled buffers so we cannot reallocate one whilst it is on the active list. By performing a LRU scan of the fenced buffers we also avoid waiting the possibility of waiting on a pinned, or otherwise unusable, buffer. 
Signed-off-by: Chris Wilson Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_gem.c | 38 +++++++++++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 2b5c7ad9e64a..b0ff5c44e61e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1595,18 +1595,32 @@ try_again: /* None available, try to steal one or wait for a user to finish */ if (i == dev_priv->num_fence_regs) { + uint32_t seqno = dev_priv->mm.next_gem_seqno; loff_t offset; if (avail == 0) return -ENOMEM; - /* Could try to use LRU here instead... */ for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { + uint32_t this_seqno; + reg = &dev_priv->fence_regs[i]; old_obj_priv = reg->obj->driver_private; - if (!old_obj_priv->pin_count) + + if (old_obj_priv->pin_count) + continue; + + /* i915 uses fences for GPU access to tiled buffers */ + if (IS_I965G(dev) || !old_obj_priv->active) break; + + /* find the seqno of the first available fence */ + this_seqno = old_obj_priv->last_rendering_seqno; + if (this_seqno != 0 && + reg->obj->write_domain == 0 && + i915_seqno_passed(seqno, this_seqno)) + seqno = this_seqno; } /* @@ -1614,15 +1628,25 @@ try_again: * objects to finish before trying again. */ if (i == dev_priv->num_fence_regs) { - ret = i915_gem_object_set_to_gtt_domain(reg->obj, 0); - if (ret) { - WARN(ret != -ERESTARTSYS, - "switch to GTT domain failed: %d\n", ret); - return ret; + if (seqno == dev_priv->mm.next_gem_seqno) { + i915_gem_flush(dev, + I915_GEM_GPU_DOMAINS, + I915_GEM_GPU_DOMAINS); + seqno = i915_add_request(dev, + I915_GEM_GPU_DOMAINS); + if (seqno == 0) + return -ENOMEM; } + + ret = i915_wait_request(dev, seqno); + if (ret) + return ret; goto try_again; } + BUG_ON(old_obj_priv->active || + (reg->obj->write_domain & I915_GEM_GPU_DOMAINS)); + /* * Zap this virtual mapping so we can set up a fence again * for this object next time we need it. -- cgit v1.2.2 From dc529a4fe1ae4667c819437a94185e8581e1e680 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Tue, 10 Mar 2009 22:34:49 -0700 Subject: drm/i915: fix 945 fence register writes for fence 8 and above. The last 8 fence registers sit at a different offset, so when we went to set fence number 8 in the lower offset, we instead set PGETBL_CTL, and the GPU got all sorts of angry at us. fd.o bug #20567. Easily reproducible by running glxgears and killing it about 6 times. 
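To spell out the collision (not part of the patch; the 0x2020 location of PGETBL_CTL is quoted from the driver's register header of this era and should be treated as an assumption here): FENCE_REG_830_0 is 0x2000 and each of these fence registers is 4 bytes, so the old code put fence 8 at 0x2000 + 8 * 4 = 0x2020, which is PGETBL_CTL, the global GTT control register. A small sketch of the before/after offset calculation:

    /* FENCE_REG_830_0 = 0x2000, FENCE_REG_945_8 = 0x3000 (from the patch) */
    static unsigned int fence_reg_offset(int regnum)
    {
            if (regnum < 8)
                    return 0x2000 + regnum * 4;     /* fences 0..7 */
            /* the old code fell through to the same formula, so regnum == 8
             * produced 0x2020 and clobbered PGETBL_CTL; the fix uses the
             * 945-specific block instead */
            return 0x3000 + (regnum - 8) * 4;       /* fences 8..15 */
    }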
Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_gem.c | 21 +++++++++++++++++---- drivers/gpu/drm/i915/i915_reg.h | 1 + 2 files changed, 18 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b0ff5c44e61e..37427e4016cb 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1476,7 +1476,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) struct drm_i915_gem_object *obj_priv = obj->driver_private; int regnum = obj_priv->fence_reg; int tile_width; - uint32_t val; + uint32_t fence_reg, val; uint32_t pitch_val; if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || @@ -1503,7 +1503,11 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) val |= pitch_val << I830_FENCE_PITCH_SHIFT; val |= I830_FENCE_REG_VALID; - I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); + if (regnum < 8) + fence_reg = FENCE_REG_830_0 + (regnum * 4); + else + fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4); + I915_WRITE(fence_reg, val); } static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) @@ -1687,8 +1691,17 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) if (IS_I965G(dev)) I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); - else - I915_WRITE(FENCE_REG_830_0 + (obj_priv->fence_reg * 4), 0); + else { + uint32_t fence_reg; + + if (obj_priv->fence_reg < 8) + fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4; + else + fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - + 8) * 4; + + I915_WRITE(fence_reg, 0); + } dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL; obj_priv->fence_reg = I915_FENCE_REG_NONE; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 9d6539a868b3..90600d899413 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -184,6 +184,7 @@ * Fence registers */ #define FENCE_REG_830_0 0x2000 +#define FENCE_REG_945_8 0x3000 #define I830_FENCE_START_MASK 0x07f80000 #define I830_FENCE_TILING_Y_SHIFT 12 #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) -- cgit v1.2.2 From 3bb9db79235e19dbb18ba6f4a428a97c69115319 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Thu, 12 Mar 2009 13:36:38 +0100 Subject: hwmon: (abituguru3) Fix I/O error handling Fix a logic bug reported by Roel Kluin, by rewriting the error handling code in a clearer way. Signed-off-by: Jean Delvare Cc: Roel Kluin Acked-by: Alistair John Strachan Acked-by: Hans de Goede --- drivers/hwmon/abituguru3.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c index e52b38806d03..ad2b3431b725 100644 --- a/drivers/hwmon/abituguru3.c +++ b/drivers/hwmon/abituguru3.c @@ -760,8 +760,11 @@ static int abituguru3_read_increment_offset(struct abituguru3_data *data, for (i = 0; i < offset_count; i++) if ((x = abituguru3_read(data, bank, offset + i, count, - buf + i * count)) != count) - return i * count + (i && (x < 0)) ? 0 : x; + buf + i * count)) != count) { + if (x < 0) + return x; + return i * count + x; + } return i * count; } -- cgit v1.2.2 From 1a51e068c900eb6ea23ce597361ebf3b19a57b23 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Thu, 12 Mar 2009 13:36:38 +0100 Subject: hwmon: (lm90) Document support for the MAX6648/6692 chips Update documentation to prevent further confusion/duplication. Signed-off-by: Darrick J. 
Wong Signed-off-by: Jean Delvare --- drivers/hwmon/Kconfig | 4 ++-- drivers/hwmon/lm90.c | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index b84bf066879b..b4eea0292c1a 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -543,8 +543,8 @@ config SENSORS_LM90 help If you say yes here you get support for National Semiconductor LM90, LM86, LM89 and LM99, Analog Devices ADM1032 and ADT7461, and Maxim - MAX6646, MAX6647, MAX6649, MAX6657, MAX6658, MAX6659, MAX6680 and - MAX6681 sensor chips. + MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659, + MAX6680, MAX6681 and MAX6692 sensor chips. This driver can also be built as a module. If so, the module will be called lm90. diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index 96a701866726..1aff7575799d 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c @@ -32,10 +32,10 @@ * supported by this driver. These chips lack the remote temperature * offset feature. * - * This driver also supports the MAX6646, MAX6647 and MAX6649 chips - * made by Maxim. These are again similar to the LM86, but they use - * unsigned temperature values and can report temperatures from 0 to - * 145 degrees. + * This driver also supports the MAX6646, MAX6647, MAX6648, MAX6649 and + * MAX6692 chips made by Maxim. These are again similar to the LM86, + * but they use unsigned temperature values and can report temperatures + * from 0 to 145 degrees. * * This driver also supports the MAX6680 and MAX6681, two other sensor * chips made by Maxim. These are quite similar to the other Maxim -- cgit v1.2.2 From e267d25005c861fe6afda343f044536342c9f8b4 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Thu, 12 Mar 2009 13:36:39 +0100 Subject: hwmon: (it87) Properly decode -128 degrees C temperature The it87 driver is reporting -128 degrees C as +128 degrees C. That's not a terribly likely temperature value but let's still get it right, especially when it simplifies the code. 
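As an illustration (hypothetical user-space code, not part of this patch or the driver), the bug affects exactly one register value: the old macro's "> 0x80" test skips sign correction for 0x80 itself, while every other negative code still decodes correctly; holding the value in a signed 8-bit type lets ordinary C arithmetic do the right thing:

    #include <stdio.h>

    /* old driver decode, with the register value held in an unsigned variable */
    #define OLD_TEMP_FROM_REG(val) (((val) > 0x80 ? (val) - 0x100 : (val)) * 1000)

    int main(void)
    {
            unsigned char raw = 0x80;       /* chip encoding of -128 degrees C */
            unsigned char raw2 = 0x81;      /* chip encoding of -127 degrees C */

            /* 128 is not > 128, so no sign correction: prints 128000 */
            printf("old decode of 0x80: %d\n", OLD_TEMP_FROM_REG(raw));

            /* every other negative code was fine: prints -127000 */
            printf("old decode of 0x81: %d\n", OLD_TEMP_FROM_REG(raw2));

            /* with the register stored as s8, plain C sign-extension makes the
             * new TEMP_FROM_REG(val) = (val) * 1000 yield -128000 */
            signed char fixed = (signed char)0x80;
            printf("new decode of 0x80: %d\n", fixed * 1000);
            return 0;
    }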
Signed-off-by: Jean Delvare --- drivers/hwmon/it87.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index 95a99c590da2..9157247fed8e 100644 --- a/drivers/hwmon/it87.c +++ b/drivers/hwmon/it87.c @@ -213,7 +213,7 @@ static inline u16 FAN16_TO_REG(long rpm) #define TEMP_TO_REG(val) (SENSORS_LIMIT(((val)<0?(((val)-500)/1000):\ ((val)+500)/1000),-128,127)) -#define TEMP_FROM_REG(val) (((val)>0x80?(val)-0x100:(val))*1000) +#define TEMP_FROM_REG(val) ((val) * 1000) #define PWM_TO_REG(val) ((val) >> 1) #define PWM_FROM_REG(val) (((val)&0x7f) << 1) @@ -267,9 +267,9 @@ struct it87_data { u8 has_fan; /* Bitfield, fans enabled */ u16 fan[5]; /* Register values, possibly combined */ u16 fan_min[5]; /* Register values, possibly combined */ - u8 temp[3]; /* Register value */ - u8 temp_high[3]; /* Register value */ - u8 temp_low[3]; /* Register value */ + s8 temp[3]; /* Register value */ + s8 temp_high[3]; /* Register value */ + s8 temp_low[3]; /* Register value */ u8 sensor; /* Register value */ u8 fan_div[3]; /* Register encoding, shifted right */ u8 vid; /* Register encoding, combined */ -- cgit v1.2.2 From 51b3e2700177b89fdb0d985926ce777a7ad52b15 Mon Sep 17 00:00:00 2001 From: Andrew Klossner Date: Thu, 12 Mar 2009 13:36:39 +0100 Subject: hwmon: (f75375s) Remove unnecessary and confusing initialization f75375_probe calls i2c_get_clientdata to initialize the data pointer, but there isn't yet any client data to get, and the value is never used before the variable is assigned a new value seven lines later. The call doesn't hurt anything and wastes only a couple of cycles. The reason to fix it is because this module serves as an example to hackers writing new hwmon drivers, and this part of the example is confusing. Signed-off-by: Andrew Klossner Signed-off-by: Jean Delvare --- drivers/hwmon/f75375s.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c index 1692de369969..18a1ba888165 100644 --- a/drivers/hwmon/f75375s.c +++ b/drivers/hwmon/f75375s.c @@ -617,7 +617,7 @@ static void f75375_init(struct i2c_client *client, struct f75375_data *data, static int f75375_probe(struct i2c_client *client, const struct i2c_device_id *id) { - struct f75375_data *data = i2c_get_clientdata(client); + struct f75375_data *data; struct f75375s_platform_data *f75375s_pdata = client->dev.platform_data; int err; -- cgit v1.2.2 From 649426efcfbc67a8b033497151816cbac9fd0cfa Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 5 Mar 2009 13:57:28 -0500 Subject: PCI: Add PCI quirk to disable L0s ASPM state for 82575 and 82598 This patch is intended to disable L0s ASPM link state for 82598 (ixgbe) parts due to the fact that it is possible to corrupt TX data when coming back out of L0s on some systems. The workaround had been added for 82575 (igb) previously, but did not use the ASPM api. This quirk uses the ASPM api to prevent the ASPM subsystem from re-enabling the L0s state. Instead of adding the fix in igb to the ixgbe driver as well it was decided to move it into a pci quirk. It is necessary to move the fix out of the driver and into a pci quirk in order to prevent the issue from occuring prior to driver load to handle the possibility of the device being passed to a VM via direct assignment. 
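For contrast, a hypothetical driver-local workaround using the same ASPM API (sketch only, not taken from igb or ixgbe) would look roughly like the following; it cannot take effect before the driver loads, nor when the device is assigned directly to a guest, which is exactly why the quirk is preferred:

    #include <linux/pci.h>
    #include <linux/pci-aspm.h>

    static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            /* ask the ASPM core to keep this device's link out of L0s;
             * only takes effect once probe() actually runs */
            pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);

            /* ... normal device initialisation would follow ... */
            return 0;
    }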
Signed-off-by: Alexander Duyck Signed-off-by: Jeff Kirsher CC: Jesse Barnes Signed-off-by: Matthew Wilcox --- drivers/pci/quirks.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'drivers') diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index f20d55368edb..1a7c48d71dc7 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -23,6 +23,7 @@ #include #include #include +#include #include "pci.h" int isa_dma_bridge_buggy; @@ -1749,6 +1750,30 @@ static void __devinit quirk_e100_interrupt(struct pci_dev *dev) } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_e100_interrupt); +/* + * The 82575 and 82598 may experience data corruption issues when transitioning + * out of L0S. To prevent this we need to disable L0S on the pci-e link + */ +static void __devinit quirk_disable_aspm_l0s(struct pci_dev *dev) +{ + dev_info(&dev->dev, "Disabling L0s\n"); + pci_disable_link_state(dev, PCIE_LINK_STATE_L0S); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s); + static void __devinit fixup_rev1_53c810(struct pci_dev* dev) { /* rev 1 ncr53c810 chips don't set the class at all which means -- cgit v1.2.2 From cb4cb4ac7338c28b047760be187355ed9c783e72 Mon Sep 17 00:00:00 2001 From: Alex Chiang Date: Thu, 5 Mar 2009 19:28:40 -0700 Subject: PCIe: AER: during disable, check subordinate before walking Commit 47a8b0cc (Enable PCIe AER only after checking firmware support) wants to walk the PCI bus in the remove path to disable AER, and calls pci_walk_bus for downstream bridges. Unfortunately, in the remove path, we remove devices and bridges in a depth-first manner, starting with the furthest downstream bridge and working our way backwards. The furthest downstream bridges will not have a dev->subordinate, and we hit a NULL deref in pci_walk_bus. Check for dev->subordinate first before attempting to walk the PCI hierarchy below us. 
Acked-by: Andrew Patterson Signed-off-by: Alex Chiang Signed-off-by: Matthew Wilcox --- drivers/pci/pcie/aer/aerdrv_core.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index d0c973685868..382575007382 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -133,6 +133,9 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev, bool enable) { set_device_error_reporting(dev, &enable); + + if (!dev->subordinate) + return; pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable); } -- cgit v1.2.2 From 3f3b902ed8147c42a4a9764014c758e6b3f42f51 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 6 Mar 2009 14:39:14 +1100 Subject: powerpc/pseries: The RPA PCI hotplug driver depends on EEH The RPA PCI hotplug driver calls EEH routines, so should depend on EEH. Also PPC_PSERIES implies PPC64, so remove that. Signed-off-by: Michael Ellerman Signed-off-by: Matthew Wilcox --- drivers/pci/hotplug/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig index eacfb13998bb..9aa4fe100a0d 100644 --- a/drivers/pci/hotplug/Kconfig +++ b/drivers/pci/hotplug/Kconfig @@ -143,7 +143,7 @@ config HOTPLUG_PCI_SHPC config HOTPLUG_PCI_RPA tristate "RPA PCI Hotplug driver" - depends on PPC_PSERIES && PPC64 && !HOTPLUG_PCI_FAKE + depends on PPC_PSERIES && EEH && !HOTPLUG_PCI_FAKE help Say Y here if you have a RPA system that supports PCI Hotplug. -- cgit v1.2.2 From 7726c3308a92b4a4c3bd059059498fca0e6f8e48 Mon Sep 17 00:00:00 2001 From: Prakash Punnoor Date: Fri, 6 Mar 2009 00:45:12 +0100 Subject: pci: don't disable too many HT MSI mapping Prakash's system needs MSI disabled on some bridges, but not all. This seems to be the minimal fix for 2.6.29, but should be replaced during 2.6.30. 
Signed-off-by: Prakash Punnoor Signed-off-by: Matthew Wilcox --- drivers/pci/quirks.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 1a7c48d71dc7..9104671e4b72 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2166,6 +2166,10 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev) int pos; int found; + /* Enabling HT MSI mapping on this device breaks MCP51 */ + if (dev->device == 0x270) + return; + /* check if there is HT MSI cap or enabled on this device */ found = ht_check_msi_mapping(dev); -- cgit v1.2.2 From 6a958d5b28e4a180458e0d319d2e4bb5c4b7da3e Mon Sep 17 00:00:00 2001 From: Prakash Punnoor Date: Fri, 6 Mar 2009 10:10:35 +0100 Subject: pci: Fix typo in message while disabling HT MSI mapping "Enabling" should read "Disabling" Signed-off-by: Prakash Punnoor Signed-off-by: Matthew Wilcox --- drivers/pci/quirks.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 9104671e4b72..92b9efe9bcaf 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2122,7 +2122,7 @@ static void __devinit ht_disable_msi_mapping(struct pci_dev *dev) if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) { - dev_info(&dev->dev, "Enabling HT MSI Mapping\n"); + dev_info(&dev->dev, "Disabling HT MSI Mapping\n"); pci_write_config_byte(dev, pos + HT_MSI_FLAGS, flags & ~HT_MSI_FLAGS_ENABLE); -- cgit v1.2.2 From d89987193631bf23d1735c55d13a06d4b8d0e9bd Mon Sep 17 00:00:00 2001 From: Alex Chiang Date: Sat, 7 Mar 2009 19:35:47 -0700 Subject: PCIe: portdrv: call pci_disable_device during remove The PCIe port driver calls pci_enable_device() during probe but never calls pci_disable_device() during remove. Cc: stable@kernel.org Signed-off-by: Alex Chiang Signed-off-by: Matthew Wilcox --- drivers/pci/pcie/portdrv_pci.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 248b4db91552..5ea566e20b37 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -103,6 +103,7 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev, static void pcie_portdrv_remove (struct pci_dev *dev) { pcie_port_device_remove(dev); + pci_disable_device(dev); kfree(pci_get_drvdata(dev)); } -- cgit v1.2.2 From 8d0df7a3d1ecbaf5d5602a59055c8ca993855bed Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Thu, 12 Mar 2009 14:31:25 -0700 Subject: drivers/w1/masters/w1-gpio.c: fix read_bit() W1 master implementations are expected to return 0 or 1 from their read_bit() function. However, not all platforms do return these values from gpio_get_value() - namely PXAs won't. Hence the w1 gpio-master needs to break the result down to 0 or 1 itself. Signed-off-by: Daniel Mack Cc: Ville Syrjala Cc: Evgeniy Polyakov Cc: David Brownell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/w1/masters/w1-gpio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c index 9e1138a75e8b..a411702413d6 100644 --- a/drivers/w1/masters/w1-gpio.c +++ b/drivers/w1/masters/w1-gpio.c @@ -39,7 +39,7 @@ static u8 w1_gpio_read_bit(void *data) { struct w1_gpio_platform_data *pdata = data; - return gpio_get_value(pdata->pin); + return gpio_get_value(pdata->pin) ? 
1 : 0; } static int __init w1_gpio_probe(struct platform_device *pdev) -- cgit v1.2.2 From a4e3f91b98d86ae0b5c816fe45190bb29ac32f71 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Thu, 12 Mar 2009 14:31:30 -0700 Subject: ds2760_battery.c: fix division by zero The 'battery remaining capacity' calculation in drivers/power/ds2760_battery.c lacks a parameter check to a division operation which causes the kernel to oops on my board. [ 21.233750] Division by zero in kernel. [ 21.237646] [] (__div0+0x0/0x20) from [] (Ldiv0+0x8/0x10) [ 21.244816] [] (ds2760_battery_read_status+0x0/0x2a4) from [] (ds2760_battery_get_property+0x30/0xdc) [ 21.255803] r8:c03a22c0 r7:c7886100 r6:00000009 r5:c782fe7c r4:c7886084 [ 21.262518] [] (ds2760_battery_get_property+0x0/0xdc) from [] (power_supply_show_property+0x48/0x114) [ 21.273480] r6:c7996000 r5:00000009 r4:00000000 [ 21.278111] [] (power_supply_show_property+0x0/0x114) from [] (power_supply_uevent+0x188/0x280) [ 21.288537] r8:00000001 r7:c7886100 r6:c7996000 r5:000000b4 r4:00000000 [ 21.295222] [] (power_supply_uevent+0x0/0x280) from [] (dev_uevent+0xd4/0x10c) [ 21.304199] [] (dev_uevent+0x0/0x10c) from [] (kobject_uevent_env+0x180/0x390) [ 21.313170] r5:00000000 r4:c78860ac [ 21.316725] [] (kobject_uevent_env+0x0/0x390) from [] (kobject_uevent+0x14/0x18) [ 21.325850] [] (kobject_uevent+0x0/0x18) from [] (power_supply_changed_work+0x5c/0x70) [ 21.335506] [] (power_supply_changed_work+0x0/0x70) from [] (run_workqueue+0xbc/0x144) [ 21.345167] r4:c7812040 [ 21.347716] [] (run_workqueue+0x0/0x144) from [] (worker_thread+0xa8/0xbc) [ 21.356296] r7:c7812040 r6:c7820b00 r5:c782ffa4 r4:c7812048 [ 21.361957] [] (worker_thread+0x0/0xbc) from [] (kthread+0x5c/0x94) [ 21.369971] r7:00000000 r6:c004d8a4 r5:c7812040 r4:c782e000 [ 21.375612] [] (kthread+0x0/0x94) from [] (do_exit+0x0/0x688) Signed-off-by: Daniel Mack Cc: Szabolcs Gyurko Acked-by: Matt Reimer Acked-by: Anton Vorontsov Cc: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/power/ds2760_battery.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c index 1d768928e0bb..a52d4a11652d 100644 --- a/drivers/power/ds2760_battery.c +++ b/drivers/power/ds2760_battery.c @@ -180,10 +180,13 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di) di->empty_uAh = battery_interpolate(scale, di->temp_C / 10); di->empty_uAh *= 1000; /* convert to µAh */ - /* From Maxim Application Note 131: remaining capacity = - * ((ICA - Empty Value) / (Full Value - Empty Value)) x 100% */ - di->rem_capacity = ((di->accum_current_uAh - di->empty_uAh) * 100L) / - (di->full_active_uAh - di->empty_uAh); + if (di->full_active_uAh == di->empty_uAh) + di->rem_capacity = 0; + else + /* From Maxim Application Note 131: remaining capacity = + * ((ICA - Empty Value) / (Full Value - Empty Value)) x 100% */ + di->rem_capacity = ((di->accum_current_uAh - di->empty_uAh) * 100L) / + (di->full_active_uAh - di->empty_uAh); if (di->rem_capacity < 0) di->rem_capacity = 0; -- cgit v1.2.2 From 7c48ed3383bfb2106694807361ec187fe8a4333d Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Thu, 12 Mar 2009 14:31:33 -0700 Subject: mmc: s3cmci: fix s3c2410_dma_config() arguments. The s3cmci driver is calling s3c2410_dma_config with incorrect data for the DCON register. 
The S3C2410_DCON_HWTRIG is implicit in the channel configuration and the device selection of S3C2410_DCON_CH0_SDI is incorrect as the DMA system may not select channel 0. Signed-off-by: Ben Dooks Acked-by: Pierre Ossman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/mmc/host/s3cmci.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index f4a67c65d301..2db166b7096f 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -793,8 +793,7 @@ static void s3cmci_dma_setup(struct s3cmci_host *host, host->mem->start + host->sdidata); if (!setup_ok) { - s3c2410_dma_config(host->dma, 4, - (S3C2410_DCON_HWTRIG | S3C2410_DCON_CH0_SDI)); + s3c2410_dma_config(host->dma, 4, 0); s3c2410_dma_set_buffdone_fn(host->dma, s3cmci_dma_done_callback); s3c2410_dma_setflags(host->dma, S3C2410_DMAF_AUTOSTART); -- cgit v1.2.2 From 1ba869ec581fd9078b684c56c399ffe3d2345e27 Mon Sep 17 00:00:00 2001 From: Michael Spang Date: Thu, 12 Mar 2009 14:31:34 -0700 Subject: acer-wmi: fix regression in backlight detection Currently we disable the Acer WMI backlight device if there is no ACPI backlight device. As a result, we end up with no backlight device at all. We should instead disable it if there is an ACPI device, as the other laptop drivers do. This regression was introduced in febf2d9 ("Acer-WMI: fingers off backlight if video.ko is serving this functionality"). Each laptop driver with backlight support got a similar change around febf2d9. The changes to the other drivers look correct; see e.g. a598c82f for a similar but correct change. The regression is also in 2.6.28. Signed-off-by: Michael Spang Acked-by: Thomas Renninger Cc: Zhang Rui Cc: Andi Kleen Cc: Carlos Corbacho Cc: Len Brown Cc: "Rafael J. Wysocki" Cc: [2.6.28.x] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/platform/x86/acer-wmi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 94c9f911824e..6bcca616a704 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -1297,7 +1297,7 @@ static int __init acer_wmi_init(void) set_quirks(); - if (!acpi_video_backlight_support() && has_cap(ACER_CAP_BRIGHTNESS)) { + if (acpi_video_backlight_support() && has_cap(ACER_CAP_BRIGHTNESS)) { interface->capability &= ~ACER_CAP_BRIGHTNESS; printk(ACER_INFO "Brightness must be controlled by " "generic video driver\n"); -- cgit v1.2.2 From 02d46e07e538c285accb5c000a7db3a97eff1fbf Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 12 Mar 2009 14:31:36 -0700 Subject: mfd: add support for WM8351 revision B No software visible difference from revision A. 
Signed-off-by: Mark Brown Cc: Samuel Ortiz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/mfd/wm8350-core.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c index 84d5ea1ec171..b457a05b28d9 100644 --- a/drivers/mfd/wm8350-core.c +++ b/drivers/mfd/wm8350-core.c @@ -1383,6 +1383,11 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq, wm8350->power.rev_g_coeff = 1; break; + case 1: + dev_info(wm8350->dev, "WM8351 Rev B\n"); + wm8350->power.rev_g_coeff = 1; + break; + default: dev_err(wm8350->dev, "Unknown WM8351 CHIP_REV\n"); ret = -ENODEV; -- cgit v1.2.2 From c12e56ef6951f4fce1afe9ef6aab9243ea9a9b04 Mon Sep 17 00:00:00 2001 From: Faisal Latif Date: Thu, 12 Mar 2009 14:34:59 -0700 Subject: RDMA/nes: Don't allow userspace QPs to use STag zero STag zero is a special STag that allows consumers to access any bus address without registering memory. The nes driver unfortunately allows STag zero to be used even with QPs created by unprivileged userspace consumers, which means that any process with direct verbs access to the nes device can read and write any memory accessible to the underlying PCI device (usually any memory in the system). Such access is usually given for cluster software such as MPI to use, so this is a local privilege escalation bug on most systems running this driver. The driver was using STag zero to receive the last streaming mode data; to allow STag zero to be disabled for unprivileged QPs, the driver now registers a special MR for this data. Cc: Signed-off-by: Faisal Latif Signed-off-by: Roland Dreier Signed-off-by: Linus Torvalds --- drivers/infiniband/hw/nes/nes_cm.c | 39 ++++++++++++++++++++++++++++++----- drivers/infiniband/hw/nes/nes_verbs.c | 2 ++ drivers/infiniband/hw/nes/nes_verbs.h | 1 + 3 files changed, 37 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index a01b4488208b..4a65b96db2c8 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -2490,12 +2490,14 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt) int ret = 0; struct nes_vnic *nesvnic; struct nes_device *nesdev; + struct nes_ib_device *nesibdev; nesvnic = to_nesvnic(nesqp->ibqp.device); if (!nesvnic) return -EINVAL; nesdev = nesvnic->nesdev; + nesibdev = nesvnic->nesibdev; nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n", atomic_read(&nesvnic->netdev->refcnt)); @@ -2507,6 +2509,8 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt) } else { /* Need to free the Last Streaming Mode Message */ if (nesqp->ietf_frame) { + if (nesqp->lsmm_mr) + nesibdev->ibdev.dereg_mr(nesqp->lsmm_mr); pci_free_consistent(nesdev->pcidev, nesqp->private_data_len+sizeof(struct ietf_mpa_frame), nesqp->ietf_frame, nesqp->ietf_frame_pbase); @@ -2543,6 +2547,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) u32 crc_value; int ret; int passive_state; + struct nes_ib_device *nesibdev; + struct ib_mr *ibmr = NULL; + struct ib_phys_buf ibphysbuf; + struct nes_pd *nespd; + + ibqp = nes_get_qp(cm_id->device, conn_param->qpn); if (!ibqp) @@ -2601,6 +2611,26 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) if (cm_id->remote_addr.sin_addr.s_addr != cm_id->local_addr.sin_addr.s_addr) { u64temp = (unsigned long)nesqp; + nesibdev = nesvnic->nesibdev; + nespd = nesqp->nespd; + ibphysbuf.addr = nesqp->ietf_frame_pbase; + ibphysbuf.size 
= conn_param->private_data_len + + sizeof(struct ietf_mpa_frame); + ibmr = nesibdev->ibdev.reg_phys_mr((struct ib_pd *)nespd, + &ibphysbuf, 1, + IB_ACCESS_LOCAL_WRITE, + (u64 *)&nesqp->ietf_frame); + if (!ibmr) { + nes_debug(NES_DBG_CM, "Unable to register memory region" + "for lSMM for cm_node = %p \n", + cm_node); + return -ENOMEM; + } + + ibmr->pd = &nespd->ibpd; + ibmr->device = nespd->ibpd.device; + nesqp->lsmm_mr = ibmr; + u64temp |= NES_SW_CONTEXT_ALIGN>>1; set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, @@ -2611,14 +2641,13 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = cpu_to_le32(conn_param->private_data_len + sizeof(struct ietf_mpa_frame)); - wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = - cpu_to_le32((u32)nesqp->ietf_frame_pbase); - wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = - cpu_to_le32((u32)((u64)nesqp->ietf_frame_pbase >> 32)); + set_wqe_64bit_value(wqe->wqe_words, + NES_IWARP_SQ_WQE_FRAG0_LOW_IDX, + (u64)nesqp->ietf_frame); wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = cpu_to_le32(conn_param->private_data_len + sizeof(struct ietf_mpa_frame)); - wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0; + wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = ibmr->lkey; nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 4fdb72454f94..d93a6562817c 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -1360,8 +1360,10 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, NES_QPCONTEXT_MISC_RQ_SIZE_SHIFT); nesqp->nesqp_context->misc |= cpu_to_le32((u32)nesqp->hwqp.sq_encoded_size << NES_QPCONTEXT_MISC_SQ_SIZE_SHIFT); + if (!udata) { nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_PRIV_EN); nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_FAST_REGISTER_EN); + } nesqp->nesqp_context->cqs = cpu_to_le32(nesqp->nesscq->hw_cq.cq_number + ((u32)nesqp->nesrcq->hw_cq.cq_number << 16)); u64temp = (u64)nesqp->hwqp.sq_pbase; diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h index 6c6b4da5184f..ae0ca9bc83bd 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.h +++ b/drivers/infiniband/hw/nes/nes_verbs.h @@ -134,6 +134,7 @@ struct nes_qp { struct ietf_mpa_frame *ietf_frame; dma_addr_t ietf_frame_pbase; wait_queue_head_t state_waitq; + struct ib_mr *lsmm_mr; unsigned long socket; struct nes_hw_qp hwqp; struct work_struct work; -- cgit v1.2.2 From 257b17ca030387cb17314cd1851507bdd1b4ddd5 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 25 Mar 2009 09:13:23 -0700 Subject: dmaengine: fail device registration if channel registration fails Atsushi points out: "If alloc_percpu or kzalloc failed, chan_id does not match with its position in device->channels list. And above "continue" looks buggy anyway. Keeping incomplete channels in device->channels list looks very dangerous..." Also, fix up leakage of idr_ref in the idr_pre_get() and channel init fail cases. 
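For readers less familiar with the IDR interface of this era, the new get_dma_id() helper in the hunk below wraps its usual two-step allocate pattern; a rough standalone sketch of that pattern is shown here (the helper name is a placeholder, and the real code additionally holds dma_list_mutex around idr_get_new()):

	static int example_get_id(struct idr *idr, void *ptr, int *id)
	{
		int rc;

		do {
			/* preload memory for the upcoming allocation ... */
			if (!idr_pre_get(idr, GFP_KERNEL))
				return -ENOMEM;
			/* ... then allocate; -EAGAIN means the preloaded
			 * memory was consumed by a concurrent allocation,
			 * so preload again and retry */
			rc = idr_get_new(idr, ptr, id);
		} while (rc == -EAGAIN);

		return rc;
	}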
Reported-by: Atsushi Nemoto Signed-off-by: Dan Williams --- drivers/dma/dmaengine.c | 51 +++++++++++++++++++++++++++++++++++++------------ 1 file changed, 39 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 280a9d263eb3..49243d14b894 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -602,6 +602,24 @@ void dmaengine_put(void) } EXPORT_SYMBOL(dmaengine_put); +static int get_dma_id(struct dma_device *device) +{ + int rc; + + idr_retry: + if (!idr_pre_get(&dma_idr, GFP_KERNEL)) + return -ENOMEM; + mutex_lock(&dma_list_mutex); + rc = idr_get_new(&dma_idr, NULL, &device->dev_id); + mutex_unlock(&dma_list_mutex); + if (rc == -EAGAIN) + goto idr_retry; + else if (rc != 0) + return rc; + + return 0; +} + /** * dma_async_device_register - registers DMA devices found * @device: &dma_device @@ -640,27 +658,25 @@ int dma_async_device_register(struct dma_device *device) idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); if (!idr_ref) return -ENOMEM; - atomic_set(idr_ref, 0); - idr_retry: - if (!idr_pre_get(&dma_idr, GFP_KERNEL)) - return -ENOMEM; - mutex_lock(&dma_list_mutex); - rc = idr_get_new(&dma_idr, NULL, &device->dev_id); - mutex_unlock(&dma_list_mutex); - if (rc == -EAGAIN) - goto idr_retry; - else if (rc != 0) + rc = get_dma_id(device); + if (rc != 0) { + kfree(idr_ref); return rc; + } + + atomic_set(idr_ref, 0); /* represent channels in sysfs. Probably want devs too */ list_for_each_entry(chan, &device->channels, device_node) { + rc = -ENOMEM; chan->local = alloc_percpu(typeof(*chan->local)); if (chan->local == NULL) - continue; + goto err_out; chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); if (chan->dev == NULL) { free_percpu(chan->local); - continue; + chan->local = NULL; + goto err_out; } chan->chan_id = chancnt++; @@ -677,6 +693,8 @@ int dma_async_device_register(struct dma_device *device) if (rc) { free_percpu(chan->local); chan->local = NULL; + kfree(chan->dev); + atomic_dec(idr_ref); goto err_out; } chan->client_count = 0; @@ -707,6 +725,15 @@ int dma_async_device_register(struct dma_device *device) return 0; err_out: + /* if we never registered a channel just release the idr */ + if (atomic_read(idr_ref) == 0) { + mutex_lock(&dma_list_mutex); + idr_remove(&dma_idr, device->dev_id); + mutex_unlock(&dma_list_mutex); + kfree(idr_ref); + return rc; + } + list_for_each_entry(chan, &device->channels, device_node) { if (chan->local == NULL) continue; -- cgit v1.2.2 From 0149f7d5dc66dcffbb044ba005a5378a5864d2a3 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Wed, 25 Mar 2009 09:13:23 -0700 Subject: dma: ipu_idmac driver cosmetic clean-up Remove superfluous semicolons, update comments. 
Signed-off-by: Guennadi Liakhovetski Signed-off-by: Dan Williams --- drivers/dma/ipu/ipu_idmac.c | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index ae50a9d1a4e6..a6f7294c8598 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -107,7 +107,7 @@ static uint32_t bytes_per_pixel(enum pixel_fmt fmt) } } -/* Enable / disable direct write to memory by the Camera Sensor Interface */ +/* Enable direct write to memory by the Camera Sensor Interface */ static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel) { uint32_t ic_conf, mask; @@ -126,6 +126,7 @@ static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel) idmac_write_icreg(ipu, ic_conf, IC_CONF); } +/* Called under spin_lock_irqsave(&ipu_data.lock) */ static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel) { uint32_t ic_conf, mask; @@ -422,7 +423,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params, break; default: dev_err(ipu_data.dev, - "mxc ipu: unimplemented pixel format %d\n", pixel_fmt); + "mx3 ipu: unimplemented pixel format %d\n", pixel_fmt); break; } @@ -433,20 +434,20 @@ static void ipu_ch_param_set_burst_size(union chan_param_mem *params, uint16_t burst_pixels) { params->pp.npb = burst_pixels - 1; -}; +} static void ipu_ch_param_set_buffer(union chan_param_mem *params, dma_addr_t buf0, dma_addr_t buf1) { params->pp.eba0 = buf0; params->pp.eba1 = buf1; -}; +} static void ipu_ch_param_set_rotation(union chan_param_mem *params, enum ipu_rotate_mode rotate) { params->pp.bam = rotate; -}; +} static void ipu_write_param_mem(uint32_t addr, uint32_t *data, uint32_t num_words) @@ -571,7 +572,7 @@ static uint32_t dma_param_addr(uint32_t dma_ch) { /* Channel Parameter Memory */ return 0x10000 | (dma_ch << 4); -}; +} static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel, bool prio) @@ -611,7 +612,8 @@ static uint32_t ipu_channel_conf_mask(enum ipu_channel channel) /** * ipu_enable_channel() - enable an IPU channel. - * @channel: channel ID. + * @idmac: IPU DMAC context. + * @ichan: IDMAC channel. * @return: 0 on success or negative error code on failure. */ static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan) @@ -649,7 +651,7 @@ static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan) /** * ipu_init_channel_buffer() - initialize a buffer for logical IPU channel. - * @channel: channel ID. + * @ichan: IDMAC channel. * @pixel_fmt: pixel format of buffer. Pixel format is a FOURCC ASCII code. * @width: width of buffer in pixels. * @height: height of buffer in pixels. @@ -687,7 +689,7 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan, } /* IC channel's stride must be a multiple of 8 pixels */ - if ((channel <= 13) && (stride % 8)) { + if ((channel <= IDMAC_IC_13) && (stride % 8)) { dev_err(ipu->dev, "Stride must be 8 pixel multiple\n"); return -EINVAL; } @@ -752,7 +754,7 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n) /** * ipu_update_channel_buffer() - update physical address of a channel buffer. - * @channel: channel ID. + * @ichan: IDMAC channel. * @buffer_n: buffer number to update. * 0 or 1 are the only valid values. * @phyaddr: buffer physical address. 
@@ -1341,13 +1343,7 @@ static void ipu_gc_tasklet(unsigned long arg) } } -/* - * At the time .device_alloc_chan_resources() method is called, we cannot know, - * whether the client will accept the channel. Thus we must only check, if we - * can satisfy client's request but the only real criterion to verify, whether - * the client has accepted our offer is the client_count. That's why we have to - * perform the rest of our allocation tasks on the first call to this function. - */ +/* Allocate and initialise a transfer descriptor. */ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_data_direction direction, unsigned long tx_flags) @@ -1432,8 +1428,7 @@ static void __idmac_terminate_all(struct dma_chan *chan) struct idmac_tx_desc *desc = ichan->desc + i; if (list_empty(&desc->list)) /* Descriptor was prepared, but not submitted */ - list_add(&desc->list, - &ichan->free_list); + list_add(&desc->list, &ichan->free_list); async_tx_clear_ack(&desc->txd); } -- cgit v1.2.2 From 234f2df56f5b05756c444edc9879145deddf69f4 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Wed, 25 Mar 2009 09:13:24 -0700 Subject: dma: improve section assignment in i.MX31 IPU DMA driver The i.MX31 IPU DMA driver is a platform driver, but doesn't need hotplug, so we can use __init and __exit function attributes. Signed-off-by: Guennadi Liakhovetski Signed-off-by: Dan Williams --- drivers/dma/ipu/ipu_idmac.c | 8 ++++---- drivers/dma/ipu/ipu_irq.c | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index a6f7294c8598..b759ae9315ba 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -1576,7 +1576,7 @@ static int __init ipu_idmac_init(struct ipu *ipu) return dma_async_device_register(&idmac->dma); } -static void ipu_idmac_exit(struct ipu *ipu) +static void __exit ipu_idmac_exit(struct ipu *ipu) { int i; struct idmac *idmac = &ipu->idmac; @@ -1595,7 +1595,7 @@ static void ipu_idmac_exit(struct ipu *ipu) * IPU common probe / remove */ -static int ipu_probe(struct platform_device *pdev) +static int __init ipu_probe(struct platform_device *pdev) { struct ipu_platform_data *pdata = pdev->dev.platform_data; struct resource *mem_ipu, *mem_ic; @@ -1695,7 +1695,7 @@ err_noirq: return ret; } -static int ipu_remove(struct platform_device *pdev) +static int __exit ipu_remove(struct platform_device *pdev) { struct ipu *ipu = platform_get_drvdata(pdev); @@ -1720,7 +1720,7 @@ static struct platform_driver ipu_platform_driver = { .name = "ipu-core", .owner = THIS_MODULE, }, - .remove = ipu_remove, + .remove = __exit_p(ipu_remove), }; static int __init ipu_init(void) diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c index 83f532cc767f..dd8ebc75b667 100644 --- a/drivers/dma/ipu/ipu_irq.c +++ b/drivers/dma/ipu/ipu_irq.c @@ -352,7 +352,7 @@ static struct irq_chip ipu_irq_chip = { }; /* Install the IRQ handler */ -int ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev) +int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev) { struct ipu_platform_data *pdata = dev->dev.platform_data; unsigned int irq, irq_base, i; -- cgit v1.2.2 From 8d47bae004f062630f69f7f83d098424252e232d Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Wed, 25 Mar 2009 09:13:24 -0700 Subject: dma: i.MX31 IPU DMA robustness improvements Add DMA error handling to the ISR, move common code fragments to 
functions, fix scatter-gather element queuing in the ISR, survive channel freeing and re-allocation in a quick succession. Signed-off-by: Guennadi Liakhovetski Signed-off-by: Dan Williams --- drivers/dma/ipu/ipu_idmac.c | 265 ++++++++++++++++++++++++++++++-------------- 1 file changed, 181 insertions(+), 84 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index b759ae9315ba..0a525c450f24 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -28,6 +28,9 @@ #define FS_VF_IN_VALID 0x00000002 #define FS_ENC_IN_VALID 0x00000001 +static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan, + bool wait_for_stop); + /* * There can be only one, we could allocate it dynamically, but then we'd have * to add an extra parameter to some functions, and use something as ugly as @@ -762,9 +765,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n) * function will fail if the buffer is set to ready. */ /* Called under spin_lock(_irqsave)(&ichan->lock) */ -static int ipu_update_channel_buffer(enum ipu_channel channel, +static int ipu_update_channel_buffer(struct idmac_channel *ichan, int buffer_n, dma_addr_t phyaddr) { + enum ipu_channel channel = ichan->dma_chan.chan_id; uint32_t reg; unsigned long flags; @@ -773,8 +777,8 @@ static int ipu_update_channel_buffer(enum ipu_channel channel, if (buffer_n == 0) { reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY); if (reg & (1UL << channel)) { - spin_unlock_irqrestore(&ipu_data.lock, flags); - return -EACCES; + ipu_ic_disable_task(&ipu_data, channel); + ichan->status = IPU_CHANNEL_READY; } /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */ @@ -784,8 +788,8 @@ static int ipu_update_channel_buffer(enum ipu_channel channel, } else { reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY); if (reg & (1UL << channel)) { - spin_unlock_irqrestore(&ipu_data.lock, flags); - return -EACCES; + ipu_ic_disable_task(&ipu_data, channel); + ichan->status = IPU_CHANNEL_READY; } /* Check if double-buffering is already enabled */ @@ -806,6 +810,39 @@ static int ipu_update_channel_buffer(enum ipu_channel channel, return 0; } +/* Called under spin_lock_irqsave(&ichan->lock) */ +static int ipu_submit_buffer(struct idmac_channel *ichan, + struct idmac_tx_desc *desc, struct scatterlist *sg, int buf_idx) +{ + unsigned int chan_id = ichan->dma_chan.chan_id; + struct device *dev = &ichan->dma_chan.dev->device; + int ret; + + if (async_tx_test_ack(&desc->txd)) + return -EINTR; + + /* + * On first invocation this shouldn't be necessary, the call to + * ipu_init_channel_buffer() above will set addresses for us, so we + * could make it conditional on status >= IPU_CHANNEL_ENABLED, but + * doing it again shouldn't hurt either. 
+ */ + ret = ipu_update_channel_buffer(ichan, buf_idx, + sg_dma_address(sg)); + + if (ret < 0) { + dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n", + sg, chan_id, buf_idx); + return ret; + } + + ipu_select_buffer(chan_id, buf_idx); + dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n", + sg, chan_id, buf_idx); + + return 0; +} + /* Called under spin_lock_irqsave(&ichan->lock) */ static int ipu_submit_channel_buffers(struct idmac_channel *ichan, struct idmac_tx_desc *desc) @@ -817,20 +854,10 @@ static int ipu_submit_channel_buffers(struct idmac_channel *ichan, if (!ichan->sg[i]) { ichan->sg[i] = sg; - /* - * On first invocation this shouldn't be necessary, the - * call to ipu_init_channel_buffer() above will set - * addresses for us, so we could make it conditional - * on status >= IPU_CHANNEL_ENABLED, but doing it again - * shouldn't hurt either. - */ - ret = ipu_update_channel_buffer(ichan->dma_chan.chan_id, i, - sg_dma_address(sg)); + ret = ipu_submit_buffer(ichan, desc, sg, i); if (ret < 0) return ret; - ipu_select_buffer(ichan->dma_chan.chan_id, i); - sg = sg_next(sg); } } @@ -844,19 +871,22 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx) struct idmac_channel *ichan = to_idmac_chan(tx->chan); struct idmac *idmac = to_idmac(tx->chan->device); struct ipu *ipu = to_ipu(idmac); + struct device *dev = &ichan->dma_chan.dev->device; dma_cookie_t cookie; unsigned long flags; + int ret; /* Sanity check */ if (!list_empty(&desc->list)) { /* The descriptor doesn't belong to client */ - dev_err(&ichan->dma_chan.dev->device, - "Descriptor %p not prepared!\n", tx); + dev_err(dev, "Descriptor %p not prepared!\n", tx); return -EBUSY; } mutex_lock(&ichan->chan_mutex); + async_tx_clear_ack(tx); + if (ichan->status < IPU_CHANNEL_READY) { struct idmac_video_param *video = &ichan->params.video; /* @@ -880,16 +910,7 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx) goto out; } - /* ipu->lock can be taken under ichan->lock, but not v.v. */ - spin_lock_irqsave(&ichan->lock, flags); - - /* submit_buffers() atomically verifies and fills empty sg slots */ - cookie = ipu_submit_channel_buffers(ichan, desc); - - spin_unlock_irqrestore(&ichan->lock, flags); - - if (cookie < 0) - goto out; + dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]); cookie = ichan->dma_chan.cookie; @@ -899,24 +920,40 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx) /* from dmaengine.h: "last cookie value returned to client" */ ichan->dma_chan.cookie = cookie; tx->cookie = cookie; + + /* ipu->lock can be taken under ichan->lock, but not v.v. 
*/ spin_lock_irqsave(&ichan->lock, flags); + list_add_tail(&desc->list, &ichan->queue); + /* submit_buffers() atomically verifies and fills empty sg slots */ + ret = ipu_submit_channel_buffers(ichan, desc); + spin_unlock_irqrestore(&ichan->lock, flags); + if (ret < 0) { + cookie = ret; + goto dequeue; + } + if (ichan->status < IPU_CHANNEL_ENABLED) { - int ret = ipu_enable_channel(idmac, ichan); + ret = ipu_enable_channel(idmac, ichan); if (ret < 0) { cookie = ret; - spin_lock_irqsave(&ichan->lock, flags); - list_del_init(&desc->list); - spin_unlock_irqrestore(&ichan->lock, flags); - tx->cookie = cookie; - ichan->dma_chan.cookie = cookie; + goto dequeue; } } dump_idmac_reg(ipu); +dequeue: + if (cookie < 0) { + spin_lock_irqsave(&ichan->lock, flags); + list_del_init(&desc->list); + spin_unlock_irqrestore(&ichan->lock, flags); + tx->cookie = cookie; + ichan->dma_chan.cookie = cookie; + } + out: mutex_unlock(&ichan->chan_mutex); @@ -1163,6 +1200,24 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan, return 0; } +static struct scatterlist *idmac_sg_next(struct idmac_channel *ichan, + struct idmac_tx_desc **desc, struct scatterlist *sg) +{ + struct scatterlist *sgnew = sg ? sg_next(sg) : NULL; + + if (sgnew) + /* next sg-element in this list */ + return sgnew; + + if ((*desc)->list.next == &ichan->queue) + /* No more descriptors on the queue */ + return NULL; + + /* Fetch next descriptor */ + *desc = list_entry((*desc)->list.next, struct idmac_tx_desc, list); + return (*desc)->sg; +} + /* * We have several possibilities here: * current BUF next BUF @@ -1178,23 +1233,46 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan, static irqreturn_t idmac_interrupt(int irq, void *dev_id) { struct idmac_channel *ichan = dev_id; + struct device *dev = &ichan->dma_chan.dev->device; unsigned int chan_id = ichan->dma_chan.chan_id; struct scatterlist **sg, *sgnext, *sgnew = NULL; /* Next transfer descriptor */ - struct idmac_tx_desc *desc = NULL, *descnew; + struct idmac_tx_desc *desc, *descnew; dma_async_tx_callback callback; void *callback_param; bool done = false; - u32 ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY), - ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY), - curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF); + u32 ready0, ready1, curbuf, err; + unsigned long flags; /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */ - pr_debug("IDMAC irq %d\n", irq); + dev_dbg(dev, "IDMAC irq %d, buf %d\n", irq, ichan->active_buffer); + + spin_lock_irqsave(&ipu_data.lock, flags); + + ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY); + ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY); + curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF); + err = idmac_read_ipureg(&ipu_data, IPU_INT_STAT_4); + + if (err & (1 << chan_id)) { + idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4); + spin_unlock_irqrestore(&ipu_data.lock, flags); + /* + * Doing this + * ichan->sg[0] = ichan->sg[1] = NULL; + * you can force channel re-enable on the next tx_submit(), but + * this is dirty - think about descriptors with multiple + * sg elements. 
+ */ + dev_warn(dev, "NFB4EOF on channel %d, ready %x, %x, cur %x\n", + chan_id, ready0, ready1, curbuf); + return IRQ_HANDLED; + } + spin_unlock_irqrestore(&ipu_data.lock, flags); + /* Other interrupts do not interfere with this channel */ spin_lock(&ichan->lock); - if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 && ((curbuf >> chan_id) & 1) == ichan->active_buffer)) { int i = 100; @@ -1209,19 +1287,23 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) if (!i) { spin_unlock(&ichan->lock); - dev_dbg(ichan->dma_chan.device->dev, + dev_dbg(dev, "IRQ on active buffer on channel %x, active " "%d, ready %x, %x, current %x!\n", chan_id, ichan->active_buffer, ready0, ready1, curbuf); return IRQ_NONE; - } + } else + dev_dbg(dev, + "Buffer deactivated on channel %x, active " + "%d, ready %x, %x, current %x, rest %d!\n", chan_id, + ichan->active_buffer, ready0, ready1, curbuf, i); } if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) || (!ichan->active_buffer && (ready0 >> chan_id) & 1) )) { spin_unlock(&ichan->lock); - dev_dbg(ichan->dma_chan.device->dev, + dev_dbg(dev, "IRQ with active buffer still ready on channel %x, " "active %d, ready %x, %x!\n", chan_id, ichan->active_buffer, ready0, ready1); @@ -1229,8 +1311,9 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) } if (unlikely(list_empty(&ichan->queue))) { + ichan->sg[ichan->active_buffer] = NULL; spin_unlock(&ichan->lock); - dev_err(ichan->dma_chan.device->dev, + dev_err(dev, "IRQ without queued buffers on channel %x, active %d, " "ready %x, %x!\n", chan_id, ichan->active_buffer, ready0, ready1); @@ -1245,40 +1328,44 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) sg = &ichan->sg[ichan->active_buffer]; sgnext = ichan->sg[!ichan->active_buffer]; + if (!*sg) { + spin_unlock(&ichan->lock); + return IRQ_HANDLED; + } + + desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list); + descnew = desc; + + dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n", + irq, sg_dma_address(*sg), sgnext ? 
sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf); + + /* Find the descriptor of sgnext */ + sgnew = idmac_sg_next(ichan, &descnew, *sg); + if (sgnext != sgnew) + dev_err(dev, "Submitted buffer %p, next buffer %p\n", sgnext, sgnew); + /* * if sgnext == NULL sg must be the last element in a scatterlist and * queue must be empty */ if (unlikely(!sgnext)) { - if (unlikely(sg_next(*sg))) { - dev_err(ichan->dma_chan.device->dev, - "Broken buffer-update locking on channel %x!\n", - chan_id); - /* We'll let the user catch up */ + if (!WARN_ON(sg_next(*sg))) + dev_dbg(dev, "Underrun on channel %x\n", chan_id); + ichan->sg[!ichan->active_buffer] = sgnew; + + if (unlikely(sgnew)) { + ipu_submit_buffer(ichan, descnew, sgnew, !ichan->active_buffer); } else { - /* Underrun */ + spin_lock_irqsave(&ipu_data.lock, flags); ipu_ic_disable_task(&ipu_data, chan_id); - dev_dbg(ichan->dma_chan.device->dev, - "Underrun on channel %x\n", chan_id); + spin_unlock_irqrestore(&ipu_data.lock, flags); ichan->status = IPU_CHANNEL_READY; /* Continue to check for complete descriptor */ } } - desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list); - - /* First calculate and submit the next sg element */ - if (likely(sgnext)) - sgnew = sg_next(sgnext); - - if (unlikely(!sgnew)) { - /* Start a new scatterlist, if any queued */ - if (likely(desc->list.next != &ichan->queue)) { - descnew = list_entry(desc->list.next, - struct idmac_tx_desc, list); - sgnew = &descnew->sg[0]; - } - } + /* Calculate and submit the next sg element */ + sgnew = idmac_sg_next(ichan, &descnew, sgnew); if (unlikely(!sg_next(*sg)) || !sgnext) { /* @@ -1291,17 +1378,13 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) *sg = sgnew; - if (likely(sgnew)) { - int ret; - - ret = ipu_update_channel_buffer(chan_id, ichan->active_buffer, - sg_dma_address(*sg)); - if (ret < 0) - dev_err(ichan->dma_chan.device->dev, - "Failed to update buffer on channel %x buffer %d!\n", - chan_id, ichan->active_buffer); - else - ipu_select_buffer(chan_id, ichan->active_buffer); + if (likely(sgnew) && + ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) { + callback = desc->txd.callback; + callback_param = desc->txd.callback_param; + spin_unlock(&ichan->lock); + callback(callback_param); + spin_lock(&ichan->lock); } /* Flip the active buffer - even if update above failed */ @@ -1329,13 +1412,20 @@ static void ipu_gc_tasklet(unsigned long arg) struct idmac_channel *ichan = ipu->channel + i; struct idmac_tx_desc *desc; unsigned long flags; - int j; + struct scatterlist *sg; + int j, k; for (j = 0; j < ichan->n_tx_desc; j++) { desc = ichan->desc + j; spin_lock_irqsave(&ichan->lock, flags); if (async_tx_test_ack(&desc->txd)) { list_move(&desc->list, &ichan->free_list); + for_each_sg(desc->sg, sg, desc->sg_len, k) { + if (ichan->sg[0] == sg) + ichan->sg[0] = NULL; + else if (ichan->sg[1] == sg) + ichan->sg[1] = NULL; + } async_tx_clear_ack(&desc->txd); } spin_unlock_irqrestore(&ichan->lock, flags); @@ -1471,15 +1561,22 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan) goto eimap; ichan->eof_irq = ret; - ret = request_irq(ichan->eof_irq, idmac_interrupt, 0, - ichan->eof_name, ichan); - if (ret < 0) - goto erirq; + + /* + * Important to first disable the channel, because maybe someone + * used it before us, e.g., the bootloader + */ + ipu_disable_channel(idmac, ichan, true); ret = ipu_init_channel(idmac, ichan); if (ret < 0) goto eichan; + ret = request_irq(ichan->eof_irq, idmac_interrupt, 0, + ichan->eof_name, ichan); + if 
(ret < 0) + goto erirq; + ichan->status = IPU_CHANNEL_INITIALIZED; dev_dbg(&ichan->dma_chan.dev->device, "Found channel 0x%x, irq %d\n", @@ -1487,9 +1584,9 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan) return ret; -eichan: - free_irq(ichan->eof_irq, ichan); erirq: + ipu_uninit_channel(idmac, ichan); +eichan: ipu_irq_unmap(ichan->dma_chan.chan_id); eimap: return ret; -- cgit v1.2.2 From ccccce229c633a92c42cd1a40c0738d7b0d12644 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 25 Mar 2009 09:13:24 -0700 Subject: dmaengine: initialize tx_list in dma_async_tx_descriptor_init Centralize this common initialization (and one case where ipu_idmac is duplicating ->chan initialization). Signed-off-by: Dan Williams --- drivers/dma/dmaengine.c | 1 + drivers/dma/dw_dmac.c | 1 - drivers/dma/fsldma.c | 1 - drivers/dma/ioat_dma.c | 1 - drivers/dma/iop-adma.c | 1 - drivers/dma/ipu/ipu_idmac.c | 2 -- drivers/dma/mv_xor.c | 1 - 7 files changed, 1 insertion(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 49243d14b894..a41d1ea10fa3 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -920,6 +920,7 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, { tx->chan = chan; spin_lock_init(&tx->lock); + INIT_LIST_HEAD(&tx->tx_list); } EXPORT_SYMBOL(dma_async_tx_descriptor_init); diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index a97c07eef7ec..862fc9ce9d86 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -826,7 +826,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) dma_async_tx_descriptor_init(&desc->txd, chan); desc->txd.tx_submit = dwc_tx_submit; desc->txd.flags = DMA_CTRL_ACK; - INIT_LIST_HEAD(&desc->txd.tx_list); desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli, sizeof(desc->lli), DMA_TO_DEVICE); dwc_desc_put(dwc, desc); diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 86d6da47f558..da8a8ed9e411 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -354,7 +354,6 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor( dma_async_tx_descriptor_init(&desc_sw->async_tx, &fsl_chan->common); desc_sw->async_tx.tx_submit = fsl_dma_tx_submit; - INIT_LIST_HEAD(&desc_sw->async_tx.tx_list); desc_sw->async_tx.phys = pdesc; } diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c index 5905cd36bcd2..e4fc33c1c32f 100644 --- a/drivers/dma/ioat_dma.c +++ b/drivers/dma/ioat_dma.c @@ -693,7 +693,6 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor( desc_sw->async_tx.tx_submit = ioat2_tx_submit; break; } - INIT_LIST_HEAD(&desc_sw->async_tx.tx_list); desc_sw->hw = desc; desc_sw->async_tx.phys = phys; diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 16adbe61cfb2..2f052265122f 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -498,7 +498,6 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan) slot->async_tx.tx_submit = iop_adma_tx_submit; INIT_LIST_HEAD(&slot->chain_node); INIT_LIST_HEAD(&slot->slot_node); - INIT_LIST_HEAD(&slot->async_tx.tx_list); hw_desc = (char *) iop_chan->device->dma_desc_pool; slot->async_tx.phys = (dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE]; diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 0a525c450f24..7ce0e25266dc 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -983,8 +983,6 @@ static int idmac_desc_alloc(struct idmac_channel *ichan, int n) memset(txd, 0, sizeof(*txd)); 
dma_async_tx_descriptor_init(txd, &ichan->dma_chan); txd->tx_submit = idmac_tx_submit; - txd->chan = &ichan->dma_chan; - INIT_LIST_HEAD(&txd->tx_list); list_add(&desc->list, &ichan->free_list); diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index cb7f26fb9f18..ddab94f51224 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -632,7 +632,6 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) slot->async_tx.tx_submit = mv_xor_tx_submit; INIT_LIST_HEAD(&slot->chain_node); INIT_LIST_HEAD(&slot->slot_node); - INIT_LIST_HEAD(&slot->async_tx.tx_list); hw_desc = (char *) mv_chan->device->dma_desc_pool; slot->async_tx.phys = (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; -- cgit v1.2.2 From 729b5d1b8ec72c28e99840b3f300ba67726e3ab9 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 25 Mar 2009 09:13:25 -0700 Subject: dmaengine: allow dma support for async_tx to be toggled Provide a config option for blocking the allocation of dma channels to the async_tx api. Signed-off-by: Dan Williams --- drivers/dma/Kconfig | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers') diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 48ea59e79672..3b3c01b6f1ee 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -98,6 +98,17 @@ config NET_DMA Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise say N. +config ASYNC_TX_DMA + bool "Async_tx: Offload support for the async_tx api" + depends on DMA_ENGINE + help + This allows the async_tx api to take advantage of offload engines for + memcpy, memset, xor, and raid6 p+q operations. If your platform has + a dma engine that can perform raid operations and you have enabled + MD_RAID456 say Y. + + If unsure, say N. + config DMATEST tristate "DMA Test client" depends on DMA_ENGINE -- cgit v1.2.2 From b54d5cb9156868fb4f27ccd46a3afb0bf3ef8e0c Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 25 Mar 2009 09:13:25 -0700 Subject: dmatest: add xor test Extend dmatest to launch a thread per supported operation type and add an xor test. Signed-off-by: Dan Williams --- drivers/dma/dmatest.c | 274 ++++++++++++++++++++++++++++++++++---------------- 1 file changed, 189 insertions(+), 85 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index e190d8b30700..224acf478fec 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -38,6 +38,11 @@ module_param(max_channels, uint, S_IRUGO); MODULE_PARM_DESC(max_channels, "Maximum number of channels to use (default: all)"); +static unsigned int xor_sources = 3; +module_param(xor_sources, uint, S_IRUGO); +MODULE_PARM_DESC(xor_sources, + "Number of xor source buffers (default: 3)"); + /* * Initialization patterns. All bytes in the source buffer has bit 7 * set, all bytes in the destination buffer has bit 7 cleared. 
@@ -59,8 +64,9 @@ struct dmatest_thread { struct list_head node; struct task_struct *task; struct dma_chan *chan; - u8 *srcbuf; - u8 *dstbuf; + u8 **srcs; + u8 **dsts; + enum dma_transaction_type type; }; struct dmatest_chan { @@ -98,30 +104,37 @@ static unsigned long dmatest_random(void) return buf; } -static void dmatest_init_srcbuf(u8 *buf, unsigned int start, unsigned int len) +static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len) { unsigned int i; - - for (i = 0; i < start; i++) - buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); - for ( ; i < start + len; i++) - buf[i] = PATTERN_SRC | PATTERN_COPY - | (~i & PATTERN_COUNT_MASK);; - for ( ; i < test_buf_size; i++) - buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); + u8 *buf; + + for (; (buf = *bufs); bufs++) { + for (i = 0; i < start; i++) + buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); + for ( ; i < start + len; i++) + buf[i] = PATTERN_SRC | PATTERN_COPY + | (~i & PATTERN_COUNT_MASK);; + for ( ; i < test_buf_size; i++) + buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); + buf++; + } } -static void dmatest_init_dstbuf(u8 *buf, unsigned int start, unsigned int len) +static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len) { unsigned int i; - - for (i = 0; i < start; i++) - buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); - for ( ; i < start + len; i++) - buf[i] = PATTERN_DST | PATTERN_OVERWRITE - | (~i & PATTERN_COUNT_MASK); - for ( ; i < test_buf_size; i++) - buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); + u8 *buf; + + for (; (buf = *bufs); bufs++) { + for (i = 0; i < start; i++) + buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); + for ( ; i < start + len; i++) + buf[i] = PATTERN_DST | PATTERN_OVERWRITE + | (~i & PATTERN_COUNT_MASK); + for ( ; i < test_buf_size; i++) + buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); + } } static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, @@ -150,23 +163,30 @@ static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, thread_name, index, expected, actual); } -static unsigned int dmatest_verify(u8 *buf, unsigned int start, +static unsigned int dmatest_verify(u8 **bufs, unsigned int start, unsigned int end, unsigned int counter, u8 pattern, bool is_srcbuf) { unsigned int i; unsigned int error_count = 0; u8 actual; - - for (i = start; i < end; i++) { - actual = buf[i]; - if (actual != (pattern | (~counter & PATTERN_COUNT_MASK))) { - if (error_count < 32) - dmatest_mismatch(actual, pattern, i, counter, - is_srcbuf); - error_count++; + u8 expected; + u8 *buf; + unsigned int counter_orig = counter; + + for (; (buf = *bufs); bufs++) { + counter = counter_orig; + for (i = start; i < end; i++) { + actual = buf[i]; + expected = pattern | (~counter & PATTERN_COUNT_MASK); + if (actual != expected) { + if (error_count < 32) + dmatest_mismatch(actual, pattern, i, + counter, is_srcbuf); + error_count++; + } + counter++; } - counter++; } if (error_count > 32) @@ -178,10 +198,10 @@ static unsigned int dmatest_verify(u8 *buf, unsigned int start, /* * This function repeatedly tests DMA transfers of various lengths and - * offsets until it is told to exit by kthread_stop(). There may be - * multiple threads running this function in parallel for a single - * channel, and there may be multiple channels being tested in - * parallel. + * offsets for a given operation type until it is told to exit by + * kthread_stop(). 
There may be multiple threads running this function + * in parallel for a single channel, and there may be multiple channels + * being tested in parallel. * * Before each test, the source and destination buffer is initialized * with a known pattern. This pattern is different depending on @@ -201,25 +221,53 @@ static int dmatest_func(void *data) unsigned int total_tests = 0; dma_cookie_t cookie; enum dma_status status; + enum dma_ctrl_flags flags; int ret; + int src_cnt; + int dst_cnt; + int i; thread_name = current->comm; ret = -ENOMEM; - thread->srcbuf = kmalloc(test_buf_size, GFP_KERNEL); - if (!thread->srcbuf) - goto err_srcbuf; - thread->dstbuf = kmalloc(test_buf_size, GFP_KERNEL); - if (!thread->dstbuf) - goto err_dstbuf; smp_rmb(); chan = thread->chan; + if (thread->type == DMA_MEMCPY) + src_cnt = dst_cnt = 1; + else if (thread->type == DMA_XOR) { + src_cnt = xor_sources | 1; /* force odd to ensure dst = src */ + dst_cnt = 1; + } else + goto err_srcs; + + thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); + if (!thread->srcs) + goto err_srcs; + for (i = 0; i < src_cnt; i++) { + thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL); + if (!thread->srcs[i]) + goto err_srcbuf; + } + thread->srcs[i] = NULL; + + thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL); + if (!thread->dsts) + goto err_dsts; + for (i = 0; i < dst_cnt; i++) { + thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL); + if (!thread->dsts[i]) + goto err_dstbuf; + } + thread->dsts[i] = NULL; + + flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP; while (!kthread_should_stop()) { struct dma_device *dev = chan->device; - struct dma_async_tx_descriptor *tx; - dma_addr_t dma_src, dma_dest; + struct dma_async_tx_descriptor *tx = NULL; + dma_addr_t dma_srcs[src_cnt]; + dma_addr_t dma_dsts[dst_cnt]; total_tests++; @@ -227,22 +275,41 @@ static int dmatest_func(void *data) src_off = dmatest_random() % (test_buf_size - len + 1); dst_off = dmatest_random() % (test_buf_size - len + 1); - dmatest_init_srcbuf(thread->srcbuf, src_off, len); - dmatest_init_dstbuf(thread->dstbuf, dst_off, len); + dmatest_init_srcs(thread->srcs, src_off, len); + dmatest_init_dsts(thread->dsts, dst_off, len); + + for (i = 0; i < src_cnt; i++) { + u8 *buf = thread->srcs[i] + src_off; - dma_src = dma_map_single(dev->dev, thread->srcbuf + src_off, - len, DMA_TO_DEVICE); + dma_srcs[i] = dma_map_single(dev->dev, buf, len, + DMA_TO_DEVICE); + } /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ - dma_dest = dma_map_single(dev->dev, thread->dstbuf, - test_buf_size, DMA_BIDIRECTIONAL); + for (i = 0; i < dst_cnt; i++) { + dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], + test_buf_size, + DMA_BIDIRECTIONAL); + } + + if (thread->type == DMA_MEMCPY) + tx = dev->device_prep_dma_memcpy(chan, + dma_dsts[0] + dst_off, + dma_srcs[0], len, + flags); + else if (thread->type == DMA_XOR) + tx = dev->device_prep_dma_xor(chan, + dma_dsts[0] + dst_off, + dma_srcs, xor_sources, + len, flags); - tx = dev->device_prep_dma_memcpy(chan, dma_dest + dst_off, - dma_src, len, - DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP); if (!tx) { - dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); - dma_unmap_single(dev->dev, dma_dest, - test_buf_size, DMA_BIDIRECTIONAL); + for (i = 0; i < src_cnt; i++) + dma_unmap_single(dev->dev, dma_srcs[i], len, + DMA_TO_DEVICE); + for (i = 0; i < dst_cnt; i++) + dma_unmap_single(dev->dev, dma_dsts[i], + test_buf_size, + DMA_BIDIRECTIONAL); pr_warning("%s: #%u: prep error with src_off=0x%x " "dst_off=0x%x len=0x%x\n", 
thread_name, total_tests - 1, @@ -263,11 +330,11 @@ static int dmatest_func(void *data) failed_tests++; continue; } - dma_async_memcpy_issue_pending(chan); + dma_async_issue_pending(chan); do { msleep(1); - status = dma_async_memcpy_complete( + status = dma_async_is_tx_complete( chan, cookie, NULL, NULL); } while (status == DMA_IN_PROGRESS); @@ -278,29 +345,30 @@ static int dmatest_func(void *data) continue; } /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ - dma_unmap_single(dev->dev, dma_dest, - test_buf_size, DMA_BIDIRECTIONAL); + for (i = 0; i < dst_cnt; i++) + dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size, + DMA_BIDIRECTIONAL); error_count = 0; pr_debug("%s: verifying source buffer...\n", thread_name); - error_count += dmatest_verify(thread->srcbuf, 0, src_off, + error_count += dmatest_verify(thread->srcs, 0, src_off, 0, PATTERN_SRC, true); - error_count += dmatest_verify(thread->srcbuf, src_off, + error_count += dmatest_verify(thread->srcs, src_off, src_off + len, src_off, PATTERN_SRC | PATTERN_COPY, true); - error_count += dmatest_verify(thread->srcbuf, src_off + len, + error_count += dmatest_verify(thread->srcs, src_off + len, test_buf_size, src_off + len, PATTERN_SRC, true); pr_debug("%s: verifying dest buffer...\n", thread->task->comm); - error_count += dmatest_verify(thread->dstbuf, 0, dst_off, + error_count += dmatest_verify(thread->dsts, 0, dst_off, 0, PATTERN_DST, false); - error_count += dmatest_verify(thread->dstbuf, dst_off, + error_count += dmatest_verify(thread->dsts, dst_off, dst_off + len, src_off, PATTERN_SRC | PATTERN_COPY, false); - error_count += dmatest_verify(thread->dstbuf, dst_off + len, + error_count += dmatest_verify(thread->dsts, dst_off + len, test_buf_size, dst_off + len, PATTERN_DST, false); @@ -319,10 +387,16 @@ static int dmatest_func(void *data) } ret = 0; - kfree(thread->dstbuf); + for (i = 0; thread->dsts[i]; i++) + kfree(thread->dsts[i]); err_dstbuf: - kfree(thread->srcbuf); + kfree(thread->dsts); +err_dsts: + for (i = 0; thread->srcs[i]; i++) + kfree(thread->srcs[i]); err_srcbuf: + kfree(thread->srcs); +err_srcs: pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", thread_name, total_tests, failed_tests, ret); return ret; @@ -344,35 +418,36 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc) kfree(dtc); } -static int dmatest_add_channel(struct dma_chan *chan) +static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type) { - struct dmatest_chan *dtc; - struct dmatest_thread *thread; - unsigned int i; - - dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); - if (!dtc) { - pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan)); - return -ENOMEM; - } + struct dmatest_thread *thread; + struct dma_chan *chan = dtc->chan; + char *op; + unsigned int i; - dtc->chan = chan; - INIT_LIST_HEAD(&dtc->threads); + if (type == DMA_MEMCPY) + op = "copy"; + else if (type == DMA_XOR) + op = "xor"; + else + return -EINVAL; for (i = 0; i < threads_per_chan; i++) { thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); if (!thread) { - pr_warning("dmatest: No memory for %s-test%u\n", - dma_chan_name(chan), i); + pr_warning("dmatest: No memory for %s-%s%u\n", + dma_chan_name(chan), op, i); + break; } thread->chan = dtc->chan; + thread->type = type; smp_wmb(); - thread->task = kthread_run(dmatest_func, thread, "%s-test%u", - dma_chan_name(chan), i); + thread->task = kthread_run(dmatest_func, thread, "%s-%s%u", + dma_chan_name(chan), op, i); if (IS_ERR(thread->task)) { - 
pr_warning("dmatest: Failed to run thread %s-test%u\n", - dma_chan_name(chan), i); + pr_warning("dmatest: Failed to run thread %s-%s%u\n", + dma_chan_name(chan), op, i); kfree(thread); break; } @@ -382,7 +457,36 @@ static int dmatest_add_channel(struct dma_chan *chan) list_add_tail(&thread->node, &dtc->threads); } - pr_info("dmatest: Started %u threads using %s\n", i, dma_chan_name(chan)); + return i; +} + +static int dmatest_add_channel(struct dma_chan *chan) +{ + struct dmatest_chan *dtc; + struct dma_device *dma_dev = chan->device; + unsigned int thread_count = 0; + unsigned int cnt; + + dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); + if (!dtc) { + pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan)); + return -ENOMEM; + } + + dtc->chan = chan; + INIT_LIST_HEAD(&dtc->threads); + + if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { + cnt = dmatest_add_threads(dtc, DMA_MEMCPY); + thread_count += cnt > 0 ?: 0; + } + if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { + cnt = dmatest_add_threads(dtc, DMA_XOR); + thread_count += cnt > 0 ?: 0; + } + + pr_info("dmatest: Started %u threads using %s\n", + thread_count, dma_chan_name(chan)); list_add_tail(&dtc->node, &dmatest_channels); nr_channels++; -- cgit v1.2.2 From e44e0aa3cfa97cddff01704751a4b25151830c72 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 25 Mar 2009 09:13:25 -0700 Subject: dmatest: add dma interrupts and callbacks Use the callback infrastructure to report driver/hardware hangs or missed interrupts. Since this makes the test threads much more aggressive (from: explicit 1ms sleep to: wait_for_completion) we set the nice value to 10 so as to not swamp legitimate tasks. Signed-off-by: Dan Williams --- drivers/dma/dmatest.c | 37 +++++++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 224acf478fec..a27c0fb1bc11 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -196,6 +196,11 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start, return error_count; } +static void dmatest_callback(void *completion) +{ + complete(completion); +} + /* * This function repeatedly tests DMA transfers of various lengths and * offsets for a given operation type until it is told to exit by @@ -261,13 +266,17 @@ static int dmatest_func(void *data) } thread->dsts[i] = NULL; - flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP; + set_user_nice(current, 10); + + flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT; while (!kthread_should_stop()) { struct dma_device *dev = chan->device; struct dma_async_tx_descriptor *tx = NULL; dma_addr_t dma_srcs[src_cnt]; dma_addr_t dma_dsts[dst_cnt]; + struct completion cmp; + unsigned long tmo = msecs_to_jiffies(3000); total_tests++; @@ -318,7 +327,10 @@ static int dmatest_func(void *data) failed_tests++; continue; } - tx->callback = NULL; + + init_completion(&cmp); + tx->callback = dmatest_callback; + tx->callback_param = &cmp; cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { @@ -332,18 +344,23 @@ static int dmatest_func(void *data) } dma_async_issue_pending(chan); - do { - msleep(1); - status = dma_async_is_tx_complete( - chan, cookie, NULL, NULL); - } while (status == DMA_IN_PROGRESS); + tmo = wait_for_completion_timeout(&cmp, tmo); + status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); - if (status == DMA_ERROR) { - pr_warning("%s: #%u: error during copy\n", - thread_name, total_tests - 1); + if (tmo == 0) { + 
pr_warning("%s: #%u: test timed out\n", + thread_name, total_tests - 1); + failed_tests++; + continue; + } else if (status != DMA_SUCCESS) { + pr_warning("%s: #%u: got completion callback," + " but status is \'%s\'\n", + thread_name, total_tests - 1, + status == DMA_ERROR ? "error" : "in progress"); failed_tests++; continue; } + /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ for (i = 0; i < dst_cnt; i++) dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size, -- cgit v1.2.2 From 0f571515c332e00b3515dbe0859ceaa30ab66e00 Mon Sep 17 00:00:00 2001 From: Atsushi Nemoto Date: Fri, 6 Mar 2009 20:07:14 +0900 Subject: dmaengine: Add privatecnt to revert DMA_PRIVATE property Currently dma_request_channel() set DMA_PRIVATE capability but never clear it. So if a public channel was once grabbed by dma_request_channel(), the device stay PRIVATE forever. Add privatecnt member to dma_device to correctly revert it. [lg@denx.de: fix bad usage of 'chan' in dma_async_device_register] Signed-off-by: Atsushi Nemoto Acked-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/dmaengine.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers') diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index a41d1ea10fa3..92438e9dacc3 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -507,6 +507,7 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v * published in the general-purpose allocator */ dma_cap_set(DMA_PRIVATE, device->cap_mask); + device->privatecnt++; err = dma_chan_get(chan); if (err == -ENODEV) { @@ -518,6 +519,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v dma_chan_name(chan), err); else break; + if (--device->privatecnt == 0) + dma_cap_clear(DMA_PRIVATE, device->cap_mask); chan->private = NULL; chan = NULL; } @@ -537,6 +540,9 @@ void dma_release_channel(struct dma_chan *chan) WARN_ONCE(chan->client_count != 1, "chan reference count %d != 1\n", chan->client_count); dma_chan_put(chan); + /* drop PRIVATE cap enabled by __dma_request_channel() */ + if (--chan->device->privatecnt == 0) + dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); chan->private = NULL; mutex_unlock(&dma_list_mutex); } @@ -719,6 +725,8 @@ int dma_async_device_register(struct dma_device *device) } } list_add_tail_rcu(&device->global_node, &dma_device_list); + if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) + device->privatecnt++; /* Always private */ dma_channel_rebalance(); mutex_unlock(&dma_list_mutex); -- cgit v1.2.2 From d9de451989a88a2003ca06e524aca4665c0c7f06 Mon Sep 17 00:00:00 2001 From: Hans-Christian Egtvedt Date: Wed, 1 Apr 2009 15:47:02 +0200 Subject: dw_dmac: add cyclic API to DW DMA driver This patch adds a cyclic DMA interface to the DW DMA driver. This is very useful if you want to use the DMA controller in combination with a sound device which uses cyclic buffers. Using a DMA channel for cyclic DMA will disable the possibility to use it as a normal DMA engine until the user calls the cyclic free function on the DMA channel. Also a cyclic DMA list can not be prepared if the channel is already active. 
Signed-off-by: Hans-Christian Egtvedt Acked-by: Haavard Skinnemoen Acked-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/dw_dmac.c | 332 ++++++++++++++++++++++++++++++++++++++++++++- drivers/dma/dw_dmac_regs.h | 7 +- 2 files changed, 337 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 862fc9ce9d86..0b8aada08aa8 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -363,6 +363,82 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) dwc_descriptor_complete(dwc, bad_desc); } +/* --------------------- Cyclic DMA API extensions -------------------- */ + +inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + return channel_readl(dwc, SAR); +} +EXPORT_SYMBOL(dw_dma_get_src_addr); + +inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + return channel_readl(dwc, DAR); +} +EXPORT_SYMBOL(dw_dma_get_dst_addr); + +/* called with dwc->lock held and all DMAC interrupts disabled */ +static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, + u32 status_block, u32 status_err, u32 status_xfer) +{ + if (status_block & dwc->mask) { + void (*callback)(void *param); + void *callback_param; + + dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", + channel_readl(dwc, LLP)); + dma_writel(dw, CLEAR.BLOCK, dwc->mask); + + callback = dwc->cdesc->period_callback; + callback_param = dwc->cdesc->period_callback_param; + if (callback) { + spin_unlock(&dwc->lock); + callback(callback_param); + spin_lock(&dwc->lock); + } + } + + /* + * Error and transfer complete are highly unlikely, and will most + * likely be due to a configuration error by the user. + */ + if (unlikely(status_err & dwc->mask) || + unlikely(status_xfer & dwc->mask)) { + int i; + + dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " + "interrupt, stopping DMA transfer\n", + status_xfer ? 
"xfer" : "error"); + dev_err(chan2dev(&dwc->chan), + " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", + channel_readl(dwc, SAR), + channel_readl(dwc, DAR), + channel_readl(dwc, LLP), + channel_readl(dwc, CTL_HI), + channel_readl(dwc, CTL_LO)); + + channel_clear_bit(dw, CH_EN, dwc->mask); + while (dma_readl(dw, CH_EN) & dwc->mask) + cpu_relax(); + + /* make sure DMA does not restart by loading a new list */ + channel_writel(dwc, LLP, 0); + channel_writel(dwc, CTL_LO, 0); + channel_writel(dwc, CTL_HI, 0); + + dma_writel(dw, CLEAR.BLOCK, dwc->mask); + dma_writel(dw, CLEAR.ERROR, dwc->mask); + dma_writel(dw, CLEAR.XFER, dwc->mask); + + for (i = 0; i < dwc->cdesc->periods; i++) + dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); + } +} + +/* ------------------------------------------------------------------------- */ + static void dw_dma_tasklet(unsigned long data) { struct dw_dma *dw = (struct dw_dma *)data; @@ -382,7 +458,10 @@ static void dw_dma_tasklet(unsigned long data) for (i = 0; i < dw->dma.chancnt; i++) { dwc = &dw->chan[i]; spin_lock(&dwc->lock); - if (status_err & (1 << i)) + if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) + dwc_handle_cyclic(dw, dwc, status_block, status_err, + status_xfer); + else if (status_err & (1 << i)) dwc_handle_error(dw, dwc); else if ((status_block | status_xfer) & (1 << i)) dwc_scan_descriptors(dw, dwc); @@ -883,6 +962,257 @@ static void dwc_free_chan_resources(struct dma_chan *chan) dev_vdbg(chan2dev(chan), "free_chan_resources done\n"); } +/* --------------------- Cyclic DMA API extensions -------------------- */ + +/** + * dw_dma_cyclic_start - start the cyclic DMA transfer + * @chan: the DMA channel to start + * + * Must be called with soft interrupts disabled. Returns zero on success or + * -errno on failure. + */ +int dw_dma_cyclic_start(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + + if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { + dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); + return -ENODEV; + } + + spin_lock(&dwc->lock); + + /* assert channel is idle */ + if (dma_readl(dw, CH_EN) & dwc->mask) { + dev_err(chan2dev(&dwc->chan), + "BUG: Attempted to start non-idle channel\n"); + dev_err(chan2dev(&dwc->chan), + " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", + channel_readl(dwc, SAR), + channel_readl(dwc, DAR), + channel_readl(dwc, LLP), + channel_readl(dwc, CTL_HI), + channel_readl(dwc, CTL_LO)); + spin_unlock(&dwc->lock); + return -EBUSY; + } + + dma_writel(dw, CLEAR.BLOCK, dwc->mask); + dma_writel(dw, CLEAR.ERROR, dwc->mask); + dma_writel(dw, CLEAR.XFER, dwc->mask); + + /* setup DMAC channel registers */ + channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); + channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); + channel_writel(dwc, CTL_HI, 0); + + channel_set_bit(dw, CH_EN, dwc->mask); + + spin_unlock(&dwc->lock); + + return 0; +} +EXPORT_SYMBOL(dw_dma_cyclic_start); + +/** + * dw_dma_cyclic_stop - stop the cyclic DMA transfer + * @chan: the DMA channel to stop + * + * Must be called with soft interrupts disabled. 
+ */ +void dw_dma_cyclic_stop(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + + spin_lock(&dwc->lock); + + channel_clear_bit(dw, CH_EN, dwc->mask); + while (dma_readl(dw, CH_EN) & dwc->mask) + cpu_relax(); + + spin_unlock(&dwc->lock); +} +EXPORT_SYMBOL(dw_dma_cyclic_stop); + +/** + * dw_dma_cyclic_prep - prepare the cyclic DMA transfer + * @chan: the DMA channel to prepare + * @buf_addr: physical DMA address where the buffer starts + * @buf_len: total number of bytes for the entire buffer + * @period_len: number of bytes for each period + * @direction: transfer direction, to or from device + * + * Must be called before trying to start the transfer. Returns a valid struct + * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful. + */ +struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, + dma_addr_t buf_addr, size_t buf_len, size_t period_len, + enum dma_data_direction direction) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_cyclic_desc *cdesc; + struct dw_cyclic_desc *retval = NULL; + struct dw_desc *desc; + struct dw_desc *last = NULL; + struct dw_dma_slave *dws = chan->private; + unsigned long was_cyclic; + unsigned int reg_width; + unsigned int periods; + unsigned int i; + + spin_lock_bh(&dwc->lock); + if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { + spin_unlock_bh(&dwc->lock); + dev_dbg(chan2dev(&dwc->chan), + "queue and/or active list are not empty\n"); + return ERR_PTR(-EBUSY); + } + + was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); + spin_unlock_bh(&dwc->lock); + if (was_cyclic) { + dev_dbg(chan2dev(&dwc->chan), + "channel already prepared for cyclic DMA\n"); + return ERR_PTR(-EBUSY); + } + + retval = ERR_PTR(-EINVAL); + reg_width = dws->reg_width; + periods = buf_len / period_len; + + /* Check for too big/unaligned periods and unaligned DMA buffer. 
*/ + if (period_len > (DWC_MAX_COUNT << reg_width)) + goto out_err; + if (unlikely(period_len & ((1 << reg_width) - 1))) + goto out_err; + if (unlikely(buf_addr & ((1 << reg_width) - 1))) + goto out_err; + if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE)))) + goto out_err; + + retval = ERR_PTR(-ENOMEM); + + if (periods > NR_DESCS_PER_CHANNEL) + goto out_err; + + cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); + if (!cdesc) + goto out_err; + + cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); + if (!cdesc->desc) + goto out_err_alloc; + + for (i = 0; i < periods; i++) { + desc = dwc_desc_get(dwc); + if (!desc) + goto out_err_desc_get; + + switch (direction) { + case DMA_TO_DEVICE: + desc->lli.dar = dws->tx_reg; + desc->lli.sar = buf_addr + (period_len * i); + desc->lli.ctllo = (DWC_DEFAULT_CTLLO + | DWC_CTLL_DST_WIDTH(reg_width) + | DWC_CTLL_SRC_WIDTH(reg_width) + | DWC_CTLL_DST_FIX + | DWC_CTLL_SRC_INC + | DWC_CTLL_FC_M2P + | DWC_CTLL_INT_EN); + break; + case DMA_FROM_DEVICE: + desc->lli.dar = buf_addr + (period_len * i); + desc->lli.sar = dws->rx_reg; + desc->lli.ctllo = (DWC_DEFAULT_CTLLO + | DWC_CTLL_SRC_WIDTH(reg_width) + | DWC_CTLL_DST_WIDTH(reg_width) + | DWC_CTLL_DST_INC + | DWC_CTLL_SRC_FIX + | DWC_CTLL_FC_P2M + | DWC_CTLL_INT_EN); + break; + default: + break; + } + + desc->lli.ctlhi = (period_len >> reg_width); + cdesc->desc[i] = desc; + + if (last) { + last->lli.llp = desc->txd.phys; + dma_sync_single_for_device(chan2parent(chan), + last->txd.phys, sizeof(last->lli), + DMA_TO_DEVICE); + } + + last = desc; + } + + /* lets make a cyclic list */ + last->lli.llp = cdesc->desc[0]->txd.phys; + dma_sync_single_for_device(chan2parent(chan), last->txd.phys, + sizeof(last->lli), DMA_TO_DEVICE); + + dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu " + "period %zu periods %d\n", buf_addr, buf_len, + period_len, periods); + + cdesc->periods = periods; + dwc->cdesc = cdesc; + + return cdesc; + +out_err_desc_get: + while (i--) + dwc_desc_put(dwc, cdesc->desc[i]); +out_err_alloc: + kfree(cdesc); +out_err: + clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); + return (struct dw_cyclic_desc *)retval; +} +EXPORT_SYMBOL(dw_dma_cyclic_prep); + +/** + * dw_dma_cyclic_free - free a prepared cyclic DMA transfer + * @chan: the DMA channel to free + */ +void dw_dma_cyclic_free(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + struct dw_cyclic_desc *cdesc = dwc->cdesc; + int i; + + dev_dbg(chan2dev(&dwc->chan), "cyclic free\n"); + + if (!cdesc) + return; + + spin_lock_bh(&dwc->lock); + + channel_clear_bit(dw, CH_EN, dwc->mask); + while (dma_readl(dw, CH_EN) & dwc->mask) + cpu_relax(); + + dma_writel(dw, CLEAR.BLOCK, dwc->mask); + dma_writel(dw, CLEAR.ERROR, dwc->mask); + dma_writel(dw, CLEAR.XFER, dwc->mask); + + spin_unlock_bh(&dwc->lock); + + for (i = 0; i < cdesc->periods; i++) + dwc_desc_put(dwc, cdesc->desc[i]); + + kfree(cdesc->desc); + kfree(cdesc); + + clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); +} +EXPORT_SYMBOL(dw_dma_cyclic_free); + /*----------------------------------------------------------------------*/ static void dw_dma_off(struct dw_dma *dw) diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index b252b202c5cf..13a580767031 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h @@ -126,6 +126,10 @@ struct dw_dma_regs { #define DW_REGLEN 0x400 +enum dw_dmac_flags { + DW_DMA_IS_CYCLIC = 0, +}; + struct dw_dma_chan { struct 
dma_chan chan; void __iomem *ch_regs; @@ -134,10 +138,12 @@ struct dw_dma_chan { spinlock_t lock; /* these other elements are all protected by lock */ + unsigned long flags; dma_cookie_t completed; struct list_head active_list; struct list_head queue; struct list_head free_list; + struct dw_cyclic_desc *cdesc; unsigned int descs_allocated; }; @@ -158,7 +164,6 @@ static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) return container_of(chan, struct dw_dma_chan, chan); } - struct dw_dma { struct dma_device dma; void __iomem *regs; -- cgit v1.2.2 From 8c6db1bbf80123839ec87bdd6cb364aea384623d Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Thu, 2 Apr 2009 11:36:58 +0200 Subject: dma: Add SoF and EoF debugging to ipu_idmac.c, minor cleanup Add Start-of-Frame and End-of-Frame debugging to ipu_idmac.c, in the future it might also be needed for the actual video processing in mx3-camera, at which point, the ISRs will have to be transferred to mx3_camera.c, for which ipu_irq_map() and ipu_irq_unmap() functions will have to be exported. Also simplify a couple of pointer-dereferences. Signed-off-by: Guennadi Liakhovetski Signed-off-by: Dan Williams --- drivers/dma/ipu/ipu_idmac.c | 65 ++++++++++++++++++++++++++++++++++++++------- 1 file changed, 56 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 7ce0e25266dc..90773844cc89 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -1442,8 +1442,8 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan unsigned long flags; /* We only can handle these three channels so far */ - if (ichan->dma_chan.chan_id != IDMAC_SDC_0 && ichan->dma_chan.chan_id != IDMAC_SDC_1 && - ichan->dma_chan.chan_id != IDMAC_IC_7) + if (chan->chan_id != IDMAC_SDC_0 && chan->chan_id != IDMAC_SDC_1 && + chan->chan_id != IDMAC_IC_7) return NULL; if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) { @@ -1484,7 +1484,7 @@ static void idmac_issue_pending(struct dma_chan *chan) /* This is not always needed, but doesn't hurt either */ spin_lock_irqsave(&ipu->lock, flags); - ipu_select_buffer(ichan->dma_chan.chan_id, ichan->active_buffer); + ipu_select_buffer(chan->chan_id, ichan->active_buffer); spin_unlock_irqrestore(&ipu->lock, flags); /* @@ -1541,6 +1541,28 @@ static void idmac_terminate_all(struct dma_chan *chan) mutex_unlock(&ichan->chan_mutex); } +#ifdef DEBUG +static irqreturn_t ic_sof_irq(int irq, void *dev_id) +{ + struct idmac_channel *ichan = dev_id; + printk(KERN_DEBUG "Got SOF IRQ %d on Channel %d\n", + irq, ichan->dma_chan.chan_id); + disable_irq(irq); + return IRQ_HANDLED; +} + +static irqreturn_t ic_eof_irq(int irq, void *dev_id) +{ + struct idmac_channel *ichan = dev_id; + printk(KERN_DEBUG "Got EOF IRQ %d on Channel %d\n", + irq, ichan->dma_chan.chan_id); + disable_irq(irq); + return IRQ_HANDLED; +} + +static int ic_sof = -EINVAL, ic_eof = -EINVAL; +#endif + static int idmac_alloc_chan_resources(struct dma_chan *chan) { struct idmac_channel *ichan = to_idmac_chan(chan); @@ -1554,7 +1576,7 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan) chan->cookie = 1; ichan->completed = -ENXIO; - ret = ipu_irq_map(ichan->dma_chan.chan_id); + ret = ipu_irq_map(chan->chan_id); if (ret < 0) goto eimap; @@ -1575,17 +1597,28 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan) if (ret < 0) goto erirq; +#ifdef DEBUG + if (chan->chan_id == IDMAC_IC_7) { + ic_sof = ipu_irq_map(69); + if (ic_sof > 0) 
+ request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan); + ic_eof = ipu_irq_map(70); + if (ic_eof > 0) + request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan); + } +#endif + ichan->status = IPU_CHANNEL_INITIALIZED; - dev_dbg(&ichan->dma_chan.dev->device, "Found channel 0x%x, irq %d\n", - ichan->dma_chan.chan_id, ichan->eof_irq); + dev_dbg(&chan->dev->device, "Found channel 0x%x, irq %d\n", + chan->chan_id, ichan->eof_irq); return ret; erirq: ipu_uninit_channel(idmac, ichan); eichan: - ipu_irq_unmap(ichan->dma_chan.chan_id); + ipu_irq_unmap(chan->chan_id); eimap: return ret; } @@ -1600,8 +1633,22 @@ static void idmac_free_chan_resources(struct dma_chan *chan) __idmac_terminate_all(chan); if (ichan->status > IPU_CHANNEL_FREE) { +#ifdef DEBUG + if (chan->chan_id == IDMAC_IC_7) { + if (ic_sof > 0) { + free_irq(ic_sof, ichan); + ipu_irq_unmap(69); + ic_sof = -EINVAL; + } + if (ic_eof > 0) { + free_irq(ic_eof, ichan); + ipu_irq_unmap(70); + ic_eof = -EINVAL; + } + } +#endif free_irq(ichan->eof_irq, ichan); - ipu_irq_unmap(ichan->dma_chan.chan_id); + ipu_irq_unmap(chan->chan_id); } ichan->status = IPU_CHANNEL_FREE; @@ -1663,7 +1710,7 @@ static int __init ipu_idmac_init(struct ipu *ipu) dma_chan->device = &idmac->dma; dma_chan->cookie = 1; dma_chan->chan_id = i; - list_add_tail(&ichan->dma_chan.device_node, &dma->channels); + list_add_tail(&dma_chan->device_node, &dma->channels); } idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF); -- cgit v1.2.2
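For readers following the dmatest change above: the pattern it adopts (a per-descriptor completion callback plus wait_for_completion_timeout()) is the standard way a dmaengine client detects a hung channel or a missed interrupt. Below is a minimal, self-contained sketch of that pattern; the channel acquisition and descriptor preparation are assumed to happen elsewhere, the 3000 ms budget mirrors the value chosen in the patch, and all example_ names are illustrative, not part of the patches.

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/*
 * Sketch of the completion-callback pattern adopted by dmatest above.
 * 'chan' is assumed to be an already-acquired channel and 'tx' a
 * descriptor prepared with DMA_PREP_INTERRUPT set in its flags.
 */
static void example_dma_callback(void *arg)
{
	complete(arg);	/* wake the submitter once the transfer completes */
}

static int example_run_one_transfer(struct dma_chan *chan,
				    struct dma_async_tx_descriptor *tx)
{
	struct completion cmp;
	enum dma_status status;
	dma_cookie_t cookie;
	unsigned long tmo = msecs_to_jiffies(3000);	/* same budget as dmatest */

	init_completion(&cmp);
	tx->callback = example_dma_callback;
	tx->callback_param = &cmp;

	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);

	/* no callback within the timeout: hung channel or missed interrupt */
	if (!wait_for_completion_timeout(&cmp, tmo))
		return -ETIMEDOUT;

	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
	return status == DMA_SUCCESS ? 0 : -EIO;
}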
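The dw_dmac cyclic commit above shows only the provider side. The following is a hedged sketch of how a client driver (for instance an audio driver feeding a ring buffer) might use the new calls. It assumes the channel was obtained via dma_request_channel() with chan->private pointing to a configured struct dw_dma_slave, and that dw_dma_cyclic_*() and struct dw_cyclic_desc are declared in the DW DMA platform header (assumed here to be <linux/dw_dmac.h>, which is not part of the drivers-only diff shown); all example_ names are illustrative.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dw_dmac.h>	/* assumed home of dw_dma_cyclic_*() and struct dw_cyclic_desc */
#include <linux/err.h>

/*
 * Illustrative client-side use of the cyclic API: stream a ring buffer to
 * a peripheral (e.g. audio playback).  'chan' must be a DW DMA channel
 * whose chan->private points to a configured struct dw_dma_slave
 * (tx_reg, reg_width, ...).
 */
static void example_period_done(void *arg)
{
	/* invoked by the driver once per completed period (block interrupt) */
}

static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len)
{
	struct dw_cyclic_desc *cdesc;

	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
				   DMA_TO_DEVICE);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->period_callback = example_period_done;
	cdesc->period_callback_param = NULL;

	return dw_dma_cyclic_start(chan);
}

static void example_stop_cyclic(struct dma_chan *chan)
{
	dw_dma_cyclic_stop(chan);
	dw_dma_cyclic_free(chan);	/* channel may be used as a normal DMA engine again */
}

DMA_TO_DEVICE matches the playback case here; DMA_FROM_DEVICE with rx_reg is the capture counterpart, as handled in dw_dma_cyclic_prep() above.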