Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/Kconfig | 1
-rw-r--r--  drivers/acpi/ac.c | 40
-rw-r--r--  drivers/acpi/toshiba_acpi.c | 3
-rw-r--r--  drivers/block/paride/pf.c | 25
-rw-r--r--  drivers/block/rd.c | 13
-rw-r--r--  drivers/char/pcmcia/cm4000_cs.c | 2
-rw-r--r--  drivers/char/pcmcia/cm4040_cs.c | 2
-rw-r--r--  drivers/char/rtc.c | 52
-rw-r--r--  drivers/dma/dmaengine.c | 17
-rw-r--r--  drivers/dma/ioat.c | 11
-rw-r--r--  drivers/dma/ioat_dca.c | 164
-rw-r--r--  drivers/dma/ioat_dma.c | 578
-rw-r--r--  drivers/dma/ioatdma.h | 32
-rw-r--r--  drivers/dma/ioatdma_hw.h | 33
-rw-r--r--  drivers/dma/ioatdma_registers.h | 106
-rw-r--r--  drivers/edac/i5000_edac.c | 2
-rw-r--r--  drivers/ide/Kconfig | 4
-rw-r--r--  drivers/ide/cris/ide-cris.c | 3
-rw-r--r--  drivers/ide/ide-io.c | 6
-rw-r--r--  drivers/ide/ide-lib.c | 1
-rw-r--r--  drivers/ide/pci/cmd64x.c | 5
-rw-r--r--  drivers/ide/pci/cs5530.c | 3
-rw-r--r--  drivers/ide/pci/it821x.c | 3
-rw-r--r--  drivers/ide/pci/jmicron.c | 3
-rw-r--r--  drivers/ide/pci/sc1200.c | 3
-rw-r--r--  drivers/ide/pci/sis5513.c | 1
-rw-r--r--  drivers/ide/ppc/pmac.c | 1
-rw-r--r--  drivers/ide/setup-pci.c | 5
-rw-r--r--  drivers/lguest/lguest_user.c | 2
-rw-r--r--  drivers/md/raid5.c | 16
-rw-r--r--  drivers/misc/ioc4.c | 10
-rw-r--r--  drivers/net/cris/eth_v10.c | 440
-rw-r--r--  drivers/oprofile/cpu_buffer.c | 7
-rw-r--r--  drivers/oprofile/cpu_buffer.h | 1
-rw-r--r--  drivers/oprofile/oprofile_stats.c | 4
-rw-r--r--  drivers/rtc/Kconfig | 8
-rw-r--r--  drivers/rtc/hctosys.c | 4
-rw-r--r--  drivers/rtc/rtc-ds1307.c | 93
-rw-r--r--  drivers/rtc/rtc-ds1553.c | 2
-rw-r--r--  drivers/rtc/rtc-ds1742.c | 5
-rw-r--r--  drivers/rtc/rtc-m48t59.c | 3
-rw-r--r--  drivers/rtc/rtc-stk17ta8.c | 2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_sds.c | 4
-rw-r--r--  drivers/serial/8250_pnp.c | 10
-rw-r--r--  drivers/serial/atmel_serial.c | 9
-rw-r--r--  drivers/serial/crisv10.c | 1293
-rw-r--r--  drivers/serial/crisv10.h | 146
-rw-r--r--  drivers/spi/spi.c | 8
-rw-r--r--  drivers/spi/spi_txx9.c | 40
-rw-r--r--  drivers/spi/tle62x0.c | 5
-rw-r--r--  drivers/usb/serial/keyspan.c | 38
-rw-r--r--  drivers/video/Kconfig | 2
-rw-r--r--  drivers/video/gbefb.c | 4
-rw-r--r--  drivers/video/geode/lxfb.h | 2
-rw-r--r--  drivers/video/ps3fb.c | 2
-rw-r--r--  drivers/video/s1d13xxxfb.c | 5
-rw-r--r--  drivers/video/sis/sis_main.c | 3
-rw-r--r--  drivers/video/uvesafb.c | 6
-rw-r--r--  drivers/w1/masters/ds2490.c | 2
59 files changed, 1970 insertions, 1325 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index ce9dead0f499..087a7028ae84 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -50,6 +50,7 @@ config ACPI_SLEEP
 config ACPI_PROCFS
 	bool "Deprecated /proc/acpi files"
 	depends on PROC_FS
+	default y
 	---help---
 	  For backwards compatibility, this option allows
 	  deprecated /proc/acpi/ files to exist, even when
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index e03de37a750d..30238f6ff232 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -27,8 +27,10 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
+#ifdef CONFIG_ACPI_PROCFS
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#endif
 #include <linux/power_supply.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
@@ -49,12 +51,15 @@ MODULE_AUTHOR("Paul Diefenbaugh");
 MODULE_DESCRIPTION("ACPI AC Adapter Driver");
 MODULE_LICENSE("GPL");
 
+#ifdef CONFIG_ACPI_PROCFS
 extern struct proc_dir_entry *acpi_lock_ac_dir(void);
 extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
+static int acpi_ac_open_fs(struct inode *inode, struct file *file);
+#endif
 
 static int acpi_ac_add(struct acpi_device *device);
 static int acpi_ac_remove(struct acpi_device *device, int type);
-static int acpi_ac_open_fs(struct inode *inode, struct file *file);
+static int acpi_ac_resume(struct acpi_device *device);
 
 const static struct acpi_device_id ac_device_ids[] = {
 	{"ACPI0003", 0},
@@ -69,6 +74,7 @@ static struct acpi_driver acpi_ac_driver = {
 	.ops = {
 		.add = acpi_ac_add,
 		.remove = acpi_ac_remove,
+		.resume = acpi_ac_resume,
 		},
 };
 
@@ -80,12 +86,15 @@ struct acpi_ac {
 
 #define to_acpi_ac(x) container_of(x, struct acpi_ac, charger);
 
+#ifdef CONFIG_ACPI_PROCFS
 static const struct file_operations acpi_ac_fops = {
 	.open = acpi_ac_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
 	.release = single_release,
 };
+#endif
+
 static int get_ac_property(struct power_supply *psy,
 			   enum power_supply_property psp,
 			   union power_supply_propval *val)
@@ -127,6 +136,7 @@ static int acpi_ac_get_state(struct acpi_ac *ac)
 	return 0;
 }
 
+#ifdef CONFIG_ACPI_PROCFS
 /* --------------------------------------------------------------------------
                               FS Interface (/proc)
    -------------------------------------------------------------------------- */
@@ -206,6 +216,7 @@ static int acpi_ac_remove_fs(struct acpi_device *device)
 
 	return 0;
 }
+#endif
 
 /* --------------------------------------------------------------------------
                                    Driver Model
@@ -264,7 +275,9 @@ static int acpi_ac_add(struct acpi_device *device)
 	if (result)
 		goto end;
 
+#ifdef CONFIG_ACPI_PROCFS
 	result = acpi_ac_add_fs(device);
+#endif
 	if (result)
 		goto end;
 	ac->charger.name = acpi_device_bid(device);
@@ -287,13 +300,30 @@ static int acpi_ac_add(struct acpi_device *device)
 
       end:
 	if (result) {
+#ifdef CONFIG_ACPI_PROCFS
 		acpi_ac_remove_fs(device);
+#endif
 		kfree(ac);
 	}
 
 	return result;
 }
 
+static int acpi_ac_resume(struct acpi_device *device)
+{
+	struct acpi_ac *ac;
+	unsigned old_state;
+	if (!device || !acpi_driver_data(device))
+		return -EINVAL;
+	ac = acpi_driver_data(device);
+	old_state = ac->state;
+	if (acpi_ac_get_state(ac))
+		return 0;
+	if (old_state != ac->state)
+		kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
+	return 0;
+}
+
 static int acpi_ac_remove(struct acpi_device *device, int type)
 {
 	acpi_status status = AE_OK;
@@ -309,7 +339,9 @@ static int acpi_ac_remove(struct acpi_device *device, int type)
 					    ACPI_ALL_NOTIFY, acpi_ac_notify);
 	if (ac->charger.dev)
 		power_supply_unregister(&ac->charger);
+#ifdef CONFIG_ACPI_PROCFS
 	acpi_ac_remove_fs(device);
+#endif
 
 	kfree(ac);
 
@@ -323,13 +355,17 @@ static int __init acpi_ac_init(void)
 	if (acpi_disabled)
 		return -ENODEV;
 
+#ifdef CONFIG_ACPI_PROCFS
 	acpi_ac_dir = acpi_lock_ac_dir();
 	if (!acpi_ac_dir)
 		return -ENODEV;
+#endif
 
 	result = acpi_bus_register_driver(&acpi_ac_driver);
 	if (result < 0) {
+#ifdef CONFIG_ACPI_PROCFS
 		acpi_unlock_ac_dir(acpi_ac_dir);
+#endif
 		return -ENODEV;
 	}
 
@@ -341,7 +377,9 @@ static void __exit acpi_ac_exit(void)
 
 	acpi_bus_unregister_driver(&acpi_ac_driver);
 
+#ifdef CONFIG_ACPI_PROCFS
 	acpi_unlock_ac_dir(acpi_ac_dir);
+#endif
 
 	return;
 }
diff --git a/drivers/acpi/toshiba_acpi.c b/drivers/acpi/toshiba_acpi.c
index a736ef7bdee4..9e8c20c6a0b7 100644
--- a/drivers/acpi/toshiba_acpi.c
+++ b/drivers/acpi/toshiba_acpi.c
@@ -591,9 +591,12 @@ static int __init toshiba_acpi_init(void)
 						     NULL,
 						     &toshiba_backlight_data);
 	if (IS_ERR(toshiba_backlight_device)) {
+		int ret = PTR_ERR(toshiba_backlight_device);
+
 		printk(KERN_ERR "Could not register toshiba backlight device\n");
 		toshiba_backlight_device = NULL;
 		toshiba_acpi_exit();
+		return ret;
 	}
 	toshiba_backlight_device->props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
 
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index ceffa6034e20..e7fe6ca97dd8 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -488,13 +488,11 @@ static int pf_atapi(struct pf_unit *pf, char *cmd, int dlen, char *buf, char *fu
 	return r;
 }
 
-#define DBMSG(msg) ((verbose>1)?(msg):NULL)
-
 static void pf_lock(struct pf_unit *pf, int func)
 {
 	char lo_cmd[12] = { ATAPI_LOCK, pf->lun << 5, 0, 0, func, 0, 0, 0, 0, 0, 0, 0 };
 
-	pf_atapi(pf, lo_cmd, 0, pf_scratch, func ? "unlock" : "lock");
+	pf_atapi(pf, lo_cmd, 0, pf_scratch, func ? "lock" : "unlock");
 }
 
 static void pf_eject(struct pf_unit *pf)
@@ -555,7 +553,7 @@ static void pf_mode_sense(struct pf_unit *pf)
 	    { ATAPI_MODE_SENSE, pf->lun << 5, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0 };
 	char buf[8];
 
-	pf_atapi(pf, ms_cmd, 8, buf, DBMSG("mode sense"));
+	pf_atapi(pf, ms_cmd, 8, buf, "mode sense");
 	pf->media_status = PF_RW;
 	if (buf[3] & 0x80)
 		pf->media_status = PF_RO;
@@ -591,7 +589,7 @@ static void pf_get_capacity(struct pf_unit *pf)
 	char buf[8];
 	int bs;
 
-	if (pf_atapi(pf, rc_cmd, 8, buf, DBMSG("get capacity"))) {
+	if (pf_atapi(pf, rc_cmd, 8, buf, "get capacity")) {
 		pf->media_status = PF_NM;
 		return;
 	}
@@ -804,13 +802,18 @@ static int pf_next_buf(void)
 	pf_buf += 512;
 	pf_block++;
 	if (!pf_run)
-		return 0;
-	if (!pf_count)
 		return 1;
-	spin_lock_irqsave(&pf_spin_lock, saved_flags);
-	pf_end_request(1);
-	spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
-	return 1;
+	if (!pf_count) {
+		spin_lock_irqsave(&pf_spin_lock, saved_flags);
+		pf_end_request(1);
+		pf_req = elv_next_request(pf_queue);
+		spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
+		if (!pf_req)
+			return 1;
+		pf_count = pf_req->current_nr_sectors;
+		pf_buf = pf_req->buffer;
+	}
+	return 0;
 }
 
 static inline void next_request(int success)
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index 47f8ac6cce57..82f4eecc8699 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -189,6 +189,18 @@ static int ramdisk_set_page_dirty(struct page *page)
 	return 0;
 }
 
+/*
+ * releasepage is called by pagevec_strip/try_to_release_page if
+ * buffers_heads_over_limit is true. Without a releasepage function
+ * try_to_free_buffers is called instead. That can unset the dirty
+ * bit of our ram disk pages, which will be eventually freed, even
+ * if the page is still in use.
+ */
+static int ramdisk_releasepage(struct page *page, gfp_t dummy)
+{
+	return 0;
+}
+
 static const struct address_space_operations ramdisk_aops = {
 	.readpage = ramdisk_readpage,
 	.prepare_write = ramdisk_prepare_write,
@@ -196,6 +208,7 @@ static const struct address_space_operations ramdisk_aops = {
 	.writepage = ramdisk_writepage,
 	.set_page_dirty = ramdisk_set_page_dirty,
 	.writepages = ramdisk_writepages,
+	.releasepage = ramdisk_releasepage,
 };
 
 static int rd_blkdev_pagecache_IO(int rw, struct bio_vec *vec, sector_t sector,
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index cc5d77797def..02518da6a386 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -47,7 +47,7 @@
 /* #define ATR_CSUM */
 
 #ifdef PCMCIA_DEBUG
-#define reader_to_dev(x)	(&handle_to_dev(x->p_dev->handle))
+#define reader_to_dev(x)	(&handle_to_dev(x->p_dev))
 static int pc_debug = PCMCIA_DEBUG;
 module_param(pc_debug, int, 0600);
 #define DEBUGP(n, rdr, x, args...) do { \
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index a0b9c8728d56..5f291bf739a6 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -41,7 +41,7 @@
 
 
 #ifdef PCMCIA_DEBUG
-#define reader_to_dev(x)	(&handle_to_dev(x->p_dev->handle))
+#define reader_to_dev(x)	(&handle_to_dev(x->p_dev))
 static int pc_debug = PCMCIA_DEBUG;
 module_param(pc_debug, int, 0600);
 #define DEBUGP(n, rdr, x, args...) do { \
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index ec6b65ec69ea..0c66b802736a 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -918,6 +918,31 @@ static const struct file_operations rtc_proc_fops = {
 };
 #endif
 
+static resource_size_t rtc_size;
+
+static struct resource * __init rtc_request_region(resource_size_t size)
+{
+	struct resource *r;
+
+	if (RTC_IOMAPPED)
+		r = request_region(RTC_PORT(0), size, "rtc");
+	else
+		r = request_mem_region(RTC_PORT(0), size, "rtc");
+
+	if (r)
+		rtc_size = size;
+
+	return r;
+}
+
+static void rtc_release_region(void)
+{
+	if (RTC_IOMAPPED)
+		release_region(RTC_PORT(0), rtc_size);
+	else
+		release_mem_region(RTC_PORT(0), rtc_size);
+}
+
 static int __init rtc_init(void)
 {
 #ifdef CONFIG_PROC_FS
@@ -968,10 +993,17 @@ found:
 	}
 no_irq:
 #else
-	if (RTC_IOMAPPED)
-		r = request_region(RTC_PORT(0), RTC_IO_EXTENT, "rtc");
-	else
-		r = request_mem_region(RTC_PORT(0), RTC_IO_EXTENT, "rtc");
+	r = rtc_request_region(RTC_IO_EXTENT);
+
+	/*
+	 * If we've already requested a smaller range (for example, because
+	 * PNPBIOS or ACPI told us how the device is configured), the request
+	 * above might fail because it's too big.
+	 *
+	 * If so, request just the range we actually use.
+	 */
+	if (!r)
+		r = rtc_request_region(RTC_IO_EXTENT_USED);
 	if (!r) {
 #ifdef RTC_IRQ
 		rtc_has_irq = 0;
@@ -992,10 +1024,7 @@ no_irq:
 		/* Yeah right, seeing as irq 8 doesn't even hit the bus. */
 		rtc_has_irq = 0;
 		printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ);
-		if (RTC_IOMAPPED)
-			release_region(RTC_PORT(0), RTC_IO_EXTENT);
-		else
-			release_mem_region(RTC_PORT(0), RTC_IO_EXTENT);
+		rtc_release_region();
 		return -EIO;
 	}
 	hpet_rtc_timer_init();
@@ -1009,7 +1038,7 @@ no_irq:
 		free_irq(RTC_IRQ, NULL);
 		rtc_has_irq = 0;
 #endif
-		release_region(RTC_PORT(0), RTC_IO_EXTENT);
+		rtc_release_region();
 		return -ENODEV;
 	}
 
@@ -1091,10 +1120,7 @@ static void __exit rtc_exit (void)
 	if (rtc_has_irq)
 		free_irq (rtc_irq, &rtc_port);
 #else
-	if (RTC_IOMAPPED)
-		release_region(RTC_PORT(0), RTC_IO_EXTENT);
-	else
-		release_mem_region(RTC_PORT(0), RTC_IO_EXTENT);
+	rtc_release_region();
 #ifdef RTC_IRQ
 	if (rtc_has_irq)
 		free_irq (RTC_IRQ, NULL);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 82489923af09..d59b2f417306 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -182,10 +182,9 @@ static void dma_client_chan_alloc(struct dma_client *client)
 			/* we are done once this client rejects
 			 * an available resource
 			 */
-			if (ack == DMA_ACK) {
+			if (ack == DMA_ACK)
 				dma_chan_get(chan);
-				kref_get(&device->refcount);
-			} else if (ack == DMA_NAK)
+			else if (ack == DMA_NAK)
 				return;
 		}
 	}
@@ -272,11 +271,8 @@ static void dma_clients_notify_removed(struct dma_chan *chan)
 		/* client was holding resources for this channel so
 		 * free it
 		 */
-		if (ack == DMA_ACK) {
+		if (ack == DMA_ACK)
 			dma_chan_put(chan);
-			kref_put(&chan->device->refcount,
-				dma_async_device_cleanup);
-		}
 	}
 
 	mutex_unlock(&dma_list_mutex);
@@ -316,11 +312,8 @@ void dma_async_client_unregister(struct dma_client *client)
 			ack = client->event_callback(client, chan,
 				DMA_RESOURCE_REMOVED);
 
-			if (ack == DMA_ACK) {
+			if (ack == DMA_ACK)
 				dma_chan_put(chan);
-				kref_put(&chan->device->refcount,
-					dma_async_device_cleanup);
-			}
 		}
 
 	list_del(&client->global_node);
@@ -397,6 +390,8 @@ int dma_async_device_register(struct dma_device *device)
 			goto err_out;
 		}
 
+		/* One for the channel, one of the class device */
+		kref_get(&device->refcount);
 		kref_get(&device->refcount);
 		kref_init(&chan->refcount);
 		chan->slow_ref = 0;
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c
index f204c39fb412..16e0fd8facfb 100644
--- a/drivers/dma/ioat.c
+++ b/drivers/dma/ioat.c
@@ -39,10 +39,14 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Intel Corporation");
 
 static struct pci_device_id ioat_pci_tbl[] = {
+	/* I/OAT v1 platforms */
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
+
+	/* I/OAT v2 platforms */
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) },
 	{ 0, }
 };
 
@@ -74,10 +78,17 @@ static int ioat_setup_functionality(struct pci_dev *pdev, void __iomem *iobase)
 		if (device->dma && ioat_dca_enabled)
 			device->dca = ioat_dca_init(pdev, iobase);
 		break;
+	case IOAT_VER_2_0:
+		device->dma = ioat_dma_probe(pdev, iobase);
+		if (device->dma && ioat_dca_enabled)
+			device->dca = ioat2_dca_init(pdev, iobase);
+		break;
 	default:
 		err = -ENODEV;
 		break;
 	}
+	if (!device->dma)
+		err = -ENODEV;
 	return err;
 }
 
diff --git a/drivers/dma/ioat_dca.c b/drivers/dma/ioat_dca.c
index ba985715b803..0fa8a98051a8 100644
--- a/drivers/dma/ioat_dca.c
+++ b/drivers/dma/ioat_dca.c
@@ -261,3 +261,167 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
 	return dca;
 }
 
+
+static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
+{
+	struct ioat_dca_priv *ioatdca = dca_priv(dca);
+	struct pci_dev *pdev;
+	int i;
+	u16 id;
+	u16 global_req_table;
+
+	/* This implementation only supports PCI-Express */
+	if (dev->bus != &pci_bus_type)
+		return -ENODEV;
+	pdev = to_pci_dev(dev);
+	id = dcaid_from_pcidev(pdev);
+
+	if (ioatdca->requester_count == ioatdca->max_requesters)
+		return -ENODEV;
+
+	for (i = 0; i < ioatdca->max_requesters; i++) {
+		if (ioatdca->req_slots[i].pdev == NULL) {
+			/* found an empty slot */
+			ioatdca->requester_count++;
+			ioatdca->req_slots[i].pdev = pdev;
+			ioatdca->req_slots[i].rid = id;
+			global_req_table =
+				readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
+			writel(id | IOAT_DCA_GREQID_VALID,
+			       ioatdca->iobase + global_req_table + (i * 4));
+			return i;
+		}
+	}
+	/* Error, ioatdma->requester_count is out of whack */
+	return -EFAULT;
+}
+
+static int ioat2_dca_remove_requester(struct dca_provider *dca,
+				      struct device *dev)
+{
+	struct ioat_dca_priv *ioatdca = dca_priv(dca);
+	struct pci_dev *pdev;
+	int i;
+	u16 global_req_table;
+
+	/* This implementation only supports PCI-Express */
+	if (dev->bus != &pci_bus_type)
+		return -ENODEV;
+	pdev = to_pci_dev(dev);
+
+	for (i = 0; i < ioatdca->max_requesters; i++) {
+		if (ioatdca->req_slots[i].pdev == pdev) {
+			global_req_table =
+				readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
+			writel(0, ioatdca->iobase + global_req_table + (i * 4));
+			ioatdca->req_slots[i].pdev = NULL;
+			ioatdca->req_slots[i].rid = 0;
+			ioatdca->requester_count--;
+			return i;
+		}
+	}
+	return -ENODEV;
+}
+
+static u8 ioat2_dca_get_tag(struct dca_provider *dca, int cpu)
+{
+	u8 tag;
+
+	tag = ioat_dca_get_tag(dca, cpu);
+	tag = (~tag) & 0x1F;
+	return tag;
+}
+
+static struct dca_ops ioat2_dca_ops = {
+	.add_requester = ioat2_dca_add_requester,
+	.remove_requester = ioat2_dca_remove_requester,
+	.get_tag = ioat2_dca_get_tag,
+};
+
+static int ioat2_dca_count_dca_slots(void *iobase, u16 dca_offset)
+{
+	int slots = 0;
+	u32 req;
+	u16 global_req_table;
+
+	global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET);
+	if (global_req_table == 0)
+		return 0;
+	do {
+		req = readl(iobase + global_req_table + (slots * sizeof(u32)));
+		slots++;
+	} while ((req & IOAT_DCA_GREQID_LASTID) == 0);
+
+	return slots;
+}
+
+struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+{
+	struct dca_provider *dca;
+	struct ioat_dca_priv *ioatdca;
+	int slots;
+	int i;
+	int err;
+	u32 tag_map;
+	u16 dca_offset;
+	u16 csi_fsb_control;
+	u16 pcie_control;
+	u8 bit;
+
+	if (!system_has_dca_enabled(pdev))
+		return NULL;
+
+	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
+	if (dca_offset == 0)
+		return NULL;
+
+	slots = ioat2_dca_count_dca_slots(iobase, dca_offset);
+	if (slots == 0)
+		return NULL;
+
+	dca = alloc_dca_provider(&ioat2_dca_ops,
+				 sizeof(*ioatdca)
+				 + (sizeof(struct ioat_dca_slot) * slots));
+	if (!dca)
+		return NULL;
+
+	ioatdca = dca_priv(dca);
+	ioatdca->iobase = iobase;
+	ioatdca->dca_base = iobase + dca_offset;
+	ioatdca->max_requesters = slots;
+
+	/* some bios might not know to turn these on */
+	csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
+	if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) {
+		csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH;
+		writew(csi_fsb_control,
+		       ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
+	}
+	pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
+	if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) {
+		pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR;
+		writew(pcie_control,
+		       ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
+	}
+
+
+	/* TODO version, compatibility and configuration checks */
+
+	/* copy out the APIC to DCA tag map */
+	tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET);
+	for (i = 0; i < 5; i++) {
+		bit = (tag_map >> (4 * i)) & 0x0f;
+		if (bit < 8)
+			ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID;
+		else
+			ioatdca->tag_map[i] = 0;
+	}
+
+	err = register_dca_provider(dca, &pdev->dev);
+	if (err) {
+		free_dca_provider(dca);
+		return NULL;
+	}
+
+	return dca;
+}
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 7e4a785c2dff..c1c2dcc6fc2e 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -36,18 +36,24 @@
 #include "ioatdma_registers.h"
 #include "ioatdma_hw.h"
 
-#define INITIAL_IOAT_DESC_COUNT 128
-
 #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
 #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
 #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
 #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
 
+static int ioat_pending_level = 4;
+module_param(ioat_pending_level, int, 0644);
+MODULE_PARM_DESC(ioat_pending_level,
+		 "high-water mark for pushing ioat descriptors (default: 4)");
+
 /* internal functions */
 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
+
+static struct ioat_desc_sw *
+ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
 static struct ioat_desc_sw *
-ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
+ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
 
 static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
 	struct ioatdma_device *device,
@@ -130,6 +136,12 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 		ioat_chan->device = device;
 		ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
 		ioat_chan->xfercap = xfercap;
+		ioat_chan->desccount = 0;
+		if (ioat_chan->device->version != IOAT_VER_1_2) {
+			writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
+			       | IOAT_DMA_DCA_ANY_CPU,
+			       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+		}
 		spin_lock_init(&ioat_chan->cleanup_lock);
 		spin_lock_init(&ioat_chan->desc_lock);
 		INIT_LIST_HEAD(&ioat_chan->free_desc);
@@ -161,13 +173,17 @@ static void ioat_set_dest(dma_addr_t addr,
 	tx_to_ioat_desc(tx)->dst = addr;
 }
 
-static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
+static inline void __ioat1_dma_memcpy_issue_pending(
+	struct ioat_dma_chan *ioat_chan);
+static inline void __ioat2_dma_memcpy_issue_pending(
+	struct ioat_dma_chan *ioat_chan);
+
+static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
 	struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
 	struct ioat_desc_sw *prev, *new;
 	struct ioat_dma_descriptor *hw;
-	int append = 0;
 	dma_cookie_t cookie;
 	LIST_HEAD(new_chain);
 	u32 copy;
@@ -209,7 +225,7 @@ static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
 		list_add_tail(&new->node, &new_chain);
 		desc_count++;
 		prev = new;
-	} while (len && (new = ioat_dma_get_next_descriptor(ioat_chan)));
+	} while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
 
 	hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
 	if (new->async_tx.callback) {
@@ -246,20 +262,98 @@ static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
 		first->async_tx.phys;
 	__list_splice(&new_chain, ioat_chan->used_desc.prev);
 
+	ioat_chan->dmacount += desc_count;
 	ioat_chan->pending += desc_count;
-	if (ioat_chan->pending >= 4) {
-		append = 1;
-		ioat_chan->pending = 0;
-	}
+	if (ioat_chan->pending >= ioat_pending_level)
+		__ioat1_dma_memcpy_issue_pending(ioat_chan);
 	spin_unlock_bh(&ioat_chan->desc_lock);
 
-	if (append)
-		writeb(IOAT_CHANCMD_APPEND,
-		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
+	return cookie;
+}
+
+static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
+	struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
+	struct ioat_desc_sw *new;
+	struct ioat_dma_descriptor *hw;
+	dma_cookie_t cookie;
+	u32 copy;
+	size_t len;
+	dma_addr_t src, dst;
+	int orig_ack;
+	unsigned int desc_count = 0;
+
+	/* src and dest and len are stored in the initial descriptor */
+	len = first->len;
+	src = first->src;
+	dst = first->dst;
+	orig_ack = first->async_tx.ack;
+	new = first;
+
+	/* ioat_chan->desc_lock is still in force in version 2 path */
+
+	do {
+		copy = min((u32) len, ioat_chan->xfercap);
+
+		new->async_tx.ack = 1;
+
+		hw = new->hw;
+		hw->size = copy;
+		hw->ctl = 0;
+		hw->src_addr = src;
+		hw->dst_addr = dst;
+
+		len -= copy;
+		dst += copy;
+		src += copy;
+		desc_count++;
+	} while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
+
+	hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
+	if (new->async_tx.callback) {
+		hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
+		if (first != new) {
+			/* move callback into to last desc */
+			new->async_tx.callback = first->async_tx.callback;
+			new->async_tx.callback_param
+					= first->async_tx.callback_param;
+			first->async_tx.callback = NULL;
+			first->async_tx.callback_param = NULL;
+		}
+	}
+
+	new->tx_cnt = desc_count;
+	new->async_tx.ack = orig_ack; /* client is in control of this ack */
+
+	/* store the original values for use in later cleanup */
+	if (new != first) {
+		new->src = first->src;
+		new->dst = first->dst;
+		new->len = first->len;
+	}
+
+	/* cookie incr and addition to used_list must be atomic */
+	cookie = ioat_chan->common.cookie;
+	cookie++;
+	if (cookie < 0)
+		cookie = 1;
+	ioat_chan->common.cookie = new->async_tx.cookie = cookie;
+
+	ioat_chan->dmacount += desc_count;
+	ioat_chan->pending += desc_count;
+	if (ioat_chan->pending >= ioat_pending_level)
+		__ioat2_dma_memcpy_issue_pending(ioat_chan);
+	spin_unlock_bh(&ioat_chan->desc_lock);
 
 	return cookie;
 }
 
+/**
+ * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
+ * @ioat_chan: the channel supplying the memory pool for the descriptors
+ * @flags: allocation flags
+ */
 static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
 	struct ioat_dma_chan *ioat_chan,
 	gfp_t flags)
@@ -284,15 +378,57 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
 	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
 	desc_sw->async_tx.tx_set_src = ioat_set_src;
 	desc_sw->async_tx.tx_set_dest = ioat_set_dest;
-	desc_sw->async_tx.tx_submit = ioat_tx_submit;
+	switch (ioat_chan->device->version) {
+	case IOAT_VER_1_2:
+		desc_sw->async_tx.tx_submit = ioat1_tx_submit;
+		break;
+	case IOAT_VER_2_0:
+		desc_sw->async_tx.tx_submit = ioat2_tx_submit;
+		break;
+	}
 	INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
+
 	desc_sw->hw = desc;
 	desc_sw->async_tx.phys = phys;
 
 	return desc_sw;
 }
 
-/* returns the actual number of allocated descriptors */
+static int ioat_initial_desc_count = 256;
+module_param(ioat_initial_desc_count, int, 0644);
+MODULE_PARM_DESC(ioat_initial_desc_count,
+		 "initial descriptors per channel (default: 256)");
+
+/**
+ * ioat2_dma_massage_chan_desc - link the descriptors into a circle
+ * @ioat_chan: the channel to be massaged
+ */
+static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
+{
+	struct ioat_desc_sw *desc, *_desc;
+
+	/* setup used_desc */
+	ioat_chan->used_desc.next = ioat_chan->free_desc.next;
+	ioat_chan->used_desc.prev = NULL;
+
+	/* pull free_desc out of the circle so that every node is a hw
+	 * descriptor, but leave it pointing to the list
+	 */
+	ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
+	ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;
+
+	/* circle link the hw descriptors */
+	desc = to_ioat_desc(ioat_chan->free_desc.next);
+	desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
+	list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
+		desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
+	}
+}
+
+/**
+ * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
+ * @chan: the channel to be filled out
+ */
 static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
@@ -304,7 +440,7 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 
 	/* have we already been set up? */
 	if (!list_empty(&ioat_chan->free_desc))
-		return INITIAL_IOAT_DESC_COUNT;
+		return ioat_chan->desccount;
 
 	/* Setup register to interrupt and write completion status on error */
 	chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
@@ -320,7 +456,7 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 	}
 
 	/* Allocate descriptors */
-	for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
+	for (i = 0; i < ioat_initial_desc_count; i++) {
 		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
 		if (!desc) {
 			dev_err(&ioat_chan->device->pdev->dev,
@@ -330,7 +466,10 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 		list_add_tail(&desc->node, &tmp_list);
 	}
 	spin_lock_bh(&ioat_chan->desc_lock);
+	ioat_chan->desccount = i;
 	list_splice(&tmp_list, &ioat_chan->free_desc);
+	if (ioat_chan->device->version != IOAT_VER_1_2)
+		ioat2_dma_massage_chan_desc(ioat_chan);
 	spin_unlock_bh(&ioat_chan->desc_lock);
 
 	/* allocate a completion writeback area */
@@ -347,10 +486,14 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
 
 	tasklet_enable(&ioat_chan->cleanup_task);
-	ioat_dma_start_null_desc(ioat_chan);
-	return i;
+	ioat_dma_start_null_desc(ioat_chan);  /* give chain to dma device */
+	return ioat_chan->desccount;
 }
 
+/**
+ * ioat_dma_free_chan_resources - release all the descriptors
+ * @chan: the channel to be cleaned
+ */
 static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
@@ -364,22 +507,45 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 	/* Delay 100ms after reset to allow internal DMA logic to quiesce
 	 * before removing DMA descriptor resources.
 	 */
-	writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
+	writeb(IOAT_CHANCMD_RESET,
+	       ioat_chan->reg_base
+	       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
 	mdelay(100);
 
 	spin_lock_bh(&ioat_chan->desc_lock);
-	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
-		in_use_descs++;
-		list_del(&desc->node);
-		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-			      desc->async_tx.phys);
-		kfree(desc);
-	}
-	list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {
-		list_del(&desc->node);
+	switch (ioat_chan->device->version) {
+	case IOAT_VER_1_2:
+		list_for_each_entry_safe(desc, _desc,
+					 &ioat_chan->used_desc, node) {
+			in_use_descs++;
+			list_del(&desc->node);
+			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+				      desc->async_tx.phys);
+			kfree(desc);
+		}
+		list_for_each_entry_safe(desc, _desc,
+					 &ioat_chan->free_desc, node) {
+			list_del(&desc->node);
+			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+				      desc->async_tx.phys);
+			kfree(desc);
+		}
+		break;
+	case IOAT_VER_2_0:
+		list_for_each_entry_safe(desc, _desc,
+					 ioat_chan->free_desc.next, node) {
+			list_del(&desc->node);
+			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+				      desc->async_tx.phys);
+			kfree(desc);
+		}
+		desc = to_ioat_desc(ioat_chan->free_desc.next);
 		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
 			      desc->async_tx.phys);
 		kfree(desc);
+		INIT_LIST_HEAD(&ioat_chan->free_desc);
+		INIT_LIST_HEAD(&ioat_chan->used_desc);
+		break;
 	}
 	spin_unlock_bh(&ioat_chan->desc_lock);
 
@@ -395,6 +561,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 
 	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
 	ioat_chan->pending = 0;
+	ioat_chan->dmacount = 0;
 }
 
 /**
@@ -406,7 +573,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
 * has run out.
 */
 static struct ioat_desc_sw *
-ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
+ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 {
 	struct ioat_desc_sw *new = NULL;
 
@@ -425,7 +592,82 @@ ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 	return new;
 }
 
-static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy(
+static struct ioat_desc_sw *
+ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
+{
+	struct ioat_desc_sw *new = NULL;
+
+	/*
+	 * used.prev points to where to start processing
+	 * used.next points to next free descriptor
+	 * if used.prev == NULL, there are none waiting to be processed
+	 * if used.next == used.prev.prev, there is only one free descriptor,
+	 * and we need to use it to as a noop descriptor before
+	 * linking in a new set of descriptors, since the device
+	 * has probably already read the pointer to it
+	 */
+	if (ioat_chan->used_desc.prev &&
+	    ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
+
+		struct ioat_desc_sw *desc = NULL;
+		struct ioat_desc_sw *noop_desc = NULL;
+		int i;
+
+		/* set up the noop descriptor */
+		noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
+		noop_desc->hw->size = 0;
+		noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
+		noop_desc->hw->src_addr = 0;
+		noop_desc->hw->dst_addr = 0;
+
+		ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
+		ioat_chan->pending++;
+		ioat_chan->dmacount++;
+
+		/* get a few more descriptors */
+		for (i = 16; i; i--) {
+			desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
+			BUG_ON(!desc);
+			list_add_tail(&desc->node, ioat_chan->used_desc.next);
+
+			desc->hw->next
+				= to_ioat_desc(desc->node.next)->async_tx.phys;
+			to_ioat_desc(desc->node.prev)->hw->next
+				= desc->async_tx.phys;
+			ioat_chan->desccount++;
+		}
+
+		ioat_chan->used_desc.next = noop_desc->node.next;
+	}
+	new = to_ioat_desc(ioat_chan->used_desc.next);
+	prefetch(new);
+	ioat_chan->used_desc.next = new->node.next;
+
+	if (ioat_chan->used_desc.prev == NULL)
+		ioat_chan->used_desc.prev = &new->node;
+
+	prefetch(new->hw);
+	return new;
+}
+
+static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
+	struct ioat_dma_chan *ioat_chan)
+{
+	if (!ioat_chan)
+		return NULL;
+
+	switch (ioat_chan->device->version) {
+	case IOAT_VER_1_2:
+		return ioat1_dma_get_next_descriptor(ioat_chan);
+		break;
+	case IOAT_VER_2_0:
+		return ioat2_dma_get_next_descriptor(ioat_chan);
+		break;
+	}
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
 	struct dma_chan *chan,
 	size_t len,
 	int int_en)
@@ -441,19 +683,62 @@ static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy(
 	return new ? &new->async_tx : NULL;
 }
 
+static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
+	struct dma_chan *chan,
+	size_t len,
+	int int_en)
+{
+	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+	struct ioat_desc_sw *new;
+
+	spin_lock_bh(&ioat_chan->desc_lock);
+	new = ioat2_dma_get_next_descriptor(ioat_chan);
+	new->len = len;
+
+	/* leave ioat_chan->desc_lock set in version 2 path */
+	return new ? &new->async_tx : NULL;
+}
+
+
 /**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
-static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
+static inline void __ioat1_dma_memcpy_issue_pending(
+	struct ioat_dma_chan *ioat_chan)
+{
+	ioat_chan->pending = 0;
+	writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
+}
+
+static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
 
 	if (ioat_chan->pending != 0) {
-		ioat_chan->pending = 0;
-		writeb(IOAT_CHANCMD_APPEND,
-		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
+		spin_lock_bh(&ioat_chan->desc_lock);
+		__ioat1_dma_memcpy_issue_pending(ioat_chan);
+		spin_unlock_bh(&ioat_chan->desc_lock);
+	}
+}
+
+static inline void __ioat2_dma_memcpy_issue_pending(
+	struct ioat_dma_chan *ioat_chan)
+{
+	ioat_chan->pending = 0;
+	writew(ioat_chan->dmacount,
+	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+}
+
+static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
+{
+	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (ioat_chan->pending != 0) {
+		spin_lock_bh(&ioat_chan->desc_lock);
+		__ioat2_dma_memcpy_issue_pending(ioat_chan);
+		spin_unlock_bh(&ioat_chan->desc_lock);
 	}
 }
 
@@ -465,11 +750,17 @@ static void ioat_dma_cleanup_tasklet(unsigned long data)
 	       chan->reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
+/**
+ * ioat_dma_memcpy_cleanup - cleanup up finished descriptors
+ * @chan: ioat channel to be cleaned up
+ */
 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 {
 	unsigned long phys_complete;
 	struct ioat_desc_sw *desc, *_desc;
 	dma_cookie_t cookie = 0;
+	unsigned long desc_phys;
+	struct ioat_desc_sw *latest_desc;
 
 	prefetch(ioat_chan->completion_virt);
 
@@ -507,56 +798,115 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
 
 	cookie = 0;
 	spin_lock_bh(&ioat_chan->desc_lock);
-	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
-
-		/*
-		 * Incoming DMA requests may use multiple descriptors, due to
-		 * exceeding xfercap, perhaps. If so, only the last one will
-		 * have a cookie, and require unmapping.
-		 */
-		if (desc->async_tx.cookie) {
-			cookie = desc->async_tx.cookie;
+	switch (ioat_chan->device->version) {
+	case IOAT_VER_1_2:
+		list_for_each_entry_safe(desc, _desc,
+					 &ioat_chan->used_desc, node) {
 
 			/*
-			 * yes we are unmapping both _page and _single alloc'd
-			 * regions with unmap_page. Is this *really* that bad?
+			 * Incoming DMA requests may use multiple descriptors,
+			 * due to exceeding xfercap, perhaps. If so, only the
+			 * last one will have a cookie, and require unmapping.
 			 */
-			pci_unmap_page(ioat_chan->device->pdev,
-				       pci_unmap_addr(desc, dst),
-				       pci_unmap_len(desc, len),
-				       PCI_DMA_FROMDEVICE);
-			pci_unmap_page(ioat_chan->device->pdev,
-				       pci_unmap_addr(desc, src),
-				       pci_unmap_len(desc, len),
-				       PCI_DMA_TODEVICE);
-			if (desc->async_tx.callback) {
-				desc->async_tx.callback(
-					desc->async_tx.callback_param);
-				desc->async_tx.callback = NULL;
+			if (desc->async_tx.cookie) {
+				cookie = desc->async_tx.cookie;
+
+				/*
+				 * yes we are unmapping both _page and _single
+				 * alloc'd regions with unmap_page. Is this
+				 * *really* that bad?
+				 */
+				pci_unmap_page(ioat_chan->device->pdev,
+					       pci_unmap_addr(desc, dst),
+					       pci_unmap_len(desc, len),
+					       PCI_DMA_FROMDEVICE);
+				pci_unmap_page(ioat_chan->device->pdev,
+					       pci_unmap_addr(desc, src),
+					       pci_unmap_len(desc, len),
+					       PCI_DMA_TODEVICE);
+
+				if (desc->async_tx.callback) {
+					desc->async_tx.callback(desc->async_tx.callback_param);
+					desc->async_tx.callback = NULL;
+				}
 			}
-		}
 
-		if (desc->async_tx.phys != phys_complete) {
-			/*
-			 * a completed entry, but not the last, so cleanup
-			 * if the client is done with the descriptor
-			 */
-			if (desc->async_tx.ack) {
-				list_del(&desc->node);
-				list_add_tail(&desc->node,
-					      &ioat_chan->free_desc);
-			} else
+			if (desc->async_tx.phys != phys_complete) {
+				/*
+				 * a completed entry, but not the last, so clean
+				 * up if the client is done with the descriptor
+				 */
+				if (desc->async_tx.ack) {
+					list_del(&desc->node);
+					list_add_tail(&desc->node,
+						      &ioat_chan->free_desc);
+				} else
+					desc->async_tx.cookie = 0;
+			} else {
+				/*
+				 * last used desc. Do not remove, so we can
+				 * append from it, but don't look at it next
+				 * time, either
+				 */
 				desc->async_tx.cookie = 0;
-		} else {
-			/*
-			 * last used desc. Do not remove, so we can append from
-			 * it, but don't look at it next time, either
-			 */
-			desc->async_tx.cookie = 0;
 
-			/* TODO check status bits? */
+				/* TODO check status bits? */
+				break;
+			}
+		}
+		break;
+	case IOAT_VER_2_0:
+		/* has some other thread has already cleaned up? */
+		if (ioat_chan->used_desc.prev == NULL)
 			break;
+
+		/* work backwards to find latest finished desc */
+		desc = to_ioat_desc(ioat_chan->used_desc.next);
+		latest_desc = NULL;
+		do {
+			desc = to_ioat_desc(desc->node.prev);
+			desc_phys = (unsigned long)desc->async_tx.phys
+				& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
+			if (desc_phys == phys_complete) {
+				latest_desc = desc;
+				break;
+			}
+		} while (&desc->node != ioat_chan->used_desc.prev);
+
+		if (latest_desc != NULL) {
+
+			/* work forwards to clear finished descriptors */
+			for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
+			     &desc->node != latest_desc->node.next &&
+			     &desc->node != ioat_chan->used_desc.next;
+			     desc = to_ioat_desc(desc->node.next)) {
+				if (desc->async_tx.cookie) {
+					cookie = desc->async_tx.cookie;
+					desc->async_tx.cookie = 0;
+
+					pci_unmap_page(ioat_chan->device->pdev,
+						       pci_unmap_addr(desc, dst),
+						       pci_unmap_len(desc, len),
+						       PCI_DMA_FROMDEVICE);
+					pci_unmap_page(ioat_chan->device->pdev,
+						       pci_unmap_addr(desc, src),
+						       pci_unmap_len(desc, len),
+						       PCI_DMA_TODEVICE);
+
+					if (desc->async_tx.callback) {
+						desc->async_tx.callback(desc->async_tx.callback_param);
+						desc->async_tx.callback = NULL;
+					}
+				}
+			}
+
+			/* move used.prev up beyond those that are finished */
+			if (&desc->node == ioat_chan->used_desc.next)
+				ioat_chan->used_desc.prev = NULL;
+			else
+				ioat_chan->used_desc.prev = &desc->node;
 		}
+		break;
 	}
 
 	spin_unlock_bh(&ioat_chan->desc_lock);
@@ -621,8 +971,6 @@ static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
 	return dma_async_is_complete(cookie, last_complete, last_used);
 }
 
-/* PCI API */
-
 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
 {
 	struct ioat_desc_sw *desc;
@@ -633,21 +981,34 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
 	desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
 		| IOAT_DMA_DESCRIPTOR_CTL_INT_GN
 		| IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
-	desc->hw->next = 0;
 	desc->hw->size = 0;
 	desc->hw->src_addr = 0;
 	desc->hw->dst_addr = 0;
 	desc->async_tx.ack = 1;
-
-	list_add_tail(&desc->node, &ioat_chan->used_desc);
+	switch (ioat_chan->device->version) {
+	case IOAT_VER_1_2:
+		desc->hw->next = 0;
+		list_add_tail(&desc->node, &ioat_chan->used_desc);
+
+		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
+		writel(((u64) desc->async_tx.phys) >> 32,
+		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
+
+		writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
+			+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
+		break;
+	case IOAT_VER_2_0:
+		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
+		writel(((u64) desc->async_tx.phys) >> 32,
+		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
+
+		ioat_chan->dmacount++;
+		__ioat2_dma_memcpy_issue_pending(ioat_chan);
+		break;
+	}
 	spin_unlock_bh(&ioat_chan->desc_lock);
-
-	writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
-	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW);
-	writel(((u64) desc->async_tx.phys) >> 32,
-	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH);
-
-	writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
 }
 
 /*
@@ -693,14 +1054,14 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	dma_chan = container_of(device->common.channels.next,
 				struct dma_chan,
 				device_node);
-	if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
+	if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
 		dev_err(&device->pdev->dev,
 			"selftest cannot allocate chan resource\n");
 		err = -ENODEV;
 		goto out;
 	}
 
-	tx = ioat_dma_prep_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
+	tx = device->common.device_prep_dma_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
 	if (!tx) {
 		dev_err(&device->pdev->dev,
 			"Self-test prep failed, disabling\n");
@@ -710,24 +1071,25 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 
 	async_tx_ack(tx);
 	addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
 			      DMA_TO_DEVICE);
-	ioat_set_src(addr, tx, 0);
+	tx->tx_set_src(addr, tx, 0);
 	addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
 			      DMA_FROM_DEVICE);
-	ioat_set_dest(addr, tx, 0);
+	tx->tx_set_dest(addr, tx, 0);
 	tx->callback = ioat_dma_test_callback;
 	tx->callback_param = (void *)0x8086;
-	cookie = ioat_tx_submit(tx);
+	cookie = tx->tx_submit(tx);
 	if (cookie < 0) {
 		dev_err(&device->pdev->dev,
 			"Self-test setup failed, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
 	}
-	ioat_dma_memcpy_issue_pending(dma_chan);
+	device->common.device_issue_pending(dma_chan);
 	msleep(1);
 
-	if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+	if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
+					!= DMA_SUCCESS) {
 		dev_err(&device->pdev->dev,
 			"Self-test copy timed out, disabling\n");
 		err = -ENODEV;
@@ -741,7 +1103,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	}
 
 free_resources:
-	ioat_dma_free_chan_resources(dma_chan);
+	device->common.device_free_chan_resources(dma_chan);
 out:
 	kfree(src);
 	kfree(dest);
@@ -941,16 +1303,28 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
 	INIT_LIST_HEAD(&device->common.channels);
 	ioat_dma_enumerate_channels(device);
 
-	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
 	device->common.device_alloc_chan_resources =
 						ioat_dma_alloc_chan_resources;
 	device->common.device_free_chan_resources =
 						ioat_dma_free_chan_resources;
-	device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy;
+	device->common.dev = &pdev->dev;
+
+	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
 	device->common.device_is_tx_complete = ioat_dma_is_complete;
-	device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
 	device->common.device_dependency_added = ioat_dma_dependency_added;
-	device->common.dev = &pdev->dev;
+	switch (device->version) {
+	case IOAT_VER_1_2:
+		device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
+		device->common.device_issue_pending =
1319 ioat1_dma_memcpy_issue_pending;
1320 break;
1321 case IOAT_VER_2_0:
1322 device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
1323 device->common.device_issue_pending =
1324 ioat2_dma_memcpy_issue_pending;
1325 break;
1326 }
1327
954 dev_err(&device->pdev->dev, 1328 dev_err(&device->pdev->dev,
955 "Intel(R) I/OAT DMA Engine found," 1329 "Intel(R) I/OAT DMA Engine found,"
956 " %d channels, device version 0x%02x, driver version %s\n", 1330 " %d channels, device version 0x%02x, driver version %s\n",
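
A note on the ioat_dma.c probe hunk above: instead of hard-wiring the v1 channel operations, ioat_dma_probe() now switches on device->version so IOAT 1.2 and IOAT 2.0 hardware register different device_prep_dma_memcpy/device_issue_pending callbacks behind the same struct dma_device, and the self-test hunks call through those generic pointers rather than the v1 functions directly. Below is a minimal stand-alone sketch of that dispatch pattern; the struct, callbacks and version handling are simplified stand-ins for illustration, not the kernel's real dma_device API.

#include <stdio.h>

#define IOAT_VER_1_2 0x12
#define IOAT_VER_2_0 0x20

/* Simplified stand-ins for the dma_device callbacks. */
struct dma_ops {
        void (*prep_memcpy)(void);
        void (*issue_pending)(void);
};

static void ioat1_prep(void)  { puts("v1.2 prep_memcpy"); }
static void ioat1_issue(void) { puts("v1.2 issue_pending"); }
static void ioat2_prep(void)  { puts("v2.0 prep_memcpy"); }
static void ioat2_issue(void) { puts("v2.0 issue_pending"); }

/* Wire the callbacks once at probe time, based on the hardware version. */
static int setup_ops(struct dma_ops *ops, int version)
{
        switch (version) {
        case IOAT_VER_1_2:
                ops->prep_memcpy = ioat1_prep;
                ops->issue_pending = ioat1_issue;
                return 0;
        case IOAT_VER_2_0:
                ops->prep_memcpy = ioat2_prep;
                ops->issue_pending = ioat2_issue;
                return 0;
        default:
                return -1;      /* unknown hardware revision */
        }
}

int main(void)
{
        struct dma_ops ops;

        if (setup_ops(&ops, IOAT_VER_2_0) == 0) {
                ops.prep_memcpy();      /* callers never care which version ran */
                ops.issue_pending();
        }
        return 0;
}
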
diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioatdma.h
index 5f9881e7b0ed..b668234ef654 100644
--- a/drivers/dma/ioatdma.h
+++ b/drivers/dma/ioatdma.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. 2 * Copyright(c) 2004 - 2007 Intel Corporation. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free 5 * under the terms of the GNU General Public License as published by the Free
@@ -28,7 +28,7 @@
28#include <linux/cache.h> 28#include <linux/cache.h>
29#include <linux/pci_ids.h> 29#include <linux/pci_ids.h>
30 30
31#define IOAT_DMA_VERSION "1.26" 31#define IOAT_DMA_VERSION "2.04"
32 32
33enum ioat_interrupt { 33enum ioat_interrupt {
34 none = 0, 34 none = 0,
@@ -39,6 +39,8 @@ enum ioat_interrupt {
39}; 39};
40 40
41#define IOAT_LOW_COMPLETION_MASK 0xffffffc0 41#define IOAT_LOW_COMPLETION_MASK 0xffffffc0
42#define IOAT_DMA_DCA_ANY_CPU ~0
43
42 44
43/** 45/**
44 * struct ioatdma_device - internal representation of a IOAT device 46 * struct ioatdma_device - internal representation of a IOAT device
@@ -47,6 +49,9 @@ enum ioat_interrupt {
47 * @dma_pool: for allocating DMA descriptors 49 * @dma_pool: for allocating DMA descriptors
48 * @common: embedded struct dma_device 50 * @common: embedded struct dma_device
49 * @version: version of ioatdma device 51 * @version: version of ioatdma device
52 * @irq_mode: which style irq to use
53 * @msix_entries: irq handlers
54 * @idx: per channel data
50 */ 55 */
51 56
52struct ioatdma_device { 57struct ioatdma_device {
@@ -63,23 +68,7 @@ struct ioatdma_device {
63 68
64/** 69/**
65 * struct ioat_dma_chan - internal representation of a DMA channel 70 * struct ioat_dma_chan - internal representation of a DMA channel
66 * @device:
67 * @reg_base:
68 * @sw_in_use:
69 * @completion:
70 * @completion_low:
71 * @completion_high:
72 * @completed_cookie: last cookie seen completed on cleanup
73 * @cookie: value of last cookie given to client
74 * @last_completion:
75 * @xfercap:
76 * @desc_lock:
77 * @free_desc:
78 * @used_desc:
79 * @resource:
80 * @device_node:
81 */ 71 */
82
83struct ioat_dma_chan { 72struct ioat_dma_chan {
84 73
85 void __iomem *reg_base; 74 void __iomem *reg_base;
@@ -95,6 +84,8 @@ struct ioat_dma_chan {
95 struct list_head used_desc; 84 struct list_head used_desc;
96 85
97 int pending; 86 int pending;
87 int dmacount;
88 int desccount;
98 89
99 struct ioatdma_device *device; 90 struct ioatdma_device *device;
100 struct dma_chan common; 91 struct dma_chan common;
@@ -134,12 +125,13 @@ struct ioat_desc_sw {
134struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, 125struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
135 void __iomem *iobase); 126 void __iomem *iobase);
136void ioat_dma_remove(struct ioatdma_device *device); 127void ioat_dma_remove(struct ioatdma_device *device);
137struct dca_provider *ioat_dca_init(struct pci_dev *pdev, 128struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
138 void __iomem *iobase); 129struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
139#else 130#else
140#define ioat_dma_probe(pdev, iobase) NULL 131#define ioat_dma_probe(pdev, iobase) NULL
141#define ioat_dma_remove(device) do { } while (0) 132#define ioat_dma_remove(device) do { } while (0)
142#define ioat_dca_init(pdev, iobase) NULL 133#define ioat_dca_init(pdev, iobase) NULL
134#define ioat2_dca_init(pdev, iobase) NULL
143#endif 135#endif
144 136
145#endif /* IOATDMA_H */ 137#endif /* IOATDMA_H */
diff --git a/drivers/dma/ioatdma_hw.h b/drivers/dma/ioatdma_hw.h
index 9e7434e1551f..dd470fa91d86 100644
--- a/drivers/dma/ioatdma_hw.h
+++ b/drivers/dma/ioatdma_hw.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. 2 * Copyright(c) 2004 - 2007 Intel Corporation. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free 5 * under the terms of the GNU General Public License as published by the Free
@@ -22,12 +22,19 @@
22#define _IOAT_HW_H_ 22#define _IOAT_HW_H_
23 23
24/* PCI Configuration Space Values */ 24/* PCI Configuration Space Values */
25#define IOAT_PCI_VID 0x8086 25#define IOAT_PCI_VID 0x8086
26#define IOAT_PCI_DID 0x1A38 26
27#define IOAT_PCI_RID 0x00 27/* CB device ID's */
28#define IOAT_PCI_SVID 0x8086 28#define IOAT_PCI_DID_5000 0x1A38
29#define IOAT_PCI_SID 0x8086 29#define IOAT_PCI_DID_CNB 0x360B
30#define IOAT_VER_1_2 0x12 /* Version 1.2 */ 30#define IOAT_PCI_DID_SCNB 0x65FF
31#define IOAT_PCI_DID_SNB 0x402F
32
33#define IOAT_PCI_RID 0x00
34#define IOAT_PCI_SVID 0x8086
35#define IOAT_PCI_SID 0x8086
36#define IOAT_VER_1_2 0x12 /* Version 1.2 */
37#define IOAT_VER_2_0 0x20 /* Version 2.0 */
31 38
32struct ioat_dma_descriptor { 39struct ioat_dma_descriptor {
33 uint32_t size; 40 uint32_t size;
@@ -47,6 +54,16 @@ struct ioat_dma_descriptor {
47#define IOAT_DMA_DESCRIPTOR_CTL_CP_STS 0x00000008 54#define IOAT_DMA_DESCRIPTOR_CTL_CP_STS 0x00000008
48#define IOAT_DMA_DESCRIPTOR_CTL_FRAME 0x00000010 55#define IOAT_DMA_DESCRIPTOR_CTL_FRAME 0x00000010
49#define IOAT_DMA_DESCRIPTOR_NUL 0x00000020 56#define IOAT_DMA_DESCRIPTOR_NUL 0x00000020
50#define IOAT_DMA_DESCRIPTOR_OPCODE 0xFF000000 57#define IOAT_DMA_DESCRIPTOR_CTL_SP_BRK 0x00000040
58#define IOAT_DMA_DESCRIPTOR_CTL_DP_BRK 0x00000080
59#define IOAT_DMA_DESCRIPTOR_CTL_BNDL 0x00000100
60#define IOAT_DMA_DESCRIPTOR_CTL_DCA 0x00000200
61#define IOAT_DMA_DESCRIPTOR_CTL_BUFHINT 0x00000400
62
63#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_CONTEXT 0xFF000000
64#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_DMA 0x00000000
65
66#define IOAT_DMA_DESCRIPTOR_CTL_CONTEXT_DCA 0x00000001
67#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_MASK 0xFF000000
51 68
52#endif 69#endif
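
A note on the new ioatdma_hw.h control bits above (DCA, BUFHINT, the opcode/context fields): they are ordinary masks OR'd into the 32-bit ctl word of struct ioat_dma_descriptor. The stand-alone snippet below only illustrates composing and decoding such a word with the values copied from this hunk; whether real hardware accepts this particular combination is not something the diff establishes.

#include <stdint.h>
#include <stdio.h>

/* Values copied from the hunk above. */
#define IOAT_DMA_DESCRIPTOR_CTL_CP_STS          0x00000008
#define IOAT_DMA_DESCRIPTOR_CTL_DCA             0x00000200
#define IOAT_DMA_DESCRIPTOR_CTL_BUFHINT         0x00000400
#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_DMA      0x00000000
#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_MASK     0xFF000000

int main(void)
{
        /* Hypothetical copy descriptor: DMA opcode, completion status
         * write-back, and a DCA hint for the destination buffer. */
        uint32_t ctl = IOAT_DMA_DESCRIPTOR_CTL_OPCODE_DMA
                     | IOAT_DMA_DESCRIPTOR_CTL_CP_STS
                     | IOAT_DMA_DESCRIPTOR_CTL_DCA
                     | IOAT_DMA_DESCRIPTOR_CTL_BUFHINT;

        printf("ctl = 0x%08x, opcode = 0x%02x\n",
               (unsigned int)ctl,
               (unsigned int)((ctl & IOAT_DMA_DESCRIPTOR_CTL_OPCODE_MASK) >> 24));
        return 0;
}
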
diff --git a/drivers/dma/ioatdma_registers.h b/drivers/dma/ioatdma_registers.h
index baaab5ea146a..9832d7ebd931 100644
--- a/drivers/dma/ioatdma_registers.h
+++ b/drivers/dma/ioatdma_registers.h
@@ -42,26 +42,25 @@
42#define IOAT_INTRCTRL_MASTER_INT_EN 0x01 /* Master Interrupt Enable */ 42#define IOAT_INTRCTRL_MASTER_INT_EN 0x01 /* Master Interrupt Enable */
43#define IOAT_INTRCTRL_INT_STATUS 0x02 /* ATTNSTATUS -or- Channel Int */ 43#define IOAT_INTRCTRL_INT_STATUS 0x02 /* ATTNSTATUS -or- Channel Int */
44#define IOAT_INTRCTRL_INT 0x04 /* INT_STATUS -and- MASTER_INT_EN */ 44#define IOAT_INTRCTRL_INT 0x04 /* INT_STATUS -and- MASTER_INT_EN */
45#define IOAT_INTRCTRL_MSIX_VECTOR_CONTROL 0x08 /* Enable all MSI-X vectors */ 45#define IOAT_INTRCTRL_MSIX_VECTOR_CONTROL 0x08 /* Enable all MSI-X vectors */
46 46
47#define IOAT_ATTNSTATUS_OFFSET 0x04 /* Each bit is a channel */ 47#define IOAT_ATTNSTATUS_OFFSET 0x04 /* Each bit is a channel */
48 48
49#define IOAT_VER_OFFSET 0x08 /* 8-bit */ 49#define IOAT_VER_OFFSET 0x08 /* 8-bit */
50#define IOAT_VER_MAJOR_MASK 0xF0 50#define IOAT_VER_MAJOR_MASK 0xF0
51#define IOAT_VER_MINOR_MASK 0x0F 51#define IOAT_VER_MINOR_MASK 0x0F
52#define GET_IOAT_VER_MAJOR(x) ((x) & IOAT_VER_MAJOR_MASK) 52#define GET_IOAT_VER_MAJOR(x) (((x) & IOAT_VER_MAJOR_MASK) >> 4)
53#define GET_IOAT_VER_MINOR(x) ((x) & IOAT_VER_MINOR_MASK) 53#define GET_IOAT_VER_MINOR(x) ((x) & IOAT_VER_MINOR_MASK)
54 54
55#define IOAT_PERPORTOFFSET_OFFSET 0x0A /* 16-bit */ 55#define IOAT_PERPORTOFFSET_OFFSET 0x0A /* 16-bit */
56 56
57#define IOAT_INTRDELAY_OFFSET 0x0C /* 16-bit */ 57#define IOAT_INTRDELAY_OFFSET 0x0C /* 16-bit */
58#define IOAT_INTRDELAY_INT_DELAY_MASK 0x3FFF /* Interrupt Delay Time */ 58#define IOAT_INTRDELAY_INT_DELAY_MASK 0x3FFF /* Interrupt Delay Time */
59#define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalesing Supported */ 59#define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalescing Supported */
60 60
61#define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */ 61#define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */
62#define IOAT_DEVICE_STATUS_DEGRADED_MODE 0x0001 62#define IOAT_DEVICE_STATUS_DEGRADED_MODE 0x0001
63 63
64
65#define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */ 64#define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */
66 65
67/* DMA Channel Registers */ 66/* DMA Channel Registers */
@@ -74,25 +73,101 @@
74#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004 73#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004
75#define IOAT_CHANCTRL_INT_DISABLE 0x0001 74#define IOAT_CHANCTRL_INT_DISABLE 0x0001
76 75
77#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatability */ 76#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */
78#define IOAT_DMA_COMP_V1 0x0001 /* Compatability with DMA version 1 */ 77#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */
79 78#define IOAT_DMA_COMP_V2 0x0002 /* Compatibility with DMA version 2 */
80#define IOAT_CHANSTS_OFFSET 0x04 /* 64-bit Channel Status Register */ 79
81#define IOAT_CHANSTS_OFFSET_LOW 0x04 80
82#define IOAT_CHANSTS_OFFSET_HIGH 0x08 81#define IOAT1_CHANSTS_OFFSET 0x04 /* 64-bit Channel Status Register */
83#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR 0xFFFFFFFFFFFFFFC0UL 82#define IOAT2_CHANSTS_OFFSET 0x08 /* 64-bit Channel Status Register */
83#define IOAT_CHANSTS_OFFSET(ver) ((ver) < IOAT_VER_2_0 \
84 ? IOAT1_CHANSTS_OFFSET : IOAT2_CHANSTS_OFFSET)
85#define IOAT1_CHANSTS_OFFSET_LOW 0x04
86#define IOAT2_CHANSTS_OFFSET_LOW 0x08
87#define IOAT_CHANSTS_OFFSET_LOW(ver) ((ver) < IOAT_VER_2_0 \
88 ? IOAT1_CHANSTS_OFFSET_LOW : IOAT2_CHANSTS_OFFSET_LOW)
89#define IOAT1_CHANSTS_OFFSET_HIGH 0x08
90#define IOAT2_CHANSTS_OFFSET_HIGH 0x0C
91#define IOAT_CHANSTS_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \
92 ? IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH)
93#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR ~0x3F
84#define IOAT_CHANSTS_SOFT_ERR 0x0000000000000010 94#define IOAT_CHANSTS_SOFT_ERR 0x0000000000000010
95#define IOAT_CHANSTS_UNAFFILIATED_ERR 0x0000000000000008
85#define IOAT_CHANSTS_DMA_TRANSFER_STATUS 0x0000000000000007 96#define IOAT_CHANSTS_DMA_TRANSFER_STATUS 0x0000000000000007
86#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE 0x0 97#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE 0x0
87#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE 0x1 98#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE 0x1
88#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED 0x2 99#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED 0x2
89#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED 0x3 100#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED 0x3
90 101
91#define IOAT_CHAINADDR_OFFSET 0x0C /* 64-bit Descriptor Chain Address Register */
92#define IOAT_CHAINADDR_OFFSET_LOW 0x0C
93#define IOAT_CHAINADDR_OFFSET_HIGH 0x10
94 102
95#define IOAT_CHANCMD_OFFSET 0x14 /* 8-bit DMA Channel Command Register */ 103
104#define IOAT_CHAN_DMACOUNT_OFFSET 0x06 /* 16-bit DMA Count register */
105
106#define IOAT_DCACTRL_OFFSET 0x30 /* 32 bit Direct Cache Access Control Register */
107#define IOAT_DCACTRL_CMPL_WRITE_ENABLE 0x10000
108#define IOAT_DCACTRL_TARGET_CPU_MASK 0xFFFF /* APIC ID */
109
110/* CB DCA Memory Space Registers */
111#define IOAT_DCAOFFSET_OFFSET 0x14
112/* CB_BAR + IOAT_DCAOFFSET value */
113#define IOAT_DCA_VER_OFFSET 0x00
114#define IOAT_DCA_VER_MAJOR_MASK 0xF0
115#define IOAT_DCA_VER_MINOR_MASK 0x0F
116
117#define IOAT_DCA_COMP_OFFSET 0x02
118#define IOAT_DCA_COMP_V1 0x1
119
120#define IOAT_FSB_CAPABILITY_OFFSET 0x04
121#define IOAT_FSB_CAPABILITY_PREFETCH 0x1
122
123#define IOAT_PCI_CAPABILITY_OFFSET 0x06
124#define IOAT_PCI_CAPABILITY_MEMWR 0x1
125
126#define IOAT_FSB_CAP_ENABLE_OFFSET 0x08
127#define IOAT_FSB_CAP_ENABLE_PREFETCH 0x1
128
129#define IOAT_PCI_CAP_ENABLE_OFFSET 0x0A
130#define IOAT_PCI_CAP_ENABLE_MEMWR 0x1
131
132#define IOAT_APICID_TAG_MAP_OFFSET 0x0C
133#define IOAT_APICID_TAG_MAP_TAG0 0x0000000F
134#define IOAT_APICID_TAG_MAP_TAG0_SHIFT 0
135#define IOAT_APICID_TAG_MAP_TAG1 0x000000F0
136#define IOAT_APICID_TAG_MAP_TAG1_SHIFT 4
137#define IOAT_APICID_TAG_MAP_TAG2 0x00000F00
138#define IOAT_APICID_TAG_MAP_TAG2_SHIFT 8
139#define IOAT_APICID_TAG_MAP_TAG3 0x0000F000
140#define IOAT_APICID_TAG_MAP_TAG3_SHIFT 12
141#define IOAT_APICID_TAG_MAP_TAG4 0x000F0000
142#define IOAT_APICID_TAG_MAP_TAG4_SHIFT 16
143#define IOAT_APICID_TAG_CB2_VALID 0x8080808080
144
145#define IOAT_DCA_GREQID_OFFSET 0x10
146#define IOAT_DCA_GREQID_SIZE 0x04
147#define IOAT_DCA_GREQID_MASK 0xFFFF
148#define IOAT_DCA_GREQID_IGNOREFUN 0x10000000
149#define IOAT_DCA_GREQID_VALID 0x20000000
150#define IOAT_DCA_GREQID_LASTID 0x80000000
151
152
153
154#define IOAT1_CHAINADDR_OFFSET 0x0C /* 64-bit Descriptor Chain Address Register */
155#define IOAT2_CHAINADDR_OFFSET 0x10 /* 64-bit Descriptor Chain Address Register */
156#define IOAT_CHAINADDR_OFFSET(ver) ((ver) < IOAT_VER_2_0 \
157 ? IOAT1_CHAINADDR_OFFSET : IOAT2_CHAINADDR_OFFSET)
158#define IOAT1_CHAINADDR_OFFSET_LOW 0x0C
159#define IOAT2_CHAINADDR_OFFSET_LOW 0x10
160#define IOAT_CHAINADDR_OFFSET_LOW(ver) ((ver) < IOAT_VER_2_0 \
161 ? IOAT1_CHAINADDR_OFFSET_LOW : IOAT2_CHAINADDR_OFFSET_LOW)
162#define IOAT1_CHAINADDR_OFFSET_HIGH 0x10
163#define IOAT2_CHAINADDR_OFFSET_HIGH 0x14
164#define IOAT_CHAINADDR_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \
165 ? IOAT1_CHAINADDR_OFFSET_HIGH : IOAT2_CHAINADDR_OFFSET_HIGH)
166
167#define IOAT1_CHANCMD_OFFSET 0x14 /* 8-bit DMA Channel Command Register */
168#define IOAT2_CHANCMD_OFFSET 0x04 /* 8-bit DMA Channel Command Register */
169#define IOAT_CHANCMD_OFFSET(ver) ((ver) < IOAT_VER_2_0 \
170 ? IOAT1_CHANCMD_OFFSET : IOAT2_CHANCMD_OFFSET)
96#define IOAT_CHANCMD_RESET 0x20 171#define IOAT_CHANCMD_RESET 0x20
97#define IOAT_CHANCMD_RESUME 0x10 172#define IOAT_CHANCMD_RESUME 0x10
98#define IOAT_CHANCMD_ABORT 0x08 173#define IOAT_CHANCMD_ABORT 0x08
@@ -124,6 +199,7 @@
124#define IOAT_CHANERR_COMPLETION_ADDR_ERR 0x1000 199#define IOAT_CHANERR_COMPLETION_ADDR_ERR 0x1000
125#define IOAT_CHANERR_INT_CONFIGURATION_ERR 0x2000 200#define IOAT_CHANERR_INT_CONFIGURATION_ERR 0x2000
126#define IOAT_CHANERR_SOFT_ERR 0x4000 201#define IOAT_CHANERR_SOFT_ERR 0x4000
202#define IOAT_CHANERR_UNAFFILIATED_ERR 0x8000
127 203
128#define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */ 204#define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */
129 205
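
Two details in this ioatdma_registers.h change are easy to miss: GET_IOAT_VER_MAJOR() now shifts the high nibble down, so a raw version byte of 0x20 reads as major 2 rather than 0x20, and the per-channel CHANCMD/CHANSTS/CHAINADDR offsets become ..._OFFSET(ver) macros that still take the raw version byte for the "< IOAT_VER_2_0" comparison. A small stand-alone sketch follows, with the values copied from the hunk; read_version_byte() is a made-up stand-in for the readb() of IOAT_VER_OFFSET.

#include <stdint.h>
#include <stdio.h>

/* Copied from the hunk above. */
#define IOAT_VER_MAJOR_MASK     0xF0
#define IOAT_VER_MINOR_MASK     0x0F
#define GET_IOAT_VER_MAJOR(x)   (((x) & IOAT_VER_MAJOR_MASK) >> 4)
#define GET_IOAT_VER_MINOR(x)   ((x) & IOAT_VER_MINOR_MASK)

#define IOAT_VER_2_0            0x20
#define IOAT1_CHANCMD_OFFSET    0x14
#define IOAT2_CHANCMD_OFFSET    0x04
#define IOAT_CHANCMD_OFFSET(ver)        ((ver) < IOAT_VER_2_0 \
                                        ? IOAT1_CHANCMD_OFFSET : IOAT2_CHANCMD_OFFSET)

/* Made-up stand-in for readb(reg_base + IOAT_VER_OFFSET). */
static uint8_t read_version_byte(void) { return 0x20; }

int main(void)
{
        uint8_t ver = read_version_byte();

        /* Prints: IOAT 2.0, CHANCMD at channel offset 0x04 */
        printf("IOAT %d.%d, CHANCMD at channel offset 0x%02x\n",
               GET_IOAT_VER_MAJOR(ver), GET_IOAT_VER_MINOR(ver),
               (unsigned int)IOAT_CHANCMD_OFFSET(ver));
        return 0;
}
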
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 96f7e63e3996..a1f24c42d5ff 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1462,7 +1462,7 @@ MODULE_DEVICE_TABLE(pci, i5000_pci_tbl);
1462 * 1462 *
1463 */ 1463 */
1464static struct pci_driver i5000_driver = { 1464static struct pci_driver i5000_driver = {
1465 .name = __stringify(KBUILD_BASENAME), 1465 .name = KBUILD_BASENAME,
1466 .probe = i5000_init_one, 1466 .probe = i5000_init_one,
1467 .remove = __devexit_p(i5000_remove_one), 1467 .remove = __devexit_p(i5000_remove_one),
1468 .id_table = i5000_pci_tbl, 1468 .id_table = i5000_pci_tbl,
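
The edac one-liner above drops __stringify() around KBUILD_BASENAME, presumably because kbuild already supplies KBUILD_BASENAME as a quoted string, so stringifying it again embeds literal quote characters in the driver name. A self-contained illustration of that preprocessor behaviour; the KBUILD_BASENAME value below is a hypothetical stand-in for what the -D option would pass.

#include <stdio.h>

#define __stringify_1(x)        #x
#define __stringify(x)          __stringify_1(x)

/* Hypothetical: what the build system would pass on the command line. */
#define KBUILD_BASENAME "i5000_edac"

int main(void)
{
        /* Prints: "i5000_edac"  (extra quotes from stringifying a string) */
        puts(__stringify(KBUILD_BASENAME));
        /* Prints: i5000_edac */
        puts(KBUILD_BASENAME);
        return 0;
}
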
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index d1e8df187222..e445fe6e4ba9 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -203,10 +203,6 @@ config BLK_DEV_IDECD
203 CD-ROM drive, you can say N to all other CD-ROM options, but be sure 203 CD-ROM drive, you can say N to all other CD-ROM options, but be sure
204 to say Y or M to "ISO 9660 CD-ROM file system support". 204 to say Y or M to "ISO 9660 CD-ROM file system support".
205 205
206 Note that older versions of LILO (LInux LOader) cannot properly deal
207 with IDE/ATAPI CD-ROMs, so install LILO 16 or higher, available from
208 <http://lilo.go.dyndns.org/>.
209
210 To compile this driver as a module, choose M here: the 206 To compile this driver as a module, choose M here: the
211 module will be called ide-cd. 207 module will be called ide-cd.
212 208
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
index e196aefa2070..7f5bc2ee6c7e 100644
--- a/drivers/ide/cris/ide-cris.c
+++ b/drivers/ide/cris/ide-cris.c
@@ -748,8 +748,7 @@ static void cris_set_dma_mode(ide_drive_t *drive, const u8 speed)
748 hold = ATA_DMA2_HOLD; 748 hold = ATA_DMA2_HOLD;
749 break; 749 break;
750 default: 750 default:
751 BUG(); 751 return;
752 break;
753 } 752 }
754 753
755 if (speed >= XFER_UDMA_0) 754 if (speed >= XFER_UDMA_0)
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 755011827afa..db22d1ff4e55 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -885,7 +885,6 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
885 return do_rw_taskfile(drive, args); 885 return do_rw_taskfile(drive, args);
886 } else if (rq->cmd_type == REQ_TYPE_ATA_TASK) { 886 } else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
887 u8 *args = rq->buffer; 887 u8 *args = rq->buffer;
888 u8 sel;
889 888
890 if (!args) 889 if (!args)
891 goto done; 890 goto done;
@@ -903,10 +902,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
903 hwif->OUTB(args[3], IDE_SECTOR_REG); 902 hwif->OUTB(args[3], IDE_SECTOR_REG);
904 hwif->OUTB(args[4], IDE_LCYL_REG); 903 hwif->OUTB(args[4], IDE_LCYL_REG);
905 hwif->OUTB(args[5], IDE_HCYL_REG); 904 hwif->OUTB(args[5], IDE_HCYL_REG);
906 sel = (args[6] & ~0x10); 905 hwif->OUTB((args[6] & 0xEF)|drive->select.all, IDE_SELECT_REG);
907 if (drive->select.b.unit)
908 sel |= 0x10;
909 hwif->OUTB(sel, IDE_SELECT_REG);
910 ide_cmd(drive, args[0], args[2], &drive_cmd_intr); 906 ide_cmd(drive, args[0], args[2], &drive_cmd_intr);
911 return ide_started; 907 return ide_started;
912 } else if (rq->cmd_type == REQ_TYPE_ATA_CMD) { 908 } else if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index af86433baede..1609b8604f56 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -514,6 +514,7 @@ static u8 ide_dump_ata_status(ide_drive_t *drive, const char *msg, u8 stat)
514 if (drive->addressing == 1) { 514 if (drive->addressing == 1) {
515 __u64 sectors = 0; 515 __u64 sectors = 0;
516 u32 low = 0, high = 0; 516 u32 low = 0, high = 0;
517 hwif->OUTB(drive->ctl&~0x80, IDE_CONTROL_REG);
517 low = ide_read_24(drive); 518 low = ide_read_24(drive);
518 hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG); 519 hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
519 high = ide_read_24(drive); 520 high = ide_read_24(drive);
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index ea0143ef5fe5..51fca441c294 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/ide/pci/cmd64x.c Version 1.50 May 10, 2007 2 * linux/drivers/ide/pci/cmd64x.c Version 1.51 Nov 8, 2007
3 * 3 *
4 * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines. 4 * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines.
5 * Due to massive hardware bugs, UltraDMA is only supported 5 * Due to massive hardware bugs, UltraDMA is only supported
@@ -339,7 +339,8 @@ static int cmd648_ide_dma_end (ide_drive_t *drive)
339 u8 mrdmode = inb(hwif->dma_master + 0x01); 339 u8 mrdmode = inb(hwif->dma_master + 0x01);
340 340
341 /* clear the interrupt bit */ 341 /* clear the interrupt bit */
342 outb(mrdmode | irq_mask, hwif->dma_master + 0x01); 342 outb((mrdmode & ~(MRDMODE_INTR_CH0 | MRDMODE_INTR_CH1)) | irq_mask,
343 hwif->dma_master + 0x01);
343 344
344 return err; 345 return err;
345} 346}
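
The cmd648_ide_dma_end() change above masks both channels' MRDMODE interrupt bits out of the value read back before OR-ing in this channel's irq_mask: if those status bits are write-one-to-clear, as the masking strongly suggests, writing the register back unmodified would also ack a pending interrupt on the other channel. A tiny illustration of the difference; the MRDMODE_INTR_* values below are assumed for the example, not taken from this diff.

#include <stdint.h>
#include <stdio.h>

#define MRDMODE_INTR_CH0        0x04    /* assumed bit values */
#define MRDMODE_INTR_CH1        0x08

int main(void)
{
        uint8_t mrdmode  = MRDMODE_INTR_CH0 | MRDMODE_INTR_CH1; /* both pending */
        uint8_t irq_mask = MRDMODE_INTR_CH0;                    /* we own channel 0 */

        /* Old style: writes CH1's pending bit back too, clearing it by accident. */
        uint8_t bad  = mrdmode | irq_mask;
        /* New style: only our own write-1-to-clear bit is set in the write. */
        uint8_t good = (mrdmode & ~(MRDMODE_INTR_CH0 | MRDMODE_INTR_CH1)) | irq_mask;

        printf("old write 0x%02x, new write 0x%02x\n", bad, good);
        return 0;
}
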
diff --git a/drivers/ide/pci/cs5530.c b/drivers/ide/pci/cs5530.c
index 599408952bd4..547690395eee 100644
--- a/drivers/ide/pci/cs5530.c
+++ b/drivers/ide/pci/cs5530.c
@@ -117,8 +117,7 @@ static void cs5530_set_dma_mode(ide_drive_t *drive, const u8 mode)
117 case XFER_MW_DMA_1: timings = 0x00012121; break; 117 case XFER_MW_DMA_1: timings = 0x00012121; break;
118 case XFER_MW_DMA_2: timings = 0x00002020; break; 118 case XFER_MW_DMA_2: timings = 0x00002020; break;
119 default: 119 default:
120 BUG(); 120 return;
121 break;
122 } 121 }
123 basereg = CS5530_BASEREG(drive->hwif); 122 basereg = CS5530_BASEREG(drive->hwif);
124 reg = inl(basereg + 4); /* get drive0 config register */ 123 reg = inl(basereg + 4); /* get drive0 config register */
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index 5c9975435319..99b7d763b6c7 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -653,8 +653,7 @@ static const struct ide_port_info it821x_chipsets[] __devinitdata = {
653 653
654static int __devinit it821x_init_one(struct pci_dev *dev, const struct pci_device_id *id) 654static int __devinit it821x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
655{ 655{
656 ide_setup_pci_device(dev, &it821x_chipsets[id->driver_data]); 656 return ide_setup_pci_device(dev, &it821x_chipsets[id->driver_data]);
657 return 0;
658} 657}
659 658
660static const struct pci_device_id it821x_pci_tbl[] = { 659static const struct pci_device_id it821x_pci_tbl[] = {
diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/pci/jmicron.c
index bdf64d997708..0083eaf89c77 100644
--- a/drivers/ide/pci/jmicron.c
+++ b/drivers/ide/pci/jmicron.c
@@ -139,8 +139,7 @@ static const struct ide_port_info jmicron_chipset __devinitdata = {
139 139
140static int __devinit jmicron_init_one(struct pci_dev *dev, const struct pci_device_id *id) 140static int __devinit jmicron_init_one(struct pci_dev *dev, const struct pci_device_id *id)
141{ 141{
142 ide_setup_pci_device(dev, &jmicron_chipset); 142 return ide_setup_pci_device(dev, &jmicron_chipset);
143 return 0;
144} 143}
145 144
146/* All JMB PATA controllers have and will continue to have the same 145/* All JMB PATA controllers have and will continue to have the same
diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/pci/sc1200.c
index 0a7b3202066d..707d5ff66b03 100644
--- a/drivers/ide/pci/sc1200.c
+++ b/drivers/ide/pci/sc1200.c
@@ -186,8 +186,7 @@ static void sc1200_set_dma_mode(ide_drive_t *drive, const u8 mode)
186 } 186 }
187 break; 187 break;
188 default: 188 default:
189 BUG(); 189 return;
190 break;
191 } 190 }
192 191
193 if (unit == 0) { /* are we configuring drive0? */ 192 if (unit == 0) { /* are we configuring drive0? */
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
index 6b7bb53acefd..f6e2ab3dd166 100644
--- a/drivers/ide/pci/sis5513.c
+++ b/drivers/ide/pci/sis5513.c
@@ -356,7 +356,6 @@ static void sis_set_dma_mode(ide_drive_t *drive, const u8 speed)
356 sis_program_timings(drive, speed); 356 sis_program_timings(drive, speed);
357 break; 357 break;
358 default: 358 default:
359 BUG();
360 break; 359 break;
361 } 360 }
362} 361}
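
Several of the IDE hunks above (cris, cs5530, sc1200, sis5513) replace BUG() in the set_dma_mode switch's default arm with a plain return or fall-through, so an unexpected transfer mode degrades gracefully instead of taking the machine down. A generic stand-alone sketch of that shape; the mode names and timing values here are placeholders, not any one controller's table.

#include <stdio.h>

enum xfer_mode { XFER_MW_DMA_0, XFER_MW_DMA_1, XFER_MW_DMA_2, XFER_UDMA_0 };

static void set_dma_mode(enum xfer_mode mode)
{
        unsigned int timing;

        switch (mode) {
        case XFER_MW_DMA_0: timing = 0x00077771; break; /* placeholder values */
        case XFER_MW_DMA_1: timing = 0x00012121; break;
        case XFER_MW_DMA_2: timing = 0x00002020; break;
        default:
                /* Unknown mode: refuse quietly instead of BUG()ing. */
                return;
        }
        printf("programming timing 0x%08x\n", timing);
}

int main(void)
{
        set_dma_mode(XFER_MW_DMA_2);
        set_dma_mode(XFER_UDMA_0);      /* silently ignored in this sketch */
        return 0;
}
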
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 816b5311dad6..5afdfef7264c 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -1138,6 +1138,7 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
1138 hwif->drives[0].autotune = IDE_TUNE_AUTO; 1138 hwif->drives[0].autotune = IDE_TUNE_AUTO;
1139 hwif->drives[1].autotune = IDE_TUNE_AUTO; 1139 hwif->drives[1].autotune = IDE_TUNE_AUTO;
1140 hwif->host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA | 1140 hwif->host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
1141 IDE_HFLAG_PIO_NO_DOWNGRADE |
1141 IDE_HFLAG_POST_SET_MODE; 1142 IDE_HFLAG_POST_SET_MODE;
1142 hwif->pio_mask = ATA_PIO4; 1143 hwif->pio_mask = ATA_PIO4;
1143 hwif->set_pio_mode = pmac_ide_set_pio_mode; 1144 hwif->set_pio_mode = pmac_ide_set_pio_mode;
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index 02d14bf85ab2..25fd09053220 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -7,11 +7,6 @@
7 * May be copied or modified under the terms of the GNU General Public License 7 * May be copied or modified under the terms of the GNU General Public License
8 */ 8 */
9 9
10/*
11 * This module provides support for automatic detection and
12 * configuration of all PCI IDE interfaces present in a system.
13 */
14
15#include <linux/module.h> 10#include <linux/module.h>
16#include <linux/types.h> 11#include <linux/types.h>
17#include <linux/kernel.h> 12#include <linux/kernel.h>
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index 9d716fa42cad..3b92a61ba8d2 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -184,7 +184,7 @@ static int initialize(struct file *file, const unsigned long __user *input)
184free_regs: 184free_regs:
185 free_page(lg->regs_page); 185 free_page(lg->regs_page);
186release_guest: 186release_guest:
187 memset(lg, 0, sizeof(*lg)); 187 kfree(lg);
188unlock: 188unlock:
189 mutex_unlock(&lguest_lock); 189 mutex_unlock(&lguest_lock);
190 return err; 190 return err;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 1cfc984cc7b7..a5aad8cad843 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -688,7 +688,8 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
688} 688}
689 689
690static struct dma_async_tx_descriptor * 690static struct dma_async_tx_descriptor *
691ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 691ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
692 unsigned long pending)
692{ 693{
693 int disks = sh->disks; 694 int disks = sh->disks;
694 int pd_idx = sh->pd_idx, i; 695 int pd_idx = sh->pd_idx, i;
@@ -696,7 +697,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
696 /* check if prexor is active which means only process blocks 697 /* check if prexor is active which means only process blocks
697 * that are part of a read-modify-write (Wantprexor) 698 * that are part of a read-modify-write (Wantprexor)
698 */ 699 */
699 int prexor = test_bit(STRIPE_OP_PREXOR, &sh->ops.pending); 700 int prexor = test_bit(STRIPE_OP_PREXOR, &pending);
700 701
701 pr_debug("%s: stripe %llu\n", __FUNCTION__, 702 pr_debug("%s: stripe %llu\n", __FUNCTION__,
702 (unsigned long long)sh->sector); 703 (unsigned long long)sh->sector);
@@ -773,7 +774,8 @@ static void ops_complete_write(void *stripe_head_ref)
773} 774}
774 775
775static void 776static void
776ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 777ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
778 unsigned long pending)
777{ 779{
778 /* kernel stack size limits the total number of disks */ 780 /* kernel stack size limits the total number of disks */
779 int disks = sh->disks; 781 int disks = sh->disks;
@@ -781,7 +783,7 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
781 783
782 int count = 0, pd_idx = sh->pd_idx, i; 784 int count = 0, pd_idx = sh->pd_idx, i;
783 struct page *xor_dest; 785 struct page *xor_dest;
784 int prexor = test_bit(STRIPE_OP_PREXOR, &sh->ops.pending); 786 int prexor = test_bit(STRIPE_OP_PREXOR, &pending);
785 unsigned long flags; 787 unsigned long flags;
786 dma_async_tx_callback callback; 788 dma_async_tx_callback callback;
787 789
@@ -808,7 +810,7 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
808 } 810 }
809 811
810 /* check whether this postxor is part of a write */ 812 /* check whether this postxor is part of a write */
811 callback = test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending) ? 813 callback = test_bit(STRIPE_OP_BIODRAIN, &pending) ?
812 ops_complete_write : ops_complete_postxor; 814 ops_complete_write : ops_complete_postxor;
813 815
814 /* 1/ if we prexor'd then the dest is reused as a source 816 /* 1/ if we prexor'd then the dest is reused as a source
@@ -896,12 +898,12 @@ static void raid5_run_ops(struct stripe_head *sh, unsigned long pending)
896 tx = ops_run_prexor(sh, tx); 898 tx = ops_run_prexor(sh, tx);
897 899
898 if (test_bit(STRIPE_OP_BIODRAIN, &pending)) { 900 if (test_bit(STRIPE_OP_BIODRAIN, &pending)) {
899 tx = ops_run_biodrain(sh, tx); 901 tx = ops_run_biodrain(sh, tx, pending);
900 overlap_clear++; 902 overlap_clear++;
901 } 903 }
902 904
903 if (test_bit(STRIPE_OP_POSTXOR, &pending)) 905 if (test_bit(STRIPE_OP_POSTXOR, &pending))
904 ops_run_postxor(sh, tx); 906 ops_run_postxor(sh, tx, pending);
905 907
906 if (test_bit(STRIPE_OP_CHECK, &pending)) 908 if (test_bit(STRIPE_OP_CHECK, &pending))
907 ops_run_check(sh); 909 ops_run_check(sh);
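
The raid5 hunks above pass the pending snapshot that raid5_run_ops() received down into ops_run_biodrain() and ops_run_postxor() instead of re-reading sh->ops.pending, so every helper tests one consistent view of the flags even if the live field changes underneath them. A reduced user-space sketch of that pattern; the flag names and helpers are invented for the example.

#include <stdio.h>

/* Invented flag bits, standing in for STRIPE_OP_*. */
enum { OP_PREXOR = 0, OP_BIODRAIN = 1, OP_POSTXOR = 2 };

static int test_bit_ul(int nr, unsigned long word)
{
        return (int)((word >> nr) & 1UL);
}

/* Helpers take the snapshot, not a pointer to the live field. */
static void run_biodrain(unsigned long pending)
{
        printf("biodrain: prexor %s active\n",
               test_bit_ul(OP_PREXOR, pending) ? "is" : "is not");
}

int main(void)
{
        unsigned long live = (1UL << OP_PREXOR) | (1UL << OP_BIODRAIN);
        unsigned long pending = live;   /* snapshot taken once */

        live = 0;                       /* later changes no longer matter */
        if (test_bit_ul(OP_BIODRAIN, pending))
                run_biodrain(pending);
        return 0;
}
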
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index 6a5a05d1f392..05172d2613d6 100644
--- a/drivers/misc/ioc4.c
+++ b/drivers/misc/ioc4.c
@@ -244,10 +244,11 @@ ioc4_variant(struct ioc4_driver_data *idd)
244 idd->idd_pdev->bus->number == pdev->bus->number && 244 idd->idd_pdev->bus->number == pdev->bus->number &&
245 3 == PCI_SLOT(pdev->devfn)) 245 3 == PCI_SLOT(pdev->devfn))
246 found = 1; 246 found = 1;
247 pci_dev_put(pdev);
248 } while (pdev && !found); 247 } while (pdev && !found);
249 if (NULL != pdev) 248 if (NULL != pdev) {
249 pci_dev_put(pdev);
250 return IOC4_VARIANT_IO9; 250 return IOC4_VARIANT_IO9;
251 }
251 252
252 /* IO10: Look for a Vitesse VSC 7174 at the same bus and slot 3. */ 253 /* IO10: Look for a Vitesse VSC 7174 at the same bus and slot 3. */
253 pdev = NULL; 254 pdev = NULL;
@@ -258,10 +259,11 @@ ioc4_variant(struct ioc4_driver_data *idd)
258 idd->idd_pdev->bus->number == pdev->bus->number && 259 idd->idd_pdev->bus->number == pdev->bus->number &&
259 3 == PCI_SLOT(pdev->devfn)) 260 3 == PCI_SLOT(pdev->devfn))
260 found = 1; 261 found = 1;
261 pci_dev_put(pdev);
262 } while (pdev && !found); 262 } while (pdev && !found);
263 if (NULL != pdev) 263 if (NULL != pdev) {
264 pci_dev_put(pdev);
264 return IOC4_VARIANT_IO10; 265 return IOC4_VARIANT_IO10;
266 }
265 267
266 /* PCI-RT: No SCSI/SATA controller will be present */ 268 /* PCI-RT: No SCSI/SATA controller will be present */
267 return IOC4_VARIANT_PCI_RT; 269 return IOC4_VARIANT_PCI_RT;
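
The ioc4_variant() fix above leans on the usual pci_get_device() contract: the call drops the reference on the device passed in as "from" and returns the next match with a fresh reference held, so a search loop must not pci_dev_put() on every iteration; only the device it actually stops at needs a final put. A sketch of that pattern as a kernel-code fragment, with invented IDs and match test; it is not a drop-in replacement for the ioc4 code.

#include <linux/pci.h>

/* Hypothetical match test, just to show the reference flow. */
static bool wanted(struct pci_dev *pdev, struct pci_dev *ours)
{
        return pdev->bus->number == ours->bus->number &&
               PCI_SLOT(pdev->devfn) == 3;
}

static bool find_companion(struct pci_dev *ours, unsigned int vendor,
                           unsigned int device)
{
        struct pci_dev *pdev = NULL;

        /* pci_get_device() puts the previous 'pdev' and gets the next match,
         * so no pci_dev_put() is needed inside the loop body. */
        while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) {
                if (wanted(pdev, ours)) {
                        pci_dev_put(pdev);      /* release the one ref we still hold */
                        return true;
                }
        }
        return false;   /* loop ended with pdev == NULL: nothing left to put */
}
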
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index edd6828f0a78..917b7b46f1a7 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -250,6 +250,7 @@
250#include <asm/system.h> 250#include <asm/system.h>
251#include <asm/ethernet.h> 251#include <asm/ethernet.h>
252#include <asm/cache.h> 252#include <asm/cache.h>
253#include <asm/arch/io_interface_mux.h>
253 254
254//#define ETHDEBUG 255//#define ETHDEBUG
255#define D(x) 256#define D(x)
@@ -279,6 +280,9 @@ struct net_local {
279 * by this lock as well. 280 * by this lock as well.
280 */ 281 */
281 spinlock_t lock; 282 spinlock_t lock;
283
284 spinlock_t led_lock; /* Protect LED state */
285 spinlock_t transceiver_lock; /* Protect transceiver state. */
282}; 286};
283 287
284typedef struct etrax_eth_descr 288typedef struct etrax_eth_descr
@@ -295,8 +299,6 @@ struct transceiver_ops
295 void (*check_duplex)(struct net_device* dev); 299 void (*check_duplex)(struct net_device* dev);
296}; 300};
297 301
298struct transceiver_ops* transceiver;
299
300/* Duplex settings */ 302/* Duplex settings */
301enum duplex 303enum duplex
302{ 304{
@@ -307,7 +309,7 @@ enum duplex
307 309
308/* Dma descriptors etc. */ 310/* Dma descriptors etc. */
309 311
310#define MAX_MEDIA_DATA_SIZE 1518 312#define MAX_MEDIA_DATA_SIZE 1522
311 313
312#define MIN_PACKET_LEN 46 314#define MIN_PACKET_LEN 46
313#define ETHER_HEAD_LEN 14 315#define ETHER_HEAD_LEN 14
@@ -332,8 +334,8 @@ enum duplex
332 334
333/*Intel LXT972A specific*/ 335/*Intel LXT972A specific*/
334#define MDIO_INT_STATUS_REG_2 0x0011 336#define MDIO_INT_STATUS_REG_2 0x0011
335#define MDIO_INT_FULL_DUPLEX_IND ( 1 << 9 ) 337#define MDIO_INT_FULL_DUPLEX_IND (1 << 9)
336#define MDIO_INT_SPEED ( 1 << 14 ) 338#define MDIO_INT_SPEED (1 << 14)
337 339
338/* Network flash constants */ 340/* Network flash constants */
339#define NET_FLASH_TIME (HZ/50) /* 20 ms */ 341#define NET_FLASH_TIME (HZ/50) /* 20 ms */
@@ -344,8 +346,8 @@ enum duplex
344#define NO_NETWORK_ACTIVITY 0 346#define NO_NETWORK_ACTIVITY 0
345#define NETWORK_ACTIVITY 1 347#define NETWORK_ACTIVITY 1
346 348
347#define NBR_OF_RX_DESC 64 349#define NBR_OF_RX_DESC 32
348#define NBR_OF_TX_DESC 256 350#define NBR_OF_TX_DESC 16
349 351
350/* Large packets are sent directly to upper layers while small packets are */ 352/* Large packets are sent directly to upper layers while small packets are */
351/* copied (to reduce memory waste). The following constant decides the breakpoint */ 353/* copied (to reduce memory waste). The following constant decides the breakpoint */
@@ -367,7 +369,6 @@ enum duplex
367static etrax_eth_descr *myNextRxDesc; /* Points to the next descriptor to 369static etrax_eth_descr *myNextRxDesc; /* Points to the next descriptor to
368 to be processed */ 370 to be processed */
369static etrax_eth_descr *myLastRxDesc; /* The last processed descriptor */ 371static etrax_eth_descr *myLastRxDesc; /* The last processed descriptor */
370static etrax_eth_descr *myPrevRxDesc; /* The descriptor right before myNextRxDesc */
371 372
372static etrax_eth_descr RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned(32))); 373static etrax_eth_descr RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned(32)));
373 374
@@ -377,7 +378,6 @@ static etrax_eth_descr* myNextTxDesc; /* Next descriptor to use */
377static etrax_eth_descr TxDescList[NBR_OF_TX_DESC] __attribute__ ((aligned(32))); 378static etrax_eth_descr TxDescList[NBR_OF_TX_DESC] __attribute__ ((aligned(32)));
378 379
379static unsigned int network_rec_config_shadow = 0; 380static unsigned int network_rec_config_shadow = 0;
380static unsigned int mdio_phy_addr; /* Transciever address */
381 381
382static unsigned int network_tr_ctrl_shadow = 0; 382static unsigned int network_tr_ctrl_shadow = 0;
383 383
@@ -411,7 +411,7 @@ static int e100_set_config(struct net_device* dev, struct ifmap* map);
411static void e100_tx_timeout(struct net_device *dev); 411static void e100_tx_timeout(struct net_device *dev);
412static struct net_device_stats *e100_get_stats(struct net_device *dev); 412static struct net_device_stats *e100_get_stats(struct net_device *dev);
413static void set_multicast_list(struct net_device *dev); 413static void set_multicast_list(struct net_device *dev);
414static void e100_hardware_send_packet(char *buf, int length); 414static void e100_hardware_send_packet(struct net_local* np, char *buf, int length);
415static void update_rx_stats(struct net_device_stats *); 415static void update_rx_stats(struct net_device_stats *);
416static void update_tx_stats(struct net_device_stats *); 416static void update_tx_stats(struct net_device_stats *);
417static int e100_probe_transceiver(struct net_device* dev); 417static int e100_probe_transceiver(struct net_device* dev);
@@ -434,7 +434,10 @@ static void e100_clear_network_leds(unsigned long dummy);
434static void e100_set_network_leds(int active); 434static void e100_set_network_leds(int active);
435 435
436static const struct ethtool_ops e100_ethtool_ops; 436static const struct ethtool_ops e100_ethtool_ops;
437 437#if defined(CONFIG_ETRAX_NO_PHY)
438static void dummy_check_speed(struct net_device* dev);
439static void dummy_check_duplex(struct net_device* dev);
440#else
438static void broadcom_check_speed(struct net_device* dev); 441static void broadcom_check_speed(struct net_device* dev);
439static void broadcom_check_duplex(struct net_device* dev); 442static void broadcom_check_duplex(struct net_device* dev);
440static void tdk_check_speed(struct net_device* dev); 443static void tdk_check_speed(struct net_device* dev);
@@ -443,16 +446,28 @@ static void intel_check_speed(struct net_device* dev);
443static void intel_check_duplex(struct net_device* dev); 446static void intel_check_duplex(struct net_device* dev);
444static void generic_check_speed(struct net_device* dev); 447static void generic_check_speed(struct net_device* dev);
445static void generic_check_duplex(struct net_device* dev); 448static void generic_check_duplex(struct net_device* dev);
449#endif
450#ifdef CONFIG_NET_POLL_CONTROLLER
451static void e100_netpoll(struct net_device* dev);
452#endif
453
454static int autoneg_normal = 1;
446 455
447struct transceiver_ops transceivers[] = 456struct transceiver_ops transceivers[] =
448{ 457{
458#if defined(CONFIG_ETRAX_NO_PHY)
459 {0x0000, dummy_check_speed, dummy_check_duplex} /* Dummy */
460#else
449 {0x1018, broadcom_check_speed, broadcom_check_duplex}, /* Broadcom */ 461 {0x1018, broadcom_check_speed, broadcom_check_duplex}, /* Broadcom */
450 {0xC039, tdk_check_speed, tdk_check_duplex}, /* TDK 2120 */ 462 {0xC039, tdk_check_speed, tdk_check_duplex}, /* TDK 2120 */
451 {0x039C, tdk_check_speed, tdk_check_duplex}, /* TDK 2120C */ 463 {0x039C, tdk_check_speed, tdk_check_duplex}, /* TDK 2120C */
452 {0x04de, intel_check_speed, intel_check_duplex}, /* Intel LXT972A*/ 464 {0x04de, intel_check_speed, intel_check_duplex}, /* Intel LXT972A*/
453 {0x0000, generic_check_speed, generic_check_duplex} /* Generic, must be last */ 465 {0x0000, generic_check_speed, generic_check_duplex} /* Generic, must be last */
466#endif
454}; 467};
455 468
469struct transceiver_ops* transceiver = &transceivers[0];
470
456#define tx_done(dev) (*R_DMA_CH0_CMD == 0) 471#define tx_done(dev) (*R_DMA_CH0_CMD == 0)
457 472
458/* 473/*
@@ -471,14 +486,22 @@ etrax_ethernet_init(void)
471 int i, err; 486 int i, err;
472 487
473 printk(KERN_INFO 488 printk(KERN_INFO
474 "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 2000-2003 Axis Communications AB\n"); 489 "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 1998-2007 Axis Communications AB\n");
475 490
476 dev = alloc_etherdev(sizeof(struct net_local)); 491 if (cris_request_io_interface(if_eth, cardname)) {
477 np = dev->priv; 492 printk(KERN_CRIT "etrax_ethernet_init failed to get IO interface\n");
493 return -EBUSY;
494 }
478 495
496 dev = alloc_etherdev(sizeof(struct net_local));
479 if (!dev) 497 if (!dev)
480 return -ENOMEM; 498 return -ENOMEM;
481 499
500 np = netdev_priv(dev);
501
502 /* we do our own locking */
503 dev->features |= NETIF_F_LLTX;
504
482 dev->base_addr = (unsigned int)R_NETWORK_SA_0; /* just to have something to show */ 505 dev->base_addr = (unsigned int)R_NETWORK_SA_0; /* just to have something to show */
483 506
484 /* now setup our etrax specific stuff */ 507 /* now setup our etrax specific stuff */
@@ -498,14 +521,22 @@ etrax_ethernet_init(void)
498 dev->do_ioctl = e100_ioctl; 521 dev->do_ioctl = e100_ioctl;
499 dev->set_config = e100_set_config; 522 dev->set_config = e100_set_config;
500 dev->tx_timeout = e100_tx_timeout; 523 dev->tx_timeout = e100_tx_timeout;
524#ifdef CONFIG_NET_POLL_CONTROLLER
525 dev->poll_controller = e100_netpoll;
526#endif
527
528 spin_lock_init(&np->lock);
529 spin_lock_init(&np->led_lock);
530 spin_lock_init(&np->transceiver_lock);
501 531
502 /* Initialise the list of Etrax DMA-descriptors */ 532 /* Initialise the list of Etrax DMA-descriptors */
503 533
504 /* Initialise receive descriptors */ 534 /* Initialise receive descriptors */
505 535
506 for (i = 0; i < NBR_OF_RX_DESC; i++) { 536 for (i = 0; i < NBR_OF_RX_DESC; i++) {
507 /* Allocate two extra cachelines to make sure that buffer used by DMA 537 /* Allocate two extra cachelines to make sure that buffer used
508 * does not share cacheline with any other data (to avoid cache bug) 538 * by DMA does not share cacheline with any other data (to
539 * avoid cache bug)
509 */ 540 */
510 RxDescList[i].skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES); 541 RxDescList[i].skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
511 if (!RxDescList[i].skb) 542 if (!RxDescList[i].skb)
@@ -541,7 +572,6 @@ etrax_ethernet_init(void)
541 572
542 myNextRxDesc = &RxDescList[0]; 573 myNextRxDesc = &RxDescList[0];
543 myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1]; 574 myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
544 myPrevRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
545 myFirstTxDesc = &TxDescList[0]; 575 myFirstTxDesc = &TxDescList[0];
546 myNextTxDesc = &TxDescList[0]; 576 myNextTxDesc = &TxDescList[0];
547 myLastTxDesc = &TxDescList[NBR_OF_TX_DESC - 1]; 577 myLastTxDesc = &TxDescList[NBR_OF_TX_DESC - 1];
@@ -562,10 +592,11 @@ etrax_ethernet_init(void)
562 current_speed = 10; 592 current_speed = 10;
563 current_speed_selection = 0; /* Auto */ 593 current_speed_selection = 0; /* Auto */
564 speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL; 594 speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
565 duplex_timer.data = (unsigned long)dev; 595 speed_timer.data = (unsigned long)dev;
566 speed_timer.function = e100_check_speed; 596 speed_timer.function = e100_check_speed;
567 597
568 clear_led_timer.function = e100_clear_network_leds; 598 clear_led_timer.function = e100_clear_network_leds;
599 clear_led_timer.data = (unsigned long)dev;
569 600
570 full_duplex = 0; 601 full_duplex = 0;
571 current_duplex = autoneg; 602 current_duplex = autoneg;
@@ -574,7 +605,6 @@ etrax_ethernet_init(void)
574 duplex_timer.function = e100_check_duplex; 605 duplex_timer.function = e100_check_duplex;
575 606
576 /* Initialize mii interface */ 607 /* Initialize mii interface */
577 np->mii_if.phy_id = mdio_phy_addr;
578 np->mii_if.phy_id_mask = 0x1f; 608 np->mii_if.phy_id_mask = 0x1f;
579 np->mii_if.reg_num_mask = 0x1f; 609 np->mii_if.reg_num_mask = 0x1f;
580 np->mii_if.dev = dev; 610 np->mii_if.dev = dev;
@@ -585,6 +615,9 @@ etrax_ethernet_init(void)
585 /* unwanted addresses are matched */ 615 /* unwanted addresses are matched */
586 *R_NETWORK_GA_0 = 0x00000000; 616 *R_NETWORK_GA_0 = 0x00000000;
587 *R_NETWORK_GA_1 = 0x00000000; 617 *R_NETWORK_GA_1 = 0x00000000;
618
619 /* Initialize next time the led can flash */
620 led_next_time = jiffies;
588 return 0; 621 return 0;
589} 622}
590 623
@@ -595,9 +628,9 @@ etrax_ethernet_init(void)
595static int 628static int
596e100_set_mac_address(struct net_device *dev, void *p) 629e100_set_mac_address(struct net_device *dev, void *p)
597{ 630{
598 struct net_local *np = (struct net_local *)dev->priv; 631 struct net_local *np = netdev_priv(dev);
599 struct sockaddr *addr = p; 632 struct sockaddr *addr = p;
600 int i; 633 DECLARE_MAC_BUF(mac);
601 634
602 spin_lock(&np->lock); /* preemption protection */ 635 spin_lock(&np->lock); /* preemption protection */
603 636
@@ -686,6 +719,25 @@ e100_open(struct net_device *dev)
686 goto grace_exit2; 719 goto grace_exit2;
687 } 720 }
688 721
722 /*
723 * Always allocate the DMA channels after the IRQ,
724 * and clean up on failure.
725 */
726
727 if (cris_request_dma(NETWORK_TX_DMA_NBR,
728 cardname,
729 DMA_VERBOSE_ON_ERROR,
730 dma_eth)) {
731 goto grace_exit3;
732 }
733
734 if (cris_request_dma(NETWORK_RX_DMA_NBR,
735 cardname,
736 DMA_VERBOSE_ON_ERROR,
737 dma_eth)) {
738 goto grace_exit4;
739 }
740
689 /* give the HW an idea of what MAC address we want */ 741 /* give the HW an idea of what MAC address we want */
690 742
691 *R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) | 743 *R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
@@ -700,6 +752,7 @@ e100_open(struct net_device *dev)
700 752
701 *R_NETWORK_REC_CONFIG = 0xd; /* broadcast rec, individ. rec, ma0 enabled */ 753 *R_NETWORK_REC_CONFIG = 0xd; /* broadcast rec, individ. rec, ma0 enabled */
702#else 754#else
755 SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, max_size, size1522);
703 SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, broadcast, receive); 756 SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, broadcast, receive);
704 SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, ma0, enable); 757 SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, ma0, enable);
705 SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex); 758 SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
@@ -719,8 +772,7 @@ e100_open(struct net_device *dev)
719 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, crc, enable); 772 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, crc, enable);
720 *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow; 773 *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
721 774
722 save_flags(flags); 775 local_irq_save(flags);
723 cli();
724 776
725 /* enable the irq's for ethernet DMA */ 777 /* enable the irq's for ethernet DMA */
726 778
@@ -752,12 +804,13 @@ e100_open(struct net_device *dev)
752 804
753 *R_DMA_CH0_FIRST = 0; 805 *R_DMA_CH0_FIRST = 0;
754 *R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc); 806 *R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);
807 netif_start_queue(dev);
755 808
756 restore_flags(flags); 809 local_irq_restore(flags);
757 810
758 /* Probe for transceiver */ 811 /* Probe for transceiver */
759 if (e100_probe_transceiver(dev)) 812 if (e100_probe_transceiver(dev))
760 goto grace_exit3; 813 goto grace_exit5;
761 814
762 /* Start duplex/speed timers */ 815 /* Start duplex/speed timers */
763 add_timer(&speed_timer); 816 add_timer(&speed_timer);
@@ -766,10 +819,14 @@ e100_open(struct net_device *dev)
766 /* We are now ready to accept transmit requeusts from 819 /* We are now ready to accept transmit requeusts from
767 * the queueing layer of the networking. 820 * the queueing layer of the networking.
768 */ 821 */
769 netif_start_queue(dev); 822 netif_carrier_on(dev);
770 823
771 return 0; 824 return 0;
772 825
826grace_exit5:
827 cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
828grace_exit4:
829 cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
773grace_exit3: 830grace_exit3:
774 free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev); 831 free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
775grace_exit2: 832grace_exit2:
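
In the e100_open() hunks above, the two DMA channels are now requested after the IRQs and released through the new grace_exit5/grace_exit4 labels, so teardown runs in the exact reverse order of acquisition. A generic stand-alone sketch of that goto ladder, with placeholder resource names rather than the driver's real cris_request_dma()/free_irq() calls.

#include <stdio.h>
#include <string.h>

static int get_resource(const char *name)
{
        /* Pretend the last allocation fails so the unwind path runs. */
        if (strcmp(name, "rx dma") == 0) {
                printf("get %s -> failed\n", name);
                return -1;
        }
        printf("get %s\n", name);
        return 0;
}

static void put_resource(const char *name) { printf("put %s\n", name); }

static int open_device(void)
{
        if (get_resource("irq"))
                goto fail_irq;
        if (get_resource("tx dma"))
                goto fail_tx_dma;
        if (get_resource("rx dma"))
                goto fail_rx_dma;
        return 0;

        /* Unwind strictly in reverse order of acquisition. */
fail_rx_dma:
        put_resource("tx dma");
fail_tx_dma:
        put_resource("irq");
fail_irq:
        return -1;
}

int main(void)
{
        return open_device() ? 1 : 0;
}
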
@@ -780,12 +837,20 @@ grace_exit0:
780 return -EAGAIN; 837 return -EAGAIN;
781} 838}
782 839
783 840#if defined(CONFIG_ETRAX_NO_PHY)
841static void
842dummy_check_speed(struct net_device* dev)
843{
844 current_speed = 100;
845}
846#else
784static void 847static void
785generic_check_speed(struct net_device* dev) 848generic_check_speed(struct net_device* dev)
786{ 849{
787 unsigned long data; 850 unsigned long data;
788 data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE); 851 struct net_local *np = netdev_priv(dev);
852
853 data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
789 if ((data & ADVERTISE_100FULL) || 854 if ((data & ADVERTISE_100FULL) ||
790 (data & ADVERTISE_100HALF)) 855 (data & ADVERTISE_100HALF))
791 current_speed = 100; 856 current_speed = 100;
@@ -797,7 +862,10 @@ static void
797tdk_check_speed(struct net_device* dev) 862tdk_check_speed(struct net_device* dev)
798{ 863{
799 unsigned long data; 864 unsigned long data;
800 data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_TDK_DIAGNOSTIC_REG); 865 struct net_local *np = netdev_priv(dev);
866
867 data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
868 MDIO_TDK_DIAGNOSTIC_REG);
801 current_speed = (data & MDIO_TDK_DIAGNOSTIC_RATE ? 100 : 10); 869 current_speed = (data & MDIO_TDK_DIAGNOSTIC_RATE ? 100 : 10);
802} 870}
803 871
@@ -805,7 +873,10 @@ static void
805broadcom_check_speed(struct net_device* dev) 873broadcom_check_speed(struct net_device* dev)
806{ 874{
807 unsigned long data; 875 unsigned long data;
808 data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_AUX_CTRL_STATUS_REG); 876 struct net_local *np = netdev_priv(dev);
877
878 data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
879 MDIO_AUX_CTRL_STATUS_REG);
809 current_speed = (data & MDIO_BC_SPEED ? 100 : 10); 880 current_speed = (data & MDIO_BC_SPEED ? 100 : 10);
810} 881}
811 882
@@ -813,46 +884,62 @@ static void
813intel_check_speed(struct net_device* dev) 884intel_check_speed(struct net_device* dev)
814{ 885{
815 unsigned long data; 886 unsigned long data;
816 data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_INT_STATUS_REG_2); 887 struct net_local *np = netdev_priv(dev);
888
889 data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
890 MDIO_INT_STATUS_REG_2);
817 current_speed = (data & MDIO_INT_SPEED ? 100 : 10); 891 current_speed = (data & MDIO_INT_SPEED ? 100 : 10);
818} 892}
819 893#endif
820static void 894static void
821e100_check_speed(unsigned long priv) 895e100_check_speed(unsigned long priv)
822{ 896{
823 struct net_device* dev = (struct net_device*)priv; 897 struct net_device* dev = (struct net_device*)priv;
898 struct net_local *np = netdev_priv(dev);
824 static int led_initiated = 0; 899 static int led_initiated = 0;
825 unsigned long data; 900 unsigned long data;
826 int old_speed = current_speed; 901 int old_speed = current_speed;
827 902
828 data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMSR); 903 spin_lock(&np->transceiver_lock);
904
905 data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMSR);
829 if (!(data & BMSR_LSTATUS)) { 906 if (!(data & BMSR_LSTATUS)) {
830 current_speed = 0; 907 current_speed = 0;
831 } else { 908 } else {
832 transceiver->check_speed(dev); 909 transceiver->check_speed(dev);
833 } 910 }
834 911
912 spin_lock(&np->led_lock);
835 if ((old_speed != current_speed) || !led_initiated) { 913 if ((old_speed != current_speed) || !led_initiated) {
836 led_initiated = 1; 914 led_initiated = 1;
837 e100_set_network_leds(NO_NETWORK_ACTIVITY); 915 e100_set_network_leds(NO_NETWORK_ACTIVITY);
916 if (current_speed)
917 netif_carrier_on(dev);
918 else
919 netif_carrier_off(dev);
838 } 920 }
921 spin_unlock(&np->led_lock);
839 922
840 /* Reinitialize the timer. */ 923 /* Reinitialize the timer. */
841 speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL; 924 speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
842 add_timer(&speed_timer); 925 add_timer(&speed_timer);
926
927 spin_unlock(&np->transceiver_lock);
843} 928}
844 929
845static void 930static void
846e100_negotiate(struct net_device* dev) 931e100_negotiate(struct net_device* dev)
847{ 932{
848 unsigned short data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE); 933 struct net_local *np = netdev_priv(dev);
934 unsigned short data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
935 MII_ADVERTISE);
849 936
850 /* Discard old speed and duplex settings */ 937 /* Discard old speed and duplex settings */
851 data &= ~(ADVERTISE_100HALF | ADVERTISE_100FULL | 938 data &= ~(ADVERTISE_100HALF | ADVERTISE_100FULL |
852 ADVERTISE_10HALF | ADVERTISE_10FULL); 939 ADVERTISE_10HALF | ADVERTISE_10FULL);
853 940
854 switch (current_speed_selection) { 941 switch (current_speed_selection) {
855 case 10 : 942 case 10:
856 if (current_duplex == full) 943 if (current_duplex == full)
857 data |= ADVERTISE_10FULL; 944 data |= ADVERTISE_10FULL;
858 else if (current_duplex == half) 945 else if (current_duplex == half)
@@ -861,7 +948,7 @@ e100_negotiate(struct net_device* dev)
861 data |= ADVERTISE_10HALF | ADVERTISE_10FULL; 948 data |= ADVERTISE_10HALF | ADVERTISE_10FULL;
862 break; 949 break;
863 950
864 case 100 : 951 case 100:
865 if (current_duplex == full) 952 if (current_duplex == full)
866 data |= ADVERTISE_100FULL; 953 data |= ADVERTISE_100FULL;
867 else if (current_duplex == half) 954 else if (current_duplex == half)
@@ -870,7 +957,7 @@ e100_negotiate(struct net_device* dev)
870 data |= ADVERTISE_100HALF | ADVERTISE_100FULL; 957 data |= ADVERTISE_100HALF | ADVERTISE_100FULL;
871 break; 958 break;
872 959
873 case 0 : /* Auto */ 960 case 0: /* Auto */
874 if (current_duplex == full) 961 if (current_duplex == full)
875 data |= ADVERTISE_100FULL | ADVERTISE_10FULL; 962 data |= ADVERTISE_100FULL | ADVERTISE_10FULL;
876 else if (current_duplex == half) 963 else if (current_duplex == half)
@@ -880,35 +967,44 @@ e100_negotiate(struct net_device* dev)
880 ADVERTISE_100HALF | ADVERTISE_100FULL; 967 ADVERTISE_100HALF | ADVERTISE_100FULL;
881 break; 968 break;
882 969
883 default : /* assume autoneg speed and duplex */ 970 default: /* assume autoneg speed and duplex */
884 data |= ADVERTISE_10HALF | ADVERTISE_10FULL | 971 data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
885 ADVERTISE_100HALF | ADVERTISE_100FULL; 972 ADVERTISE_100HALF | ADVERTISE_100FULL;
973 break;
886 } 974 }
887 975
888 e100_set_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE, data); 976 e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data);
889 977
890 /* Renegotiate with link partner */ 978 /* Renegotiate with link partner */
891 data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMCR); 979 if (autoneg_normal) {
980 data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
892 data |= BMCR_ANENABLE | BMCR_ANRESTART; 981 data |= BMCR_ANENABLE | BMCR_ANRESTART;
893 982 }
894 e100_set_mdio_reg(dev, mdio_phy_addr, MII_BMCR, data); 983 e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data);
895} 984}
896 985
897static void 986static void
898e100_set_speed(struct net_device* dev, unsigned long speed) 987e100_set_speed(struct net_device* dev, unsigned long speed)
899{ 988{
989 struct net_local *np = netdev_priv(dev);
990
991 spin_lock(&np->transceiver_lock);
900 if (speed != current_speed_selection) { 992 if (speed != current_speed_selection) {
901 current_speed_selection = speed; 993 current_speed_selection = speed;
902 e100_negotiate(dev); 994 e100_negotiate(dev);
903 } 995 }
996 spin_unlock(&np->transceiver_lock);
904} 997}
905 998
906static void 999static void
907e100_check_duplex(unsigned long priv) 1000e100_check_duplex(unsigned long priv)
908{ 1001{
909 struct net_device *dev = (struct net_device *)priv; 1002 struct net_device *dev = (struct net_device *)priv;
910 struct net_local *np = (struct net_local *)dev->priv; 1003 struct net_local *np = netdev_priv(dev);
911 int old_duplex = full_duplex; 1004 int old_duplex;
1005
1006 spin_lock(&np->transceiver_lock);
1007 old_duplex = full_duplex;
912 transceiver->check_duplex(dev); 1008 transceiver->check_duplex(dev);
913 if (old_duplex != full_duplex) { 1009 if (old_duplex != full_duplex) {
914 /* Duplex changed */ 1010 /* Duplex changed */
@@ -920,13 +1016,22 @@ e100_check_duplex(unsigned long priv)
920 duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL; 1016 duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
921 add_timer(&duplex_timer); 1017 add_timer(&duplex_timer);
922 np->mii_if.full_duplex = full_duplex; 1018 np->mii_if.full_duplex = full_duplex;
1019 spin_unlock(&np->transceiver_lock);
923} 1020}
924 1021#if defined(CONFIG_ETRAX_NO_PHY)
1022static void
1023dummy_check_duplex(struct net_device* dev)
1024{
1025 full_duplex = 1;
1026}
1027#else
925static void 1028static void
926generic_check_duplex(struct net_device* dev) 1029generic_check_duplex(struct net_device* dev)
927{ 1030{
928 unsigned long data; 1031 unsigned long data;
929 data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE); 1032 struct net_local *np = netdev_priv(dev);
1033
1034 data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
930 if ((data & ADVERTISE_10FULL) || 1035 if ((data & ADVERTISE_10FULL) ||
931 (data & ADVERTISE_100FULL)) 1036 (data & ADVERTISE_100FULL))
932 full_duplex = 1; 1037 full_duplex = 1;
@@ -938,7 +1043,10 @@ static void
938tdk_check_duplex(struct net_device* dev) 1043tdk_check_duplex(struct net_device* dev)
939{ 1044{
940 unsigned long data; 1045 unsigned long data;
941 data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_TDK_DIAGNOSTIC_REG); 1046 struct net_local *np = netdev_priv(dev);
1047
1048 data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
1049 MDIO_TDK_DIAGNOSTIC_REG);
942 full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0; 1050 full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0;
943} 1051}
944 1052
@@ -946,7 +1054,10 @@ static void
946broadcom_check_duplex(struct net_device* dev) 1054broadcom_check_duplex(struct net_device* dev)
947{ 1055{
948 unsigned long data; 1056 unsigned long data;
949 data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_AUX_CTRL_STATUS_REG); 1057 struct net_local *np = netdev_priv(dev);
1058
1059 data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
1060 MDIO_AUX_CTRL_STATUS_REG);
950 full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0; 1061 full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0;
951} 1062}
952 1063
@@ -954,38 +1065,55 @@ static void
954intel_check_duplex(struct net_device* dev) 1065intel_check_duplex(struct net_device* dev)
955{ 1066{
956 unsigned long data; 1067 unsigned long data;
957 data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_INT_STATUS_REG_2); 1068 struct net_local *np = netdev_priv(dev);
1069
1070 data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
1071 MDIO_INT_STATUS_REG_2);
958 full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0; 1072 full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0;
959} 1073}
960 1074#endif
961static void 1075static void
962e100_set_duplex(struct net_device* dev, enum duplex new_duplex) 1076e100_set_duplex(struct net_device* dev, enum duplex new_duplex)
963{ 1077{
1078 struct net_local *np = netdev_priv(dev);
1079
1080 spin_lock(&np->transceiver_lock);
964 if (new_duplex != current_duplex) { 1081 if (new_duplex != current_duplex) {
965 current_duplex = new_duplex; 1082 current_duplex = new_duplex;
966 e100_negotiate(dev); 1083 e100_negotiate(dev);
967 } 1084 }
1085 spin_unlock(&np->transceiver_lock);
968} 1086}
969 1087
970static int 1088static int
971e100_probe_transceiver(struct net_device* dev) 1089e100_probe_transceiver(struct net_device* dev)
972{ 1090{
1091 int ret = 0;
1092
1093#if !defined(CONFIG_ETRAX_NO_PHY)
973 unsigned int phyid_high; 1094 unsigned int phyid_high;
974 unsigned int phyid_low; 1095 unsigned int phyid_low;
975 unsigned int oui; 1096 unsigned int oui;
976 struct transceiver_ops* ops = NULL; 1097 struct transceiver_ops* ops = NULL;
1098 struct net_local *np = netdev_priv(dev);
1099
1100 spin_lock(&np->transceiver_lock);
977 1101
978 /* Probe MDIO physical address */ 1102 /* Probe MDIO physical address */
979 for (mdio_phy_addr = 0; mdio_phy_addr <= 31; mdio_phy_addr++) { 1103 for (np->mii_if.phy_id = 0; np->mii_if.phy_id <= 31;
980 if (e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMSR) != 0xffff) 1104 np->mii_if.phy_id++) {
1105 if (e100_get_mdio_reg(dev,
1106 np->mii_if.phy_id, MII_BMSR) != 0xffff)
981 break; 1107 break;
982 } 1108 }
983 if (mdio_phy_addr == 32) 1109 if (np->mii_if.phy_id == 32) {
984 return -ENODEV; 1110 ret = -ENODEV;
1111 goto out;
1112 }
985 1113
986 /* Get manufacturer */ 1114 /* Get manufacturer */
987 phyid_high = e100_get_mdio_reg(dev, mdio_phy_addr, MII_PHYSID1); 1115 phyid_high = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID1);
988 phyid_low = e100_get_mdio_reg(dev, mdio_phy_addr, MII_PHYSID2); 1116 phyid_low = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID2);
989 oui = (phyid_high << 6) | (phyid_low >> 10); 1117 oui = (phyid_high << 6) | (phyid_low >> 10);
990 1118
991 for (ops = &transceivers[0]; ops->oui; ops++) { 1119 for (ops = &transceivers[0]; ops->oui; ops++) {
@@ -993,8 +1121,10 @@ e100_probe_transceiver(struct net_device* dev)
993 break; 1121 break;
994 } 1122 }
995 transceiver = ops; 1123 transceiver = ops;
996 1124out:
997 return 0; 1125 spin_unlock(&np->transceiver_lock);
1126#endif
1127 return ret;
998} 1128}
999 1129
1000static int 1130static int
@@ -1088,13 +1218,14 @@ e100_receive_mdio_bit()
1088static void 1218static void
1089e100_reset_transceiver(struct net_device* dev) 1219e100_reset_transceiver(struct net_device* dev)
1090{ 1220{
1221 struct net_local *np = netdev_priv(dev);
1091 unsigned short cmd; 1222 unsigned short cmd;
1092 unsigned short data; 1223 unsigned short data;
1093 int bitCounter; 1224 int bitCounter;
1094 1225
1095 data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMCR); 1226 data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
1096 1227
1097 cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (mdio_phy_addr << 7) | (MII_BMCR << 2); 1228 cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (np->mii_if.phy_id << 7) | (MII_BMCR << 2);
1098 1229
1099 e100_send_mdio_cmd(cmd, 1); 1230 e100_send_mdio_cmd(cmd, 1);
1100 1231
@@ -1112,7 +1243,7 @@ e100_reset_transceiver(struct net_device* dev)
1112static void 1243static void
1113e100_tx_timeout(struct net_device *dev) 1244e100_tx_timeout(struct net_device *dev)
1114{ 1245{
1115 struct net_local *np = (struct net_local *)dev->priv; 1246 struct net_local *np = netdev_priv(dev);
1116 unsigned long flags; 1247 unsigned long flags;
1117 1248
1118 spin_lock_irqsave(&np->lock, flags); 1249 spin_lock_irqsave(&np->lock, flags);
@@ -1134,8 +1265,7 @@ e100_tx_timeout(struct net_device *dev)
1134 e100_reset_transceiver(dev); 1265 e100_reset_transceiver(dev);
1135 1266
1136 /* and get rid of the packets that never got an interrupt */ 1267 /* and get rid of the packets that never got an interrupt */
1137 while (myFirstTxDesc != myNextTxDesc) 1268 while (myFirstTxDesc != myNextTxDesc) {
1138 {
1139 dev_kfree_skb(myFirstTxDesc->skb); 1269 dev_kfree_skb(myFirstTxDesc->skb);
1140 myFirstTxDesc->skb = 0; 1270 myFirstTxDesc->skb = 0;
1141 myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next); 1271 myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
@@ -1161,7 +1291,7 @@ e100_tx_timeout(struct net_device *dev)
1161static int 1291static int
1162e100_send_packet(struct sk_buff *skb, struct net_device *dev) 1292e100_send_packet(struct sk_buff *skb, struct net_device *dev)
1163{ 1293{
1164 struct net_local *np = (struct net_local *)dev->priv; 1294 struct net_local *np = netdev_priv(dev);
1165 unsigned char *buf = skb->data; 1295 unsigned char *buf = skb->data;
1166 unsigned long flags; 1296 unsigned long flags;
1167 1297
@@ -1174,7 +1304,7 @@ e100_send_packet(struct sk_buff *skb, struct net_device *dev)
1174 1304
1175 dev->trans_start = jiffies; 1305 dev->trans_start = jiffies;
1176 1306
1177 e100_hardware_send_packet(buf, skb->len); 1307 e100_hardware_send_packet(np, buf, skb->len);
1178 1308
1179 myNextTxDesc = phys_to_virt(myNextTxDesc->descr.next); 1309 myNextTxDesc = phys_to_virt(myNextTxDesc->descr.next);
1180 1310
@@ -1197,13 +1327,15 @@ static irqreturn_t
1197e100rxtx_interrupt(int irq, void *dev_id) 1327e100rxtx_interrupt(int irq, void *dev_id)
1198{ 1328{
1199 struct net_device *dev = (struct net_device *)dev_id; 1329 struct net_device *dev = (struct net_device *)dev_id;
1200 struct net_local *np = (struct net_local *)dev->priv; 1330 struct net_local *np = netdev_priv(dev);
1201 unsigned long irqbits = *R_IRQ_MASK2_RD; 1331 unsigned long irqbits;
1202 1332
1203 /* Disable RX/TX IRQs to avoid reentrancy */ 1333 /*
1204 *R_IRQ_MASK2_CLR = 1334 * Note that both rx and tx interrupts are blocked at this point,
1205 IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) | 1335 * regardless of which got us here.
1206 IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr); 1336 */
1337
1338 irqbits = *R_IRQ_MASK2_RD;
1207 1339
1208 /* Handle received packets */ 1340 /* Handle received packets */
1209 if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma1_eop, active)) { 1341 if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma1_eop, active)) {
@@ -1219,7 +1351,7 @@ e100rxtx_interrupt(int irq, void *dev_id)
1219 * allocate a new buffer to put a packet in. 1351 * allocate a new buffer to put a packet in.
1220 */ 1352 */
1221 e100_rx(dev); 1353 e100_rx(dev);
1222 ((struct net_local *)dev->priv)->stats.rx_packets++; 1354 np->stats.rx_packets++;
1223 /* restart/continue on the channel, for safety */ 1355 /* restart/continue on the channel, for safety */
1224 *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart); 1356 *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
1225 /* clear dma channel 1 eop/descr irq bits */ 1357 /* clear dma channel 1 eop/descr irq bits */
@@ -1233,9 +1365,8 @@ e100rxtx_interrupt(int irq, void *dev_id)
1233 } 1365 }
1234 1366
1235 /* Report any packets that have been sent */ 1367 /* Report any packets that have been sent */
1236 while (myFirstTxDesc != phys_to_virt(*R_DMA_CH0_FIRST) && 1368 while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST &&
1237 myFirstTxDesc != myNextTxDesc) 1369 (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) {
1238 {
1239 np->stats.tx_bytes += myFirstTxDesc->skb->len; 1370 np->stats.tx_bytes += myFirstTxDesc->skb->len;
1240 np->stats.tx_packets++; 1371 np->stats.tx_packets++;
1241 1372
@@ -1244,19 +1375,15 @@ e100rxtx_interrupt(int irq, void *dev_id)
1244 dev_kfree_skb_irq(myFirstTxDesc->skb); 1375 dev_kfree_skb_irq(myFirstTxDesc->skb);
1245 myFirstTxDesc->skb = 0; 1376 myFirstTxDesc->skb = 0;
1246 myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next); 1377 myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
1378 /* Wake up queue. */
1379 netif_wake_queue(dev);
1247 } 1380 }
1248 1381
1249 if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma0_eop, active)) { 1382 if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma0_eop, active)) {
1250 /* acknowledge the eop interrupt and wake up queue */ 1383 /* acknowledge the eop interrupt. */
1251 *R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do); 1384 *R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
1252 netif_wake_queue(dev);
1253 } 1385 }
1254 1386
1255 /* Enable RX/TX IRQs again */
1256 *R_IRQ_MASK2_SET =
1257 IO_STATE(R_IRQ_MASK2_SET, dma0_eop, set) |
1258 IO_STATE(R_IRQ_MASK2_SET, dma1_eop, set);
1259
1260 return IRQ_HANDLED; 1387 return IRQ_HANDLED;
1261} 1388}
1262 1389
@@ -1264,7 +1391,7 @@ static irqreturn_t
1264e100nw_interrupt(int irq, void *dev_id) 1391e100nw_interrupt(int irq, void *dev_id)
1265{ 1392{
1266 struct net_device *dev = (struct net_device *)dev_id; 1393 struct net_device *dev = (struct net_device *)dev_id;
1267 struct net_local *np = (struct net_local *)dev->priv; 1394 struct net_local *np = netdev_priv(dev);
1268 unsigned long irqbits = *R_IRQ_MASK0_RD; 1395 unsigned long irqbits = *R_IRQ_MASK0_RD;
1269 1396
1270 /* check for underrun irq */ 1397 /* check for underrun irq */
@@ -1286,7 +1413,6 @@ e100nw_interrupt(int irq, void *dev_id)
1286 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr); 1413 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
1287 *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow; 1414 *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
1288 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop); 1415 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
1289 *R_NETWORK_TR_CTRL = IO_STATE(R_NETWORK_TR_CTRL, clr_error, clr);
1290 np->stats.tx_errors++; 1416 np->stats.tx_errors++;
1291 D(printk("ethernet excessive collisions!\n")); 1417 D(printk("ethernet excessive collisions!\n"));
1292 } 1418 }
@@ -1299,12 +1425,13 @@ e100_rx(struct net_device *dev)
1299{ 1425{
1300 struct sk_buff *skb; 1426 struct sk_buff *skb;
1301 int length = 0; 1427 int length = 0;
1302 struct net_local *np = (struct net_local *)dev->priv; 1428 struct net_local *np = netdev_priv(dev);
1303 unsigned char *skb_data_ptr; 1429 unsigned char *skb_data_ptr;
1304#ifdef ETHDEBUG 1430#ifdef ETHDEBUG
1305 int i; 1431 int i;
1306#endif 1432#endif
1307 1433 etrax_eth_descr *prevRxDesc; /* The descriptor right before myNextRxDesc */
1434 spin_lock(&np->led_lock);
1308 if (!led_active && time_after(jiffies, led_next_time)) { 1435 if (!led_active && time_after(jiffies, led_next_time)) {
1309 /* light the network leds depending on the current speed. */ 1436 /* light the network leds depending on the current speed. */
1310 e100_set_network_leds(NETWORK_ACTIVITY); 1437 e100_set_network_leds(NETWORK_ACTIVITY);
@@ -1314,9 +1441,10 @@ e100_rx(struct net_device *dev)
1314 led_active = 1; 1441 led_active = 1;
1315 mod_timer(&clear_led_timer, jiffies + HZ/10); 1442 mod_timer(&clear_led_timer, jiffies + HZ/10);
1316 } 1443 }
1444 spin_unlock(&np->led_lock);
1317 1445
1318 length = myNextRxDesc->descr.hw_len - 4; 1446 length = myNextRxDesc->descr.hw_len - 4;
1319 ((struct net_local *)dev->priv)->stats.rx_bytes += length; 1447 np->stats.rx_bytes += length;
1320 1448
1321#ifdef ETHDEBUG 1449#ifdef ETHDEBUG
1322 printk("Got a packet of length %d:\n", length); 1450 printk("Got a packet of length %d:\n", length);
@@ -1336,7 +1464,7 @@ e100_rx(struct net_device *dev)
1336 if (!skb) { 1464 if (!skb) {
1337 np->stats.rx_errors++; 1465 np->stats.rx_errors++;
1338 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); 1466 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
1339 return; 1467 goto update_nextrxdesc;
1340 } 1468 }
1341 1469
1342 skb_put(skb, length - ETHER_HEAD_LEN); /* allocate room for the packet body */ 1470 skb_put(skb, length - ETHER_HEAD_LEN); /* allocate room for the packet body */
@@ -1354,15 +1482,15 @@ e100_rx(struct net_device *dev)
1354 else { 1482 else {
1355 /* Large packet, send directly to upper layers and allocate new 1483 /* Large packet, send directly to upper layers and allocate new
1356 * memory (aligned to cache line boundary to avoid bug). 1484 * memory (aligned to cache line boundary to avoid bug).
1357 * Before sending the skb to upper layers we must make sure that 1485 * Before sending the skb to upper layers we must make sure
1358 * skb->data points to the aligned start of the packet. 1486 * that skb->data points to the aligned start of the packet.
1359 */ 1487 */
1360 int align; 1488 int align;
1361 struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES); 1489 struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
1362 if (!new_skb) { 1490 if (!new_skb) {
1363 np->stats.rx_errors++; 1491 np->stats.rx_errors++;
1364 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); 1492 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
1365 return; 1493 goto update_nextrxdesc;
1366 } 1494 }
1367 skb = myNextRxDesc->skb; 1495 skb = myNextRxDesc->skb;
1368 align = (int)phys_to_virt(myNextRxDesc->descr.buf) - (int)skb->data; 1496 align = (int)phys_to_virt(myNextRxDesc->descr.buf) - (int)skb->data;
@@ -1377,9 +1505,10 @@ e100_rx(struct net_device *dev)
1377 /* Send the packet to the upper layers */ 1505 /* Send the packet to the upper layers */
1378 netif_rx(skb); 1506 netif_rx(skb);
1379 1507
1508 update_nextrxdesc:
1380 /* Prepare for next packet */ 1509 /* Prepare for next packet */
1381 myNextRxDesc->descr.status = 0; 1510 myNextRxDesc->descr.status = 0;
1382 myPrevRxDesc = myNextRxDesc; 1511 prevRxDesc = myNextRxDesc;
1383 myNextRxDesc = phys_to_virt(myNextRxDesc->descr.next); 1512 myNextRxDesc = phys_to_virt(myNextRxDesc->descr.next);
1384 1513
1385 rx_queue_len++; 1514 rx_queue_len++;
@@ -1387,9 +1516,9 @@ e100_rx(struct net_device *dev)
1387 /* Check if descriptors should be returned */ 1516 /* Check if descriptors should be returned */
1388 if (rx_queue_len == RX_QUEUE_THRESHOLD) { 1517 if (rx_queue_len == RX_QUEUE_THRESHOLD) {
1389 flush_etrax_cache(); 1518 flush_etrax_cache();
1390 myPrevRxDesc->descr.ctrl |= d_eol; 1519 prevRxDesc->descr.ctrl |= d_eol;
1391 myLastRxDesc->descr.ctrl &= ~d_eol; 1520 myLastRxDesc->descr.ctrl &= ~d_eol;
1392 myLastRxDesc = myPrevRxDesc; 1521 myLastRxDesc = prevRxDesc;
1393 rx_queue_len = 0; 1522 rx_queue_len = 0;
1394 } 1523 }
1395} 1524}
@@ -1398,7 +1527,7 @@ e100_rx(struct net_device *dev)
1398static int 1527static int
1399e100_close(struct net_device *dev) 1528e100_close(struct net_device *dev)
1400{ 1529{
1401 struct net_local *np = (struct net_local *)dev->priv; 1530 struct net_local *np = netdev_priv(dev);
1402 1531
1403 printk(KERN_INFO "Closing %s.\n", dev->name); 1532 printk(KERN_INFO "Closing %s.\n", dev->name);
1404 1533
@@ -1426,6 +1555,9 @@ e100_close(struct net_device *dev)
1426 free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev); 1555 free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev);
1427 free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev); 1556 free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
1428 1557
1558 cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
1559 cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
1560
1429 /* Update the statistics here. */ 1561 /* Update the statistics here. */
1430 1562
1431 update_rx_stats(&np->stats); 1563 update_rx_stats(&np->stats);
@@ -1443,18 +1575,11 @@ e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1443{ 1575{
1444 struct mii_ioctl_data *data = if_mii(ifr); 1576 struct mii_ioctl_data *data = if_mii(ifr);
1445 struct net_local *np = netdev_priv(dev); 1577 struct net_local *np = netdev_priv(dev);
1578 int rc = 0;
1579 int old_autoneg;
1446 1580
1447 spin_lock(&np->lock); /* Preempt protection */ 1581 spin_lock(&np->lock); /* Preempt protection */
1448 switch (cmd) { 1582 switch (cmd) {
1449 case SIOCGMIIPHY: /* Get PHY address */
1450 data->phy_id = mdio_phy_addr;
1451 break;
1452 case SIOCGMIIREG: /* Read MII register */
1453 data->val_out = e100_get_mdio_reg(dev, mdio_phy_addr, data->reg_num);
1454 break;
1455 case SIOCSMIIREG: /* Write MII register */
1456 e100_set_mdio_reg(dev, mdio_phy_addr, data->reg_num, data->val_in);
1457 break;
1458 /* The ioctls below should be considered obsolete but are */ 1583 /* The ioctls below should be considered obsolete but are */
 1458 /* still present for compatibility with old scripts/apps */ 1583 /* still present for compatibility with old scripts/apps */
1460 case SET_ETH_SPEED_10: /* 10 Mbps */ 1585 case SET_ETH_SPEED_10: /* 10 Mbps */
@@ -1463,60 +1588,47 @@ e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1463 case SET_ETH_SPEED_100: /* 100 Mbps */ 1588 case SET_ETH_SPEED_100: /* 100 Mbps */
1464 e100_set_speed(dev, 100); 1589 e100_set_speed(dev, 100);
1465 break; 1590 break;
1466 case SET_ETH_SPEED_AUTO: /* Auto negotiate speed */ 1591 case SET_ETH_SPEED_AUTO: /* Auto-negotiate speed */
1467 e100_set_speed(dev, 0); 1592 e100_set_speed(dev, 0);
1468 break; 1593 break;
1469 case SET_ETH_DUPLEX_HALF: /* Half duplex. */ 1594 case SET_ETH_DUPLEX_HALF: /* Half duplex */
1470 e100_set_duplex(dev, half); 1595 e100_set_duplex(dev, half);
1471 break; 1596 break;
1472 case SET_ETH_DUPLEX_FULL: /* Full duplex. */ 1597 case SET_ETH_DUPLEX_FULL: /* Full duplex */
1473 e100_set_duplex(dev, full); 1598 e100_set_duplex(dev, full);
1474 break; 1599 break;
1475 case SET_ETH_DUPLEX_AUTO: /* Autonegotiate duplex*/ 1600 case SET_ETH_DUPLEX_AUTO: /* Auto-negotiate duplex */
1476 e100_set_duplex(dev, autoneg); 1601 e100_set_duplex(dev, autoneg);
1477 break; 1602 break;
1603 case SET_ETH_AUTONEG:
1604 old_autoneg = autoneg_normal;
1605 autoneg_normal = *(int*)data;
1606 if (autoneg_normal != old_autoneg)
1607 e100_negotiate(dev);
1608 break;
1478 default: 1609 default:
1479 return -EINVAL; 1610 rc = generic_mii_ioctl(&np->mii_if, if_mii(ifr),
1611 cmd, NULL);
1612 break;
1480 } 1613 }
1481 spin_unlock(&np->lock); 1614 spin_unlock(&np->lock);
1482 return 0; 1615 return rc;
1483} 1616}
1484 1617
1485static int e100_set_settings(struct net_device *dev, 1618static int e100_get_settings(struct net_device *dev,
1486 struct ethtool_cmd *ecmd) 1619 struct ethtool_cmd *cmd)
1487{ 1620{
1488 ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII | 1621 struct net_local *np = netdev_priv(dev);
1489 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 1622 int err;
1490 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
1491 ecmd->port = PORT_TP;
1492 ecmd->transceiver = XCVR_EXTERNAL;
1493 ecmd->phy_address = mdio_phy_addr;
1494 ecmd->speed = current_speed;
1495 ecmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
1496 ecmd->advertising = ADVERTISED_TP;
1497 1623
1498 if (current_duplex == autoneg && current_speed_selection == 0) 1624 spin_lock_irq(&np->lock);
1499 ecmd->advertising |= ADVERTISED_Autoneg; 1625 err = mii_ethtool_gset(&np->mii_if, cmd);
1500 else { 1626 spin_unlock_irq(&np->lock);
1501 ecmd->advertising |=
1502 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1503 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
1504 if (current_speed_selection == 10)
1505 ecmd->advertising &= ~(ADVERTISED_100baseT_Half |
1506 ADVERTISED_100baseT_Full);
1507 else if (current_speed_selection == 100)
1508 ecmd->advertising &= ~(ADVERTISED_10baseT_Half |
1509 ADVERTISED_10baseT_Full);
1510 if (current_duplex == half)
1511 ecmd->advertising &= ~(ADVERTISED_10baseT_Full |
1512 ADVERTISED_100baseT_Full);
1513 else if (current_duplex == full)
1514 ecmd->advertising &= ~(ADVERTISED_10baseT_Half |
1515 ADVERTISED_100baseT_Half);
1516 }
1517 1627
1518 ecmd->autoneg = AUTONEG_ENABLE; 1628 /* The PHY may support 1000baseT, but the Etrax100 does not. */
1519 return 0; 1629 cmd->supported &= ~(SUPPORTED_1000baseT_Half
1630 | SUPPORTED_1000baseT_Full);
1631 return err;
1520} 1632}
1521 1633
1522static int e100_set_settings(struct net_device *dev, 1634static int e100_set_settings(struct net_device *dev,
@@ -1560,7 +1672,8 @@ static const struct ethtool_ops e100_ethtool_ops = {
1560static int 1672static int
1561e100_set_config(struct net_device *dev, struct ifmap *map) 1673e100_set_config(struct net_device *dev, struct ifmap *map)
1562{ 1674{
1563 struct net_local *np = (struct net_local *)dev->priv; 1675 struct net_local *np = netdev_priv(dev);
1676
1564 spin_lock(&np->lock); /* Preempt protection */ 1677 spin_lock(&np->lock); /* Preempt protection */
1565 1678
1566 switch(map->port) { 1679 switch(map->port) {
@@ -1612,7 +1725,6 @@ update_tx_stats(struct net_device_stats *es)
1612 es->collisions += 1725 es->collisions +=
1613 IO_EXTRACT(R_TR_COUNTERS, single_col, r) + 1726 IO_EXTRACT(R_TR_COUNTERS, single_col, r) +
1614 IO_EXTRACT(R_TR_COUNTERS, multiple_col, r); 1727 IO_EXTRACT(R_TR_COUNTERS, multiple_col, r);
1615 es->tx_errors += IO_EXTRACT(R_TR_COUNTERS, deferred, r);
1616} 1728}
1617 1729
1618/* 1730/*
@@ -1622,8 +1734,9 @@ update_tx_stats(struct net_device_stats *es)
1622static struct net_device_stats * 1734static struct net_device_stats *
1623e100_get_stats(struct net_device *dev) 1735e100_get_stats(struct net_device *dev)
1624{ 1736{
1625 struct net_local *lp = (struct net_local *)dev->priv; 1737 struct net_local *lp = netdev_priv(dev);
1626 unsigned long flags; 1738 unsigned long flags;
1739
1627 spin_lock_irqsave(&lp->lock, flags); 1740 spin_lock_irqsave(&lp->lock, flags);
1628 1741
1629 update_rx_stats(&lp->stats); 1742 update_rx_stats(&lp->stats);
@@ -1643,13 +1756,13 @@ e100_get_stats(struct net_device *dev)
1643static void 1756static void
1644set_multicast_list(struct net_device *dev) 1757set_multicast_list(struct net_device *dev)
1645{ 1758{
1646 struct net_local *lp = (struct net_local *)dev->priv; 1759 struct net_local *lp = netdev_priv(dev);
1647 int num_addr = dev->mc_count; 1760 int num_addr = dev->mc_count;
1648 unsigned long int lo_bits; 1761 unsigned long int lo_bits;
1649 unsigned long int hi_bits; 1762 unsigned long int hi_bits;
1763
1650 spin_lock(&lp->lock); 1764 spin_lock(&lp->lock);
1651 if (dev->flags & IFF_PROMISC) 1765 if (dev->flags & IFF_PROMISC) {
1652 {
1653 /* promiscuous mode */ 1766 /* promiscuous mode */
1654 lo_bits = 0xfffffffful; 1767 lo_bits = 0xfffffffful;
1655 hi_bits = 0xfffffffful; 1768 hi_bits = 0xfffffffful;
@@ -1679,9 +1792,10 @@ set_multicast_list(struct net_device *dev)
1679 struct dev_mc_list *dmi = dev->mc_list; 1792 struct dev_mc_list *dmi = dev->mc_list;
1680 int i; 1793 int i;
1681 char *baddr; 1794 char *baddr;
1795
1682 lo_bits = 0x00000000ul; 1796 lo_bits = 0x00000000ul;
1683 hi_bits = 0x00000000ul; 1797 hi_bits = 0x00000000ul;
1684 for (i=0; i<num_addr; i++) { 1798 for (i = 0; i < num_addr; i++) {
1685 /* Calculate the hash index for the GA registers */ 1799 /* Calculate the hash index for the GA registers */
1686 1800
1687 hash_ix = 0; 1801 hash_ix = 0;
@@ -1708,8 +1822,7 @@ set_multicast_list(struct net_device *dev)
1708 1822
1709 if (hash_ix >= 32) { 1823 if (hash_ix >= 32) {
1710 hi_bits |= (1 << (hash_ix-32)); 1824 hi_bits |= (1 << (hash_ix-32));
1711 } 1825 } else {
1712 else {
1713 lo_bits |= (1 << hash_ix); 1826 lo_bits |= (1 << hash_ix);
1714 } 1827 }
1715 dmi = dmi->next; 1828 dmi = dmi->next;
@@ -1724,10 +1837,11 @@ set_multicast_list(struct net_device *dev)
1724} 1837}
1725 1838
1726void 1839void
1727e100_hardware_send_packet(char *buf, int length) 1840e100_hardware_send_packet(struct net_local *np, char *buf, int length)
1728{ 1841{
1729 D(printk("e100 send pack, buf 0x%x len %d\n", buf, length)); 1842 D(printk("e100 send pack, buf 0x%x len %d\n", buf, length));
1730 1843
1844 spin_lock(&np->led_lock);
1731 if (!led_active && time_after(jiffies, led_next_time)) { 1845 if (!led_active && time_after(jiffies, led_next_time)) {
1732 /* light the network leds depending on the current speed. */ 1846 /* light the network leds depending on the current speed. */
1733 e100_set_network_leds(NETWORK_ACTIVITY); 1847 e100_set_network_leds(NETWORK_ACTIVITY);
@@ -1737,6 +1851,7 @@ e100_hardware_send_packet(char *buf, int length)
1737 led_active = 1; 1851 led_active = 1;
1738 mod_timer(&clear_led_timer, jiffies + HZ/10); 1852 mod_timer(&clear_led_timer, jiffies + HZ/10);
1739 } 1853 }
1854 spin_unlock(&np->led_lock);
1740 1855
1741 /* configure the tx dma descriptor */ 1856 /* configure the tx dma descriptor */
1742 myNextTxDesc->descr.sw_len = length; 1857 myNextTxDesc->descr.sw_len = length;
@@ -1754,6 +1869,11 @@ e100_hardware_send_packet(char *buf, int length)
1754static void 1869static void
1755e100_clear_network_leds(unsigned long dummy) 1870e100_clear_network_leds(unsigned long dummy)
1756{ 1871{
1872 struct net_device *dev = (struct net_device *)dummy;
1873 struct net_local *np = netdev_priv(dev);
1874
1875 spin_lock(&np->led_lock);
1876
1757 if (led_active && time_after(jiffies, led_next_time)) { 1877 if (led_active && time_after(jiffies, led_next_time)) {
1758 e100_set_network_leds(NO_NETWORK_ACTIVITY); 1878 e100_set_network_leds(NO_NETWORK_ACTIVITY);
1759 1879
@@ -1761,6 +1881,8 @@ e100_clear_network_leds(unsigned long dummy)
1761 led_next_time = jiffies + NET_FLASH_PAUSE; 1881 led_next_time = jiffies + NET_FLASH_PAUSE;
1762 led_active = 0; 1882 led_active = 0;
1763 } 1883 }
1884
1885 spin_unlock(&np->led_lock);
1764} 1886}
1765 1887
1766static void 1888static void
@@ -1781,19 +1903,25 @@ e100_set_network_leds(int active)
1781#else 1903#else
1782 LED_NETWORK_SET(LED_OFF); 1904 LED_NETWORK_SET(LED_OFF);
1783#endif 1905#endif
1784 } 1906 } else if (light_leds) {
1785 else if (light_leds) {
1786 if (current_speed == 10) { 1907 if (current_speed == 10) {
1787 LED_NETWORK_SET(LED_ORANGE); 1908 LED_NETWORK_SET(LED_ORANGE);
1788 } else { 1909 } else {
1789 LED_NETWORK_SET(LED_GREEN); 1910 LED_NETWORK_SET(LED_GREEN);
1790 } 1911 }
1791 } 1912 } else {
1792 else {
1793 LED_NETWORK_SET(LED_OFF); 1913 LED_NETWORK_SET(LED_OFF);
1794 } 1914 }
1795} 1915}
1796 1916
1917#ifdef CONFIG_NET_POLL_CONTROLLER
1918static void
1919e100_netpoll(struct net_device* netdev)
1920{
1921 e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL);
1922}
1923#endif
1924
1797static int 1925static int
1798etrax_init_module(void) 1926etrax_init_module(void)
1799{ 1927{
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index a83c3db7d18f..c93d3d2640ab 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -64,6 +64,8 @@ int alloc_cpu_buffers(void)
64 b->head_pos = 0; 64 b->head_pos = 0;
65 b->sample_received = 0; 65 b->sample_received = 0;
66 b->sample_lost_overflow = 0; 66 b->sample_lost_overflow = 0;
67 b->backtrace_aborted = 0;
68 b->sample_invalid_eip = 0;
67 b->cpu = i; 69 b->cpu = i;
68 INIT_DELAYED_WORK(&b->work, wq_sync_buffer); 70 INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
69 } 71 }
@@ -175,6 +177,11 @@ static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
175 177
176 cpu_buf->sample_received++; 178 cpu_buf->sample_received++;
177 179
180 if (pc == ESCAPE_CODE) {
181 cpu_buf->sample_invalid_eip++;
182 return 0;
183 }
184
178 if (nr_available_slots(cpu_buf) < 3) { 185 if (nr_available_slots(cpu_buf) < 3) {
179 cpu_buf->sample_lost_overflow++; 186 cpu_buf->sample_lost_overflow++;
180 return 0; 187 return 0;
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 49900d9e3235..c66c025abe75 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -42,6 +42,7 @@ struct oprofile_cpu_buffer {
42 unsigned long sample_received; 42 unsigned long sample_received;
43 unsigned long sample_lost_overflow; 43 unsigned long sample_lost_overflow;
44 unsigned long backtrace_aborted; 44 unsigned long backtrace_aborted;
45 unsigned long sample_invalid_eip;
45 int cpu; 46 int cpu;
46 struct delayed_work work; 47 struct delayed_work work;
47} ____cacheline_aligned; 48} ____cacheline_aligned;
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index f0acb661c253..d1f6d776e9e4 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -26,6 +26,8 @@ void oprofile_reset_stats(void)
26 cpu_buf = &cpu_buffer[i]; 26 cpu_buf = &cpu_buffer[i];
27 cpu_buf->sample_received = 0; 27 cpu_buf->sample_received = 0;
28 cpu_buf->sample_lost_overflow = 0; 28 cpu_buf->sample_lost_overflow = 0;
29 cpu_buf->backtrace_aborted = 0;
30 cpu_buf->sample_invalid_eip = 0;
29 } 31 }
30 32
31 atomic_set(&oprofile_stats.sample_lost_no_mm, 0); 33 atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
@@ -61,6 +63,8 @@ void oprofile_create_stats_files(struct super_block * sb, struct dentry * root)
61 &cpu_buf->sample_lost_overflow); 63 &cpu_buf->sample_lost_overflow);
62 oprofilefs_create_ro_ulong(sb, cpudir, "backtrace_aborted", 64 oprofilefs_create_ro_ulong(sb, cpudir, "backtrace_aborted",
63 &cpu_buf->backtrace_aborted); 65 &cpu_buf->backtrace_aborted);
66 oprofilefs_create_ro_ulong(sb, cpudir, "sample_invalid_eip",
67 &cpu_buf->sample_invalid_eip);
64 } 68 }
65 69
66 oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mm", 70 oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mm",
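
The three oprofile hunks add a per-CPU sample_invalid_eip counter, bump it when a sample's pc equals ESCAPE_CODE, reset it together with the other statistics, and export it through oprofilefs next to sample_lost_overflow and backtrace_aborted. A minimal userspace read, assuming oprofilefs is mounted at its usual /dev/oprofile location (a convention, not something this patch sets), might look like:

#include <stdio.h>

int main(void)
{
	unsigned long invalid = 0;
	/* Path assumes oprofilefs is mounted on /dev/oprofile. */
	FILE *f = fopen("/dev/oprofile/stats/cpu0/sample_invalid_eip", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%lu", &invalid) == 1)
		printf("cpu0 samples with invalid eip: %lu\n", invalid);
	fclose(f);
	return 0;
}
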
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index cbde770eb121..e5cdc0294aaa 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -36,7 +36,9 @@ config RTC_HCTOSYS_DEVICE
36 help 36 help
37 The RTC device that will be used to (re)initialize the system 37 The RTC device that will be used to (re)initialize the system
38 clock, usually rtc0. Initialization is done when the system 38 clock, usually rtc0. Initialization is done when the system
39 starts up, and when it resumes from a low power state. 39 starts up, and when it resumes from a low power state. This
40 device should record time in UTC, since the kernel won't do
41 timezone correction.
40 42
41 The driver for this RTC device must be loaded before late_initcall 43 The driver for this RTC device must be loaded before late_initcall
42 functions run, so it must usually be statically linked. 44 functions run, so it must usually be statically linked.
@@ -133,8 +135,8 @@ config RTC_DRV_DS1307
133 135
134 The first seven registers on these chips hold an RTC, and other 136 The first seven registers on these chips hold an RTC, and other
135 registers may add features such as NVRAM, a trickle charger for 137 registers may add features such as NVRAM, a trickle charger for
136 the RTC/NVRAM backup power, and alarms. This driver may not 138 the RTC/NVRAM backup power, and alarms. NVRAM is visible in
137 expose all those available chip features. 139 sysfs, but other chip features may not be available.
138 140
139 This driver can also be built as a module. If so, the module 141 This driver can also be built as a module. If so, the module
140 will be called rtc-ds1307. 142 will be called rtc-ds1307.
diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
index 178527252c6a..33c0e98243ee 100644
--- a/drivers/rtc/hctosys.c
+++ b/drivers/rtc/hctosys.c
@@ -47,8 +47,8 @@ static int __init rtc_hctosys(void)
47 do_settimeofday(&tv); 47 do_settimeofday(&tv);
48 48
49 dev_info(rtc->dev.parent, 49 dev_info(rtc->dev.parent,
50 "setting the system clock to " 50 "setting system clock to "
51 "%d-%02d-%02d %02d:%02d:%02d (%u)\n", 51 "%d-%02d-%02d %02d:%02d:%02d UTC (%u)\n",
52 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, 52 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
53 tm.tm_hour, tm.tm_min, tm.tm_sec, 53 tm.tm_hour, tm.tm_min, tm.tm_sec,
54 (unsigned int) tv.tv_sec); 54 (unsigned int) tv.tv_sec);
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index db6f3f0d8982..bc1c7fe94ad3 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -89,6 +89,7 @@ enum ds_type {
89 89
90struct ds1307 { 90struct ds1307 {
91 u8 reg_addr; 91 u8 reg_addr;
92 bool has_nvram;
92 u8 regs[8]; 93 u8 regs[8];
93 enum ds_type type; 94 enum ds_type type;
94 struct i2c_msg msg[2]; 95 struct i2c_msg msg[2];
@@ -242,6 +243,87 @@ static const struct rtc_class_ops ds13xx_rtc_ops = {
242 .set_time = ds1307_set_time, 243 .set_time = ds1307_set_time,
243}; 244};
244 245
246/*----------------------------------------------------------------------*/
247
248#define NVRAM_SIZE 56
249
250static ssize_t
251ds1307_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
252 char *buf, loff_t off, size_t count)
253{
254 struct i2c_client *client;
255 struct ds1307 *ds1307;
256 struct i2c_msg msg[2];
257 int result;
258
259 client = to_i2c_client(container_of(kobj, struct device, kobj));
260 ds1307 = i2c_get_clientdata(client);
261
262 if (unlikely(off >= NVRAM_SIZE))
263 return 0;
264 if ((off + count) > NVRAM_SIZE)
265 count = NVRAM_SIZE - off;
266 if (unlikely(!count))
267 return count;
268
269 msg[0].addr = client->addr;
270 msg[0].flags = 0;
271 msg[0].len = 1;
272 msg[0].buf = buf;
273
274 buf[0] = 8 + off;
275
276 msg[1].addr = client->addr;
277 msg[1].flags = I2C_M_RD;
278 msg[1].len = count;
279 msg[1].buf = buf;
280
281 result = i2c_transfer(to_i2c_adapter(client->dev.parent), msg, 2);
282 if (result != 2) {
283 dev_err(&client->dev, "%s error %d\n", "nvram read", result);
284 return -EIO;
285 }
286 return count;
287}
288
289static ssize_t
290ds1307_nvram_write(struct kobject *kobj, struct bin_attribute *attr,
291 char *buf, loff_t off, size_t count)
292{
293 struct i2c_client *client;
294 u8 buffer[NVRAM_SIZE + 1];
295 int ret;
296
297 client = to_i2c_client(container_of(kobj, struct device, kobj));
298
299 if (unlikely(off >= NVRAM_SIZE))
300 return -EFBIG;
301 if ((off + count) > NVRAM_SIZE)
302 count = NVRAM_SIZE - off;
303 if (unlikely(!count))
304 return count;
305
306 buffer[0] = 8 + off;
307 memcpy(buffer + 1, buf, count);
308
309 ret = i2c_master_send(client, buffer, count + 1);
310 return (ret < 0) ? ret : (ret - 1);
311}
312
313static struct bin_attribute nvram = {
314 .attr = {
315 .name = "nvram",
316 .mode = S_IRUGO | S_IWUSR,
317 .owner = THIS_MODULE,
318 },
319
320 .read = ds1307_nvram_read,
321 .write = ds1307_nvram_write,
322 .size = NVRAM_SIZE,
323};
324
325/*----------------------------------------------------------------------*/
326
245static struct i2c_driver ds1307_driver; 327static struct i2c_driver ds1307_driver;
246 328
247static int __devinit ds1307_probe(struct i2c_client *client) 329static int __devinit ds1307_probe(struct i2c_client *client)
@@ -413,6 +495,14 @@ read_rtc:
413 goto exit_free; 495 goto exit_free;
414 } 496 }
415 497
498 if (chip->nvram56) {
499 err = sysfs_create_bin_file(&client->dev.kobj, &nvram);
500 if (err == 0) {
501 ds1307->has_nvram = true;
502 dev_info(&client->dev, "56 bytes nvram\n");
503 }
504 }
505
416 return 0; 506 return 0;
417 507
418exit_bad: 508exit_bad:
@@ -432,6 +522,9 @@ static int __devexit ds1307_remove(struct i2c_client *client)
432{ 522{
433 struct ds1307 *ds1307 = i2c_get_clientdata(client); 523 struct ds1307 *ds1307 = i2c_get_clientdata(client);
434 524
525 if (ds1307->has_nvram)
526 sysfs_remove_bin_file(&client->dev.kobj, &nvram);
527
435 rtc_device_unregister(ds1307->rtc); 528 rtc_device_unregister(ds1307->rtc);
436 kfree(ds1307); 529 kfree(ds1307);
437 return 0; 530 return 0;
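
With the new bin_attribute registered, the 56 bytes of DS1307 NVRAM show up as a file named nvram under the I2C client's sysfs directory, which is what the updated Kconfig help text refers to. A small read-back sketch from userspace; the 0-0068 path component is only an example (the bus number depends on the adapter, 0x68 is the chip's fixed slave address):

#include <stdio.h>

#define DS1307_NVRAM_SIZE 56	/* NVRAM_SIZE in the patch */

int main(void)
{
	unsigned char buf[DS1307_NVRAM_SIZE];
	size_t n;
	/* Example path; the real one depends on which I2C bus the chip is on. */
	FILE *f = fopen("/sys/bus/i2c/devices/0-0068/nvram", "rb");

	if (!f)
		return 1;
	n = fread(buf, 1, sizeof(buf), f);
	printf("read %zu bytes of NVRAM\n", n);
	fclose(f);
	return 0;
}
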
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index bb53c09bad16..d9e848dcd450 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -291,7 +291,7 @@ static ssize_t ds1553_nvram_write(struct kobject *kobj,
291static struct bin_attribute ds1553_nvram_attr = { 291static struct bin_attribute ds1553_nvram_attr = {
292 .attr = { 292 .attr = {
293 .name = "nvram", 293 .name = "nvram",
294 .mode = S_IRUGO | S_IWUGO, 294 .mode = S_IRUGO | S_IWUSR,
295 }, 295 },
296 .size = RTC_OFFSET, 296 .size = RTC_OFFSET,
297 .read = ds1553_nvram_read, 297 .read = ds1553_nvram_read,
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index c535b78698e2..2e73f0b183b2 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -160,10 +160,13 @@ static ssize_t ds1742_nvram_write(struct kobject *kobj,
160static struct bin_attribute ds1742_nvram_attr = { 160static struct bin_attribute ds1742_nvram_attr = {
161 .attr = { 161 .attr = {
162 .name = "nvram", 162 .name = "nvram",
163 .mode = S_IRUGO | S_IWUGO, 163 .mode = S_IRUGO | S_IWUSR,
164 }, 164 },
165 .read = ds1742_nvram_read, 165 .read = ds1742_nvram_read,
166 .write = ds1742_nvram_write, 166 .write = ds1742_nvram_write,
167 /* REVISIT: size in sysfs won't match actual size... if it's
168 * not a constant, each RTC should have its own attribute.
169 */
167}; 170};
168 171
169static int __devinit ds1742_rtc_probe(struct platform_device *pdev) 172static int __devinit ds1742_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 2bad1637330a..cd0bbc0e8038 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -353,11 +353,12 @@ static ssize_t m48t59_nvram_write(struct kobject *kobj,
353static struct bin_attribute m48t59_nvram_attr = { 353static struct bin_attribute m48t59_nvram_attr = {
354 .attr = { 354 .attr = {
355 .name = "nvram", 355 .name = "nvram",
356 .mode = S_IRUGO | S_IWUGO, 356 .mode = S_IRUGO | S_IWUSR,
357 .owner = THIS_MODULE, 357 .owner = THIS_MODULE,
358 }, 358 },
359 .read = m48t59_nvram_read, 359 .read = m48t59_nvram_read,
360 .write = m48t59_nvram_write, 360 .write = m48t59_nvram_write,
361 .size = M48T59_NVRAM_SIZE,
361}; 362};
362 363
363static int __devinit m48t59_rtc_probe(struct platform_device *pdev) 364static int __devinit m48t59_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index 8288b6b2bf2b..a265da7c6ff8 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -291,7 +291,7 @@ static ssize_t stk17ta8_nvram_write(struct kobject *kobj,
291static struct bin_attribute stk17ta8_nvram_attr = { 291static struct bin_attribute stk17ta8_nvram_attr = {
292 .attr = { 292 .attr = {
293 .name = "nvram", 293 .name = "nvram",
294 .mode = S_IRUGO | S_IWUGO, 294 .mode = S_IRUGO | S_IWUSR,
295 .owner = THIS_MODULE, 295 .owner = THIS_MODULE,
296 }, 296 },
297 .size = RTC_OFFSET, 297 .size = RTC_OFFSET,
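
The rtc-ds1553, rtc-ds1742, rtc-m48t59 and rtc-stk17ta8 hunks all make the same permission change on their nvram attribute. Spelled out in octal for reference (the patch itself only swaps the macros):

	S_IRUGO | S_IWUGO  ==  0666	/* old: readable and writable by everyone */
	S_IRUGO | S_IWUSR  ==  0644	/* new: readable by everyone, writable by the owner only */
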
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
index 5b0932f61473..06509bff71f7 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.c
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -377,7 +377,7 @@ out:
377 377
378#define FLASH_RESET 0xF0 378#define FLASH_RESET 0xF0
379 379
380#define FLASH_SIZE 0x200000 380#define ASD_FLASH_SIZE 0x200000
381#define FLASH_DIR_COOKIE "*** ADAPTEC FLASH DIRECTORY *** " 381#define FLASH_DIR_COOKIE "*** ADAPTEC FLASH DIRECTORY *** "
382#define FLASH_NEXT_ENTRY_OFFS 0x2000 382#define FLASH_NEXT_ENTRY_OFFS 0x2000
383#define FLASH_MAX_DIR_ENTRIES 32 383#define FLASH_MAX_DIR_ENTRIES 32
@@ -609,7 +609,7 @@ static int asd_find_flash_dir(struct asd_ha_struct *asd_ha,
609 struct asd_flash_dir *flash_dir) 609 struct asd_flash_dir *flash_dir)
610{ 610{
611 u32 v; 611 u32 v;
612 for (v = 0; v < FLASH_SIZE; v += FLASH_NEXT_ENTRY_OFFS) { 612 for (v = 0; v < ASD_FLASH_SIZE; v += FLASH_NEXT_ENTRY_OFFS) {
613 asd_read_flash_seg(asd_ha, flash_dir, v, 613 asd_read_flash_seg(asd_ha, flash_dir, v,
614 sizeof(FLASH_DIR_COOKIE)-1); 614 sizeof(FLASH_DIR_COOKIE)-1);
615 if (memcmp(flash_dir->cookie, FLASH_DIR_COOKIE, 615 if (memcmp(flash_dir->cookie, FLASH_DIR_COOKIE,
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
index 926f58a674a1..1de098e75497 100644
--- a/drivers/serial/8250_pnp.c
+++ b/drivers/serial/8250_pnp.c
@@ -69,6 +69,8 @@ static const struct pnp_device_id pnp_dev_table[] = {
69 { "CTL3001", 0 }, 69 { "CTL3001", 0 },
70 /* Creative Labs Modem Blaster 28.8 DSVD PnP Voice */ 70 /* Creative Labs Modem Blaster 28.8 DSVD PnP Voice */
71 { "CTL3011", 0 }, 71 { "CTL3011", 0 },
72 /* Davicom ISA 33.6K Modem */
73 { "DAV0336", 0 },
72 /* Creative */ 74 /* Creative */
73 /* Creative Modem Blaster Flash56 DI5601-1 */ 75 /* Creative Modem Blaster Flash56 DI5601-1 */
74 { "DMB1032", 0 }, 76 { "DMB1032", 0 },
@@ -345,6 +347,11 @@ static const struct pnp_device_id pnp_dev_table[] = {
345 /* Fujitsu Wacom Tablet PC devices */ 347 /* Fujitsu Wacom Tablet PC devices */
346 { "FUJ02E5", 0 }, 348 { "FUJ02E5", 0 },
347 { "FUJ02E6", 0 }, 349 { "FUJ02E6", 0 },
350 /*
351 * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in
352 * disguise)
353 */
354 { "LTS0001", 0 },
348 /* Rockwell's (PORALiNK) 33600 INT PNP */ 355 /* Rockwell's (PORALiNK) 33600 INT PNP */
349 { "WCI0003", 0 }, 356 { "WCI0003", 0 },
 350 /* Unknown PnP modems */ 357 /* Unknown PnP modems */
@@ -432,7 +439,8 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
432 } 439 }
433 440
434 memset(&port, 0, sizeof(struct uart_port)); 441 memset(&port, 0, sizeof(struct uart_port));
435 port.irq = pnp_irq(dev, 0); 442 if (pnp_irq_valid(dev, 0))
443 port.irq = pnp_irq(dev, 0);
436 if (pnp_port_valid(dev, 0)) { 444 if (pnp_port_valid(dev, 0)) {
437 port.iobase = pnp_port_start(dev, 0); 445 port.iobase = pnp_port_start(dev, 0);
438 port.iotype = UPIO_PORT; 446 port.iotype = UPIO_PORT;
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 4d6b3c56d20e..111da57f5334 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -204,8 +204,6 @@ static u_int atmel_get_mctrl(struct uart_port *port)
204 */ 204 */
205static void atmel_stop_tx(struct uart_port *port) 205static void atmel_stop_tx(struct uart_port *port)
206{ 206{
207 struct atmel_uart_port *atmel_port = (struct atmel_uart_port *) port;
208
209 UART_PUT_IDR(port, ATMEL_US_TXRDY); 207 UART_PUT_IDR(port, ATMEL_US_TXRDY);
210} 208}
211 209
@@ -214,8 +212,6 @@ static void atmel_stop_tx(struct uart_port *port)
214 */ 212 */
215static void atmel_start_tx(struct uart_port *port) 213static void atmel_start_tx(struct uart_port *port)
216{ 214{
217 struct atmel_uart_port *atmel_port = (struct atmel_uart_port *) port;
218
219 UART_PUT_IER(port, ATMEL_US_TXRDY); 215 UART_PUT_IER(port, ATMEL_US_TXRDY);
220} 216}
221 217
@@ -224,8 +220,6 @@ static void atmel_start_tx(struct uart_port *port)
224 */ 220 */
225static void atmel_stop_rx(struct uart_port *port) 221static void atmel_stop_rx(struct uart_port *port)
226{ 222{
227 struct atmel_uart_port *atmel_port = (struct atmel_uart_port *) port;
228
229 UART_PUT_IDR(port, ATMEL_US_RXRDY); 223 UART_PUT_IDR(port, ATMEL_US_RXRDY);
230} 224}
231 225
@@ -409,7 +403,6 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id)
409 */ 403 */
410static int atmel_startup(struct uart_port *port) 404static int atmel_startup(struct uart_port *port)
411{ 405{
412 struct atmel_uart_port *atmel_port = (struct atmel_uart_port *) port;
413 int retval; 406 int retval;
414 407
415 /* 408 /*
@@ -456,8 +449,6 @@ static int atmel_startup(struct uart_port *port)
456 */ 449 */
457static void atmel_shutdown(struct uart_port *port) 450static void atmel_shutdown(struct uart_port *port)
458{ 451{
459 struct atmel_uart_port *atmel_port = (struct atmel_uart_port *) port;
460
461 /* 452 /*
462 * Disable all interrupts, port and break condition. 453 * Disable all interrupts, port and break condition.
463 */ 454 */
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c
index f523cdf4b02b..a4e23cf47906 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/serial/crisv10.c
@@ -1,426 +1,10 @@
1/* $Id: serial.c,v 1.25 2004/09/29 10:33:49 starvik Exp $ 1/*
2 *
3 * Serial port driver for the ETRAX 100LX chip 2 * Serial port driver for the ETRAX 100LX chip
4 * 3 *
5 * Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003 Axis Communications AB 4 * Copyright (C) 1998-2007 Axis Communications AB
6 * 5 *
7 * Many, many authors. Based once upon a time on serial.c for 16x50. 6 * Many, many authors. Based once upon a time on serial.c for 16x50.
8 * 7 *
9 * $Log: serial.c,v $
10 * Revision 1.25 2004/09/29 10:33:49 starvik
11 * Resolved a dealock when printing debug from kernel.
12 *
13 * Revision 1.24 2004/08/27 23:25:59 johana
14 * rs_set_termios() must call change_speed() if c_iflag has changed or
15 * automatic XOFF handling will be enabled and transmitter will stop
16 * if 0x13 is received.
17 *
18 * Revision 1.23 2004/08/24 06:57:13 starvik
19 * More whitespace cleanup
20 *
21 * Revision 1.22 2004/08/24 06:12:20 starvik
22 * Whitespace cleanup
23 *
24 * Revision 1.20 2004/05/24 12:00:20 starvik
25 * Big merge of stuff from Linux 2.4 (e.g. manual mode for the serial port).
26 *
27 * Revision 1.19 2004/05/17 13:12:15 starvik
28 * Kernel console hook
29 * Big merge from Linux 2.4 still pending.
30 *
31 * Revision 1.18 2003/10/28 07:18:30 starvik
32 * Compiles with debug info
33 *
34 * Revision 1.17 2003/07/04 08:27:37 starvik
35 * Merge of Linux 2.5.74
36 *
37 * Revision 1.16 2003/06/13 10:05:19 johana
38 * Help the user to avoid trouble by:
39 * Forcing mixed mode for status/control lines if not all pins are used.
40 *
41 * Revision 1.15 2003/06/13 09:43:01 johana
42 * Merged in the following changes from os/linux/arch/cris/drivers/serial.c
43 * + some minor changes to reduce diff.
44 *
45 * Revision 1.49 2003/05/30 11:31:54 johana
46 * Merged in change-branch--serial9bit that adds CMSPAR support for sticky
47 * parity (mark/space)
48 *
49 * Revision 1.48 2003/05/30 11:03:57 johana
50 * Implemented rs_send_xchar() by disabling the DMA and writing manually.
51 * Added e100_disable_txdma_channel() and e100_enable_txdma_channel().
52 * Fixed rs_throttle() and rs_unthrottle() to properly call rs_send_xchar
53 * instead of setting info->x_char and check the CRTSCTS flag before
54 * controlling the rts pin.
55 *
56 * Revision 1.14 2003/04/09 08:12:44 pkj
57 * Corrected typo changes made upstream.
58 *
59 * Revision 1.13 2003/04/09 05:20:47 starvik
60 * Merge of Linux 2.5.67
61 *
62 * Revision 1.11 2003/01/22 06:48:37 starvik
63 * Fixed warnings issued by GCC 3.2.1
64 *
65 * Revision 1.9 2002/12/13 09:07:47 starvik
66 * Alert user that RX_TIMEOUT_TICKS==0 doesn't work
67 *
68 * Revision 1.8 2002/12/11 13:13:57 starvik
69 * Added arch/ to v10 specific includes
70 * Added fix from Linux 2.4 in serial.c (flush_to_flip_buffer)
71 *
72 * Revision 1.7 2002/12/06 07:13:57 starvik
73 * Corrected work queue stuff
74 * Removed CONFIG_ETRAX_SERIAL_FLUSH_DMA_FAST
75 *
76 * Revision 1.6 2002/11/21 07:17:46 starvik
77 * Change static inline to extern inline where otherwise outlined with gcc-3.2
78 *
79 * Revision 1.5 2002/11/14 15:59:49 starvik
80 * Linux 2.5 port of the latest serial driver from 2.4. The work queue stuff
81 * probably doesn't work yet.
82 *
83 * Revision 1.42 2002/11/05 09:08:47 johana
84 * Better implementation of rs_stop() and rs_start() that uses the XOFF
85 * register to start/stop transmission.
86 * change_speed() also initilises XOFF register correctly so that
87 * auto_xoff is enabled when IXON flag is set by user.
88 * This gives fast XOFF response times.
89 *
90 * Revision 1.41 2002/11/04 18:40:57 johana
91 * Implemented rs_stop() and rs_start().
92 * Simple tests using hwtestserial indicates that this should be enough
93 * to make it work.
94 *
95 * Revision 1.40 2002/10/14 05:33:18 starvik
96 * RS-485 uses fast timers even if SERIAL_FAST_TIMER is disabled
97 *
98 * Revision 1.39 2002/09/30 21:00:57 johana
99 * Support for CONFIG_ETRAX_SERx_DTR_RI_DSR_CD_MIXED where the status and
100 * control pins can be mixed between PA and PB.
101 * If no serial port uses MIXED old solution is used
102 * (saves a few bytes and cycles).
103 * control_pins struct uses masks instead of bit numbers.
104 * Corrected dummy values and polarity in line_info() so
105 * /proc/tty/driver/serial is now correct.
106 * (the E100_xxx_GET() macros is really active low - perhaps not obvious)
107 *
108 * Revision 1.38 2002/08/23 11:01:36 starvik
109 * Check that serial port is enabled in all interrupt handlers to avoid
110 * restarts of DMA channels not assigned to serial ports
111 *
112 * Revision 1.37 2002/08/13 13:02:37 bjornw
113 * Removed some warnings because of unused code
114 *
115 * Revision 1.36 2002/08/08 12:50:01 starvik
116 * Serial interrupt is shared with synchronous serial port driver
117 *
118 * Revision 1.35 2002/06/03 10:40:49 starvik
119 * Increased RS-485 RTS toggle timer to 2 characters
120 *
121 * Revision 1.34 2002/05/28 18:59:36 johana
122 * Whitespace and comment fixing to be more like etrax100ser.c 1.71.
123 *
124 * Revision 1.33 2002/05/28 17:55:43 johana
125 * RS-485 uses FAST_TIMER if enabled, and starts a short (one char time)
126 * timer from tranismit_chars (interrupt context).
127 * The timer toggles RTS in interrupt context when expired giving minimum
128 * latencies.
129 *
130 * Revision 1.32 2002/05/22 13:58:00 johana
131 * Renamed rs_write() to raw_write() and made it inline.
132 * New rs_write() handles RS-485 if configured and enabled
133 * (moved code from e100_write_rs485()).
134 * RS-485 ioctl's uses copy_from_user() instead of verify_area().
135 *
136 * Revision 1.31 2002/04/22 11:20:03 johana
137 * Updated copyright years.
138 *
139 * Revision 1.30 2002/04/22 09:39:12 johana
140 * RS-485 support compiles.
141 *
142 * Revision 1.29 2002/01/14 16:10:01 pkj
143 * Allocate the receive buffers dynamically. The static 4kB buffer was
144 * too small for the peaks. This means that we can get rid of the extra
145 * buffer and the copying to it. It also means we require less memory
146 * under normal operations, but can use more when needed (there is a
147 * cap at 64kB for safety reasons). If there is no memory available
148 * we panic(), and die a horrible death...
149 *
150 * Revision 1.28 2001/12/18 15:04:53 johana
151 * Cleaned up write_rs485() - now it works correctly without padding extra
152 * char.
153 * Added sane default initialisation of rs485.
154 * Added #ifdef around dummy variables.
155 *
156 * Revision 1.27 2001/11/29 17:00:41 pkj
157 * 2kB seems to be too small a buffer when using 921600 bps,
158 * so increase it to 4kB (this was already done for the elinux
159 * version of the serial driver).
160 *
161 * Revision 1.26 2001/11/19 14:20:41 pkj
162 * Minor changes to comments and unused code.
163 *
164 * Revision 1.25 2001/11/12 20:03:43 pkj
165 * Fixed compiler warnings.
166 *
167 * Revision 1.24 2001/11/12 15:10:05 pkj
168 * Total redesign of the receiving part of the serial driver.
169 * Uses eight chained descriptors to write to a 4kB buffer.
170 * This data is then serialised into a 2kB buffer. From there it
171 * is copied into the TTY's flip buffers when they become available.
172 * A lot of copying, and the sizes of the buffers might need to be
173 * tweaked, but all in all it should work better than the previous
174 * version, without the need to modify the TTY code in any way.
175 * Also note that erroneous bytes are now correctly marked in the
176 * flag buffers (instead of always marking the first byte).
177 *
178 * Revision 1.23 2001/10/30 17:53:26 pkj
179 * * Set info->uses_dma to 0 when a port is closed.
180 * * Mark the timer1 interrupt as a fast one (SA_INTERRUPT).
181 * * Call start_flush_timer() in start_receive() if
182 * CONFIG_ETRAX_SERIAL_FLUSH_DMA_FAST is defined.
183 *
184 * Revision 1.22 2001/10/30 17:44:03 pkj
185 * Use %lu for received and transmitted counters in line_info().
186 *
187 * Revision 1.21 2001/10/30 17:40:34 pkj
188 * Clean-up. The only change to functionality is that
189 * CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS(=5) is used instead of
190 * MAX_FLUSH_TIME(=8).
191 *
192 * Revision 1.20 2001/10/30 15:24:49 johana
193 * Added char_time stuff from 2.0 driver.
194 *
195 * Revision 1.19 2001/10/30 15:23:03 johana
196 * Merged with 1.13.2 branch + fixed indentation
197 * and changed CONFIG_ETRAX100_XYS to CONFIG_ETRAX_XYZ
198 *
199 * Revision 1.18 2001/09/24 09:27:22 pkj
200 * Completed ext_baud_table[] in cflag_to_baud() and cflag_to_etrax_baud().
201 *
202 * Revision 1.17 2001/08/24 11:32:49 ronny
203 * More fixes for the CONFIG_ETRAX_SERIAL_PORT0 define.
204 *
205 * Revision 1.16 2001/08/24 07:56:22 ronny
206 * Added config ifdefs around ser0 irq requests.
207 *
208 * Revision 1.15 2001/08/16 09:10:31 bjarne
209 * serial.c - corrected the initialization of rs_table, the wrong defines
210 * where used.
211 * Corrected a test in timed_flush_handler.
212 * Changed configured to enabled.
213 * serial.h - Changed configured to enabled.
214 *
215 * Revision 1.14 2001/08/15 07:31:23 bjarne
216 * Introduced two new members to the e100_serial struct.
217 * configured - Will be set to 1 if the port has been configured in .config
218 * uses_dma - Should be set to 1 if the port uses DMA. Currently it is set
219 * to 1
220 * when a port is opened. This is used to limit the DMA interrupt
221 * routines to only manipulate DMA channels actually used by the
222 * serial driver.
223 *
224 * Revision 1.13.2.2 2001/10/17 13:57:13 starvik
225 * Receiver was broken by the break fixes
226 *
227 * Revision 1.13.2.1 2001/07/20 13:57:39 ronny
228 * Merge with new stuff from etrax100ser.c. Works but haven't checked stuff
229 * like break handling.
230 *
231 * Revision 1.13 2001/05/09 12:40:31 johana
232 * Use DMA_NBR and IRQ_NBR defines from dma.h and irq.h
233 *
234 * Revision 1.12 2001/04/19 12:23:07 bjornw
235 * CONFIG_RS485 -> CONFIG_ETRAX_RS485
236 *
237 * Revision 1.11 2001/04/05 14:29:48 markusl
238 * Updated according to review remarks i.e.
239 * -Use correct types in port structure to avoid compiler warnings
240 * -Try to use IO_* macros whenever possible
241 * -Open should never return -EBUSY
242 *
243 * Revision 1.10 2001/03/05 13:14:07 bjornw
244 * Another spelling fix
245 *
246 * Revision 1.9 2001/02/23 13:46:38 bjornw
 247 * Spelling check
248 *
249 * Revision 1.8 2001/01/23 14:56:35 markusl
250 * Made use of ser1 optional
251 * Needed by USB
252 *
253 * Revision 1.7 2001/01/19 16:14:48 perf
254 * Added kernel options for serial ports 234.
255 * Changed option names from CONFIG_ETRAX100_XYZ to CONFIG_ETRAX_XYZ.
256 *
257 * Revision 1.6 2000/11/22 16:36:09 bjornw
258 * Please marketing by using the correct case when spelling Etrax.
259 *
260 * Revision 1.5 2000/11/21 16:43:37 bjornw
261 * Fixed so it compiles under CONFIG_SVINTO_SIM
262 *
263 * Revision 1.4 2000/11/15 17:34:12 bjornw
264 * Added a timeout timer for flushing input channels. The interrupt-based
265 * fast flush system should be easy to merge with this later (works the same
266 * way, only with an irq instead of a system timer_list)
267 *
268 * Revision 1.3 2000/11/13 17:19:57 bjornw
269 * * Incredibly, this almost complete rewrite of serial.c worked (at least
270 * for output) the first time.
271 *
272 * Items worth noticing:
273 *
274 * No Etrax100 port 1 workarounds (does only compile on 2.4 anyway now)
275 * RS485 is not ported (why can't it be done in userspace as on x86 ?)
276 * Statistics done through async_icount - if any more stats are needed,
277 * that's the place to put them or in an arch-dep version of it.
278 * timeout_interrupt and the other fast timeout stuff not ported yet
279 * There be dragons in this 3k+ line driver
280 *
281 * Revision 1.2 2000/11/10 16:50:28 bjornw
282 * First shot at a 2.4 port, does not compile totally yet
283 *
284 * Revision 1.1 2000/11/10 16:47:32 bjornw
285 * Added verbatim copy of rev 1.49 etrax100ser.c from elinux
286 *
287 * Revision 1.49 2000/10/30 15:47:14 tobiasa
288 * Changed version number.
289 *
290 * Revision 1.48 2000/10/25 11:02:43 johana
291 * Changed %ul to %lu in printf's
292 *
293 * Revision 1.47 2000/10/18 15:06:53 pkj
294 * Compile correctly with CONFIG_ETRAX_SERIAL_FLUSH_DMA_FAST and
295 * CONFIG_ETRAX_SERIAL_PROC_ENTRY together.
296 * Some clean-up of the /proc/serial file.
297 *
298 * Revision 1.46 2000/10/16 12:59:40 johana
299 * Added CONFIG_ETRAX_SERIAL_PROC_ENTRY for statistics and debug info.
300 *
301 * Revision 1.45 2000/10/13 17:10:59 pkj
302 * Do not flush DMAs while flipping TTY buffers.
303 *
304 * Revision 1.44 2000/10/13 16:34:29 pkj
305 * Added a delay in ser_interrupt() for 2.3ms when an error is detected.
306 * We do not know why this delay is required yet, but without it the
 307 * irmaflash program does not work (this was the program that made
 308 * ser_interrupt() necessary in the first place). This should not
309 * affect normal use of the serial ports.
310 *
311 * Revision 1.43 2000/10/13 16:30:44 pkj
312 * New version of the fast flush of serial buffers code. This time
313 * it is localized to the serial driver and uses a fast timer to
314 * do the work.
315 *
316 * Revision 1.42 2000/10/13 14:54:26 bennyo
317 * Fix for switching RTS when using rs485
318 *
319 * Revision 1.41 2000/10/12 11:43:44 pkj
320 * Cleaned up a number of comments.
321 *
322 * Revision 1.40 2000/10/10 11:58:39 johana
323 * Made RS485 support generic for all ports.
324 * Toggle rts in interrupt if no delay wanted.
325 * WARNING: No true transmitter empty check??
326 * Set d_wait bit when sending data so interrupt is delayed until
327 * fifo flushed. (Fix tcdrain() problem)
328 *
329 * Revision 1.39 2000/10/04 16:08:02 bjornw
330 * * Use virt_to_phys etc. for DMA addresses
331 * * Removed CONFIG_FLUSH_DMA_FAST hacks
332 * * Indentation fix
333 *
334 * Revision 1.38 2000/10/02 12:27:10 mattias
335 * * added variable used when using fast flush on serial dma.
336 * (CONFIG_FLUSH_DMA_FAST)
337 *
338 * Revision 1.37 2000/09/27 09:44:24 pkj
339 * Uncomment definition of SERIAL_HANDLE_EARLY_ERRORS.
340 *
341 * Revision 1.36 2000/09/20 13:12:52 johana
342 * Support for CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS:
343 * Number of timer ticks between flush of receive fifo (1 tick = 10ms).
344 * Try 0-3 for low latency applications. Approx 5 for high load
345 * applications (e.g. PPP). Maybe this should be more adaptive some day...
346 *
347 * Revision 1.35 2000/09/20 10:36:08 johana
348 * Typo in get_lsr_info()
349 *
350 * Revision 1.34 2000/09/20 10:29:59 johana
351 * Let rs_chars_in_buffer() check fifo content as well.
352 * get_lsr_info() might work now (not tested).
353 * Easier to change the port to debug.
354 *
355 * Revision 1.33 2000/09/13 07:52:11 torbjore
356 * Support RS485
357 *
358 * Revision 1.32 2000/08/31 14:45:37 bjornw
359 * After sending a break we need to reset the transmit DMA channel
360 *
361 * Revision 1.31 2000/06/21 12:13:29 johana
362 * Fixed wait for all chars sent when closing port.
363 * (Used to always take 1 second!)
364 * Added shadows for directions of status/ctrl signals.
365 *
366 * Revision 1.30 2000/05/29 16:27:55 bjornw
367 * Simulator ifdef moved a bit
368 *
369 * Revision 1.29 2000/05/09 09:40:30 mattias
370 * * Added description of dma registers used in timeout_interrupt
371 * * Removed old code
372 *
373 * Revision 1.28 2000/05/08 16:38:58 mattias
374 * * Bugfix for flushing fifo in timeout_interrupt
375 * Problem occurs when bluetooth stack waits for a small number of bytes
376 * containing an event acknowledging free buffers in bluetooth HW
377 * As before, data was stuck in fifo until more data came on uart and
378 * flushed it up to the stack.
379 *
380 * Revision 1.27 2000/05/02 09:52:28 jonasd
381 * Added fix for peculiar etrax behaviour when eop is forced on an empty
382 * fifo. This is used when flashing the IRMA chip. Disabled by default.
383 *
384 * Revision 1.26 2000/03/29 15:32:02 bjornw
385 * 2.0.34 updates
386 *
387 * Revision 1.25 2000/02/16 16:59:36 bjornw
388 * * Receive DMA directly into the flip-buffer, eliminating an intermediary
389 * receive buffer and a memcpy. Will avoid some overruns.
390 * * Error message on debug port if an overrun or flip buffer overrun occurs.
391 * * Just use the first byte in the flag flip buffer for errors.
392 * * Check for timeout on the serial ports only each 5/100 s, not 1/100.
393 *
394 * Revision 1.24 2000/02/09 18:02:28 bjornw
395 * * Clear serial errors (overrun, framing, parity) correctly. Before, the
396 * receiver would get stuck if an error occurred and we did not restart
397 * the input DMA.
398 * * Cosmetics (indentation, some code made into inlines)
399 * * Some more debug options
400 * * Actually shut down the serial port (DMA irq, DMA reset, receiver stop)
401 * when the last open is closed. Corresponding fixes in startup().
402 * * rs_close() "tx FIFO wait" code moved into right place, bug & -> && fixed
403 * and make a special case out of port 1 (R_DMA_CHx_STATUS is broken for that)
404 * * e100_disable_rx/enable_rx just disables/enables the receiver, not RTS
405 *
406 * Revision 1.23 2000/01/24 17:46:19 johana
407 * Wait for flush of DMA/FIFO when closing port.
408 *
409 * Revision 1.22 2000/01/20 18:10:23 johana
410 * Added TIOCMGET ioctl to return modem status.
411 * Implemented modem status/control that works with the extra signals
412 * (DTR, DSR, RI,CD) as well.
413 * 3 different modes supported:
414 * ser0 on PB (Bundy), ser1 on PB (Lisa) and ser2 on PA (Bundy)
415 * Fixed DEF_TX value that caused the serial transmitter pin (txd) to go to 0 when
 416 * closing the last filehandle. NASTY!
417 * Added break generation, not tested though!
 418 * Use IRQF_SHARED when calling request_irq() for ser2 and ser3 (shared with par0 and par1).
419 * You can't use them at the same time (yet..), but you can hopefully switch
420 * between ser2/par0, ser3/par1 with the same kernel config.
421 * Replaced some magic constants with defines
422 *
423 *
424 */ 8 */
425 9
426static char *serial_version = "$Revision: 1.25 $"; 10static char *serial_version = "$Revision: 1.25 $";
@@ -446,6 +30,7 @@ static char *serial_version = "$Revision: 1.25 $";
446 30
447#include <asm/io.h> 31#include <asm/io.h>
448#include <asm/irq.h> 32#include <asm/irq.h>
33#include <asm/dma.h>
449#include <asm/system.h> 34#include <asm/system.h>
450#include <linux/delay.h> 35#include <linux/delay.h>
451 36
@@ -454,8 +39,9 @@ static char *serial_version = "$Revision: 1.25 $";
454/* non-arch dependent serial structures are in linux/serial.h */ 39/* non-arch dependent serial structures are in linux/serial.h */
455#include <linux/serial.h> 40#include <linux/serial.h>
456/* while we keep our own stuff (struct e100_serial) in a local .h file */ 41/* while we keep our own stuff (struct e100_serial) in a local .h file */
457#include "serial.h" 42#include "crisv10.h"
458#include <asm/fasttimer.h> 43#include <asm/fasttimer.h>
44#include <asm/arch/io_interface_mux.h>
459 45
460#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER 46#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
461#ifndef CONFIG_ETRAX_FAST_TIMER 47#ifndef CONFIG_ETRAX_FAST_TIMER
@@ -504,18 +90,6 @@ struct tty_driver *serial_driver;
504 from eLinux */ 90 from eLinux */
505#define SERIAL_HANDLE_EARLY_ERRORS 91#define SERIAL_HANDLE_EARLY_ERRORS
506 92
507/* Defined and used in n_tty.c, but we need it here as well */
508#define TTY_THRESHOLD_THROTTLE 128
509
510/* Due to buffersizes and threshold values, our SERIAL_DESCR_BUF_SIZE
511 * must not be to high or flow control won't work if we leave it to the tty
512 * layer so we have our own throttling in flush_to_flip
513 * TTY_FLIPBUF_SIZE=512,
514 * TTY_THRESHOLD_THROTTLE/UNTHROTTLE=128
515 * BUF_SIZE can't be > 128
516 */
517#define CRIS_BUF_SIZE 512
518
519/* Currently 16 descriptors x 128 bytes = 2048 bytes */ 93/* Currently 16 descriptors x 128 bytes = 2048 bytes */
520#define SERIAL_DESCR_BUF_SIZE 256 94#define SERIAL_DESCR_BUF_SIZE 256
521 95
@@ -588,13 +162,13 @@ unsigned long timer_data_to_ns(unsigned long timer_data);
588static void change_speed(struct e100_serial *info); 162static void change_speed(struct e100_serial *info);
589static void rs_throttle(struct tty_struct * tty); 163static void rs_throttle(struct tty_struct * tty);
590static void rs_wait_until_sent(struct tty_struct *tty, int timeout); 164static void rs_wait_until_sent(struct tty_struct *tty, int timeout);
591static int rs_write(struct tty_struct * tty, int from_user, 165static int rs_write(struct tty_struct *tty,
592 const unsigned char *buf, int count); 166 const unsigned char *buf, int count);
593#ifdef CONFIG_ETRAX_RS485 167#ifdef CONFIG_ETRAX_RS485
594static int e100_write_rs485(struct tty_struct * tty, int from_user, 168static int e100_write_rs485(struct tty_struct *tty,
595 const unsigned char *buf, int count); 169 const unsigned char *buf, int count);
596#endif 170#endif
597static int get_lsr_info(struct e100_serial * info, unsigned int *value); 171static int get_lsr_info(struct e100_serial *info, unsigned int *value);
598 172
599 173
600#define DEF_BAUD 115200 /* 115.2 kbit/s */ 174#define DEF_BAUD 115200 /* 115.2 kbit/s */
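
The prototype hunk above drops the from_user argument from rs_write() and e100_write_rs485(): the tty core copies user data into a kernel buffer before calling the driver's write method, so the driver only ever sees kernel memory. A minimal sketch of a write method with the new three-argument signature follows; sketch_write() and sketch_tty_ops are illustrative names, not part of this driver.

#include <linux/tty.h>
#include <linux/tty_driver.h>

/* Sketch of a post-conversion write method: the tty layer has already
 * copied "buf" into kernel memory, so no copy_from_user() and no
 * from_user flag are needed here. */
static int sketch_write(struct tty_struct *tty,
                        const unsigned char *buf, int count)
{
        /* A real driver would queue "count" bytes from "buf" and return
         * how many it actually accepted; returning count accepts all. */
        return count;
}

static const struct tty_operations sketch_tty_ops = {
        .write = sketch_write,  /* three-argument signature */
};
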
@@ -679,20 +253,39 @@ static struct e100_serial rs_table[] = {
679 .rx_ctrl = DEF_RX, 253 .rx_ctrl = DEF_RX,
680 .tx_ctrl = DEF_TX, 254 .tx_ctrl = DEF_TX,
681 .iseteop = 2, 255 .iseteop = 2,
256 .dma_owner = dma_ser0,
257 .io_if = if_serial_0,
682#ifdef CONFIG_ETRAX_SERIAL_PORT0 258#ifdef CONFIG_ETRAX_SERIAL_PORT0
683 .enabled = 1, 259 .enabled = 1,
684#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA6_OUT 260#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA6_OUT
685 .dma_out_enabled = 1, 261 .dma_out_enabled = 1,
262 .dma_out_nbr = SER0_TX_DMA_NBR,
263 .dma_out_irq_nbr = SER0_DMA_TX_IRQ_NBR,
264 .dma_out_irq_flags = IRQF_DISABLED,
265 .dma_out_irq_description = "serial 0 dma tr",
686#else 266#else
687 .dma_out_enabled = 0, 267 .dma_out_enabled = 0,
268 .dma_out_nbr = UINT_MAX,
269 .dma_out_irq_nbr = 0,
270 .dma_out_irq_flags = 0,
271 .dma_out_irq_description = NULL,
688#endif 272#endif
689#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA7_IN 273#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA7_IN
690 .dma_in_enabled = 1, 274 .dma_in_enabled = 1,
275 .dma_in_nbr = SER0_RX_DMA_NBR,
276 .dma_in_irq_nbr = SER0_DMA_RX_IRQ_NBR,
277 .dma_in_irq_flags = IRQF_DISABLED,
278 .dma_in_irq_description = "serial 0 dma rec",
691#else 279#else
692 .dma_in_enabled = 0 280 .dma_in_enabled = 0,
281 .dma_in_nbr = UINT_MAX,
282 .dma_in_irq_nbr = 0,
283 .dma_in_irq_flags = 0,
284 .dma_in_irq_description = NULL,
693#endif 285#endif
694#else 286#else
695 .enabled = 0, 287 .enabled = 0,
288 .io_if_description = NULL,
696 .dma_out_enabled = 0, 289 .dma_out_enabled = 0,
697 .dma_in_enabled = 0 290 .dma_in_enabled = 0
698#endif 291#endif
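
The expanded port 0 entry above makes rs_table[] carry the DMA channel numbers, DMA IRQ numbers, IRQ flags and IRQ description strings per port instead of hard-coding them where the resources are requested. The sketch below shows how such table fields are typically handed to request_irq(); the helper name is invented, and only the dma_in_* members added in this hunk (declared in the new crisv10.h) are assumed.

#include <linux/interrupt.h>
#include "crisv10.h"    /* struct e100_serial with the dma_in_* members */

/* Illustrative helper: request the receive DMA interrupt for one port
 * using the table-driven fields added above.  Not the driver's actual
 * startup code. */
static int sketch_request_rx_irq(struct e100_serial *info,
                                 irq_handler_t handler)
{
        if (!info->dma_in_enabled)
                return 0;       /* port is configured without input DMA */

        return request_irq(info->dma_in_irq_nbr, handler,
                           info->dma_in_irq_flags,
                           info->dma_in_irq_description, info);
}
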
@@ -714,20 +307,42 @@ static struct e100_serial rs_table[] = {
714 .rx_ctrl = DEF_RX, 307 .rx_ctrl = DEF_RX,
715 .tx_ctrl = DEF_TX, 308 .tx_ctrl = DEF_TX,
716 .iseteop = 3, 309 .iseteop = 3,
310 .dma_owner = dma_ser1,
311 .io_if = if_serial_1,
717#ifdef CONFIG_ETRAX_SERIAL_PORT1 312#ifdef CONFIG_ETRAX_SERIAL_PORT1
718 .enabled = 1, 313 .enabled = 1,
314 .io_if_description = "ser1",
719#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA8_OUT 315#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA8_OUT
720 .dma_out_enabled = 1, 316 .dma_out_enabled = 1,
317 .dma_out_nbr = SER1_TX_DMA_NBR,
318 .dma_out_irq_nbr = SER1_DMA_TX_IRQ_NBR,
319 .dma_out_irq_flags = IRQF_DISABLED,
320 .dma_out_irq_description = "serial 1 dma tr",
721#else 321#else
722 .dma_out_enabled = 0, 322 .dma_out_enabled = 0,
323 .dma_out_nbr = UINT_MAX,
324 .dma_out_irq_nbr = 0,
325 .dma_out_irq_flags = 0,
326 .dma_out_irq_description = NULL,
723#endif 327#endif
724#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA9_IN 328#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA9_IN
725 .dma_in_enabled = 1, 329 .dma_in_enabled = 1,
330 .dma_in_nbr = SER1_RX_DMA_NBR,
331 .dma_in_irq_nbr = SER1_DMA_RX_IRQ_NBR,
332 .dma_in_irq_flags = IRQF_DISABLED,
333 .dma_in_irq_description = "serial 1 dma rec",
726#else 334#else
727 .dma_in_enabled = 0 335 .dma_in_enabled = 0,
336 .dma_in_enabled = 0,
337 .dma_in_nbr = UINT_MAX,
338 .dma_in_irq_nbr = 0,
339 .dma_in_irq_flags = 0,
340 .dma_in_irq_description = NULL,
728#endif 341#endif
729#else 342#else
730 .enabled = 0, 343 .enabled = 0,
344 .io_if_description = NULL,
345 .dma_in_irq_nbr = 0,
731 .dma_out_enabled = 0, 346 .dma_out_enabled = 0,
732 .dma_in_enabled = 0 347 .dma_in_enabled = 0
733#endif 348#endif
@@ -748,20 +363,40 @@ static struct e100_serial rs_table[] = {
748 .rx_ctrl = DEF_RX, 363 .rx_ctrl = DEF_RX,
749 .tx_ctrl = DEF_TX, 364 .tx_ctrl = DEF_TX,
750 .iseteop = 0, 365 .iseteop = 0,
366 .dma_owner = dma_ser2,
367 .io_if = if_serial_2,
751#ifdef CONFIG_ETRAX_SERIAL_PORT2 368#ifdef CONFIG_ETRAX_SERIAL_PORT2
752 .enabled = 1, 369 .enabled = 1,
370 .io_if_description = "ser2",
753#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA2_OUT 371#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA2_OUT
754 .dma_out_enabled = 1, 372 .dma_out_enabled = 1,
373 .dma_out_nbr = SER2_TX_DMA_NBR,
374 .dma_out_irq_nbr = SER2_DMA_TX_IRQ_NBR,
375 .dma_out_irq_flags = IRQF_DISABLED,
376 .dma_out_irq_description = "serial 2 dma tr",
755#else 377#else
756 .dma_out_enabled = 0, 378 .dma_out_enabled = 0,
379 .dma_out_nbr = UINT_MAX,
380 .dma_out_irq_nbr = 0,
381 .dma_out_irq_flags = 0,
382 .dma_out_irq_description = NULL,
757#endif 383#endif
758#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA3_IN 384#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA3_IN
759 .dma_in_enabled = 1, 385 .dma_in_enabled = 1,
386 .dma_in_nbr = SER2_RX_DMA_NBR,
387 .dma_in_irq_nbr = SER2_DMA_RX_IRQ_NBR,
388 .dma_in_irq_flags = IRQF_DISABLED,
389 .dma_in_irq_description = "serial 2 dma rec",
760#else 390#else
761 .dma_in_enabled = 0 391 .dma_in_enabled = 0,
392 .dma_in_nbr = UINT_MAX,
393 .dma_in_irq_nbr = 0,
394 .dma_in_irq_flags = 0,
395 .dma_in_irq_description = NULL,
762#endif 396#endif
763#else 397#else
764 .enabled = 0, 398 .enabled = 0,
399 .io_if_description = NULL,
765 .dma_out_enabled = 0, 400 .dma_out_enabled = 0,
766 .dma_in_enabled = 0 401 .dma_in_enabled = 0
767#endif 402#endif
@@ -782,20 +417,40 @@ static struct e100_serial rs_table[] = {
782 .rx_ctrl = DEF_RX, 417 .rx_ctrl = DEF_RX,
783 .tx_ctrl = DEF_TX, 418 .tx_ctrl = DEF_TX,
784 .iseteop = 1, 419 .iseteop = 1,
420 .dma_owner = dma_ser3,
421 .io_if = if_serial_3,
785#ifdef CONFIG_ETRAX_SERIAL_PORT3 422#ifdef CONFIG_ETRAX_SERIAL_PORT3
786 .enabled = 1, 423 .enabled = 1,
424 .io_if_description = "ser3",
787#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA4_OUT 425#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA4_OUT
788 .dma_out_enabled = 1, 426 .dma_out_enabled = 1,
427 .dma_out_nbr = SER3_TX_DMA_NBR,
428 .dma_out_irq_nbr = SER3_DMA_TX_IRQ_NBR,
429 .dma_out_irq_flags = IRQF_DISABLED,
430 .dma_out_irq_description = "serial 3 dma tr",
789#else 431#else
790 .dma_out_enabled = 0, 432 .dma_out_enabled = 0,
433 .dma_out_nbr = UINT_MAX,
434 .dma_out_irq_nbr = 0,
435 .dma_out_irq_flags = 0,
436 .dma_out_irq_description = NULL,
791#endif 437#endif
792#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA5_IN 438#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA5_IN
793 .dma_in_enabled = 1, 439 .dma_in_enabled = 1,
440 .dma_in_nbr = SER3_RX_DMA_NBR,
441 .dma_in_irq_nbr = SER3_DMA_RX_IRQ_NBR,
442 .dma_in_irq_flags = IRQF_DISABLED,
443 .dma_in_irq_description = "serial 3 dma rec",
794#else 444#else
795 .dma_in_enabled = 0 445 .dma_in_enabled = 0,
446 .dma_in_nbr = UINT_MAX,
447 .dma_in_irq_nbr = 0,
448 .dma_in_irq_flags = 0,
449 .dma_in_irq_description = NULL
796#endif 450#endif
797#else 451#else
798 .enabled = 0, 452 .enabled = 0,
453 .io_if_description = NULL,
799 .dma_out_enabled = 0, 454 .dma_out_enabled = 0,
800 .dma_in_enabled = 0 455 .dma_in_enabled = 0
801#endif 456#endif
@@ -1416,12 +1071,11 @@ e100_dtr(struct e100_serial *info, int set)
1416 { 1071 {
1417 unsigned long flags; 1072 unsigned long flags;
1418 1073
1419 save_flags(flags); 1074 local_irq_save(flags);
1420 cli();
1421 *e100_modem_pins[info->line].dtr_shadow &= ~mask; 1075 *e100_modem_pins[info->line].dtr_shadow &= ~mask;
1422 *e100_modem_pins[info->line].dtr_shadow |= (set ? 0 : mask); 1076 *e100_modem_pins[info->line].dtr_shadow |= (set ? 0 : mask);
1423 *e100_modem_pins[info->line].dtr_port = *e100_modem_pins[info->line].dtr_shadow; 1077 *e100_modem_pins[info->line].dtr_port = *e100_modem_pins[info->line].dtr_shadow;
1424 restore_flags(flags); 1078 local_irq_restore(flags);
1425 } 1079 }
1426 1080
1427#ifdef SERIAL_DEBUG_IO 1081#ifdef SERIAL_DEBUG_IO
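
e100_dtr() above shows the conversion applied throughout the rest of this patch: the deprecated save_flags()/cli()/restore_flags() sequence becomes local_irq_save()/local_irq_restore(), which saves and disables interrupts on the local CPU only. A minimal sketch of the pattern, with a plain shared variable standing in for the driver's pin-shadow bookkeeping:

#include <linux/irqflags.h>

static unsigned long sketch_shadow;     /* stands in for a *_shadow variable */

/* New style, as used in this patch: save and disable local interrupts,
 * do the read-modify-write, then restore the saved state. */
static void sketch_update_shadow(unsigned long set_bits)
{
        unsigned long flags;

        local_irq_save(flags);          /* replaces save_flags(flags); cli(); */
        sketch_shadow |= set_bits;
        local_irq_restore(flags);       /* replaces restore_flags(flags); */
}
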
@@ -1440,12 +1094,11 @@ e100_rts(struct e100_serial *info, int set)
1440{ 1094{
1441#ifndef CONFIG_SVINTO_SIM 1095#ifndef CONFIG_SVINTO_SIM
1442 unsigned long flags; 1096 unsigned long flags;
1443 save_flags(flags); 1097 local_irq_save(flags);
1444 cli();
1445 info->rx_ctrl &= ~E100_RTS_MASK; 1098 info->rx_ctrl &= ~E100_RTS_MASK;
1446 info->rx_ctrl |= (set ? 0 : E100_RTS_MASK); /* RTS is active low */ 1099 info->rx_ctrl |= (set ? 0 : E100_RTS_MASK); /* RTS is active low */
1447 info->port[REG_REC_CTRL] = info->rx_ctrl; 1100 info->port[REG_REC_CTRL] = info->rx_ctrl;
1448 restore_flags(flags); 1101 local_irq_restore(flags);
1449#ifdef SERIAL_DEBUG_IO 1102#ifdef SERIAL_DEBUG_IO
1450 printk("ser%i rts %i\n", info->line, set); 1103 printk("ser%i rts %i\n", info->line, set);
1451#endif 1104#endif
@@ -1463,12 +1116,11 @@ e100_ri_out(struct e100_serial *info, int set)
1463 unsigned char mask = e100_modem_pins[info->line].ri_mask; 1116 unsigned char mask = e100_modem_pins[info->line].ri_mask;
1464 unsigned long flags; 1117 unsigned long flags;
1465 1118
1466 save_flags(flags); 1119 local_irq_save(flags);
1467 cli();
1468 *e100_modem_pins[info->line].ri_shadow &= ~mask; 1120 *e100_modem_pins[info->line].ri_shadow &= ~mask;
1469 *e100_modem_pins[info->line].ri_shadow |= (set ? 0 : mask); 1121 *e100_modem_pins[info->line].ri_shadow |= (set ? 0 : mask);
1470 *e100_modem_pins[info->line].ri_port = *e100_modem_pins[info->line].ri_shadow; 1122 *e100_modem_pins[info->line].ri_port = *e100_modem_pins[info->line].ri_shadow;
1471 restore_flags(flags); 1123 local_irq_restore(flags);
1472 } 1124 }
1473#endif 1125#endif
1474} 1126}
@@ -1481,12 +1133,11 @@ e100_cd_out(struct e100_serial *info, int set)
1481 unsigned char mask = e100_modem_pins[info->line].cd_mask; 1133 unsigned char mask = e100_modem_pins[info->line].cd_mask;
1482 unsigned long flags; 1134 unsigned long flags;
1483 1135
1484 save_flags(flags); 1136 local_irq_save(flags);
1485 cli();
1486 *e100_modem_pins[info->line].cd_shadow &= ~mask; 1137 *e100_modem_pins[info->line].cd_shadow &= ~mask;
1487 *e100_modem_pins[info->line].cd_shadow |= (set ? 0 : mask); 1138 *e100_modem_pins[info->line].cd_shadow |= (set ? 0 : mask);
1488 *e100_modem_pins[info->line].cd_port = *e100_modem_pins[info->line].cd_shadow; 1139 *e100_modem_pins[info->line].cd_port = *e100_modem_pins[info->line].cd_shadow;
1489 restore_flags(flags); 1140 local_irq_restore(flags);
1490 } 1141 }
1491#endif 1142#endif
1492} 1143}
@@ -1560,8 +1211,7 @@ static void e100_disable_txdma_channel(struct e100_serial *info)
1560 /* Disable output DMA channel for the serial port in question 1211 /* Disable output DMA channel for the serial port in question
1561 * ( set to something other than serialX) 1212 * ( set to something other than serialX)
1562 */ 1213 */
1563 save_flags(flags); 1214 local_irq_save(flags);
1564 cli();
1565 DFLOW(DEBUG_LOG(info->line, "disable_txdma_channel %i\n", info->line)); 1215 DFLOW(DEBUG_LOG(info->line, "disable_txdma_channel %i\n", info->line));
1566 if (info->line == 0) { 1216 if (info->line == 0) {
1567 if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma6)) == 1217 if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma6)) ==
@@ -1589,7 +1239,7 @@ static void e100_disable_txdma_channel(struct e100_serial *info)
1589 } 1239 }
1590 } 1240 }
1591 *R_GEN_CONFIG = genconfig_shadow; 1241 *R_GEN_CONFIG = genconfig_shadow;
1592 restore_flags(flags); 1242 local_irq_restore(flags);
1593} 1243}
1594 1244
1595 1245
@@ -1597,8 +1247,7 @@ static void e100_enable_txdma_channel(struct e100_serial *info)
1597{ 1247{
1598 unsigned long flags; 1248 unsigned long flags;
1599 1249
1600 save_flags(flags); 1250 local_irq_save(flags);
1601 cli();
1602 DFLOW(DEBUG_LOG(info->line, "enable_txdma_channel %i\n", info->line)); 1251 DFLOW(DEBUG_LOG(info->line, "enable_txdma_channel %i\n", info->line));
1603 /* Enable output DMA channel for the serial port in question */ 1252 /* Enable output DMA channel for the serial port in question */
1604 if (info->line == 0) { 1253 if (info->line == 0) {
@@ -1615,7 +1264,7 @@ static void e100_enable_txdma_channel(struct e100_serial *info)
1615 genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma4, serial3); 1264 genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma4, serial3);
1616 } 1265 }
1617 *R_GEN_CONFIG = genconfig_shadow; 1266 *R_GEN_CONFIG = genconfig_shadow;
1618 restore_flags(flags); 1267 local_irq_restore(flags);
1619} 1268}
1620 1269
1621static void e100_disable_rxdma_channel(struct e100_serial *info) 1270static void e100_disable_rxdma_channel(struct e100_serial *info)
@@ -1625,8 +1274,7 @@ static void e100_disable_rxdma_channel(struct e100_serial *info)
1625 /* Disable input DMA channel for the serial port in question 1274 /* Disable input DMA channel for the serial port in question
1626 * ( set to something other than serialX) 1275 * ( set to something other than serialX)
1627 */ 1276 */
1628 save_flags(flags); 1277 local_irq_save(flags);
1629 cli();
1630 if (info->line == 0) { 1278 if (info->line == 0) {
1631 if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma7)) == 1279 if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma7)) ==
1632 IO_STATE(R_GEN_CONFIG, dma7, serial0)) { 1280 IO_STATE(R_GEN_CONFIG, dma7, serial0)) {
@@ -1653,7 +1301,7 @@ static void e100_disable_rxdma_channel(struct e100_serial *info)
1653 } 1301 }
1654 } 1302 }
1655 *R_GEN_CONFIG = genconfig_shadow; 1303 *R_GEN_CONFIG = genconfig_shadow;
1656 restore_flags(flags); 1304 local_irq_restore(flags);
1657} 1305}
1658 1306
1659 1307
@@ -1661,8 +1309,7 @@ static void e100_enable_rxdma_channel(struct e100_serial *info)
1661{ 1309{
1662 unsigned long flags; 1310 unsigned long flags;
1663 1311
1664 save_flags(flags); 1312 local_irq_save(flags);
1665 cli();
1666 /* Enable input DMA channel for the serial port in question */ 1313 /* Enable input DMA channel for the serial port in question */
1667 if (info->line == 0) { 1314 if (info->line == 0) {
1668 genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma7); 1315 genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma7);
@@ -1678,7 +1325,7 @@ static void e100_enable_rxdma_channel(struct e100_serial *info)
1678 genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma5, serial3); 1325 genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma5, serial3);
1679 } 1326 }
1680 *R_GEN_CONFIG = genconfig_shadow; 1327 *R_GEN_CONFIG = genconfig_shadow;
1681 restore_flags(flags); 1328 local_irq_restore(flags);
1682} 1329}
1683 1330
1684#ifdef SERIAL_HANDLE_EARLY_ERRORS 1331#ifdef SERIAL_HANDLE_EARLY_ERRORS
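
The four e100_{en,dis}able_{tx,rx}dma_channel() hunks above all follow the same shadow-register idiom: the DMA-to-peripheral routing lives in R_GEN_CONFIG, the driver keeps genconfig_shadow rather than reading the register back, and each update clears the field with IO_MASK(), sets it with IO_STATE() and writes the whole shadow out under local_irq_save(). The sketch below shows the same idea with plain shift/mask arithmetic; the field layout is invented, since IO_MASK()/IO_STATE() are CRIS register-description macros.

#include <linux/irqflags.h>

#define SKETCH_DMA6_SHIFT       8                               /* invented field position */
#define SKETCH_DMA6_MASK        (3UL << SKETCH_DMA6_SHIFT)
#define SKETCH_DMA6_SERIAL0     (1UL << SKETCH_DMA6_SHIFT)
#define SKETCH_DMA6_UNUSED      (0UL << SKETCH_DMA6_SHIFT)

static unsigned long sketch_genconfig_shadow;
static volatile unsigned long sketch_r_gen_config;      /* stands in for *R_GEN_CONFIG */

/* Route the (invented) dma6 field away from serial0: clear the field in
 * the shadow, set the new value, and write the whole shadow back, with
 * local interrupts off so no other path sees a half-updated shadow. */
static void sketch_disable_txdma(void)
{
        unsigned long flags;

        local_irq_save(flags);
        if ((sketch_genconfig_shadow & SKETCH_DMA6_MASK) == SKETCH_DMA6_SERIAL0) {
                sketch_genconfig_shadow &= ~SKETCH_DMA6_MASK;
                sketch_genconfig_shadow |= SKETCH_DMA6_UNUSED;
        }
        sketch_r_gen_config = sketch_genconfig_shadow;
        local_irq_restore(flags);
}
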
@@ -1785,7 +1432,7 @@ e100_enable_rs485(struct tty_struct *tty,struct rs485_control *r)
1785} 1432}
1786 1433
1787static int 1434static int
1788e100_write_rs485(struct tty_struct *tty, int from_user, 1435e100_write_rs485(struct tty_struct *tty,
1789 const unsigned char *buf, int count) 1436 const unsigned char *buf, int count)
1790{ 1437{
1791 struct e100_serial * info = (struct e100_serial *)tty->driver_data; 1438 struct e100_serial * info = (struct e100_serial *)tty->driver_data;
@@ -1798,7 +1445,7 @@ e100_write_rs485(struct tty_struct *tty, int from_user,
1798 */ 1445 */
1799 info->rs485.enabled = 1; 1446 info->rs485.enabled = 1;
1800 /* rs_write now deals with RS485 if enabled */ 1447 /* rs_write now deals with RS485 if enabled */
1801 count = rs_write(tty, from_user, buf, count); 1448 count = rs_write(tty, buf, count);
1802 info->rs485.enabled = old_enabled; 1449 info->rs485.enabled = old_enabled;
1803 return count; 1450 return count;
1804} 1451}
@@ -1836,7 +1483,7 @@ rs_stop(struct tty_struct *tty)
1836 unsigned long flags; 1483 unsigned long flags;
1837 unsigned long xoff; 1484 unsigned long xoff;
1838 1485
1839 save_flags(flags); cli(); 1486 local_irq_save(flags);
1840 DFLOW(DEBUG_LOG(info->line, "XOFF rs_stop xmit %i\n", 1487 DFLOW(DEBUG_LOG(info->line, "XOFF rs_stop xmit %i\n",
1841 CIRC_CNT(info->xmit.head, 1488 CIRC_CNT(info->xmit.head,
1842 info->xmit.tail,SERIAL_XMIT_SIZE))); 1489 info->xmit.tail,SERIAL_XMIT_SIZE)));
@@ -1848,7 +1495,7 @@ rs_stop(struct tty_struct *tty)
1848 } 1495 }
1849 1496
1850 *((unsigned long *)&info->port[REG_XOFF]) = xoff; 1497 *((unsigned long *)&info->port[REG_XOFF]) = xoff;
1851 restore_flags(flags); 1498 local_irq_restore(flags);
1852 } 1499 }
1853} 1500}
1854 1501
@@ -1860,7 +1507,7 @@ rs_start(struct tty_struct *tty)
1860 unsigned long flags; 1507 unsigned long flags;
1861 unsigned long xoff; 1508 unsigned long xoff;
1862 1509
1863 save_flags(flags); cli(); 1510 local_irq_save(flags);
1864 DFLOW(DEBUG_LOG(info->line, "XOFF rs_start xmit %i\n", 1511 DFLOW(DEBUG_LOG(info->line, "XOFF rs_start xmit %i\n",
1865 CIRC_CNT(info->xmit.head, 1512 CIRC_CNT(info->xmit.head,
1866 info->xmit.tail,SERIAL_XMIT_SIZE))); 1513 info->xmit.tail,SERIAL_XMIT_SIZE)));
@@ -1875,7 +1522,7 @@ rs_start(struct tty_struct *tty)
1875 info->xmit.head != info->xmit.tail && info->xmit.buf) 1522 info->xmit.head != info->xmit.tail && info->xmit.buf)
1876 e100_enable_serial_tx_ready_irq(info); 1523 e100_enable_serial_tx_ready_irq(info);
1877 1524
1878 restore_flags(flags); 1525 local_irq_restore(flags);
1879 } 1526 }
1880} 1527}
1881 1528
@@ -2055,8 +1702,7 @@ static int serial_fast_timer_expired = 0;
2055static void flush_timeout_function(unsigned long data); 1702static void flush_timeout_function(unsigned long data);
2056#define START_FLUSH_FAST_TIMER_TIME(info, string, usec) {\ 1703#define START_FLUSH_FAST_TIMER_TIME(info, string, usec) {\
2057 unsigned long timer_flags; \ 1704 unsigned long timer_flags; \
2058 save_flags(timer_flags); \ 1705 local_irq_save(timer_flags); \
2059 cli(); \
2060 if (fast_timers[info->line].function == NULL) { \ 1706 if (fast_timers[info->line].function == NULL) { \
2061 serial_fast_timer_started++; \ 1707 serial_fast_timer_started++; \
2062 TIMERD(DEBUG_LOG(info->line, "start_timer %i ", info->line)); \ 1708 TIMERD(DEBUG_LOG(info->line, "start_timer %i ", info->line)); \
@@ -2070,7 +1716,7 @@ static void flush_timeout_function(unsigned long data);
2070 else { \ 1716 else { \
2071 TIMERD(DEBUG_LOG(info->line, "timer %i already running\n", info->line)); \ 1717 TIMERD(DEBUG_LOG(info->line, "timer %i already running\n", info->line)); \
2072 } \ 1718 } \
2073 restore_flags(timer_flags); \ 1719 local_irq_restore(timer_flags); \
2074} 1720}
2075#define START_FLUSH_FAST_TIMER(info, string) START_FLUSH_FAST_TIMER_TIME(info, string, info->flush_time_usec) 1721#define START_FLUSH_FAST_TIMER(info, string) START_FLUSH_FAST_TIMER_TIME(info, string, info->flush_time_usec)
2076 1722
@@ -2099,8 +1745,7 @@ append_recv_buffer(struct e100_serial *info, struct etrax_recv_buffer *buffer)
2099{ 1745{
2100 unsigned long flags; 1746 unsigned long flags;
2101 1747
2102 save_flags(flags); 1748 local_irq_save(flags);
2103 cli();
2104 1749
2105 if (!info->first_recv_buffer) 1750 if (!info->first_recv_buffer)
2106 info->first_recv_buffer = buffer; 1751 info->first_recv_buffer = buffer;
@@ -2113,7 +1758,7 @@ append_recv_buffer(struct e100_serial *info, struct etrax_recv_buffer *buffer)
2113 if (info->recv_cnt > info->max_recv_cnt) 1758 if (info->recv_cnt > info->max_recv_cnt)
2114 info->max_recv_cnt = info->recv_cnt; 1759 info->max_recv_cnt = info->recv_cnt;
2115 1760
2116 restore_flags(flags); 1761 local_irq_restore(flags);
2117} 1762}
2118 1763
2119static int 1764static int
@@ -2133,11 +1778,7 @@ add_char_and_flag(struct e100_serial *info, unsigned char data, unsigned char fl
2133 info->icount.rx++; 1778 info->icount.rx++;
2134 } else { 1779 } else {
2135 struct tty_struct *tty = info->tty; 1780 struct tty_struct *tty = info->tty;
2136 *tty->flip.char_buf_ptr = data; 1781 tty_insert_flip_char(tty, data, flag);
2137 *tty->flip.flag_buf_ptr = flag;
2138 tty->flip.flag_buf_ptr++;
2139 tty->flip.char_buf_ptr++;
2140 tty->flip.count++;
2141 info->icount.rx++; 1782 info->icount.rx++;
2142 } 1783 }
2143 1784
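
add_char_and_flag() above stops poking tty->flip.char_buf_ptr and flag_buf_ptr directly and calls tty_insert_flip_char() instead, which does the flip-buffer bookkeeping internally. A minimal sketch of the call pattern for this kernel generation, where the flip helpers still take the tty_struct itself:

#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Queue one received byte with its per-character status flag and hand
 * it to the line discipline.  Illustrative only. */
static void sketch_push_rx_byte(struct tty_struct *tty,
                                unsigned char data, char flag)
{
        /* flag is TTY_NORMAL, TTY_PARITY, TTY_FRAME, TTY_OVERRUN or TTY_BREAK */
        tty_insert_flip_char(tty, data, flag);
        tty_flip_buffer_push(tty);
}
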
@@ -2322,7 +1963,6 @@ start_receive(struct e100_serial *info)
2322 */ 1963 */
2323 return; 1964 return;
2324#endif 1965#endif
2325 info->tty->flip.count = 0;
2326 if (info->uses_dma_in) { 1966 if (info->uses_dma_in) {
2327 /* reset the input dma channel to be sure it works */ 1967 /* reset the input dma channel to be sure it works */
2328 1968
@@ -2484,32 +2124,20 @@ static void flush_to_flip_buffer(struct e100_serial *info)
2484{ 2124{
2485 struct tty_struct *tty; 2125 struct tty_struct *tty;
2486 struct etrax_recv_buffer *buffer; 2126 struct etrax_recv_buffer *buffer;
2487 unsigned int length;
2488 unsigned long flags; 2127 unsigned long flags;
2489 int max_flip_size;
2490
2491 if (!info->first_recv_buffer)
2492 return;
2493 2128
2494 save_flags(flags); 2129 local_irq_save(flags);
2495 cli(); 2130 tty = info->tty;
2496 2131
2497 if (!(tty = info->tty)) { 2132 if (!tty) {
2498 restore_flags(flags); 2133 local_irq_restore(flags);
2499 return; 2134 return;
2500 } 2135 }
2501 2136
2502 while ((buffer = info->first_recv_buffer) != NULL) { 2137 while ((buffer = info->first_recv_buffer) != NULL) {
2503 unsigned int count = buffer->length; 2138 unsigned int count = buffer->length;
2504 2139
2505 count = tty_buffer_request_room(tty, count); 2140 tty_insert_flip_string(tty, buffer->buffer, count);
2506 if (count == 0) /* Throttle ?? */
2507 break;
2508
2509 if (count > 1)
2510 tty_insert_flip_strings(tty, buffer->buffer, count - 1);
2511 tty_insert_flip_char(tty, buffer->buffer[count-1], buffer->error);
2512
2513 info->recv_cnt -= count; 2141 info->recv_cnt -= count;
2514 2142
2515 if (count == buffer->length) { 2143 if (count == buffer->length) {
@@ -2525,18 +2153,9 @@ static void flush_to_flip_buffer(struct e100_serial *info)
2525 if (!info->first_recv_buffer) 2153 if (!info->first_recv_buffer)
2526 info->last_recv_buffer = NULL; 2154 info->last_recv_buffer = NULL;
2527 2155
2528 restore_flags(flags); 2156 local_irq_restore(flags);
2529
2530 DFLIP(
2531 if (1) {
2532 DEBUG_LOG(info->line, "*** rxtot %i\n", info->icount.rx);
2533 DEBUG_LOG(info->line, "ldisc %lu\n", tty->ldisc.chars_in_buffer(tty));
2534 DEBUG_LOG(info->line, "room %lu\n", tty->ldisc.receive_room(tty));
2535 }
2536 2157
2537 ); 2158 /* This includes a check for low-latency */
2538
2539 /* this includes a check for low-latency */
2540 tty_flip_buffer_push(tty); 2159 tty_flip_buffer_push(tty);
2541} 2160}
2542 2161
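
flush_to_flip_buffer() above now pushes each driver-side receive buffer with one tty_insert_flip_string() call and finishes with a single tty_flip_buffer_push(), instead of asking tty_buffer_request_room() for space and inserting the last byte separately. Below is a sketch of draining a driver-private receive list this way; the list type is invented for the example and stands in for struct etrax_recv_buffer.

#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>

/* Illustrative receive buffer node; the real driver's struct has a few
 * more fields. */
struct sketch_rx_buf {
        struct sketch_rx_buf *next;
        unsigned int length;
        unsigned char data[256];
};

static void sketch_drain_rx(struct tty_struct *tty, struct sketch_rx_buf **head)
{
        struct sketch_rx_buf *buf;

        while ((buf = *head) != NULL) {
                /* Copies as much as the tty buffers will take right now;
                 * the return value is ignored here, as in the patch. */
                tty_insert_flip_string(tty, buf->data, buf->length);
                *head = buf->next;
                kfree(buf);
        }
        tty_flip_buffer_push(tty);      /* includes the low_latency check */
}
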
@@ -2679,21 +2298,7 @@ struct e100_serial * handle_ser_rx_interrupt_no_dma(struct e100_serial *info)
2679 printk("!NO TTY!\n"); 2298 printk("!NO TTY!\n");
2680 return info; 2299 return info;
2681 } 2300 }
2682 if (tty->flip.count >= CRIS_BUF_SIZE - TTY_THRESHOLD_THROTTLE) { 2301
2683 /* check TTY_THROTTLED first so it indicates our state */
2684 if (!test_and_set_bit(TTY_THROTTLED, &tty->flags)) {
2685 DFLOW(DEBUG_LOG(info->line, "rs_throttle flip.count: %i\n", tty->flip.count));
2686 rs_throttle(tty);
2687 }
2688 }
2689 if (tty->flip.count >= CRIS_BUF_SIZE) {
2690 DEBUG_LOG(info->line, "force FLIP! %i\n", tty->flip.count);
2691 tty->flip.work.func((void *) tty);
2692 if (tty->flip.count >= CRIS_BUF_SIZE) {
2693 DEBUG_LOG(info->line, "FLIP FULL! %i\n", tty->flip.count);
2694 return info; /* if TTY_DONT_FLIP is set */
2695 }
2696 }
2697 /* Read data and status at the same time */ 2302 /* Read data and status at the same time */
2698 data_read = *((unsigned long *)&info->port[REG_DATA_STATUS32]); 2303 data_read = *((unsigned long *)&info->port[REG_DATA_STATUS32]);
2699more_data: 2304more_data:
@@ -2746,27 +2351,26 @@ more_data:
2746 DEBUG_LOG(info->line, "EBRK %i\n", info->break_detected_cnt); 2351 DEBUG_LOG(info->line, "EBRK %i\n", info->break_detected_cnt);
2747 info->errorcode = ERRCODE_INSERT_BREAK; 2352 info->errorcode = ERRCODE_INSERT_BREAK;
2748 } else { 2353 } else {
2354 unsigned char data = IO_EXTRACT(R_SERIAL0_READ,
2355 data_in, data_read);
2356 char flag = TTY_NORMAL;
2749 if (info->errorcode == ERRCODE_INSERT_BREAK) { 2357 if (info->errorcode == ERRCODE_INSERT_BREAK) {
2750 info->icount.brk++; 2358 struct tty_struct *tty = info->tty;
2751 *tty->flip.char_buf_ptr = 0; 2359 tty_insert_flip_char(tty, 0, flag);
2752 *tty->flip.flag_buf_ptr = TTY_BREAK;
2753 tty->flip.flag_buf_ptr++;
2754 tty->flip.char_buf_ptr++;
2755 tty->flip.count++;
2756 info->icount.rx++; 2360 info->icount.rx++;
2757 } 2361 }
2758 *tty->flip.char_buf_ptr = IO_EXTRACT(R_SERIAL0_READ, data_in, data_read);
2759 2362
2760 if (data_read & IO_MASK(R_SERIAL0_READ, par_err)) { 2363 if (data_read & IO_MASK(R_SERIAL0_READ, par_err)) {
2761 info->icount.parity++; 2364 info->icount.parity++;
2762 *tty->flip.flag_buf_ptr = TTY_PARITY; 2365 flag = TTY_PARITY;
2763 } else if (data_read & IO_MASK(R_SERIAL0_READ, overrun)) { 2366 } else if (data_read & IO_MASK(R_SERIAL0_READ, overrun)) {
2764 info->icount.overrun++; 2367 info->icount.overrun++;
2765 *tty->flip.flag_buf_ptr = TTY_OVERRUN; 2368 flag = TTY_OVERRUN;
2766 } else if (data_read & IO_MASK(R_SERIAL0_READ, framing_err)) { 2369 } else if (data_read & IO_MASK(R_SERIAL0_READ, framing_err)) {
2767 info->icount.frame++; 2370 info->icount.frame++;
2768 *tty->flip.flag_buf_ptr = TTY_FRAME; 2371 flag = TTY_FRAME;
2769 } 2372 }
2373 tty_insert_flip_char(tty, data, flag);
2770 info->errorcode = 0; 2374 info->errorcode = 0;
2771 } 2375 }
2772 info->break_detected_cnt = 0; 2376 info->break_detected_cnt = 0;
@@ -2782,16 +2386,14 @@ more_data:
2782 log_int(rdpc(), 0, 0); 2386 log_int(rdpc(), 0, 0);
2783 } 2387 }
2784 ); 2388 );
2785 *tty->flip.char_buf_ptr = IO_EXTRACT(R_SERIAL0_READ, data_in, data_read); 2389 tty_insert_flip_char(tty,
2786 *tty->flip.flag_buf_ptr = 0; 2390 IO_EXTRACT(R_SERIAL0_READ, data_in, data_read),
2391 TTY_NORMAL);
2787 } else { 2392 } else {
2788 DEBUG_LOG(info->line, "ser_rx int but no data_avail %08lX\n", data_read); 2393 DEBUG_LOG(info->line, "ser_rx int but no data_avail %08lX\n", data_read);
2789 } 2394 }
2790 2395
2791 2396
2792 tty->flip.flag_buf_ptr++;
2793 tty->flip.char_buf_ptr++;
2794 tty->flip.count++;
2795 info->icount.rx++; 2397 info->icount.rx++;
2796 data_read = *((unsigned long *)&info->port[REG_DATA_STATUS32]); 2398 data_read = *((unsigned long *)&info->port[REG_DATA_STATUS32]);
2797 if (data_read & IO_MASK(R_SERIAL0_READ, data_avail)) { 2399 if (data_read & IO_MASK(R_SERIAL0_READ, data_avail)) {
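
In the interrupt-driven receive path above, one 32-bit read of REG_DATA_STATUS32 delivers the data byte together with its parity, overrun and framing status, and the hunks convert the old flip-buffer pokes into a single tty_insert_flip_char() with the matching TTY_* flag. The sketch below shows that mapping with invented bit positions; the real layout comes from the R_SERIAL0_READ register description.

#include <linux/tty.h>
#include <linux/tty_flip.h>

#define SKETCH_DATA_MASK        0x000000ffUL    /* invented: data byte     */
#define SKETCH_PAR_ERR          0x00000100UL    /* invented: parity error  */
#define SKETCH_OVERRUN          0x00000200UL    /* invented: overrun       */
#define SKETCH_FRAME_ERR        0x00000400UL    /* invented: framing error */

/* Turn one combined data+status word into a flip-buffer entry. */
static void sketch_rx_word(struct tty_struct *tty, unsigned long word)
{
        char flag = TTY_NORMAL;

        if (word & SKETCH_PAR_ERR)
                flag = TTY_PARITY;
        else if (word & SKETCH_OVERRUN)
                flag = TTY_OVERRUN;
        else if (word & SKETCH_FRAME_ERR)
                flag = TTY_FRAME;

        tty_insert_flip_char(tty, word & SKETCH_DATA_MASK, flag);
}
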
@@ -2929,7 +2531,7 @@ static void handle_ser_tx_interrupt(struct e100_serial *info)
2929 if (info->x_char) { 2531 if (info->x_char) {
2930 unsigned char rstat; 2532 unsigned char rstat;
2931 DFLOW(DEBUG_LOG(info->line, "tx_int: xchar 0x%02X\n", info->x_char)); 2533 DFLOW(DEBUG_LOG(info->line, "tx_int: xchar 0x%02X\n", info->x_char));
2932 save_flags(flags); cli(); 2534 local_irq_save(flags);
2933 rstat = info->port[REG_STATUS]; 2535 rstat = info->port[REG_STATUS];
2934 DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat)); 2536 DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat));
2935 2537
@@ -2938,7 +2540,7 @@ static void handle_ser_tx_interrupt(struct e100_serial *info)
2938 info->x_char = 0; 2540 info->x_char = 0;
2939 /* We must enable since it is disabled in ser_interrupt */ 2541 /* We must enable since it is disabled in ser_interrupt */
2940 e100_enable_serial_tx_ready_irq(info); 2542 e100_enable_serial_tx_ready_irq(info);
2941 restore_flags(flags); 2543 local_irq_restore(flags);
2942 return; 2544 return;
2943 } 2545 }
2944 if (info->uses_dma_out) { 2546 if (info->uses_dma_out) {
@@ -2946,7 +2548,7 @@ static void handle_ser_tx_interrupt(struct e100_serial *info)
2946 int i; 2548 int i;
2947 /* We only use normal tx interrupt when sending x_char */ 2549 /* We only use normal tx interrupt when sending x_char */
2948 DFLOW(DEBUG_LOG(info->line, "tx_int: xchar sent\n", 0)); 2550 DFLOW(DEBUG_LOG(info->line, "tx_int: xchar sent\n", 0));
2949 save_flags(flags); cli(); 2551 local_irq_save(flags);
2950 rstat = info->port[REG_STATUS]; 2552 rstat = info->port[REG_STATUS];
2951 DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat)); 2553 DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat));
2952 e100_disable_serial_tx_ready_irq(info); 2554 e100_disable_serial_tx_ready_irq(info);
@@ -2959,7 +2561,7 @@ static void handle_ser_tx_interrupt(struct e100_serial *info)
2959 nop(); 2561 nop();
2960 2562
2961 *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, continue); 2563 *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, continue);
2962 restore_flags(flags); 2564 local_irq_restore(flags);
2963 return; 2565 return;
2964 } 2566 }
2965 /* Normal char-by-char interrupt */ 2567 /* Normal char-by-char interrupt */
@@ -2973,7 +2575,7 @@ static void handle_ser_tx_interrupt(struct e100_serial *info)
2973 } 2575 }
2974 DINTR2(DEBUG_LOG(info->line, "tx_int %c\n", info->xmit.buf[info->xmit.tail])); 2576 DINTR2(DEBUG_LOG(info->line, "tx_int %c\n", info->xmit.buf[info->xmit.tail]));
2975 /* Send a byte, rs485 timing is critical so turn off ints */ 2577 /* Send a byte, rs485 timing is critical so turn off ints */
2976 save_flags(flags); cli(); 2578 local_irq_save(flags);
2977 info->port[REG_TR_DATA] = info->xmit.buf[info->xmit.tail]; 2579 info->port[REG_TR_DATA] = info->xmit.buf[info->xmit.tail];
2978 info->xmit.tail = (info->xmit.tail + 1) & (SERIAL_XMIT_SIZE-1); 2580 info->xmit.tail = (info->xmit.tail + 1) & (SERIAL_XMIT_SIZE-1);
2979 info->icount.tx++; 2581 info->icount.tx++;
@@ -2997,7 +2599,7 @@ static void handle_ser_tx_interrupt(struct e100_serial *info)
2997 /* We must enable since it is disabled in ser_interrupt */ 2599 /* We must enable since it is disabled in ser_interrupt */
2998 e100_enable_serial_tx_ready_irq(info); 2600 e100_enable_serial_tx_ready_irq(info);
2999 } 2601 }
3000 restore_flags(flags); 2602 local_irq_restore(flags);
3001 2603
3002 if (CIRC_CNT(info->xmit.head, 2604 if (CIRC_CNT(info->xmit.head,
3003 info->xmit.tail, 2605 info->xmit.tail,
@@ -3022,7 +2624,7 @@ ser_interrupt(int irq, void *dev_id)
3022 int handled = 0; 2624 int handled = 0;
3023 static volatile unsigned long reentered_ready_mask = 0; 2625 static volatile unsigned long reentered_ready_mask = 0;
3024 2626
3025 save_flags(flags); cli(); 2627 local_irq_save(flags);
3026 irq_mask1_rd = *R_IRQ_MASK1_RD; 2628 irq_mask1_rd = *R_IRQ_MASK1_RD;
3027 /* First handle all rx interrupts with ints disabled */ 2629 /* First handle all rx interrupts with ints disabled */
3028 info = rs_table; 2630 info = rs_table;
@@ -3067,7 +2669,7 @@ ser_interrupt(int irq, void *dev_id)
3067 /* Unblock the serial interrupt */ 2669 /* Unblock the serial interrupt */
3068 *R_VECT_MASK_SET = IO_STATE(R_VECT_MASK_SET, serial, set); 2670 *R_VECT_MASK_SET = IO_STATE(R_VECT_MASK_SET, serial, set);
3069 2671
3070 sti(); 2672 local_irq_enable();
3071 ready_mask = (1 << (8+1+2*0)); /* ser0 tr_ready */ 2673 ready_mask = (1 << (8+1+2*0)); /* ser0 tr_ready */
3072 info = rs_table; 2674 info = rs_table;
3073 for (i = 0; i < NR_PORTS; i++) { 2675 for (i = 0; i < NR_PORTS; i++) {
@@ -3080,11 +2682,11 @@ ser_interrupt(int irq, void *dev_id)
3080 ready_mask <<= 2; 2682 ready_mask <<= 2;
3081 } 2683 }
3082 /* handle_ser_tx_interrupt enables tr_ready interrupts */ 2684 /* handle_ser_tx_interrupt enables tr_ready interrupts */
3083 cli(); 2685 local_irq_disable();
3084 /* Handle reentered TX interrupt */ 2686 /* Handle reentered TX interrupt */
3085 irq_mask1_rd = reentered_ready_mask; 2687 irq_mask1_rd = reentered_ready_mask;
3086 } 2688 }
3087 cli(); 2689 local_irq_disable();
3088 tx_started = 0; 2690 tx_started = 0;
3089 } else { 2691 } else {
3090 unsigned long ready_mask; 2692 unsigned long ready_mask;
@@ -3100,7 +2702,7 @@ ser_interrupt(int irq, void *dev_id)
3100 } 2702 }
3101 } 2703 }
3102 2704
3103 restore_flags(flags); 2705 local_irq_restore(flags);
3104 return IRQ_RETVAL(handled); 2706 return IRQ_RETVAL(handled);
3105} /* ser_interrupt */ 2707} /* ser_interrupt */
3106#endif 2708#endif
@@ -3121,11 +2723,13 @@ ser_interrupt(int irq, void *dev_id)
3121 * them using rs_sched_event(), and they get done here. 2723 * them using rs_sched_event(), and they get done here.
3122 */ 2724 */
3123static void 2725static void
3124do_softint(void *private_) 2726do_softint(struct work_struct *work)
3125{ 2727{
3126 struct e100_serial *info = (struct e100_serial *) private_; 2728 struct e100_serial *info;
3127 struct tty_struct *tty; 2729 struct tty_struct *tty;
3128 2730
2731 info = container_of(work, struct e100_serial, work);
2732
3129 tty = info->tty; 2733 tty = info->tty;
3130 if (!tty) 2734 if (!tty)
3131 return; 2735 return;
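
do_softint() above is moved to the workqueue interface introduced in 2.6.20: the callback receives a struct work_struct pointer and recovers its containing e100_serial with container_of(), rather than being handed a void * at INIT_WORK() time. A self-contained sketch of the pattern with an invented port structure:

#include <linux/workqueue.h>
#include <linux/kernel.h>

struct sketch_port {
        int line;
        struct work_struct work;        /* embedded, like e100_serial::work */
};

static void sketch_softint(struct work_struct *work)
{
        /* Recover the containing structure from the embedded member. */
        struct sketch_port *port = container_of(work, struct sketch_port, work);

        printk(KERN_DEBUG "deferred work for line %d\n", port->line);
}

static void sketch_init(struct sketch_port *port)
{
        INIT_WORK(&port->work, sketch_softint); /* no data pointer any more */
}

/* Later, typically from an interrupt handler:
 *      schedule_work(&port->work);
 */
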
@@ -3145,13 +2749,12 @@ startup(struct e100_serial * info)
3145 if (!xmit_page) 2749 if (!xmit_page)
3146 return -ENOMEM; 2750 return -ENOMEM;
3147 2751
3148 save_flags(flags); 2752 local_irq_save(flags);
3149 cli();
3150 2753
3151 /* if it was already initialized, skip this */ 2754 /* if it was already initialized, skip this */
3152 2755
3153 if (info->flags & ASYNC_INITIALIZED) { 2756 if (info->flags & ASYNC_INITIALIZED) {
3154 restore_flags(flags); 2757 local_irq_restore(flags);
3155 free_page(xmit_page); 2758 free_page(xmit_page);
3156 return 0; 2759 return 0;
3157 } 2760 }
@@ -3277,7 +2880,7 @@ startup(struct e100_serial * info)
3277 2880
3278 info->flags |= ASYNC_INITIALIZED; 2881 info->flags |= ASYNC_INITIALIZED;
3279 2882
3280 restore_flags(flags); 2883 local_irq_restore(flags);
3281 return 0; 2884 return 0;
3282} 2885}
3283 2886
@@ -3328,8 +2931,7 @@ shutdown(struct e100_serial * info)
3328 info->irq); 2931 info->irq);
3329#endif 2932#endif
3330 2933
3331 save_flags(flags); 2934 local_irq_save(flags);
3332 cli(); /* Disable interrupts */
3333 2935
3334 if (info->xmit.buf) { 2936 if (info->xmit.buf) {
3335 free_page((unsigned long)info->xmit.buf); 2937 free_page((unsigned long)info->xmit.buf);
@@ -3353,7 +2955,7 @@ shutdown(struct e100_serial * info)
3353 set_bit(TTY_IO_ERROR, &info->tty->flags); 2955 set_bit(TTY_IO_ERROR, &info->tty->flags);
3354 2956
3355 info->flags &= ~ASYNC_INITIALIZED; 2957 info->flags &= ~ASYNC_INITIALIZED;
3356 restore_flags(flags); 2958 local_irq_restore(flags);
3357} 2959}
3358 2960
3359 2961
@@ -3411,7 +3013,6 @@ change_speed(struct e100_serial *info)
3411 DBAUD(printk("using external baudrate: %lu\n", CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8)); 3013 DBAUD(printk("using external baudrate: %lu\n", CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8));
3412 info->baud = CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8; 3014 info->baud = CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8;
3413 } 3015 }
3414 }
3415#endif 3016#endif
3416 else 3017 else
3417 { 3018 {
@@ -3445,8 +3046,7 @@ change_speed(struct e100_serial *info)
3445 3046
3446#ifndef CONFIG_SVINTO_SIM 3047#ifndef CONFIG_SVINTO_SIM
3447 /* start with default settings and then fill in changes */ 3048 /* start with default settings and then fill in changes */
3448 save_flags(flags); 3049 local_irq_save(flags);
3449 cli();
3450 /* 8 bit, no/even parity */ 3050 /* 8 bit, no/even parity */
3451 info->rx_ctrl &= ~(IO_MASK(R_SERIAL0_REC_CTRL, rec_bitnr) | 3051 info->rx_ctrl &= ~(IO_MASK(R_SERIAL0_REC_CTRL, rec_bitnr) |
3452 IO_MASK(R_SERIAL0_REC_CTRL, rec_par_en) | 3052 IO_MASK(R_SERIAL0_REC_CTRL, rec_par_en) |
@@ -3510,7 +3110,7 @@ change_speed(struct e100_serial *info)
3510 } 3110 }
3511 3111
3512 *((unsigned long *)&info->port[REG_XOFF]) = xoff; 3112 *((unsigned long *)&info->port[REG_XOFF]) = xoff;
3513 restore_flags(flags); 3113 local_irq_restore(flags);
3514#endif /* !CONFIG_SVINTO_SIM */ 3114#endif /* !CONFIG_SVINTO_SIM */
3515 3115
3516 update_char_time(info); 3116 update_char_time(info);
@@ -3538,13 +3138,12 @@ rs_flush_chars(struct tty_struct *tty)
3538 3138
3539 /* this protection might not exactly be necessary here */ 3139 /* this protection might not exactly be necessary here */
3540 3140
3541 save_flags(flags); 3141 local_irq_save(flags);
3542 cli();
3543 start_transmit(info); 3142 start_transmit(info);
3544 restore_flags(flags); 3143 local_irq_restore(flags);
3545} 3144}
3546 3145
3547static int rs_raw_write(struct tty_struct * tty, int from_user, 3146static int rs_raw_write(struct tty_struct *tty,
3548 const unsigned char *buf, int count) 3147 const unsigned char *buf, int count)
3549{ 3148{
3550 int c, ret = 0; 3149 int c, ret = 0;
@@ -3567,53 +3166,19 @@ static int rs_raw_write(struct tty_struct * tty, int from_user,
3567 SIMCOUT(buf, count); 3166 SIMCOUT(buf, count);
3568 return count; 3167 return count;
3569#endif 3168#endif
3570 save_flags(flags); 3169 local_save_flags(flags);
3571 DFLOW(DEBUG_LOG(info->line, "write count %i ", count)); 3170 DFLOW(DEBUG_LOG(info->line, "write count %i ", count));
3572 DFLOW(DEBUG_LOG(info->line, "ldisc %i\n", tty->ldisc.chars_in_buffer(tty))); 3171 DFLOW(DEBUG_LOG(info->line, "ldisc %i\n", tty->ldisc.chars_in_buffer(tty)));
3573 3172
3574 3173
3575 /* the cli/restore_flags pairs below are needed because the 3174 /* The local_irq_disable/restore_flags pairs below are needed
3576 * DMA interrupt handler moves the info->xmit values. the memcpy 3175 * because the DMA interrupt handler moves the info->xmit values.
3577 * needs to be in the critical region unfortunately, because we 3176 * the memcpy needs to be in the critical region unfortunately,
3578 * need to read xmit values, memcpy, write xmit values in one 3177 * because we need to read xmit values, memcpy, write xmit values
3579 * atomic operation... this could perhaps be avoided by more clever 3178 * in one atomic operation... this could perhaps be avoided by
3580 * design. 3179 * more clever design.
3581 */ 3180 */
3582 if (from_user) { 3181 local_irq_disable();
3583 mutex_lock(&tmp_buf_mutex);
3584 while (1) {
3585 int c1;
3586 c = CIRC_SPACE_TO_END(info->xmit.head,
3587 info->xmit.tail,
3588 SERIAL_XMIT_SIZE);
3589 if (count < c)
3590 c = count;
3591 if (c <= 0)
3592 break;
3593
3594 c -= copy_from_user(tmp_buf, buf, c);
3595 if (!c) {
3596 if (!ret)
3597 ret = -EFAULT;
3598 break;
3599 }
3600 cli();
3601 c1 = CIRC_SPACE_TO_END(info->xmit.head,
3602 info->xmit.tail,
3603 SERIAL_XMIT_SIZE);
3604 if (c1 < c)
3605 c = c1;
3606 memcpy(info->xmit.buf + info->xmit.head, tmp_buf, c);
3607 info->xmit.head = ((info->xmit.head + c) &
3608 (SERIAL_XMIT_SIZE-1));
3609 restore_flags(flags);
3610 buf += c;
3611 count -= c;
3612 ret += c;
3613 }
3614 mutex_unlock(&tmp_buf_mutex);
3615 } else {
3616 cli();
3617 while (count) { 3182 while (count) {
3618 c = CIRC_SPACE_TO_END(info->xmit.head, 3183 c = CIRC_SPACE_TO_END(info->xmit.head,
3619 info->xmit.tail, 3184 info->xmit.tail,
@@ -3631,8 +3196,7 @@ static int rs_raw_write(struct tty_struct * tty, int from_user,
3631 count -= c; 3196 count -= c;
3632 ret += c; 3197 ret += c;
3633 } 3198 }
3634 restore_flags(flags); 3199 local_irq_restore(flags);
3635 }
3636 3200
3637 /* enable transmitter if not running, unless the tty is stopped 3201 /* enable transmitter if not running, unless the tty is stopped
3638 * this does not need IRQ protection since if tr_running == 0 3202 * this does not need IRQ protection since if tr_running == 0
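
With the from_user branch removed, rs_raw_write() above reduces to the kernel-buffer case: bytes are copied into the circular xmit buffer using CIRC_SPACE_TO_END() while local interrupts are off, because the transmit interrupt and DMA paths advance xmit.tail concurrently. A condensed sketch of that loop; the structure and buffer size are stand-ins for the driver's xmit ring and SERIAL_XMIT_SIZE.

#include <linux/circ_buf.h>
#include <linux/irqflags.h>
#include <linux/string.h>

#define SKETCH_XMIT_SIZE 4096   /* must be a power of two */

struct sketch_xmit {
        unsigned char *buf;
        int head;       /* producer index (written here) */
        int tail;       /* consumer index (moved by the ISR) */
};

static int sketch_queue_tx(struct sketch_xmit *x,
                           const unsigned char *buf, int count)
{
        unsigned long flags;
        int ret = 0;
        int c;

        local_irq_save(flags);
        while (count) {
                /* Contiguous free space up to the end of the ring. */
                c = CIRC_SPACE_TO_END(x->head, x->tail, SKETCH_XMIT_SIZE);
                if (c > count)
                        c = count;
                if (c <= 0)
                        break;
                memcpy(x->buf + x->head, buf, c);
                x->head = (x->head + c) & (SKETCH_XMIT_SIZE - 1);
                buf += c;
                count -= c;
                ret += c;
        }
        local_irq_restore(flags);
        return ret;     /* bytes actually queued */
}
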
@@ -3651,7 +3215,7 @@ static int rs_raw_write(struct tty_struct * tty, int from_user,
3651} /* raw_raw_write() */ 3215} /* raw_raw_write() */
3652 3216
3653static int 3217static int
3654rs_write(struct tty_struct * tty, int from_user, 3218rs_write(struct tty_struct *tty,
3655 const unsigned char *buf, int count) 3219 const unsigned char *buf, int count)
3656{ 3220{
3657#if defined(CONFIG_ETRAX_RS485) 3221#if defined(CONFIG_ETRAX_RS485)
@@ -3678,7 +3242,7 @@ rs_write(struct tty_struct * tty, int from_user,
3678 } 3242 }
3679#endif /* CONFIG_ETRAX_RS485 */ 3243#endif /* CONFIG_ETRAX_RS485 */
3680 3244
3681 count = rs_raw_write(tty, from_user, buf, count); 3245 count = rs_raw_write(tty, buf, count);
3682 3246
3683#if defined(CONFIG_ETRAX_RS485) 3247#if defined(CONFIG_ETRAX_RS485)
3684 if (info->rs485.enabled) 3248 if (info->rs485.enabled)
@@ -3746,10 +3310,9 @@ rs_flush_buffer(struct tty_struct *tty)
3746 struct e100_serial *info = (struct e100_serial *)tty->driver_data; 3310 struct e100_serial *info = (struct e100_serial *)tty->driver_data;
3747 unsigned long flags; 3311 unsigned long flags;
3748 3312
3749 save_flags(flags); 3313 local_irq_save(flags);
3750 cli();
3751 info->xmit.head = info->xmit.tail = 0; 3314 info->xmit.head = info->xmit.tail = 0;
3752 restore_flags(flags); 3315 local_irq_restore(flags);
3753 3316
3754 tty_wakeup(tty); 3317 tty_wakeup(tty);
3755} 3318}
@@ -3767,7 +3330,7 @@ static void rs_send_xchar(struct tty_struct *tty, char ch)
3767{ 3330{
3768 struct e100_serial *info = (struct e100_serial *)tty->driver_data; 3331 struct e100_serial *info = (struct e100_serial *)tty->driver_data;
3769 unsigned long flags; 3332 unsigned long flags;
3770 save_flags(flags); cli(); 3333 local_irq_save(flags);
3771 if (info->uses_dma_out) { 3334 if (info->uses_dma_out) {
3772 /* Put the DMA on hold and disable the channel */ 3335 /* Put the DMA on hold and disable the channel */
3773 *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, hold); 3336 *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, hold);
@@ -3784,7 +3347,7 @@ static void rs_send_xchar(struct tty_struct *tty, char ch)
3784 DFLOW(DEBUG_LOG(info->line, "rs_send_xchar 0x%02X\n", ch)); 3347 DFLOW(DEBUG_LOG(info->line, "rs_send_xchar 0x%02X\n", ch));
3785 info->x_char = ch; 3348 info->x_char = ch;
3786 e100_enable_serial_tx_ready_irq(info); 3349 e100_enable_serial_tx_ready_irq(info);
3787 restore_flags(flags); 3350 local_irq_restore(flags);
3788} 3351}
3789 3352
3790/* 3353/*
@@ -3996,21 +3559,61 @@ char *get_control_state_str(int MLines, char *s)
3996} 3559}
3997#endif 3560#endif
3998 3561
3562static void
3563rs_break(struct tty_struct *tty, int break_state)
3564{
3565 struct e100_serial *info = (struct e100_serial *)tty->driver_data;
3566 unsigned long flags;
3567
3568 if (!info->port)
3569 return;
3570
3571 local_irq_save(flags);
3572 if (break_state == -1) {
3573 /* Go to manual mode and set the txd pin to 0 */
3574 /* Clear bit 7 (txd) and 6 (tr_enable) */
3575 info->tx_ctrl &= 0x3F;
3576 } else {
3577 /* Set bit 7 (txd) and 6 (tr_enable) */
3578 info->tx_ctrl |= (0x80 | 0x40);
3579 }
3580 info->port[REG_TR_CTRL] = info->tx_ctrl;
3581 local_irq_restore(flags);
3582}
3583
3999static int 3584static int
4000get_modem_info(struct e100_serial * info, unsigned int *value) 3585rs_tiocmset(struct tty_struct *tty, struct file *file,
3586 unsigned int set, unsigned int clear)
4001{ 3587{
4002 unsigned int result; 3588 struct e100_serial *info = (struct e100_serial *)tty->driver_data;
4003 /* Polarity isn't verified */
4004#if 0 /*def SERIAL_DEBUG_IO */
4005 3589
4006 printk("get_modem_info: RTS: %i DTR: %i CD: %i RI: %i DSR: %i CTS: %i\n", 3590 if (clear & TIOCM_RTS)
4007 E100_RTS_GET(info), 3591 e100_rts(info, 0);
4008 E100_DTR_GET(info), 3592 if (clear & TIOCM_DTR)
4009 E100_CD_GET(info), 3593 e100_dtr(info, 0);
4010 E100_RI_GET(info), 3594 /* Handle FEMALE behaviour */
4011 E100_DSR_GET(info), 3595 if (clear & TIOCM_RI)
4012 E100_CTS_GET(info)); 3596 e100_ri_out(info, 0);
4013#endif 3597 if (clear & TIOCM_CD)
3598 e100_cd_out(info, 0);
3599
3600 if (set & TIOCM_RTS)
3601 e100_rts(info, 1);
3602 if (set & TIOCM_DTR)
3603 e100_dtr(info, 1);
3604 /* Handle FEMALE behaviour */
3605 if (set & TIOCM_RI)
3606 e100_ri_out(info, 1);
3607 if (set & TIOCM_CD)
3608 e100_cd_out(info, 1);
3609 return 0;
3610}
3611
3612static int
3613rs_tiocmget(struct tty_struct *tty, struct file *file)
3614{
3615 struct e100_serial *info = (struct e100_serial *)tty->driver_data;
3616 unsigned int result;
4014 3617
4015 result = 3618 result =
4016 (!E100_RTS_GET(info) ? TIOCM_RTS : 0) 3619 (!E100_RTS_GET(info) ? TIOCM_RTS : 0)
@@ -4021,95 +3624,20 @@ get_modem_info(struct e100_serial * info, unsigned int *value)
4021 | (!E100_CTS_GET(info) ? TIOCM_CTS : 0); 3624 | (!E100_CTS_GET(info) ? TIOCM_CTS : 0);
4022 3625
4023#ifdef SERIAL_DEBUG_IO 3626#ifdef SERIAL_DEBUG_IO
4024 printk("e100ser: modem state: %i 0x%08X\n", result, result); 3627 printk(KERN_DEBUG "ser%i: modem state: %i 0x%08X\n",
3628 info->line, result, result);
4025 { 3629 {
4026 char s[100]; 3630 char s[100];
4027 3631
4028 get_control_state_str(result, s); 3632 get_control_state_str(result, s);
4029 printk("state: %s\n", s); 3633 printk(KERN_DEBUG "state: %s\n", s);
4030 } 3634 }
4031#endif 3635#endif
4032 if (copy_to_user(value, &result, sizeof(int))) 3636 return result;
4033 return -EFAULT;
4034 return 0;
4035}
4036 3637
4037
4038static int
4039set_modem_info(struct e100_serial * info, unsigned int cmd,
4040 unsigned int *value)
4041{
4042 unsigned int arg;
4043
4044 if (copy_from_user(&arg, value, sizeof(int)))
4045 return -EFAULT;
4046
4047 switch (cmd) {
4048 case TIOCMBIS:
4049 if (arg & TIOCM_RTS) {
4050 e100_rts(info, 1);
4051 }
4052 if (arg & TIOCM_DTR) {
4053 e100_dtr(info, 1);
4054 }
4055 /* Handle FEMALE behaviour */
4056 if (arg & TIOCM_RI) {
4057 e100_ri_out(info, 1);
4058 }
4059 if (arg & TIOCM_CD) {
4060 e100_cd_out(info, 1);
4061 }
4062 break;
4063 case TIOCMBIC:
4064 if (arg & TIOCM_RTS) {
4065 e100_rts(info, 0);
4066 }
4067 if (arg & TIOCM_DTR) {
4068 e100_dtr(info, 0);
4069 }
4070 /* Handle FEMALE behaviour */
4071 if (arg & TIOCM_RI) {
4072 e100_ri_out(info, 0);
4073 }
4074 if (arg & TIOCM_CD) {
4075 e100_cd_out(info, 0);
4076 }
4077 break;
4078 case TIOCMSET:
4079 e100_rts(info, arg & TIOCM_RTS);
4080 e100_dtr(info, arg & TIOCM_DTR);
4081 /* Handle FEMALE behaviour */
4082 e100_ri_out(info, arg & TIOCM_RI);
4083 e100_cd_out(info, arg & TIOCM_CD);
4084 break;
4085 default:
4086 return -EINVAL;
4087 }
4088 return 0;
4089} 3638}
4090 3639
4091 3640
4092static void
4093rs_break(struct tty_struct *tty, int break_state)
4094{
4095 struct e100_serial * info = (struct e100_serial *)tty->driver_data;
4096 unsigned long flags;
4097
4098 if (!info->port)
4099 return;
4100
4101 save_flags(flags);
4102 cli();
4103 if (break_state == -1) {
4104 /* Go to manual mode and set the txd pin to 0 */
4105 info->tx_ctrl &= 0x3F; /* Clear bit 7 (txd) and 6 (tr_enable) */
4106 } else {
4107 info->tx_ctrl |= (0x80 | 0x40); /* Set bit 7 (txd) and 6 (tr_enable) */
4108 }
4109 info->port[REG_TR_CTRL] = info->tx_ctrl;
4110 restore_flags(flags);
4111}
4112
4113static int 3641static int
4114rs_ioctl(struct tty_struct *tty, struct file * file, 3642rs_ioctl(struct tty_struct *tty, struct file * file,
4115 unsigned int cmd, unsigned long arg) 3643 unsigned int cmd, unsigned long arg)
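
Together with the rs_ioctl() hunk that follows, the change above replaces the hand-decoded TIOCMGET/TIOCMBIS/TIOCMBIC/TIOCMSET ioctls with the driver's tiocmget()/tiocmset() methods, so the tty core performs the user-space copies and turns the BIS/BIC/SET variants into set and clear masks. A sketch of the method pair for this kernel generation, which still passes the struct file pointer; the stored state variable is illustrative and unlocked.

#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/termios.h>

static int sketch_line_state;   /* stands in for reading the real pins */

static int sketch_tiocmget(struct tty_struct *tty, struct file *file)
{
        /* Return a TIOCM_* bitmask describing the current line state. */
        return sketch_line_state;
}

static int sketch_tiocmset(struct tty_struct *tty, struct file *file,
                           unsigned int set, unsigned int clear)
{
        /* The core has already split the request into bits to set and
         * bits to clear; no TIOCMBIS/TIOCMBIC decoding is needed here. */
        sketch_line_state |= set & (TIOCM_RTS | TIOCM_DTR);
        sketch_line_state &= ~(clear & (TIOCM_RTS | TIOCM_DTR));
        return 0;
}

static const struct tty_operations sketch_modem_ops = {
        .tiocmget = sketch_tiocmget,
        .tiocmset = sketch_tiocmset,
};
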
@@ -4124,49 +3652,45 @@ rs_ioctl(struct tty_struct *tty, struct file * file,
4124 } 3652 }
4125 3653
4126 switch (cmd) { 3654 switch (cmd) {
4127 case TIOCMGET: 3655 case TIOCGSERIAL:
4128 return get_modem_info(info, (unsigned int *) arg); 3656 return get_serial_info(info,
4129 case TIOCMBIS: 3657 (struct serial_struct *) arg);
4130 case TIOCMBIC: 3658 case TIOCSSERIAL:
4131 case TIOCMSET: 3659 return set_serial_info(info,
4132 return set_modem_info(info, cmd, (unsigned int *) arg); 3660 (struct serial_struct *) arg);
4133 case TIOCGSERIAL: 3661 case TIOCSERGETLSR: /* Get line status register */
4134 return get_serial_info(info, 3662 return get_lsr_info(info, (unsigned int *) arg);
4135 (struct serial_struct *) arg); 3663
4136 case TIOCSSERIAL: 3664 case TIOCSERGSTRUCT:
4137 return set_serial_info(info, 3665 if (copy_to_user((struct e100_serial *) arg,
4138 (struct serial_struct *) arg); 3666 info, sizeof(struct e100_serial)))
4139 case TIOCSERGETLSR: /* Get line status register */ 3667 return -EFAULT;
4140 return get_lsr_info(info, (unsigned int *) arg); 3668 return 0;
4141
4142 case TIOCSERGSTRUCT:
4143 if (copy_to_user((struct e100_serial *) arg,
4144 info, sizeof(struct e100_serial)))
4145 return -EFAULT;
4146 return 0;
4147 3669
4148#if defined(CONFIG_ETRAX_RS485) 3670#if defined(CONFIG_ETRAX_RS485)
4149 case TIOCSERSETRS485: 3671 case TIOCSERSETRS485:
4150 { 3672 {
4151 struct rs485_control rs485ctrl; 3673 struct rs485_control rs485ctrl;
4152 if (copy_from_user(&rs485ctrl, (struct rs485_control*)arg, sizeof(rs485ctrl))) 3674 if (copy_from_user(&rs485ctrl, (struct rs485_control *)arg,
4153 return -EFAULT; 3675 sizeof(rs485ctrl)))
3676 return -EFAULT;
4154 3677
4155 return e100_enable_rs485(tty, &rs485ctrl); 3678 return e100_enable_rs485(tty, &rs485ctrl);
4156 } 3679 }
4157 3680
4158 case TIOCSERWRRS485: 3681 case TIOCSERWRRS485:
4159 { 3682 {
4160 struct rs485_write rs485wr; 3683 struct rs485_write rs485wr;
4161 if (copy_from_user(&rs485wr, (struct rs485_write*)arg, sizeof(rs485wr))) 3684 if (copy_from_user(&rs485wr, (struct rs485_write *)arg,
4162 return -EFAULT; 3685 sizeof(rs485wr)))
3686 return -EFAULT;
4163 3687
4164 return e100_write_rs485(tty, 1, rs485wr.outc, rs485wr.outc_size); 3688 return e100_write_rs485(tty, rs485wr.outc, rs485wr.outc_size);
4165 } 3689 }
4166#endif 3690#endif
4167 3691
4168 default: 3692 default:
4169 return -ENOIOCTLCMD; 3693 return -ENOIOCTLCMD;
4170 } 3694 }
4171 return 0; 3695 return 0;
4172} 3696}
@@ -4191,46 +3715,6 @@ rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
4191 3715
4192} 3716}
4193 3717
4194/* In debugport.c - register a console write function that uses the normal
4195 * serial driver
4196 */
4197typedef int (*debugport_write_function)(int i, const char *buf, unsigned int len);
4198
4199extern debugport_write_function debug_write_function;
4200
4201static int rs_debug_write_function(int i, const char *buf, unsigned int len)
4202{
4203 int cnt;
4204 int written = 0;
4205 struct tty_struct *tty;
4206 static int recurse_cnt = 0;
4207
4208 tty = rs_table[i].tty;
4209 if (tty) {
4210 unsigned long flags;
4211 if (recurse_cnt > 5) /* We skip this debug output */
4212 return 1;
4213
4214 local_irq_save(flags);
4215 recurse_cnt++;
4216 local_irq_restore(flags);
4217 do {
4218 cnt = rs_write(tty, 0, buf + written, len);
4219 if (cnt >= 0) {
4220 written += cnt;
4221 buf += cnt;
4222 len -= cnt;
4223 } else
4224 len = cnt;
4225 } while(len > 0);
4226 local_irq_save(flags);
4227 recurse_cnt--;
4228 local_irq_restore(flags);
4229 return 1;
4230 }
4231 return 0;
4232}
4233
4234/* 3718/*
4235 * ------------------------------------------------------------ 3719 * ------------------------------------------------------------
4236 * rs_close() 3720 * rs_close()
@@ -4252,11 +3736,10 @@ rs_close(struct tty_struct *tty, struct file * filp)
4252 3736
4253 /* interrupts are disabled for this entire function */ 3737 /* interrupts are disabled for this entire function */
4254 3738
4255 save_flags(flags); 3739 local_irq_save(flags);
4256 cli();
4257 3740
4258 if (tty_hung_up_p(filp)) { 3741 if (tty_hung_up_p(filp)) {
4259 restore_flags(flags); 3742 local_irq_restore(flags);
4260 return; 3743 return;
4261 } 3744 }
4262 3745
@@ -4283,7 +3766,7 @@ rs_close(struct tty_struct *tty, struct file * filp)
4283 info->count = 0; 3766 info->count = 0;
4284 } 3767 }
4285 if (info->count) { 3768 if (info->count) {
4286 restore_flags(flags); 3769 local_irq_restore(flags);
4287 return; 3770 return;
4288 } 3771 }
4289 info->flags |= ASYNC_CLOSING; 3772 info->flags |= ASYNC_CLOSING;
@@ -4337,7 +3820,7 @@ rs_close(struct tty_struct *tty, struct file * filp)
4337 } 3820 }
4338 info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); 3821 info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
4339 wake_up_interruptible(&info->close_wait); 3822 wake_up_interruptible(&info->close_wait);
4340 restore_flags(flags); 3823 local_irq_restore(flags);
4341 3824
4342 /* port closed */ 3825 /* port closed */
4343 3826
@@ -4359,6 +3842,28 @@ rs_close(struct tty_struct *tty, struct file * filp)
4359#endif 3842#endif
4360 } 3843 }
4361#endif 3844#endif
3845
3846 /*
3847 * Release any allocated DMA irq's.
3848 */
3849 if (info->dma_in_enabled) {
3850 free_irq(info->dma_in_irq_nbr, info);
3851 cris_free_dma(info->dma_in_nbr, info->dma_in_irq_description);
3852 info->uses_dma_in = 0;
3853#ifdef SERIAL_DEBUG_OPEN
3854 printk(KERN_DEBUG "DMA irq '%s' freed\n",
3855 info->dma_in_irq_description);
3856#endif
3857 }
3858 if (info->dma_out_enabled) {
3859 free_irq(info->dma_out_irq_nbr, info);
3860 cris_free_dma(info->dma_out_nbr, info->dma_out_irq_description);
3861 info->uses_dma_out = 0;
3862#ifdef SERIAL_DEBUG_OPEN
3863 printk(KERN_DEBUG "DMA irq '%s' freed\n",
3864 info->dma_out_irq_description);
3865#endif
3866 }
4362} 3867}
4363 3868
4364/* 3869/*
@@ -4433,8 +3938,8 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
4433 */ 3938 */
4434 if (tty_hung_up_p(filp) || 3939 if (tty_hung_up_p(filp) ||
4435 (info->flags & ASYNC_CLOSING)) { 3940 (info->flags & ASYNC_CLOSING)) {
4436 if (info->flags & ASYNC_CLOSING) 3941 wait_event_interruptible(info->close_wait,
4437 interruptible_sleep_on(&info->close_wait); 3942 !(info->flags & ASYNC_CLOSING));
4438#ifdef SERIAL_DO_RESTART 3943#ifdef SERIAL_DO_RESTART
4439 if (info->flags & ASYNC_HUP_NOTIFY) 3944 if (info->flags & ASYNC_HUP_NOTIFY)
4440 return -EAGAIN; 3945 return -EAGAIN;
@@ -4472,21 +3977,19 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
4472 printk("block_til_ready before block: ttyS%d, count = %d\n", 3977 printk("block_til_ready before block: ttyS%d, count = %d\n",
4473 info->line, info->count); 3978 info->line, info->count);
4474#endif 3979#endif
4475 save_flags(flags); 3980 local_irq_save(flags);
4476 cli();
4477 if (!tty_hung_up_p(filp)) { 3981 if (!tty_hung_up_p(filp)) {
4478 extra_count++; 3982 extra_count++;
4479 info->count--; 3983 info->count--;
4480 } 3984 }
4481 restore_flags(flags); 3985 local_irq_restore(flags);
4482 info->blocked_open++; 3986 info->blocked_open++;
4483 while (1) { 3987 while (1) {
4484 save_flags(flags); 3988 local_irq_save(flags);
4485 cli();
4486 /* assert RTS and DTR */ 3989 /* assert RTS and DTR */
4487 e100_rts(info, 1); 3990 e100_rts(info, 1);
4488 e100_dtr(info, 1); 3991 e100_dtr(info, 1);
4489 restore_flags(flags); 3992 local_irq_restore(flags);
4490 set_current_state(TASK_INTERRUPTIBLE); 3993 set_current_state(TASK_INTERRUPTIBLE);
4491 if (tty_hung_up_p(filp) || 3994 if (tty_hung_up_p(filp) ||
4492 !(info->flags & ASYNC_INITIALIZED)) { 3995 !(info->flags & ASYNC_INITIALIZED)) {
@@ -4528,6 +4031,19 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
4528 return 0; 4031 return 0;
4529} 4032}
4530 4033
4034static void
4035deinit_port(struct e100_serial *info)
4036{
4037 if (info->dma_out_enabled) {
4038 cris_free_dma(info->dma_out_nbr, info->dma_out_irq_description);
4039 free_irq(info->dma_out_irq_nbr, info);
4040 }
4041 if (info->dma_in_enabled) {
4042 cris_free_dma(info->dma_in_nbr, info->dma_in_irq_description);
4043 free_irq(info->dma_in_irq_nbr, info);
4044 }
4045}
4046
4531/* 4047/*
4532 * This routine is called whenever a serial port is opened. 4048 * This routine is called whenever a serial port is opened.
4533 * It performs the serial-specific initialization for the tty structure. 4049 * It performs the serial-specific initialization for the tty structure.
@@ -4538,9 +4054,9 @@ rs_open(struct tty_struct *tty, struct file * filp)
4538 struct e100_serial *info; 4054 struct e100_serial *info;
4539 int retval, line; 4055 int retval, line;
4540 unsigned long page; 4056 unsigned long page;
4057 int allocated_resources = 0;
4541 4058
4542 /* find which port we want to open */ 4059 /* find which port we want to open */
4543
4544 line = tty->index; 4060 line = tty->index;
4545 4061
4546 if (line < 0 || line >= NR_PORTS) 4062 if (line < 0 || line >= NR_PORTS)
@@ -4580,8 +4096,8 @@ rs_open(struct tty_struct *tty, struct file * filp)
4580 */ 4096 */
4581 if (tty_hung_up_p(filp) || 4097 if (tty_hung_up_p(filp) ||
4582 (info->flags & ASYNC_CLOSING)) { 4098 (info->flags & ASYNC_CLOSING)) {
4583 if (info->flags & ASYNC_CLOSING) 4099 wait_event_interruptible(info->close_wait,
4584 interruptible_sleep_on(&info->close_wait); 4100 !(info->flags & ASYNC_CLOSING));
4585#ifdef SERIAL_DO_RESTART 4101#ifdef SERIAL_DO_RESTART
4586 return ((info->flags & ASYNC_HUP_NOTIFY) ? 4102 return ((info->flags & ASYNC_HUP_NOTIFY) ?
4587 -EAGAIN : -ERESTARTSYS); 4103 -EAGAIN : -ERESTARTSYS);
@@ -4591,12 +4107,85 @@ rs_open(struct tty_struct *tty, struct file * filp)
4591 } 4107 }
4592 4108
4593 /* 4109 /*
4110 * If DMA is enabled try to allocate the irq's.
4111 */
4112 if (info->count == 1) {
4113 allocated_resources = 1;
4114 if (info->dma_in_enabled) {
4115 if (request_irq(info->dma_in_irq_nbr,
4116 rec_interrupt,
4117 info->dma_in_irq_flags,
4118 info->dma_in_irq_description,
4119 info)) {
4120 printk(KERN_WARNING "DMA irq '%s' busy; "
4121 "falling back to non-DMA mode\n",
4122 info->dma_in_irq_description);
4123 /* Make sure we never try to use DMA in */
4124 /* for the port again. */
4125 info->dma_in_enabled = 0;
4126 } else if (cris_request_dma(info->dma_in_nbr,
4127 info->dma_in_irq_description,
4128 DMA_VERBOSE_ON_ERROR,
4129 info->dma_owner)) {
4130 free_irq(info->dma_in_irq_nbr, info);
4131 printk(KERN_WARNING "DMA '%s' busy; "
4132 "falling back to non-DMA mode\n",
4133 info->dma_in_irq_description);
4134 /* Make sure we never try to use DMA in */
4135 /* for the port again. */
4136 info->dma_in_enabled = 0;
4137 }
4138#ifdef SERIAL_DEBUG_OPEN
4139 else
4140 printk(KERN_DEBUG "DMA irq '%s' allocated\n",
4141 info->dma_in_irq_description);
4142#endif
4143 }
4144 if (info->dma_out_enabled) {
4145 if (request_irq(info->dma_out_irq_nbr,
4146 tr_interrupt,
4147 info->dma_out_irq_flags,
4148 info->dma_out_irq_description,
4149 info)) {
4150 printk(KERN_WARNING "DMA irq '%s' busy; "
4151 "falling back to non-DMA mode\n",
4152 info->dma_out_irq_description);
4153 /* Make sure we never try to use DMA out */
4154 /* for the port again. */
4155 info->dma_out_enabled = 0;
4156 } else if (cris_request_dma(info->dma_out_nbr,
4157 info->dma_out_irq_description,
4158 DMA_VERBOSE_ON_ERROR,
4159 info->dma_owner)) {
4160 free_irq(info->dma_out_irq_nbr, info);
4161 printk(KERN_WARNING "DMA '%s' busy; "
4162 "falling back to non-DMA mode\n",
4163 info->dma_out_irq_description);
4164 /* Make sure we never try to use DMA out */
4165 /* for the port again. */
4166 info->dma_out_enabled = 0;
4167 }
4168#ifdef SERIAL_DEBUG_OPEN
4169 else
4170 printk(KERN_DEBUG "DMA irq '%s' allocated\n",
4171 info->dma_out_irq_description);
4172#endif
4173 }
4174 }
4175
4176 /*
4594 * Start up the serial port 4177 * Start up the serial port
4595 */ 4178 */
4596 4179
4597 retval = startup(info); 4180 retval = startup(info);
4598 if (retval) 4181 if (retval) {
4182 if (allocated_resources)
4183 deinit_port(info);
4184
4185 /* FIXME Decrease count info->count here too? */
4599 return retval; 4186 return retval;
4187 }
4188
4600 4189
4601 retval = block_til_ready(tty, filp, info); 4190 retval = block_til_ready(tty, filp, info);
4602 if (retval) { 4191 if (retval) {
@@ -4604,6 +4193,9 @@ rs_open(struct tty_struct *tty, struct file * filp)
4604 printk("rs_open returning after block_til_ready with %d\n", 4193 printk("rs_open returning after block_til_ready with %d\n",
4605 retval); 4194 retval);
4606#endif 4195#endif
4196 if (allocated_resources)
4197 deinit_port(info);
4198
4607 return retval; 4199 return retval;
4608 } 4200 }
4609 4201
@@ -4793,6 +4385,8 @@ static const struct tty_operations rs_ops = {
4793 .send_xchar = rs_send_xchar, 4385 .send_xchar = rs_send_xchar,
4794 .wait_until_sent = rs_wait_until_sent, 4386 .wait_until_sent = rs_wait_until_sent,
4795 .read_proc = rs_read_proc, 4387 .read_proc = rs_read_proc,
4388 .tiocmget = rs_tiocmget,
4389 .tiocmset = rs_tiocmset
4796}; 4390};
4797 4391
4798static int __init 4392static int __init
@@ -4810,9 +4404,27 @@ rs_init(void)
4810 /* Setup the timed flush handler system */ 4404 /* Setup the timed flush handler system */
4811 4405
4812#if !defined(CONFIG_ETRAX_SERIAL_FAST_TIMER) 4406#if !defined(CONFIG_ETRAX_SERIAL_FAST_TIMER)
4813 init_timer(&flush_timer); 4407 setup_timer(&flush_timer, timed_flush_handler, 0);
4814 flush_timer.function = timed_flush_handler; 4408 mod_timer(&flush_timer, jiffies + 5);
4815 mod_timer(&flush_timer, jiffies + CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS); 4409#endif
4410
4411#if defined(CONFIG_ETRAX_RS485)
4412#if defined(CONFIG_ETRAX_RS485_ON_PA)
4413 if (cris_io_interface_allocate_pins(if_ser0, 'a', rs485_pa_bit,
4414 rs485_pa_bit)) {
4415 printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
4416 "RS485 pin\n");
4417 return -EBUSY;
4418 }
4419#endif
4420#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
4421 if (cris_io_interface_allocate_pins(if_ser0, 'g', rs485_pa_bit,
4422 rs485_port_g_bit)) {
4423 printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
4424 "RS485 pin\n");
4425 return -EBUSY;
4426 }
4427#endif
4816#endif 4428#endif
4817 4429
4818 /* Initialize the tty_driver structure */ 4430 /* Initialize the tty_driver structure */
@@ -4839,6 +4451,16 @@ rs_init(void)
4839 /* do some initializing for the separate ports */ 4451 /* do some initializing for the separate ports */
4840 4452
4841 for (i = 0, info = rs_table; i < NR_PORTS; i++,info++) { 4453 for (i = 0, info = rs_table; i < NR_PORTS; i++,info++) {
4454 if (info->enabled) {
4455 if (cris_request_io_interface(info->io_if,
4456 info->io_if_description)) {
4457 printk(KERN_CRIT "ETRAX100LX async serial: "
4458 "Could not allocate IO pins for "
4459 "%s, port %d\n",
4460 info->io_if_description, i);
4461 info->enabled = 0;
4462 }
4463 }
4842 info->uses_dma_in = 0; 4464 info->uses_dma_in = 0;
4843 info->uses_dma_out = 0; 4465 info->uses_dma_out = 0;
4844 info->line = i; 4466 info->line = i;
@@ -4872,7 +4494,7 @@ rs_init(void)
4872 info->rs485.delay_rts_before_send = 0; 4494 info->rs485.delay_rts_before_send = 0;
4873 info->rs485.enabled = 0; 4495 info->rs485.enabled = 0;
4874#endif 4496#endif
4875 INIT_WORK(&info->work, do_softint, info); 4497 INIT_WORK(&info->work, do_softint);
4876 4498
4877 if (info->enabled) { 4499 if (info->enabled) {
4878 printk(KERN_INFO "%s%d at 0x%x is a builtin UART with DMA\n", 4500 printk(KERN_INFO "%s%d at 0x%x is a builtin UART with DMA\n",
@@ -4890,64 +4512,17 @@ rs_init(void)
4890#endif 4512#endif
4891 4513
4892#ifndef CONFIG_SVINTO_SIM 4514#ifndef CONFIG_SVINTO_SIM
4515#ifndef CONFIG_ETRAX_KGDB
4893 /* Not needed in simulator. May only complicate stuff. */ 4516 /* Not needed in simulator. May only complicate stuff. */
4894 /* hook the irq's for DMA channel 6 and 7, serial output and input, and some more... */ 4517 /* hook the irq's for DMA channel 6 and 7, serial output and input, and some more... */
4895 4518
4896 if (request_irq(SERIAL_IRQ_NBR, ser_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial ", NULL)) 4519 if (request_irq(SERIAL_IRQ_NBR, ser_interrupt,
4897 panic("irq8"); 4520 IRQF_SHARED | IRQF_DISABLED, "serial ", driver))
4898 4521 panic("%s: Failed to request irq8", __FUNCTION__);
4899#ifdef CONFIG_ETRAX_SERIAL_PORT0
4900#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA6_OUT
4901 if (request_irq(SER0_DMA_TX_IRQ_NBR, tr_interrupt, IRQF_DISABLED, "serial 0 dma tr", NULL))
4902 panic("irq22");
4903#endif
4904#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA7_IN
4905 if (request_irq(SER0_DMA_RX_IRQ_NBR, rec_interrupt, IRQF_DISABLED, "serial 0 dma rec", NULL))
4906 panic("irq23");
4907#endif
4908#endif
4909
4910#ifdef CONFIG_ETRAX_SERIAL_PORT1
4911#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA8_OUT
4912 if (request_irq(SER1_DMA_TX_IRQ_NBR, tr_interrupt, IRQF_DISABLED, "serial 1 dma tr", NULL))
4913 panic("irq24");
4914#endif
4915#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA9_IN
4916 if (request_irq(SER1_DMA_RX_IRQ_NBR, rec_interrupt, IRQF_DISABLED, "serial 1 dma rec", NULL))
4917 panic("irq25");
4918#endif
4919#endif
4920#ifdef CONFIG_ETRAX_SERIAL_PORT2
4921 /* DMA Shared with par0 (and SCSI0 and ATA) */
4922#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA2_OUT
4923 if (request_irq(SER2_DMA_TX_IRQ_NBR, tr_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial 2 dma tr", NULL))
4924 panic("irq18");
4925#endif
4926#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA3_IN
4927 if (request_irq(SER2_DMA_RX_IRQ_NBR, rec_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial 2 dma rec", NULL))
4928 panic("irq19");
4929#endif
4930#endif
4931#ifdef CONFIG_ETRAX_SERIAL_PORT3
4932 /* DMA Shared with par1 (and SCSI1 and Extern DMA 0) */
4933#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA4_OUT
4934 if (request_irq(SER3_DMA_TX_IRQ_NBR, tr_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial 3 dma tr", NULL))
4935 panic("irq20");
4936#endif
4937#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA5_IN
4938 if (request_irq(SER3_DMA_RX_IRQ_NBR, rec_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial 3 dma rec", NULL))
4939 panic("irq21");
4940#endif
4941#endif
4942 4522
4943#ifdef CONFIG_ETRAX_SERIAL_FLUSH_DMA_FAST
4944 if (request_irq(TIMER1_IRQ_NBR, timeout_interrupt, IRQF_SHARED | IRQF_DISABLED,
4945 "fast serial dma timeout", NULL)) {
4946 printk(KERN_CRIT "err: timer1 irq\n");
4947 }
4948#endif 4523#endif
4949#endif /* CONFIG_SVINTO_SIM */ 4524#endif /* CONFIG_SVINTO_SIM */
4950 debug_write_function = rs_debug_write_function; 4525
4951 return 0; 4526 return 0;
4952} 4527}
4953 4528
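
The crisv10.c hunks above replace the racy interruptible_sleep_on() calls in the open/close paths with wait_event_interruptible() on an explicit condition. A minimal sketch of that conversion pattern follows; the structure, flag, and function names (demo_port, DEMO_PORT_CLOSING) are placeholders, not the driver's own.

/*
 * Sketch only: wait_event_interruptible() queues the task and sets its
 * state before re-checking the condition, so a wake-up that arrives
 * between the test and the sleep is not lost the way it could be with
 * interruptible_sleep_on().
 */
#include <linux/wait.h>

#define DEMO_PORT_CLOSING 0x01

struct demo_port {
	unsigned int flags;
	wait_queue_head_t close_wait;	/* waker clears the flag, then calls
					 * wake_up_interruptible(&close_wait) */
};

static int demo_wait_until_closed(struct demo_port *port)
{
	/* returns 0 once the port is no longer closing, -ERESTARTSYS on signal */
	return wait_event_interruptible(port->close_wait,
					!(port->flags & DEMO_PORT_CLOSING));
}
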
diff --git a/drivers/serial/crisv10.h b/drivers/serial/crisv10.h
new file mode 100644
index 000000000000..ccd0f32b7372
--- /dev/null
+++ b/drivers/serial/crisv10.h
@@ -0,0 +1,146 @@
1/*
2 * serial.h: Arch-dep definitions for the Etrax100 serial driver.
3 *
4 * Copyright (C) 1998-2007 Axis Communications AB
5 */
6
7#ifndef _ETRAX_SERIAL_H
8#define _ETRAX_SERIAL_H
9
10#include <linux/circ_buf.h>
11#include <asm/termios.h>
12#include <asm/dma.h>
13#include <asm/arch/io_interface_mux.h>
14
15/* Software state per channel */
16
17#ifdef __KERNEL__
18/*
19 * This is our internal structure for each serial port's state.
20 *
21 * Many fields are paralleled by the structure used by the serial_struct
22 * structure.
23 *
24 * For definitions of the flags field, see tty.h
25 */
26
27#define SERIAL_RECV_DESCRIPTORS 8
28
29struct etrax_recv_buffer {
30 struct etrax_recv_buffer *next;
31 unsigned short length;
32 unsigned char error;
33 unsigned char pad;
34
35 unsigned char buffer[0];
36};
37
38struct e100_serial {
39 int baud;
40 volatile u8 *port; /* R_SERIALx_CTRL */
41 u32 irq; /* bitnr in R_IRQ_MASK2 for dmaX_descr */
42
43 /* Output registers */
44 volatile u8 *oclrintradr; /* adr to R_DMA_CHx_CLR_INTR */
45 volatile u32 *ofirstadr; /* adr to R_DMA_CHx_FIRST */
46 volatile u8 *ocmdadr; /* adr to R_DMA_CHx_CMD */
47 const volatile u8 *ostatusadr; /* adr to R_DMA_CHx_STATUS */
48
49 /* Input registers */
50 volatile u8 *iclrintradr; /* adr to R_DMA_CHx_CLR_INTR */
51 volatile u32 *ifirstadr; /* adr to R_DMA_CHx_FIRST */
52 volatile u8 *icmdadr; /* adr to R_DMA_CHx_CMD */
53 volatile u32 *idescradr; /* adr to R_DMA_CHx_DESCR */
54
55 int flags; /* defined in tty.h */
56
57 u8 rx_ctrl; /* shadow for R_SERIALx_REC_CTRL */
58 u8 tx_ctrl; /* shadow for R_SERIALx_TR_CTRL */
59 u8 iseteop; /* bit number for R_SET_EOP for the input dma */
60 int enabled; /* Set to 1 if the port is enabled in HW config */
61
62 u8 dma_out_enabled; /* Set to 1 if DMA should be used */
63 u8 dma_in_enabled; /* Set to 1 if DMA should be used */
64
65 /* end of fields defined in rs_table[] in .c-file */
66 int dma_owner;
67 unsigned int dma_in_nbr;
68 unsigned int dma_out_nbr;
69 unsigned int dma_in_irq_nbr;
70 unsigned int dma_out_irq_nbr;
71 unsigned long dma_in_irq_flags;
72 unsigned long dma_out_irq_flags;
73 char *dma_in_irq_description;
74 char *dma_out_irq_description;
75
76 enum cris_io_interface io_if;
77 char *io_if_description;
78
79 u8 uses_dma_in; /* Set to 1 if DMA is used */
80 u8 uses_dma_out; /* Set to 1 if DMA is used */
81 u8 forced_eop; /* a fifo eop has been forced */
82 int baud_base; /* For special baudrates */
83 int custom_divisor; /* For special baudrates */
84 struct etrax_dma_descr tr_descr;
85 struct etrax_dma_descr rec_descr[SERIAL_RECV_DESCRIPTORS];
86 int cur_rec_descr;
87
88 volatile int tr_running; /* 1 if output is running */
89
90 struct tty_struct *tty;
91 int read_status_mask;
92 int ignore_status_mask;
93 int x_char; /* xon/xoff character */
94 int close_delay;
95 unsigned short closing_wait;
96 unsigned short closing_wait2;
97 unsigned long event;
98 unsigned long last_active;
99 int line;
100 int type; /* PORT_ETRAX */
101 int count; /* # of fd on device */
102 int blocked_open; /* # of blocked opens */
103 struct circ_buf xmit;
104 struct etrax_recv_buffer *first_recv_buffer;
105 struct etrax_recv_buffer *last_recv_buffer;
106 unsigned int recv_cnt;
107 unsigned int max_recv_cnt;
108
109 struct work_struct work;
110 struct async_icount icount; /* error-statistics etc.*/
111 struct ktermios normal_termios;
112 struct ktermios callout_termios;
113 wait_queue_head_t open_wait;
114 wait_queue_head_t close_wait;
115
116 unsigned long char_time_usec; /* The time for 1 char, in usecs */
117 unsigned long flush_time_usec; /* How often we should flush */
118 unsigned long last_tx_active_usec; /* Last tx usec in the jiffies */
119 unsigned long last_tx_active; /* Last tx time in jiffies */
120 unsigned long last_rx_active_usec; /* Last rx usec in the jiffies */
121 unsigned long last_rx_active; /* Last rx time in jiffies */
122
123 int break_detected_cnt;
124 int errorcode;
125
126#ifdef CONFIG_ETRAX_RS485
127 struct rs485_control rs485; /* RS-485 support */
128#endif
129};
130
131/* this PORT is not in the standard serial.h. it's not actually used for
132 * anything since we only have one type of async serial-port anyway in this
133 * system.
134 */
135
136#define PORT_ETRAX 1
137
138/*
139 * Events are used to schedule things to happen at timer-interrupt
140 * time, instead of at rs interrupt time.
141 */
142#define RS_EVENT_WRITE_WAKEUP 0
143
144#endif /* __KERNEL__ */
145
146#endif /* !_ETRAX_SERIAL_H */
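
struct e100_serial above keeps its transmit ring in a standard struct circ_buf (the xmit member). A self-contained sketch of how such a ring is typically filled on the producer side is shown below; the buffer size and function name are assumptions for illustration, not taken from the driver.

#include <linux/circ_buf.h>
#include <linux/string.h>

#define DEMO_XMIT_SIZE 4096	/* must be a power of two for the CIRC_* macros */

static int demo_ring_put(struct circ_buf *ring, const char *buf, int count)
{
	int copied = 0;

	while (count > 0 &&
	       CIRC_SPACE(ring->head, ring->tail, DEMO_XMIT_SIZE)) {
		/* contiguous free space up to the physical end of the buffer */
		int chunk = CIRC_SPACE_TO_END(ring->head, ring->tail,
					      DEMO_XMIT_SIZE);

		if (chunk > count)
			chunk = count;
		memcpy(ring->buf + ring->head, buf, chunk);
		/* producer advances head; the transmit side advances tail */
		ring->head = (ring->head + chunk) & (DEMO_XMIT_SIZE - 1);
		buf += chunk;
		count -= chunk;
		copied += chunk;
	}
	return copied;
}
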
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 89769ce16f88..b31f4431849b 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -457,10 +457,11 @@ done:
457EXPORT_SYMBOL_GPL(spi_register_master); 457EXPORT_SYMBOL_GPL(spi_register_master);
458 458
459 459
460static int __unregister(struct device *dev, void *unused) 460static int __unregister(struct device *dev, void *master_dev)
461{ 461{
462 /* note: before about 2.6.14-rc1 this would corrupt memory: */ 462 /* note: before about 2.6.14-rc1 this would corrupt memory: */
463 spi_unregister_device(to_spi_device(dev)); 463 if (dev != master_dev)
464 spi_unregister_device(to_spi_device(dev));
464 return 0; 465 return 0;
465} 466}
466 467
@@ -478,7 +479,8 @@ void spi_unregister_master(struct spi_master *master)
478{ 479{
479 int dummy; 480 int dummy;
480 481
481 dummy = device_for_each_child(master->dev.parent, NULL, __unregister); 482 dummy = device_for_each_child(master->dev.parent, &master->dev,
483 __unregister);
482 device_unregister(&master->dev); 484 device_unregister(&master->dev);
483} 485}
484EXPORT_SYMBOL_GPL(spi_unregister_master); 486EXPORT_SYMBOL_GPL(spi_unregister_master);
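
The spi.c change above passes the controller's own device into the child walk so that only the child SPI devices are unregistered. The general "skip self" shape of that device_for_each_child() usage is sketched below with placeholder names; device_unregister() stands in for spi_unregister_device() purely for illustration.

#include <linux/device.h>

static int demo_unregister_unless_self(struct device *dev, void *skip)
{
	if (dev != skip)
		device_unregister(dev);	/* stand-in for the real per-child cleanup */
	return 0;			/* returning non-zero would abort the walk */
}

/* caller: device_for_each_child(parent, &self->dev, demo_unregister_unless_self); */
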
diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi_txx9.c
index cc5094f37dd3..363ac8e68821 100644
--- a/drivers/spi/spi_txx9.c
+++ b/drivers/spi/spi_txx9.c
@@ -24,6 +24,7 @@
24#include <linux/spi/spi.h> 24#include <linux/spi/spi.h>
25#include <linux/err.h> 25#include <linux/err.h>
26#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/io.h>
27#include <asm/gpio.h> 28#include <asm/gpio.h>
28 29
29 30
@@ -74,7 +75,6 @@ struct txx9spi {
74 struct list_head queue; 75 struct list_head queue;
75 wait_queue_head_t waitq; 76 wait_queue_head_t waitq;
76 void __iomem *membase; 77 void __iomem *membase;
77 int irq;
78 int baseclk; 78 int baseclk;
79 struct clk *clk; 79 struct clk *clk;
80 u32 max_speed_hz, min_speed_hz; 80 u32 max_speed_hz, min_speed_hz;
@@ -350,12 +350,12 @@ static int __init txx9spi_probe(struct platform_device *dev)
350 struct resource *res; 350 struct resource *res;
351 int ret = -ENODEV; 351 int ret = -ENODEV;
352 u32 mcr; 352 u32 mcr;
353 int irq;
353 354
354 master = spi_alloc_master(&dev->dev, sizeof(*c)); 355 master = spi_alloc_master(&dev->dev, sizeof(*c));
355 if (!master) 356 if (!master)
356 return ret; 357 return ret;
357 c = spi_master_get_devdata(master); 358 c = spi_master_get_devdata(master);
358 c->irq = -1;
359 platform_set_drvdata(dev, master); 359 platform_set_drvdata(dev, master);
360 360
361 INIT_WORK(&c->work, txx9spi_work); 361 INIT_WORK(&c->work, txx9spi_work);
@@ -381,32 +381,36 @@ static int __init txx9spi_probe(struct platform_device *dev)
381 381
382 res = platform_get_resource(dev, IORESOURCE_MEM, 0); 382 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
383 if (!res) 383 if (!res)
384 goto exit; 384 goto exit_busy;
385 c->membase = ioremap(res->start, res->end - res->start + 1); 385 if (!devm_request_mem_region(&dev->dev,
386 res->start, res->end - res->start + 1,
387 "spi_txx9"))
388 goto exit_busy;
389 c->membase = devm_ioremap(&dev->dev,
390 res->start, res->end - res->start + 1);
386 if (!c->membase) 391 if (!c->membase)
387 goto exit; 392 goto exit_busy;
388 393
389 /* enter config mode */ 394 /* enter config mode */
390 mcr = txx9spi_rd(c, TXx9_SPMCR); 395 mcr = txx9spi_rd(c, TXx9_SPMCR);
391 mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR); 396 mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR);
392 txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR); 397 txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
393 398
394 c->irq = platform_get_irq(dev, 0); 399 irq = platform_get_irq(dev, 0);
395 if (c->irq < 0) 400 if (irq < 0)
396 goto exit; 401 goto exit_busy;
397 ret = request_irq(c->irq, txx9spi_interrupt, 0, dev->name, c); 402 ret = devm_request_irq(&dev->dev, irq, txx9spi_interrupt, 0,
398 if (ret) { 403 "spi_txx9", c);
399 c->irq = -1; 404 if (ret)
400 goto exit; 405 goto exit;
401 }
402 406
403 c->workqueue = create_singlethread_workqueue(master->dev.parent->bus_id); 407 c->workqueue = create_singlethread_workqueue(master->dev.parent->bus_id);
404 if (!c->workqueue) 408 if (!c->workqueue)
405 goto exit; 409 goto exit_busy;
406 c->last_chipselect = -1; 410 c->last_chipselect = -1;
407 411
408 dev_info(&dev->dev, "at %#llx, irq %d, %dMHz\n", 412 dev_info(&dev->dev, "at %#llx, irq %d, %dMHz\n",
409 (unsigned long long)res->start, c->irq, 413 (unsigned long long)res->start, irq,
410 (c->baseclk + 500000) / 1000000); 414 (c->baseclk + 500000) / 1000000);
411 415
412 master->bus_num = dev->id; 416 master->bus_num = dev->id;
@@ -418,13 +422,11 @@ static int __init txx9spi_probe(struct platform_device *dev)
418 if (ret) 422 if (ret)
419 goto exit; 423 goto exit;
420 return 0; 424 return 0;
425exit_busy:
426 ret = -EBUSY;
421exit: 427exit:
422 if (c->workqueue) 428 if (c->workqueue)
423 destroy_workqueue(c->workqueue); 429 destroy_workqueue(c->workqueue);
424 if (c->irq >= 0)
425 free_irq(c->irq, c);
426 if (c->membase)
427 iounmap(c->membase);
428 if (c->clk) { 430 if (c->clk) {
429 clk_disable(c->clk); 431 clk_disable(c->clk);
430 clk_put(c->clk); 432 clk_put(c->clk);
@@ -442,8 +444,6 @@ static int __exit txx9spi_remove(struct platform_device *dev)
442 spi_unregister_master(master); 444 spi_unregister_master(master);
443 platform_set_drvdata(dev, NULL); 445 platform_set_drvdata(dev, NULL);
444 destroy_workqueue(c->workqueue); 446 destroy_workqueue(c->workqueue);
445 free_irq(c->irq, c);
446 iounmap(c->membase);
447 clk_disable(c->clk); 447 clk_disable(c->clk);
448 clk_put(c->clk); 448 clk_put(c->clk);
449 spi_master_put(master); 449 spi_master_put(master);
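
The spi_txx9 probe above switches to managed (devm_*) resources, which is why the explicit iounmap()/free_irq() calls disappear from both the error path and remove(). A sketch of that pattern under assumed names ("demo" device, demo_irq handler) is below; devres releases everything automatically when probe fails or the device is unbound.

#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	int irq, ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	if (!devm_request_mem_region(&pdev->dev, res->start,
				     res->end - res->start + 1, "demo"))
		return -EBUSY;
	base = devm_ioremap(&pdev->dev, res->start,
			    res->end - res->start + 1);
	if (!base)
		return -EBUSY;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	ret = devm_request_irq(&pdev->dev, irq, demo_irq, 0, "demo", base);
	if (ret)
		return ret;

	/* no manual unwind needed: the region, mapping and irq are all devres-managed */
	return 0;
}
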
diff --git a/drivers/spi/tle62x0.c b/drivers/spi/tle62x0.c
index 6da58ca48b33..455991fbe28f 100644
--- a/drivers/spi/tle62x0.c
+++ b/drivers/spi/tle62x0.c
@@ -107,8 +107,11 @@ static ssize_t tle62x0_status_show(struct device *dev,
107 107
108 mutex_lock(&st->lock); 108 mutex_lock(&st->lock);
109 ret = tle62x0_read(st); 109 ret = tle62x0_read(st);
110
111 dev_dbg(dev, "tle62x0_read() returned %d\n", ret); 110 dev_dbg(dev, "tle62x0_read() returned %d\n", ret);
111 if (ret < 0) {
112 mutex_unlock(&st->lock);
113 return ret;
114 }
112 115
113 for (ptr = 0; ptr < (st->nr_gpio * 2)/8; ptr += 1) { 116 for (ptr = 0; ptr < (st->nr_gpio * 2)/8; ptr += 1) {
114 fault <<= 8; 117 fault <<= 8;
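
The tle62x0 fix above adds the unlock that was missing on the early error return. The general shape of that error path, as an illustrative sketch with placeholder names:

#include <linux/mutex.h>

static int demo_read_locked(struct mutex *lock, int (*read_hw)(void))
{
	int ret;

	mutex_lock(lock);
	ret = read_hw();
	if (ret < 0) {
		mutex_unlock(lock);	/* never return with the lock still held */
		return ret;
	}
	/* ... consume the data read under the lock ... */
	mutex_unlock(lock);
	return 0;
}
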
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 6bfdba6a213f..1f7ab15df36d 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -1215,20 +1215,18 @@ static int keyspan_chars_in_buffer (struct usb_serial_port *port)
1215 1215
1216static int keyspan_open (struct usb_serial_port *port, struct file *filp) 1216static int keyspan_open (struct usb_serial_port *port, struct file *filp)
1217{ 1217{
1218 struct keyspan_port_private *p_priv; 1218 struct keyspan_port_private *p_priv;
1219 struct keyspan_serial_private *s_priv; 1219 struct keyspan_serial_private *s_priv;
1220 struct usb_serial *serial = port->serial; 1220 struct usb_serial *serial = port->serial;
1221 const struct keyspan_device_details *d_details; 1221 const struct keyspan_device_details *d_details;
1222 int i, err; 1222 int i, err;
1223 int baud_rate, device_port;
1224 struct urb *urb; 1223 struct urb *urb;
1225 unsigned int cflag;
1226 1224
1227 s_priv = usb_get_serial_data(serial); 1225 s_priv = usb_get_serial_data(serial);
1228 p_priv = usb_get_serial_port_data(port); 1226 p_priv = usb_get_serial_port_data(port);
1229 d_details = p_priv->device_details; 1227 d_details = p_priv->device_details;
1230 1228
1231 dbg("%s - port%d.", __FUNCTION__, port->number); 1229 dbg("%s - port%d.", __FUNCTION__, port->number);
1232 1230
1233 /* Set some sane defaults */ 1231 /* Set some sane defaults */
1234 p_priv->rts_state = 1; 1232 p_priv->rts_state = 1;
@@ -1249,7 +1247,7 @@ static int keyspan_open (struct usb_serial_port *port, struct file *filp)
1249 urb->dev = serial->dev; 1247 urb->dev = serial->dev;
1250 1248
1251 /* make sure endpoint data toggle is synchronized with the device */ 1249 /* make sure endpoint data toggle is synchronized with the device */
1252 1250
1253 usb_clear_halt(urb->dev, urb->pipe); 1251 usb_clear_halt(urb->dev, urb->pipe);
1254 1252
1255 if ((err = usb_submit_urb(urb, GFP_KERNEL)) != 0) { 1253 if ((err = usb_submit_urb(urb, GFP_KERNEL)) != 0) {
@@ -1265,30 +1263,6 @@ static int keyspan_open (struct usb_serial_port *port, struct file *filp)
1265 /* usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe), 0); */ 1263 /* usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe), 0); */
1266 } 1264 }
1267 1265
1268 /* get the terminal config for the setup message now so we don't
1269 * need to send 2 of them */
1270
1271 cflag = port->tty->termios->c_cflag;
1272 device_port = port->number - port->serial->minor;
1273
1274 /* Baud rate calculation takes baud rate as an integer
1275 so other rates can be generated if desired. */
1276 baud_rate = tty_get_baud_rate(port->tty);
1277 /* If no match or invalid, leave as default */
1278 if (baud_rate >= 0
1279 && d_details->calculate_baud_rate(baud_rate, d_details->baudclk,
1280 NULL, NULL, NULL, device_port) == KEYSPAN_BAUD_RATE_OK) {
1281 p_priv->baud = baud_rate;
1282 }
1283
1284 /* set CTS/RTS handshake etc. */
1285 p_priv->cflag = cflag;
1286 p_priv->flow_control = (cflag & CRTSCTS)? flow_cts: flow_none;
1287
1288 keyspan_send_setup(port, 1);
1289 //mdelay(100);
1290 //keyspan_set_termios(port, NULL);
1291
1292 return (0); 1266 return (0);
1293} 1267}
1294 1268
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index cc4b60f899ca..7d86e9eae915 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -503,7 +503,7 @@ config FB_VALKYRIE
503 503
504config FB_CT65550 504config FB_CT65550
505 bool "Chips 65550 display support" 505 bool "Chips 65550 display support"
506 depends on (FB = y) && PPC32 506 depends on (FB = y) && PPC32 && PCI
507 select FB_CFB_FILLRECT 507 select FB_CFB_FILLRECT
508 select FB_CFB_COPYAREA 508 select FB_CFB_COPYAREA
509 select FB_CFB_IMAGEBLIT 509 select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
index b9b572b293d4..2e552d5bbb5d 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/gbefb.c
@@ -183,8 +183,8 @@ static struct fb_videomode default_mode_LCD __initdata = {
183 .vmode = FB_VMODE_NONINTERLACED, 183 .vmode = FB_VMODE_NONINTERLACED,
184}; 184};
185 185
186struct fb_videomode *default_mode = &default_mode_CRT; 186struct fb_videomode *default_mode __initdata = &default_mode_CRT;
187struct fb_var_screeninfo *default_var = &default_var_CRT; 187struct fb_var_screeninfo *default_var __initdata = &default_var_CRT;
188 188
189static int flat_panel_enabled = 0; 189static int flat_panel_enabled = 0;
190 190
diff --git a/drivers/video/geode/lxfb.h b/drivers/video/geode/lxfb.h
index 6c227f9592a5..ca13c48d19b0 100644
--- a/drivers/video/geode/lxfb.h
+++ b/drivers/video/geode/lxfb.h
@@ -33,7 +33,7 @@ void lx_set_palette_reg(struct fb_info *, unsigned int, unsigned int,
33 33
34#define MSR_LX_GLD_CONFIG 0x48002001 34#define MSR_LX_GLD_CONFIG 0x48002001
35#define MSR_LX_GLCP_DOTPLL 0x4c000015 35#define MSR_LX_GLCP_DOTPLL 0x4c000015
36#define MSR_LX_DF_PADSEL 0x48000011 36#define MSR_LX_DF_PADSEL 0x48002011
37#define MSR_LX_DC_SPARE 0x80000011 37#define MSR_LX_DC_SPARE 0x80000011
38#define MSR_LX_DF_GLCONFIG 0x48002001 38#define MSR_LX_DF_GLCONFIG 0x48002001
39 39
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index b3463ddcfd60..75836aa83191 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -727,7 +727,7 @@ static int ps3fb_blank(int blank, struct fb_info *info)
727 727
728static int ps3fb_get_vblank(struct fb_vblank *vblank) 728static int ps3fb_get_vblank(struct fb_vblank *vblank)
729{ 729{
730 memset(vblank, 0, sizeof(&vblank)); 730 memset(vblank, 0, sizeof(*vblank));
731 vblank->flags = FB_VBLANK_HAVE_VSYNC; 731 vblank->flags = FB_VBLANK_HAVE_VSYNC;
732 return 0; 732 return 0;
733} 733}
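
The ps3fb change above (and the ds2490 change later in this diff) fix the same class of bug: memset() was given the size of the pointer rather than the size of the object it points to. A minimal illustration with a made-up structure:

#include <linux/string.h>

struct demo_vblank {
	unsigned long flags;
	unsigned int count;
};

static void demo_clear_vblank(struct demo_vblank *vblank)
{
	/*
	 * sizeof(vblank) is only the pointer size (4 or 8 bytes);
	 * sizeof(*vblank) is the whole structure, which is what must be zeroed.
	 */
	memset(vblank, 0, sizeof(*vblank));
}
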
diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
index a5333c190789..b829dc7c5edf 100644
--- a/drivers/video/s1d13xxxfb.c
+++ b/drivers/video/s1d13xxxfb.c
@@ -540,7 +540,7 @@ s1d13xxxfb_probe(struct platform_device *pdev)
540 int ret = 0; 540 int ret = 0;
541 u8 revision; 541 u8 revision;
542 542
543 dbg("probe called: device is %p\n", dev); 543 dbg("probe called: device is %p\n", pdev);
544 544
545 printk(KERN_INFO "Epson S1D13XXX FB Driver\n"); 545 printk(KERN_INFO "Epson S1D13XXX FB Driver\n");
546 546
@@ -753,8 +753,11 @@ static struct platform_driver s1d13xxxfb_driver = {
753static int __init 753static int __init
754s1d13xxxfb_init(void) 754s1d13xxxfb_init(void)
755{ 755{
756
757#ifndef MODULE
756 if (fb_get_options("s1d13xxxfb", NULL)) 758 if (fb_get_options("s1d13xxxfb", NULL))
757 return -ENODEV; 759 return -ENODEV;
760#endif
758 761
759 return platform_driver_register(&s1d13xxxfb_driver); 762 return platform_driver_register(&s1d13xxxfb_driver);
760} 763}
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index bc7d23683735..37bd24b8d83b 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1248,7 +1248,6 @@ sisfb_do_set_var(struct fb_var_screeninfo *var, int isactive, struct fb_info *in
1248 if(found_mode) { 1248 if(found_mode) {
1249 ivideo->sisfb_mode_idx = sisfb_validate_mode(ivideo, 1249 ivideo->sisfb_mode_idx = sisfb_validate_mode(ivideo,
1250 ivideo->sisfb_mode_idx, ivideo->currentvbflags); 1250 ivideo->sisfb_mode_idx, ivideo->currentvbflags);
1251 ivideo->mode_no = sisbios_mode[ivideo->sisfb_mode_idx].mode_no[ivideo->mni];
1252 } else { 1251 } else {
1253 ivideo->sisfb_mode_idx = -1; 1252 ivideo->sisfb_mode_idx = -1;
1254 } 1253 }
@@ -1260,6 +1259,8 @@ sisfb_do_set_var(struct fb_var_screeninfo *var, int isactive, struct fb_info *in
1260 return -EINVAL; 1259 return -EINVAL;
1261 } 1260 }
1262 1261
1262 ivideo->mode_no = sisbios_mode[ivideo->sisfb_mode_idx].mode_no[ivideo->mni];
1263
1263 if(sisfb_search_refresh_rate(ivideo, ivideo->refresh_rate, ivideo->sisfb_mode_idx) == 0) { 1264 if(sisfb_search_refresh_rate(ivideo, ivideo->refresh_rate, ivideo->sisfb_mode_idx) == 0) {
1264 ivideo->rate_idx = sisbios_mode[ivideo->sisfb_mode_idx].rate_idx; 1265 ivideo->rate_idx = sisbios_mode[ivideo->sisfb_mode_idx].rate_idx;
1265 ivideo->refresh_rate = 60; 1266 ivideo->refresh_rate = 60;
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index b983d262ab78..d1d6c0facd54 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -926,8 +926,10 @@ static int uvesafb_setpalette(struct uvesafb_pal_entry *entries, int count,
926 int start, struct fb_info *info) 926 int start, struct fb_info *info)
927{ 927{
928 struct uvesafb_ktask *task; 928 struct uvesafb_ktask *task;
929#ifdef CONFIG_X86
929 struct uvesafb_par *par = info->par; 930 struct uvesafb_par *par = info->par;
930 int i = par->mode_idx; 931 int i = par->mode_idx;
932#endif
931 int err = 0; 933 int err = 0;
932 934
933 /* 935 /*
@@ -1103,11 +1105,11 @@ static int uvesafb_pan_display(struct fb_var_screeninfo *var,
1103 1105
1104static int uvesafb_blank(int blank, struct fb_info *info) 1106static int uvesafb_blank(int blank, struct fb_info *info)
1105{ 1107{
1106 struct uvesafb_par *par = info->par;
1107 struct uvesafb_ktask *task; 1108 struct uvesafb_ktask *task;
1108 int err = 1; 1109 int err = 1;
1109
1110#ifdef CONFIG_X86 1110#ifdef CONFIG_X86
1111 struct uvesafb_par *par = info->par;
1112
1111 if (par->vbe_ib.capabilities & VBE_CAP_VGACOMPAT) { 1113 if (par->vbe_ib.capabilities & VBE_CAP_VGACOMPAT) {
1112 int loop = 10000; 1114 int loop = 10000;
1113 u8 seq = 0, crtc17 = 0; 1115 u8 seq = 0, crtc17 = 0;
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 299e274d241a..b63b5e044a4c 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -233,7 +233,7 @@ static int ds_recv_status_nodump(struct ds_device *dev, struct ds_status *st,
233{ 233{
234 int count, err; 234 int count, err;
235 235
236 memset(st, 0, sizeof(st)); 236 memset(st, 0, sizeof(*st));
237 237
238 count = 0; 238 count = 0;
239 err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_STATUS]), buf, size, &count, 100); 239 err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_STATUS]), buf, size, &count, 100);