Diffstat (limited to 'drivers')
70 files changed, 3961 insertions, 999 deletions
diff --git a/drivers/base/Makefile b/drivers/base/Makefile index f12898d53078..e99471d3232b 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile | |||
@@ -8,6 +8,7 @@ obj-y += power/ | |||
8 | obj-$(CONFIG_FW_LOADER) += firmware_class.o | 8 | obj-$(CONFIG_FW_LOADER) += firmware_class.o |
9 | obj-$(CONFIG_NUMA) += node.o | 9 | obj-$(CONFIG_NUMA) += node.o |
10 | obj-$(CONFIG_MEMORY_HOTPLUG) += memory.o | 10 | obj-$(CONFIG_MEMORY_HOTPLUG) += memory.o |
11 | obj-$(CONFIG_SMP) += topology.o | ||
11 | 12 | ||
12 | ifeq ($(CONFIG_DEBUG_DRIVER),y) | 13 | ifeq ($(CONFIG_DEBUG_DRIVER),y) |
13 | EXTRA_CFLAGS += -DDEBUG | 14 | EXTRA_CFLAGS += -DDEBUG |
diff --git a/drivers/base/topology.c b/drivers/base/topology.c new file mode 100644 index 000000000000..915810f6237e --- /dev/null +++ b/drivers/base/topology.c | |||
@@ -0,0 +1,148 @@ | |||
1 | /* | ||
2 | * driver/base/topology.c - Populate sysfs with cpu topology information | ||
3 | * | ||
4 | * Written by: Zhang Yanmin, Intel Corporation | ||
5 | * | ||
6 | * Copyright (C) 2006, Intel Corp. | ||
7 | * | ||
8 | * All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but | ||
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
18 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
19 | * details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
24 | * | ||
25 | */ | ||
26 | #include <linux/sysdev.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/mm.h> | ||
29 | #include <linux/cpu.h> | ||
30 | #include <linux/module.h> | ||
31 | #include <linux/topology.h> | ||
32 | |||
33 | #define define_one_ro(_name) \ | ||
34 | static SYSDEV_ATTR(_name, 0444, show_##_name, NULL) | ||
35 | |||
36 | #define define_id_show_func(name) \ | ||
37 | static ssize_t show_##name(struct sys_device *dev, char *buf) \ | ||
38 | { \ | ||
39 | unsigned int cpu = dev->id; \ | ||
40 | return sprintf(buf, "%d\n", topology_##name(cpu)); \ | ||
41 | } | ||
42 | |||
43 | #define define_siblings_show_func(name) \ | ||
44 | static ssize_t show_##name(struct sys_device *dev, char *buf) \ | ||
45 | { \ | ||
46 | ssize_t len = -1; \ | ||
47 | unsigned int cpu = dev->id; \ | ||
48 | len = cpumask_scnprintf(buf, NR_CPUS+1, topology_##name(cpu)); \ | ||
49 | return (len + sprintf(buf + len, "\n")); \ | ||
50 | } | ||
51 | |||
52 | #ifdef topology_physical_package_id | ||
53 | define_id_show_func(physical_package_id); | ||
54 | define_one_ro(physical_package_id); | ||
55 | #define ref_physical_package_id_attr &attr_physical_package_id.attr, | ||
56 | #else | ||
57 | #define ref_physical_package_id_attr | ||
58 | #endif | ||
59 | |||
60 | #ifdef topology_core_id | ||
61 | define_id_show_func(core_id); | ||
62 | define_one_ro(core_id); | ||
63 | #define ref_core_id_attr &attr_core_id.attr, | ||
64 | #else | ||
65 | #define ref_core_id_attr | ||
66 | #endif | ||
67 | |||
68 | #ifdef topology_thread_siblings | ||
69 | define_siblings_show_func(thread_siblings); | ||
70 | define_one_ro(thread_siblings); | ||
71 | #define ref_thread_siblings_attr &attr_thread_siblings.attr, | ||
72 | #else | ||
73 | #define ref_thread_siblings_attr | ||
74 | #endif | ||
75 | |||
76 | #ifdef topology_core_siblings | ||
77 | define_siblings_show_func(core_siblings); | ||
78 | define_one_ro(core_siblings); | ||
79 | #define ref_core_siblings_attr &attr_core_siblings.attr, | ||
80 | #else | ||
81 | #define ref_core_siblings_attr | ||
82 | #endif | ||
83 | |||
84 | static struct attribute *default_attrs[] = { | ||
85 | ref_physical_package_id_attr | ||
86 | ref_core_id_attr | ||
87 | ref_thread_siblings_attr | ||
88 | ref_core_siblings_attr | ||
89 | NULL | ||
90 | }; | ||
91 | |||
92 | static struct attribute_group topology_attr_group = { | ||
93 | .attrs = default_attrs, | ||
94 | .name = "topology" | ||
95 | }; | ||
96 | |||
97 | /* Add/Remove cpu_topology interface for CPU device */ | ||
98 | static int __cpuinit topology_add_dev(struct sys_device * sys_dev) | ||
99 | { | ||
100 | sysfs_create_group(&sys_dev->kobj, &topology_attr_group); | ||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static int __cpuinit topology_remove_dev(struct sys_device * sys_dev) | ||
105 | { | ||
106 | sysfs_remove_group(&sys_dev->kobj, &topology_attr_group); | ||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | static int __cpuinit topology_cpu_callback(struct notifier_block *nfb, | ||
111 | unsigned long action, void *hcpu) | ||
112 | { | ||
113 | unsigned int cpu = (unsigned long)hcpu; | ||
114 | struct sys_device *sys_dev; | ||
115 | |||
116 | sys_dev = get_cpu_sysdev(cpu); | ||
117 | switch (action) { | ||
118 | case CPU_ONLINE: | ||
119 | topology_add_dev(sys_dev); | ||
120 | break; | ||
121 | case CPU_DEAD: | ||
122 | topology_remove_dev(sys_dev); | ||
123 | break; | ||
124 | } | ||
125 | return NOTIFY_OK; | ||
126 | } | ||
127 | |||
128 | static struct notifier_block topology_cpu_notifier = | ||
129 | { | ||
130 | .notifier_call = topology_cpu_callback, | ||
131 | }; | ||
132 | |||
133 | static int __cpuinit topology_sysfs_init(void) | ||
134 | { | ||
135 | int i; | ||
136 | |||
137 | for_each_online_cpu(i) { | ||
138 | topology_cpu_callback(&topology_cpu_notifier, CPU_ONLINE, | ||
139 | (void *)(long)i); | ||
140 | } | ||
141 | |||
142 | register_cpu_notifier(&topology_cpu_notifier); | ||
143 | |||
144 | return 0; | ||
145 | } | ||
146 | |||
147 | device_initcall(topology_sysfs_init); | ||
148 | |||
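Note: the new drivers/base/topology.c above is almost entirely macro-generated. As a rough illustration (not additional code in the patch), define_id_show_func(physical_package_id) followed by define_one_ro(physical_package_id) expand to approximately the following, assuming the architecture's <asm/topology.h> provides a topology_physical_package_id() macro:

/* Approximate expansion of the two macros above -- illustration only. */
static ssize_t show_physical_package_id(struct sys_device *dev, char *buf)
{
	unsigned int cpu = dev->id;

	return sprintf(buf, "%d\n", topology_physical_package_id(cpu));
}
static SYSDEV_ATTR(physical_package_id, 0444, show_physical_package_id, NULL);

The resulting attribute then appears as /sys/devices/system/cpu/cpuN/topology/physical_package_id once the "topology" group has been created for an online CPU.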
diff --git a/drivers/block/umem.c b/drivers/block/umem.c index a3614e6a68d0..4ada1268b40d 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c | |||
@@ -882,7 +882,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i | |||
882 | card->card_number, dev->bus->number, dev->devfn); | 882 | card->card_number, dev->bus->number, dev->devfn); |
883 | 883 | ||
884 | if (pci_set_dma_mask(dev, 0xffffffffffffffffLL) && | 884 | if (pci_set_dma_mask(dev, 0xffffffffffffffffLL) && |
885 | !pci_set_dma_mask(dev, 0xffffffffLL)) { | 885 | pci_set_dma_mask(dev, 0xffffffffLL)) { |
886 | printk(KERN_WARNING "MM%d: NO suitable DMA found\n",num_cards); | 886 | printk(KERN_WARNING "MM%d: NO suitable DMA found\n",num_cards); |
887 | return -ENOMEM; | 887 | return -ENOMEM; |
888 | } | 888 | } |
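The umem change above fixes inverted fallback logic: pci_set_dma_mask() returns 0 on success, so the old test bailed out exactly when the 32-bit fallback had succeeded (and carried on when both masks had failed). With the '!' removed, the driver only gives up when neither mask can be set. A minimal sketch of the corrected pattern:

/* Sketch of the DMA-mask fallback as fixed (pci_set_dma_mask() == 0 on success). */
if (pci_set_dma_mask(dev, 0xffffffffffffffffLL) &&	/* 64-bit mask rejected...  */
    pci_set_dma_mask(dev, 0xffffffffLL)) {		/* ...and 32-bit rejected too */
	printk(KERN_WARNING "MM%d: NO suitable DMA found\n", num_cards);
	return -ENOMEM;
}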
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c index 39c61a71176e..cc7acf877dc0 100644 --- a/drivers/char/cyclades.c +++ b/drivers/char/cyclades.c | |||
@@ -1233,7 +1233,7 @@ cyy_interrupt(int irq, void *dev_id, struct pt_regs *regs) | |||
1233 | } | 1233 | } |
1234 | info->idle_stats.recv_idle = jiffies; | 1234 | info->idle_stats.recv_idle = jiffies; |
1235 | } | 1235 | } |
1236 | schedule_delayed_work(&tty->buf.work, 1); | 1236 | tty_schedule_flip(tty); |
1237 | } | 1237 | } |
1238 | /* end of service */ | 1238 | /* end of service */ |
1239 | cy_writeb(base_addr+(CyRIR<<index), (save_xir & 0x3f)); | 1239 | cy_writeb(base_addr+(CyRIR<<index), (save_xir & 0x3f)); |
@@ -1606,7 +1606,7 @@ cyz_handle_rx(struct cyclades_port *info, | |||
1606 | } | 1606 | } |
1607 | #endif | 1607 | #endif |
1608 | info->idle_stats.recv_idle = jiffies; | 1608 | info->idle_stats.recv_idle = jiffies; |
1609 | schedule_delayed_work(&tty->buf.work, 1); | 1609 | tty_schedule_flip(tty); |
1610 | } | 1610 | } |
1611 | /* Update rx_get */ | 1611 | /* Update rx_get */ |
1612 | cy_writel(&buf_ctrl->rx_get, new_rx_get); | 1612 | cy_writel(&buf_ctrl->rx_get, new_rx_get); |
@@ -1809,7 +1809,7 @@ cyz_handle_cmd(struct cyclades_card *cinfo) | |||
1809 | if(delta_count) | 1809 | if(delta_count) |
1810 | cy_sched_event(info, Cy_EVENT_DELTA_WAKEUP); | 1810 | cy_sched_event(info, Cy_EVENT_DELTA_WAKEUP); |
1811 | if(special_count) | 1811 | if(special_count) |
1812 | schedule_delayed_work(&tty->buf.work, 1); | 1812 | tty_schedule_flip(tty); |
1813 | } | 1813 | } |
1814 | } | 1814 | } |
1815 | 1815 | ||
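The three cyclades hunks (and the esp.c hunks below) replace direct scheduling of tty->buf.work with the tty-core helper tty_schedule_flip(), so drivers stop reaching into the flip-buffer internals that the tty_io.c changes later in this diff begin protecting with tty->buf.lock. A sketch of the resulting receive-path pattern, where have_rx_byte()/read_rx_byte() stand in for the driver's own FIFO accessors:

/* Typical receive path after this change (sketch; helper names hypothetical). */
while (have_rx_byte(info))
	tty_insert_flip_char(tty, read_rx_byte(info), TTY_NORMAL);
tty_schedule_flip(tty);		/* let the tty core queue the flush work */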
diff --git a/drivers/char/esp.c b/drivers/char/esp.c index 3f3ac039f4d9..57539d8f9f7c 100644 --- a/drivers/char/esp.c +++ b/drivers/char/esp.c | |||
@@ -359,7 +359,7 @@ static inline void receive_chars_pio(struct esp_struct *info, int num_bytes) | |||
359 | } | 359 | } |
360 | } | 360 | } |
361 | 361 | ||
362 | schedule_delayed_work(&tty->buf.work, 1); | 362 | tty_schedule_flip(tty); |
363 | 363 | ||
364 | info->stat_flags &= ~ESP_STAT_RX_TIMEOUT; | 364 | info->stat_flags &= ~ESP_STAT_RX_TIMEOUT; |
365 | release_pio_buffer(pio_buf); | 365 | release_pio_buffer(pio_buf); |
@@ -426,7 +426,7 @@ static inline void receive_chars_dma_done(struct esp_struct *info, | |||
426 | } | 426 | } |
427 | tty_insert_flip_char(tty, dma_buffer[num_bytes - 1], statflag); | 427 | tty_insert_flip_char(tty, dma_buffer[num_bytes - 1], statflag); |
428 | } | 428 | } |
429 | schedule_delayed_work(&tty->buf.work, 1); | 429 | tty_schedule_flip(tty); |
430 | } | 430 | } |
431 | 431 | ||
432 | if (dma_bytes != num_bytes) { | 432 | if (dma_bytes != num_bytes) { |
diff --git a/drivers/char/ip2/i2cmd.c b/drivers/char/ip2/i2cmd.c index cb8f4198e9a3..e7af647800b6 100644 --- a/drivers/char/ip2/i2cmd.c +++ b/drivers/char/ip2/i2cmd.c | |||
@@ -139,7 +139,6 @@ static UCHAR ct79[] = { 2, BYP, 0x4F,0 }; // XMIT_NOW | |||
139 | //static UCHAR ct86[]={ 2, BTH, 0x56,0 }; // RCV_ENABLE | 139 | //static UCHAR ct86[]={ 2, BTH, 0x56,0 }; // RCV_ENABLE |
140 | static UCHAR ct87[] = { 1, BYP, 0x57 }; // HW_TEST | 140 | static UCHAR ct87[] = { 1, BYP, 0x57 }; // HW_TEST |
141 | //static UCHAR ct88[]={ 3, BTH, 0x58,0,0 }; // RCV_THRESHOLD | 141 | //static UCHAR ct88[]={ 3, BTH, 0x58,0,0 }; // RCV_THRESHOLD |
142 | static UCHAR ct89[]={ 1, BYP, 0x59 }; // DSS_NOW | ||
143 | //static UCHAR ct90[]={ 3, BYP, 0x5A,0,0 }; // Set SILO | 142 | //static UCHAR ct90[]={ 3, BYP, 0x5A,0,0 }; // Set SILO |
144 | //static UCHAR ct91[]={ 2, BYP, 0x5B,0 }; // timed break | 143 | //static UCHAR ct91[]={ 2, BYP, 0x5B,0 }; // timed break |
145 | 144 | ||
diff --git a/drivers/char/ip2main.c b/drivers/char/ip2main.c index 56e93a5a1e24..48fcfba37bfa 100644 --- a/drivers/char/ip2main.c +++ b/drivers/char/ip2main.c | |||
@@ -2906,65 +2906,16 @@ ip2_ipl_ioctl ( struct inode *pInode, struct file *pFile, UINT cmd, ULONG arg ) | |||
2906 | rc = -EINVAL; | 2906 | rc = -EINVAL; |
2907 | break; | 2907 | break; |
2908 | case 3: // Trace device | 2908 | case 3: // Trace device |
2909 | if ( cmd == 1 ) { | 2909 | /* |
2910 | rc = put_user(iiSendPendingMail, pIndex++ ); | 2910 | * akpm: This used to write a whole bunch of function addresses |
2911 | rc = put_user(i2InitChannels, pIndex++ ); | 2911 | * to userspace, which generated lots of put_user() warnings. |
2912 | rc = put_user(i2QueueNeeds, pIndex++ ); | 2912 | * I killed it all. Just return "success" and don't do |
2913 | rc = put_user(i2QueueCommands, pIndex++ ); | 2913 | * anything. |
2914 | rc = put_user(i2GetStatus, pIndex++ ); | 2914 | */ |
2915 | rc = put_user(i2Input, pIndex++ ); | 2915 | if (cmd == 1) |
2916 | rc = put_user(i2InputFlush, pIndex++ ); | 2916 | rc = 0; |
2917 | rc = put_user(i2Output, pIndex++ ); | 2917 | else |
2918 | rc = put_user(i2FlushOutput, pIndex++ ); | ||
2919 | rc = put_user(i2DrainWakeup, pIndex++ ); | ||
2920 | rc = put_user(i2DrainOutput, pIndex++ ); | ||
2921 | rc = put_user(i2OutputFree, pIndex++ ); | ||
2922 | rc = put_user(i2StripFifo, pIndex++ ); | ||
2923 | rc = put_user(i2StuffFifoBypass, pIndex++ ); | ||
2924 | rc = put_user(i2StuffFifoFlow, pIndex++ ); | ||
2925 | rc = put_user(i2StuffFifoInline, pIndex++ ); | ||
2926 | rc = put_user(i2ServiceBoard, pIndex++ ); | ||
2927 | rc = put_user(serviceOutgoingFifo, pIndex++ ); | ||
2928 | // rc = put_user(ip2_init, pIndex++ ); | ||
2929 | rc = put_user(ip2_init_board, pIndex++ ); | ||
2930 | rc = put_user(find_eisa_board, pIndex++ ); | ||
2931 | rc = put_user(set_irq, pIndex++ ); | ||
2932 | rc = put_user(ip2_interrupt, pIndex++ ); | ||
2933 | rc = put_user(ip2_poll, pIndex++ ); | ||
2934 | rc = put_user(service_all_boards, pIndex++ ); | ||
2935 | rc = put_user(do_input, pIndex++ ); | ||
2936 | rc = put_user(do_status, pIndex++ ); | ||
2937 | #ifndef IP2DEBUG_OPEN | ||
2938 | rc = put_user(0, pIndex++ ); | ||
2939 | #else | ||
2940 | rc = put_user(open_sanity_check, pIndex++ ); | ||
2941 | #endif | ||
2942 | rc = put_user(ip2_open, pIndex++ ); | ||
2943 | rc = put_user(ip2_close, pIndex++ ); | ||
2944 | rc = put_user(ip2_hangup, pIndex++ ); | ||
2945 | rc = put_user(ip2_write, pIndex++ ); | ||
2946 | rc = put_user(ip2_putchar, pIndex++ ); | ||
2947 | rc = put_user(ip2_flush_chars, pIndex++ ); | ||
2948 | rc = put_user(ip2_write_room, pIndex++ ); | ||
2949 | rc = put_user(ip2_chars_in_buf, pIndex++ ); | ||
2950 | rc = put_user(ip2_flush_buffer, pIndex++ ); | ||
2951 | |||
2952 | //rc = put_user(ip2_wait_until_sent, pIndex++ ); | ||
2953 | rc = put_user(0, pIndex++ ); | ||
2954 | |||
2955 | rc = put_user(ip2_throttle, pIndex++ ); | ||
2956 | rc = put_user(ip2_unthrottle, pIndex++ ); | ||
2957 | rc = put_user(ip2_ioctl, pIndex++ ); | ||
2958 | rc = put_user(0, pIndex++ ); | ||
2959 | rc = put_user(get_serial_info, pIndex++ ); | ||
2960 | rc = put_user(set_serial_info, pIndex++ ); | ||
2961 | rc = put_user(ip2_set_termios, pIndex++ ); | ||
2962 | rc = put_user(ip2_set_line_discipline, pIndex++ ); | ||
2963 | rc = put_user(set_params, pIndex++ ); | ||
2964 | } else { | ||
2965 | rc = -EINVAL; | 2918 | rc = -EINVAL; |
2966 | } | ||
2967 | |||
2968 | break; | 2919 | break; |
2969 | 2920 | ||
2970 | default: | 2921 | default: |
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 0097f06fa67b..d745004281d0 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c | |||
@@ -481,7 +481,7 @@ int ipmi_validate_addr(struct ipmi_addr *addr, int len) | |||
481 | } | 481 | } |
482 | 482 | ||
483 | if ((addr->channel == IPMI_BMC_CHANNEL) | 483 | if ((addr->channel == IPMI_BMC_CHANNEL) |
484 | || (addr->channel >= IPMI_NUM_CHANNELS) | 484 | || (addr->channel >= IPMI_MAX_CHANNELS) |
485 | || (addr->channel < 0)) | 485 | || (addr->channel < 0)) |
486 | return -EINVAL; | 486 | return -EINVAL; |
487 | 487 | ||
@@ -1321,7 +1321,7 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1321 | unsigned char ipmb_seq; | 1321 | unsigned char ipmb_seq; |
1322 | long seqid; | 1322 | long seqid; |
1323 | 1323 | ||
1324 | if (addr->channel >= IPMI_NUM_CHANNELS) { | 1324 | if (addr->channel >= IPMI_MAX_CHANNELS) { |
1325 | spin_lock_irqsave(&intf->counter_lock, flags); | 1325 | spin_lock_irqsave(&intf->counter_lock, flags); |
1326 | intf->sent_invalid_commands++; | 1326 | intf->sent_invalid_commands++; |
1327 | spin_unlock_irqrestore(&intf->counter_lock, flags); | 1327 | spin_unlock_irqrestore(&intf->counter_lock, flags); |
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 6ed213bd702c..e59b638766ef 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -1270,36 +1270,36 @@ static int try_init_port(int intf_num, struct smi_info **new_info) | |||
1270 | return 0; | 1270 | return 0; |
1271 | } | 1271 | } |
1272 | 1272 | ||
1273 | static unsigned char mem_inb(struct si_sm_io *io, unsigned int offset) | 1273 | static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset) |
1274 | { | 1274 | { |
1275 | return readb((io->addr)+(offset * io->regspacing)); | 1275 | return readb((io->addr)+(offset * io->regspacing)); |
1276 | } | 1276 | } |
1277 | 1277 | ||
1278 | static void mem_outb(struct si_sm_io *io, unsigned int offset, | 1278 | static void intf_mem_outb(struct si_sm_io *io, unsigned int offset, |
1279 | unsigned char b) | 1279 | unsigned char b) |
1280 | { | 1280 | { |
1281 | writeb(b, (io->addr)+(offset * io->regspacing)); | 1281 | writeb(b, (io->addr)+(offset * io->regspacing)); |
1282 | } | 1282 | } |
1283 | 1283 | ||
1284 | static unsigned char mem_inw(struct si_sm_io *io, unsigned int offset) | 1284 | static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset) |
1285 | { | 1285 | { |
1286 | return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift) | 1286 | return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift) |
1287 | && 0xff; | 1287 | && 0xff; |
1288 | } | 1288 | } |
1289 | 1289 | ||
1290 | static void mem_outw(struct si_sm_io *io, unsigned int offset, | 1290 | static void intf_mem_outw(struct si_sm_io *io, unsigned int offset, |
1291 | unsigned char b) | 1291 | unsigned char b) |
1292 | { | 1292 | { |
1293 | writeb(b << io->regshift, (io->addr)+(offset * io->regspacing)); | 1293 | writeb(b << io->regshift, (io->addr)+(offset * io->regspacing)); |
1294 | } | 1294 | } |
1295 | 1295 | ||
1296 | static unsigned char mem_inl(struct si_sm_io *io, unsigned int offset) | 1296 | static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset) |
1297 | { | 1297 | { |
1298 | return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift) | 1298 | return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift) |
1299 | && 0xff; | 1299 | && 0xff; |
1300 | } | 1300 | } |
1301 | 1301 | ||
1302 | static void mem_outl(struct si_sm_io *io, unsigned int offset, | 1302 | static void intf_mem_outl(struct si_sm_io *io, unsigned int offset, |
1303 | unsigned char b) | 1303 | unsigned char b) |
1304 | { | 1304 | { |
1305 | writel(b << io->regshift, (io->addr)+(offset * io->regspacing)); | 1305 | writel(b << io->regshift, (io->addr)+(offset * io->regspacing)); |
@@ -1349,16 +1349,16 @@ static int mem_setup(struct smi_info *info) | |||
1349 | upon the register size. */ | 1349 | upon the register size. */ |
1350 | switch (info->io.regsize) { | 1350 | switch (info->io.regsize) { |
1351 | case 1: | 1351 | case 1: |
1352 | info->io.inputb = mem_inb; | 1352 | info->io.inputb = intf_mem_inb; |
1353 | info->io.outputb = mem_outb; | 1353 | info->io.outputb = intf_mem_outb; |
1354 | break; | 1354 | break; |
1355 | case 2: | 1355 | case 2: |
1356 | info->io.inputb = mem_inw; | 1356 | info->io.inputb = intf_mem_inw; |
1357 | info->io.outputb = mem_outw; | 1357 | info->io.outputb = intf_mem_outw; |
1358 | break; | 1358 | break; |
1359 | case 4: | 1359 | case 4: |
1360 | info->io.inputb = mem_inl; | 1360 | info->io.inputb = intf_mem_inl; |
1361 | info->io.outputb = mem_outl; | 1361 | info->io.outputb = intf_mem_outl; |
1362 | break; | 1362 | break; |
1363 | #ifdef readq | 1363 | #ifdef readq |
1364 | case 8: | 1364 | case 8: |
diff --git a/drivers/char/rio/cirrus.h b/drivers/char/rio/cirrus.h index 217ff09f2fa1..89bd94eb45be 100644 --- a/drivers/char/rio/cirrus.h +++ b/drivers/char/rio/cirrus.h | |||
@@ -40,148 +40,6 @@ | |||
40 | #endif | 40 | #endif |
41 | #define _cirrus_h 1 | 41 | #define _cirrus_h 1 |
42 | 42 | ||
43 | #ifdef RTA | ||
44 | #define TO_UART RX | ||
45 | #define TO_DRIVER TX | ||
46 | #endif | ||
47 | |||
48 | #ifdef HOST | ||
49 | #define TO_UART TX | ||
50 | #define TO_DRIVER RX | ||
51 | #endif | ||
52 | #ifdef RTA | ||
53 | /* Miscellaneous defines for CIRRUS addresses and related logic for | ||
54 | interrupts etc. | ||
55 | */ | ||
56 | #define MAP(a) ((short *)(cirrus_base + (a))) | ||
57 | #define outp(a,b) (*MAP (a) =(b)) | ||
58 | #define inp(a) ((*MAP (a)) & 0xff) | ||
59 | #define CIRRUS_FIRST (short*)0x7300 | ||
60 | #define CIRRUS_SECOND (short*)0x7200 | ||
61 | #define CIRRUS_THIRD (short*)0x7100 | ||
62 | #define CIRRUS_FOURTH (short*)0x7000 | ||
63 | #define PORTS_ON_CIRRUS 4 | ||
64 | #define CIRRUS_FIFO_SIZE 12 | ||
65 | #define SPACE 0x20 | ||
66 | #define TAB 0x09 | ||
67 | #define LINE_FEED 0x0a | ||
68 | #define CARRIAGE_RETURN 0x0d | ||
69 | #define BACKSPACE 0x08 | ||
70 | #define SPACES_IN_TABS 8 | ||
71 | #define SEND_ESCAPE 0x00 | ||
72 | #define START_BREAK 0x81 | ||
73 | #define TIMER_TICK 0x82 | ||
74 | #define STOP_BREAK 0x83 | ||
75 | #define BASE(a) ((a) < 4 ? (short*)CIRRUS_FIRST : ((a) < 8 ? (short *)CIRRUS_SECOND : ((a) < 12 ? (short*)CIRRUS_THIRD : (short *)CIRRUS_FOURTH))) | ||
76 | #define txack1 ((short *)0x7104) | ||
77 | #define rxack1 ((short *)0x7102) | ||
78 | #define mdack1 ((short *)0x7106) | ||
79 | #define txack2 ((short *)0x7006) | ||
80 | #define rxack2 ((short *)0x7004) | ||
81 | #define mdack2 ((short *)0x7100) | ||
82 | #define int_latch ((short *) 0x7800) | ||
83 | #define int_status ((short *) 0x7c00) | ||
84 | #define tx1_pending 0x20 | ||
85 | #define rx1_pending 0x10 | ||
86 | #define md1_pending 0x40 | ||
87 | #define tx2_pending 0x02 | ||
88 | #define rx2_pending 0x01 | ||
89 | #define md2_pending 0x40 | ||
90 | #define module1_bits 0x07 | ||
91 | #define module1_modern 0x08 | ||
92 | #define module2_bits 0x70 | ||
93 | #define module2_modern 0x80 | ||
94 | #define module_blank 0xf | ||
95 | #define rs232_d25 0x0 | ||
96 | #define rs232_rj45 0x1 | ||
97 | #define rs422_d25 0x3 | ||
98 | #define parallel 0x5 | ||
99 | |||
100 | #define CLK0 0x00 | ||
101 | #define CLK1 0x01 | ||
102 | #define CLK2 0x02 | ||
103 | #define CLK3 0x03 | ||
104 | #define CLK4 0x04 | ||
105 | |||
106 | #define CIRRUS_REVC 0x42 | ||
107 | #define CIRRUS_REVE 0x44 | ||
108 | |||
109 | #define TURNON 1 | ||
110 | #define TURNOFF 0 | ||
111 | |||
112 | /* The list of CIRRUS registers. | ||
113 | NB. These registers are relative values on 8 bit boundaries whereas | ||
114 | on the RTA's the CIRRUS registers are on word boundaries. Use pointer | ||
115 | arithmetic (short *) to obtain the real addresses required */ | ||
116 | #define ccr 0x05 /* Channel Command Register */ | ||
117 | #define ier 0x06 /* Interrupt Enable Register */ | ||
118 | #define cor1 0x08 /* Channel Option Register 1 */ | ||
119 | #define cor2 0x09 /* Channel Option Register 2 */ | ||
120 | #define cor3 0x0a /* Channel Option Register 3 */ | ||
121 | #define cor4 0x1e /* Channel Option Register 4 */ | ||
122 | #define cor5 0x1f /* Channel Option Register 5 */ | ||
123 | |||
124 | #define ccsr 0x0b /* Channel Control Status Register */ | ||
125 | #define rdcr 0x0e /* Receive Data Count Register */ | ||
126 | #define tdcr 0x12 /* Transmit Data Count Register */ | ||
127 | #define mcor1 0x15 /* Modem Change Option Register 1 */ | ||
128 | #define mcor2 0x16 /* Modem Change Option Regsiter 2 */ | ||
129 | |||
130 | #define livr 0x18 /* Local Interrupt Vector Register */ | ||
131 | #define schr1 0x1a /* Special Character Register 1 */ | ||
132 | #define schr2 0x1b /* Special Character Register 2 */ | ||
133 | #define schr3 0x1c /* Special Character Register 3 */ | ||
134 | #define schr4 0x1d /* Special Character Register 4 */ | ||
135 | |||
136 | #define rtr 0x20 /* Receive Timer Register */ | ||
137 | #define rtpr 0x21 /* Receive Timeout Period Register */ | ||
138 | #define lnc 0x24 /* Lnext character */ | ||
139 | |||
140 | #define rivr 0x43 /* Receive Interrupt Vector Register */ | ||
141 | #define tivr 0x42 /* Transmit Interrupt Vector Register */ | ||
142 | #define mivr 0x41 /* Modem Interrupt Vector Register */ | ||
143 | #define gfrcr 0x40 /* Global Firmware Revision code Reg */ | ||
144 | #define ricr 0x44 /* Receive Interrupting Channel Reg */ | ||
145 | #define ticr 0x45 /* Transmit Interrupting Channel Reg */ | ||
146 | #define micr 0x46 /* Modem Interrupting Channel Register */ | ||
147 | |||
148 | #define gcr 0x4b /* Global configuration register */ | ||
149 | #define misr 0x4c /* Modem interrupt status register */ | ||
150 | |||
151 | #define rbusr 0x59 | ||
152 | #define tbusr 0x5a | ||
153 | #define mbusr 0x5b | ||
154 | |||
155 | #define eoir 0x60 /* End Of Interrupt Register */ | ||
156 | #define rdsr 0x62 /* Receive Data / Status Register */ | ||
157 | #define tdr 0x63 /* Transmit Data Register */ | ||
158 | #define svrr 0x67 /* Service Request Register */ | ||
159 | |||
160 | #define car 0x68 /* Channel Access Register */ | ||
161 | #define mir 0x69 /* Modem Interrupt Register */ | ||
162 | #define tir 0x6a /* Transmit Interrupt Register */ | ||
163 | #define rir 0x6b /* Receive Interrupt Register */ | ||
164 | #define msvr1 0x6c /* Modem Signal Value Register 1 */ | ||
165 | #define msvr2 0x6d /* Modem Signal Value Register 2 */ | ||
166 | #define psvr 0x6f /* Printer Signal Value Register */ | ||
167 | |||
168 | #define tbpr 0x72 /* Transmit Baud Rate Period Register */ | ||
169 | #define tcor 0x76 /* Transmit Clock Option Register */ | ||
170 | |||
171 | #define rbpr 0x78 /* Receive Baud Rate Period Register */ | ||
172 | #define rber 0x7a /* Receive Baud Rate Extension Register */ | ||
173 | #define rcor 0x7c /* Receive Clock Option Register */ | ||
174 | #define ppr 0x7e /* Prescalar Period Register */ | ||
175 | |||
176 | /* Misc registers used for forcing the 1400 out of its reset woes */ | ||
177 | #define airl 0x6d | ||
178 | #define airm 0x6e | ||
179 | #define airh 0x6f | ||
180 | #define btcr 0x66 | ||
181 | #define mtcr 0x6c | ||
182 | #define tber 0x74 | ||
183 | |||
184 | #endif /* #ifdef RTA */ | ||
185 | 43 | ||
186 | 44 | ||
187 | /* Bit fields for particular registers */ | 45 | /* Bit fields for particular registers */ |
diff --git a/drivers/char/rio/defaults.h b/drivers/char/rio/defaults.h index 5b600c32ac02..d55c2f6a9877 100644 --- a/drivers/char/rio/defaults.h +++ b/drivers/char/rio/defaults.h | |||
@@ -45,13 +45,6 @@ static char *_rio_defaults_h_sccs = "@(#)defaults.h 1.1"; | |||
45 | #define MILLISECOND (int) (1000/64) /* 15.625 low ticks */ | 45 | #define MILLISECOND (int) (1000/64) /* 15.625 low ticks */ |
46 | #define SECOND (int) 15625 /* Low priority ticks */ | 46 | #define SECOND (int) 15625 /* Low priority ticks */ |
47 | 47 | ||
48 | #ifdef RTA | ||
49 | #define RX_LIMIT (ushort) 3 | ||
50 | #endif | ||
51 | #ifdef HOST | ||
52 | #define RX_LIMIT (ushort) 1 | ||
53 | #endif | ||
54 | |||
55 | #define LINK_TIMEOUT (int) (POLL_PERIOD / 2) | 48 | #define LINK_TIMEOUT (int) (POLL_PERIOD / 2) |
56 | 49 | ||
57 | 50 | ||
diff --git a/drivers/char/rio/link.h b/drivers/char/rio/link.h index bfba5b0c033e..48d68ca7f825 100644 --- a/drivers/char/rio/link.h +++ b/drivers/char/rio/link.h | |||
@@ -102,30 +102,14 @@ | |||
102 | /* | 102 | /* |
103 | ** LED stuff | 103 | ** LED stuff |
104 | */ | 104 | */ |
105 | #if defined(RTA) | ||
106 | #define LED_OFF ((ushort) 0) /* LED off */ | ||
107 | #define LED_RED ((ushort) 1) /* LED Red */ | ||
108 | #define LED_GREEN ((ushort) 2) /* LED Green */ | ||
109 | #define LED_ORANGE ((ushort) 4) /* LED Orange */ | ||
110 | #define LED_1TO8_OPEN ((ushort) 1) /* Port 1->8 LED on */ | ||
111 | #define LED_9TO16_OPEN ((ushort) 2) /* Port 9->16 LED on */ | ||
112 | #define LED_SET_COLOUR(colour) (link->led = (colour)) | ||
113 | #define LED_OR_COLOUR(colour) (link->led |= (colour)) | ||
114 | #define LED_TIMEOUT(time) (link->led_timeout = RioTimePlus(RioTime(),(time))) | ||
115 | #else | ||
116 | #define LED_SET_COLOUR(colour) | 105 | #define LED_SET_COLOUR(colour) |
117 | #define LED_OR_COLOUR(colour) | 106 | #define LED_OR_COLOUR(colour) |
118 | #define LED_TIMEOUT(time) | 107 | #define LED_TIMEOUT(time) |
119 | #endif /* RTA */ | ||
120 | 108 | ||
121 | struct LPB { | 109 | struct LPB { |
122 | WORD link_number; /* Link Number */ | 110 | WORD link_number; /* Link Number */ |
123 | Channel_ptr in_ch; /* Link In Channel */ | 111 | Channel_ptr in_ch; /* Link In Channel */ |
124 | Channel_ptr out_ch; /* Link Out Channel */ | 112 | Channel_ptr out_ch; /* Link Out Channel */ |
125 | #ifdef RTA | ||
126 | uchar stat_led; /* Port open leds */ | ||
127 | uchar led; /* True, light led! */ | ||
128 | #endif | ||
129 | BYTE attached_serial[4]; /* Attached serial number */ | 113 | BYTE attached_serial[4]; /* Attached serial number */ |
130 | BYTE attached_host_serial[4]; | 114 | BYTE attached_host_serial[4]; |
131 | /* Serial number of Host who | 115 | /* Serial number of Host who |
@@ -144,30 +128,12 @@ struct LPB { | |||
144 | WORD WaitNoBoot; /* Secs to hold off booting */ | 128 | WORD WaitNoBoot; /* Secs to hold off booting */ |
145 | PKT_ptr add_packet_list; /* Add packets to here */ | 129 | PKT_ptr add_packet_list; /* Add packets to here */ |
146 | PKT_ptr remove_packet_list; /* Send packets from here */ | 130 | PKT_ptr remove_packet_list; /* Send packets from here */ |
147 | #ifdef RTA | ||
148 | #ifdef DCIRRUS | ||
149 | #define QBUFS_PER_REDIRECT (4 / PKTS_PER_BUFFER + 1) | ||
150 | #else | ||
151 | #define QBUFS_PER_REDIRECT (8 / PKTS_PER_BUFFER + 1) | ||
152 | #endif | ||
153 | PKT_ptr_ptr rd_add; /* Add a new Packet here */ | ||
154 | Q_BUF_ptr rd_add_qb; /* Pointer to the add Q buf */ | ||
155 | PKT_ptr_ptr rd_add_st_qbb; /* Pointer to start of the Q's buf */ | ||
156 | PKT_ptr_ptr rd_add_end_qbb; /* Pointer to the end of the Q's buf */ | ||
157 | PKT_ptr_ptr rd_remove; /* Remove a Packet here */ | ||
158 | Q_BUF_ptr rd_remove_qb; /* Pointer to the remove Q buf */ | ||
159 | PKT_ptr_ptr rd_remove_st_qbb; /* Pointer to the start of the Q buf */ | ||
160 | PKT_ptr_ptr rd_remove_end_qbb; /* Pointer to the end of the Q buf */ | ||
161 | ushort pkts_in_q; /* Packets in queue */ | ||
162 | #endif | ||
163 | 131 | ||
164 | Channel_ptr lrt_fail_chan; /* Lrt's failure channel */ | 132 | Channel_ptr lrt_fail_chan; /* Lrt's failure channel */ |
165 | Channel_ptr ltt_fail_chan; /* Ltt's failure channel */ | 133 | Channel_ptr ltt_fail_chan; /* Ltt's failure channel */ |
166 | 134 | ||
167 | #if defined (HOST) || defined (INKERNEL) | ||
168 | /* RUP structure for HOST to driver communications */ | 135 | /* RUP structure for HOST to driver communications */ |
169 | struct RUP rup; | 136 | struct RUP rup; |
170 | #endif | ||
171 | struct RUP link_rup; /* RUP for the link (POLL, | 137 | struct RUP link_rup; /* RUP for the link (POLL, |
172 | topology etc.) */ | 138 | topology etc.) */ |
173 | WORD attached_link; /* Number of attached link */ | 139 | WORD attached_link; /* Number of attached link */ |
diff --git a/drivers/char/rio/list.h b/drivers/char/rio/list.h index 36aad4c9cb3a..79b853140ae5 100644 --- a/drivers/char/rio/list.h +++ b/drivers/char/rio/list.h | |||
@@ -44,8 +44,6 @@ static char *_rio_list_h_sccs = "@(#)list.h 1.9"; | |||
44 | 44 | ||
45 | #define PKT_IN_USE 0x1 | 45 | #define PKT_IN_USE 0x1 |
46 | 46 | ||
47 | #ifdef INKERNEL | ||
48 | |||
49 | #define ZERO_PTR (ushort) 0x8000 | 47 | #define ZERO_PTR (ushort) 0x8000 |
50 | #define CaD PortP->Caddr | 48 | #define CaD PortP->Caddr |
51 | 49 | ||
@@ -54,143 +52,5 @@ static char *_rio_list_h_sccs = "@(#)list.h 1.9"; | |||
54 | ** to by the TxAdd pointer has PKT_IN_USE clear in its address. | 52 | ** to by the TxAdd pointer has PKT_IN_USE clear in its address. |
55 | */ | 53 | */ |
56 | 54 | ||
57 | #ifndef linux | ||
58 | #if defined( MIPS ) && !defined( MIPSEISA ) | ||
59 | /* May the shoes of the Devil dance on your grave for creating this */ | ||
60 | #define can_add_transmit(PacketP,PortP) \ | ||
61 | (!((uint)(PacketP = (struct PKT *)RIO_PTR(CaD,RINDW(PortP->TxAdd))) \ | ||
62 | & (PKT_IN_USE<<2))) | ||
63 | |||
64 | #elif defined(MIPSEISA) || defined(nx6000) || \ | ||
65 | defined(drs6000) || defined(UWsparc) | ||
66 | |||
67 | #define can_add_transmit(PacketP,PortP) \ | ||
68 | (!((uint)(PacketP = (struct PKT *)RIO_PTR(CaD,RINDW(PortP->TxAdd))) \ | ||
69 | & PKT_IN_USE)) | ||
70 | |||
71 | #else | ||
72 | #define can_add_transmit(PacketP,PortP) \ | ||
73 | (!((uint)(PacketP = (struct PKT *)RIO_PTR(CaD,*PortP->TxAdd)) \ | ||
74 | & PKT_IN_USE)) | ||
75 | #endif | ||
76 | |||
77 | /* | ||
78 | ** To add a packet to the queue, you set the PKT_IN_USE bit in the address, | ||
79 | ** and then move the TxAdd pointer along one position to point to the next | ||
80 | ** packet pointer. You must wrap the pointer from the end back to the start. | ||
81 | */ | ||
82 | #if defined(MIPS) || defined(nx6000) || defined(drs6000) || defined(UWsparc) | ||
83 | # define add_transmit(PortP) \ | ||
84 | WINDW(PortP->TxAdd,RINDW(PortP->TxAdd) | PKT_IN_USE);\ | ||
85 | if (PortP->TxAdd == PortP->TxEnd)\ | ||
86 | PortP->TxAdd = PortP->TxStart;\ | ||
87 | else\ | ||
88 | PortP->TxAdd++;\ | ||
89 | WWORD(PortP->PhbP->tx_add , RIO_OFF(CaD,PortP->TxAdd)); | ||
90 | #elif defined(AIX) | ||
91 | # define add_transmit(PortP) \ | ||
92 | {\ | ||
93 | register ushort *TxAddP = (ushort *)RIO_PTR(Cad,PortP->TxAddO);\ | ||
94 | WINDW( TxAddP, RINDW( TxAddP ) | PKT_IN_USE );\ | ||
95 | if (PortP->TxAddO == PortP->TxEndO )\ | ||
96 | PortP->TxAddO = PortP->TxStartO;\ | ||
97 | else\ | ||
98 | PortP->TxAddO += sizeof(ushort);\ | ||
99 | WWORD(((PHB *)RIO_PTR(Cad,PortP->PhbO))->tx_add , PortP->TxAddO );\ | ||
100 | } | ||
101 | #else | ||
102 | # define add_transmit(PortP) \ | ||
103 | *PortP->TxAdd |= PKT_IN_USE;\ | ||
104 | if (PortP->TxAdd == PortP->TxEnd)\ | ||
105 | PortP->TxAdd = PortP->TxStart;\ | ||
106 | else\ | ||
107 | PortP->TxAdd++;\ | ||
108 | PortP->PhbP->tx_add = RIO_OFF(CaD,PortP->TxAdd); | ||
109 | #endif | ||
110 | |||
111 | /* | ||
112 | ** can_remove_receive( PacketP, PortP ) returns non-zero if PKT_IN_USE is set | ||
113 | ** for the next packet on the queue. It will also set PacketP to point to the | ||
114 | ** relevant packet, [having cleared the PKT_IN_USE bit]. If PKT_IN_USE is clear, | ||
115 | ** then can_remove_receive() returns 0. | ||
116 | */ | ||
117 | #if defined(MIPS) || defined(nx6000) || defined(drs6000) || defined(UWsparc) | ||
118 | # define can_remove_receive(PacketP,PortP) \ | ||
119 | ((RINDW(PortP->RxRemove) & PKT_IN_USE) ? \ | ||
120 | (PacketP=(struct PKT *)RIO_PTR(CaD,(RINDW(PortP->RxRemove) & ~PKT_IN_USE))):0) | ||
121 | #elif defined(AIX) | ||
122 | # define can_remove_receive(PacketP,PortP) \ | ||
123 | ((RINDW((ushort *)RIO_PTR(Cad,PortP->RxRemoveO)) & PKT_IN_USE) ? \ | ||
124 | (PacketP=(struct PKT *)RIO_PTR(Cad,RINDW((ushort *)RIO_PTR(Cad,PortP->RxRemoveO)) & ~PKT_IN_USE)):0) | ||
125 | #else | ||
126 | # define can_remove_receive(PacketP,PortP) \ | ||
127 | ((*PortP->RxRemove & PKT_IN_USE) ? \ | ||
128 | (PacketP=(struct PKT *)RIO_PTR(CaD,(*PortP->RxRemove & ~PKT_IN_USE))):0) | ||
129 | #endif | ||
130 | |||
131 | |||
132 | /* | ||
133 | ** Will God see it within his heart to forgive us for this thing that | ||
134 | ** we have created? To remove a packet from the receive queue you clear | ||
135 | ** its PKT_IN_USE bit, and then bump the pointers. Once the pointers | ||
136 | ** get to the end, they must be wrapped back to the start. | ||
137 | */ | ||
138 | #if defined(MIPS) || defined(nx6000) || defined(drs6000) || defined(UWsparc) | ||
139 | # define remove_receive(PortP) \ | ||
140 | WINDW(PortP->RxRemove, (RINDW(PortP->RxRemove) & ~PKT_IN_USE));\ | ||
141 | if (PortP->RxRemove == PortP->RxEnd)\ | ||
142 | PortP->RxRemove = PortP->RxStart;\ | ||
143 | else\ | ||
144 | PortP->RxRemove++;\ | ||
145 | WWORD(PortP->PhbP->rx_remove , RIO_OFF(CaD,PortP->RxRemove)); | ||
146 | #elif defined(AIX) | ||
147 | # define remove_receive(PortP) \ | ||
148 | {\ | ||
149 | register ushort *RxRemoveP = (ushort *)RIO_PTR(Cad,PortP->RxRemoveO);\ | ||
150 | WINDW( RxRemoveP, RINDW( RxRemoveP ) & ~PKT_IN_USE );\ | ||
151 | if (PortP->RxRemoveO == PortP->RxEndO)\ | ||
152 | PortP->RxRemoveO = PortP->RxStartO;\ | ||
153 | else\ | ||
154 | PortP->RxRemoveO += sizeof(ushort);\ | ||
155 | WWORD(((PHB *)RIO_PTR(Cad,PortP->PhbO))->rx_remove, PortP->RxRemoveO );\ | ||
156 | } | ||
157 | #else | ||
158 | # define remove_receive(PortP) \ | ||
159 | *PortP->RxRemove &= ~PKT_IN_USE;\ | ||
160 | if (PortP->RxRemove == PortP->RxEnd)\ | ||
161 | PortP->RxRemove = PortP->RxStart;\ | ||
162 | else\ | ||
163 | PortP->RxRemove++;\ | ||
164 | PortP->PhbP->rx_remove = RIO_OFF(CaD,PortP->RxRemove); | ||
165 | #endif | ||
166 | #endif | ||
167 | |||
168 | |||
169 | #else /* !IN_KERNEL */ | ||
170 | |||
171 | #define ZERO_PTR NULL | ||
172 | |||
173 | |||
174 | #ifdef HOST | ||
175 | /* #define can_remove_transmit(pkt,phb) ((((char*)pkt = (*(char**)(phb->tx_remove))-1) || 1)) && (*phb->u3.s2.tx_remove_ptr & PKT_IN_USE)) */ | ||
176 | #define remove_transmit(phb) *phb->u3.s2.tx_remove_ptr &= ~(ushort)PKT_IN_USE;\ | ||
177 | if (phb->tx_remove == phb->tx_end)\ | ||
178 | phb->tx_remove = phb->tx_start;\ | ||
179 | else\ | ||
180 | phb->tx_remove++; | ||
181 | #define can_add_receive(phb) !(*phb->u4.s2.rx_add_ptr & PKT_IN_USE) | ||
182 | #define add_receive(pkt,phb) *phb->rx_add = pkt;\ | ||
183 | *phb->u4.s2.rx_add_ptr |= PKT_IN_USE;\ | ||
184 | if (phb->rx_add == phb->rx_end)\ | ||
185 | phb->rx_add = phb->rx_start;\ | ||
186 | else\ | ||
187 | phb->rx_add++; | ||
188 | #endif | ||
189 | #endif | ||
190 | |||
191 | #ifdef RTA | ||
192 | #define splx(oldspl) if ((oldspl) == 0) spl0() | ||
193 | #endif | ||
194 | |||
195 | #endif /* ifndef _list.h */ | 55 | #endif /* ifndef _list.h */ |
196 | /*********** end of file ***********/ | 56 | /*********** end of file ***********/ |
diff --git a/drivers/char/rio/parmmap.h b/drivers/char/rio/parmmap.h index fe4e00567065..e24acc1d1844 100644 --- a/drivers/char/rio/parmmap.h +++ b/drivers/char/rio/parmmap.h | |||
@@ -78,14 +78,9 @@ struct PARM_MAP { | |||
78 | WORD idle_count; /* Idle time counter */ | 78 | WORD idle_count; /* Idle time counter */ |
79 | WORD busy_count; /* Busy counter */ | 79 | WORD busy_count; /* Busy counter */ |
80 | WORD idle_control; /* Control Idle Process */ | 80 | WORD idle_control; /* Control Idle Process */ |
81 | #if defined(HOST) || defined(INKERNEL) | ||
82 | WORD tx_intr; /* TX interrupt pending */ | 81 | WORD tx_intr; /* TX interrupt pending */ |
83 | WORD rx_intr; /* RX interrupt pending */ | 82 | WORD rx_intr; /* RX interrupt pending */ |
84 | WORD rup_intr; /* RUP interrupt pending */ | 83 | WORD rup_intr; /* RUP interrupt pending */ |
85 | #endif | ||
86 | #if defined(RTA) | ||
87 | WORD dying_count; /* Count of processes dead */ | ||
88 | #endif | ||
89 | }; | 84 | }; |
90 | 85 | ||
91 | #endif | 86 | #endif |
diff --git a/drivers/char/rio/phb.h b/drivers/char/rio/phb.h index 3baebf8513af..2663ca0306e2 100644 --- a/drivers/char/rio/phb.h +++ b/drivers/char/rio/phb.h | |||
@@ -44,17 +44,6 @@ | |||
44 | #endif | 44 | #endif |
45 | 45 | ||
46 | 46 | ||
47 | /************************************************* | ||
48 | * Set the LIMIT values. | ||
49 | ************************************************/ | ||
50 | #ifdef RTA | ||
51 | #define RX_LIMIT (ushort) 3 | ||
52 | #endif | ||
53 | #ifdef HOST | ||
54 | #define RX_LIMIT (ushort) 1 | ||
55 | #endif | ||
56 | |||
57 | |||
58 | /************************************************* | 47 | /************************************************* |
59 | * Handshake asserted. Deasserted by the LTT(s) | 48 | * Handshake asserted. Deasserted by the LTT(s) |
60 | ************************************************/ | 49 | ************************************************/ |
@@ -69,11 +58,7 @@ | |||
69 | /************************************************* | 58 | /************************************************* |
70 | * Maximum number of PHB's | 59 | * Maximum number of PHB's |
71 | ************************************************/ | 60 | ************************************************/ |
72 | #if defined (HOST) || defined (INKERNEL) | ||
73 | #define MAX_PHB ((ushort) 128) /* range 0-127 */ | 61 | #define MAX_PHB ((ushort) 128) /* range 0-127 */ |
74 | #else | ||
75 | #define MAX_PHB ((ushort) 8) /* range 0-7 */ | ||
76 | #endif | ||
77 | 62 | ||
78 | /************************************************* | 63 | /************************************************* |
79 | * Defines for the mode fields | 64 | * Defines for the mode fields |
@@ -139,141 +124,23 @@ | |||
139 | * the start. The pointer tx_add points to a SPACE to put a Packet. | 124 | * the start. The pointer tx_add points to a SPACE to put a Packet. |
140 | * The pointer tx_remove points to the next Packet to remove | 125 | * The pointer tx_remove points to the next Packet to remove |
141 | *************************************************************************/ | 126 | *************************************************************************/ |
142 | #ifndef INKERNEL | ||
143 | #define src_unit u2.s2.unit | ||
144 | #define src_port u2.s2.port | ||
145 | #define dest_unit u1.s1.unit | ||
146 | #define dest_port u1.s1.port | ||
147 | #endif | ||
148 | #ifdef HOST | ||
149 | #define tx_start u3.s1.tx_start_ptr_ptr | ||
150 | #define tx_add u3.s1.tx_add_ptr_ptr | ||
151 | #define tx_end u3.s1.tx_end_ptr_ptr | ||
152 | #define tx_remove u3.s1.tx_remove_ptr_ptr | ||
153 | #define rx_start u4.s1.rx_start_ptr_ptr | ||
154 | #define rx_add u4.s1.rx_add_ptr_ptr | ||
155 | #define rx_end u4.s1.rx_end_ptr_ptr | ||
156 | #define rx_remove u4.s1.rx_remove_ptr_ptr | ||
157 | #endif | ||
158 | typedef struct PHB PHB; | 127 | typedef struct PHB PHB; |
159 | struct PHB { | 128 | struct PHB { |
160 | #ifdef RTA | ||
161 | ushort port; | ||
162 | #endif | ||
163 | #ifdef INKERNEL | ||
164 | WORD source; | 129 | WORD source; |
165 | #else | ||
166 | union { | ||
167 | ushort source; /* Complete source */ | ||
168 | struct { | ||
169 | unsigned char unit; /* Source unit */ | ||
170 | unsigned char port; /* Source port */ | ||
171 | } s2; | ||
172 | } u2; | ||
173 | #endif | ||
174 | WORD handshake; | 130 | WORD handshake; |
175 | WORD status; | 131 | WORD status; |
176 | NUMBER timeout; /* Maximum of 1.9 seconds */ | 132 | NUMBER timeout; /* Maximum of 1.9 seconds */ |
177 | WORD link; /* Send down this link */ | 133 | WORD link; /* Send down this link */ |
178 | #ifdef INKERNEL | ||
179 | WORD destination; | 134 | WORD destination; |
180 | #else | ||
181 | union { | ||
182 | ushort destination; /* Complete destination */ | ||
183 | struct { | ||
184 | unsigned char unit; /* Destination unit */ | ||
185 | unsigned char port; /* Destination port */ | ||
186 | } s1; | ||
187 | } u1; | ||
188 | #endif | ||
189 | #ifdef RTA | ||
190 | ushort tx_pkts_added; | ||
191 | ushort tx_pkts_removed; | ||
192 | Q_BUF_ptr tx_q_start; /* Start of the Q list chain */ | ||
193 | short num_tx_q_bufs; /* Number of Q buffers in the chain */ | ||
194 | PKT_ptr_ptr tx_add; /* Add a new Packet here */ | ||
195 | Q_BUF_ptr tx_add_qb; /* Pointer to the add Q buf */ | ||
196 | PKT_ptr_ptr tx_add_st_qbb; /* Pointer to start of the Q's buf */ | ||
197 | PKT_ptr_ptr tx_add_end_qbb; /* Pointer to the end of the Q's buf */ | ||
198 | PKT_ptr_ptr tx_remove; /* Remove a Packet here */ | ||
199 | Q_BUF_ptr tx_remove_qb; /* Pointer to the remove Q buf */ | ||
200 | PKT_ptr_ptr tx_remove_st_qbb; /* Pointer to the start of the Q buf */ | ||
201 | PKT_ptr_ptr tx_remove_end_qbb; /* Pointer to the end of the Q buf */ | ||
202 | #endif | ||
203 | #ifdef INKERNEL | ||
204 | PKT_ptr_ptr tx_start; | 135 | PKT_ptr_ptr tx_start; |
205 | PKT_ptr_ptr tx_end; | 136 | PKT_ptr_ptr tx_end; |
206 | PKT_ptr_ptr tx_add; | 137 | PKT_ptr_ptr tx_add; |
207 | PKT_ptr_ptr tx_remove; | 138 | PKT_ptr_ptr tx_remove; |
208 | #endif | ||
209 | #ifdef HOST | ||
210 | union { | ||
211 | struct { | ||
212 | PKT_ptr_ptr tx_start_ptr_ptr; | ||
213 | PKT_ptr_ptr tx_end_ptr_ptr; | ||
214 | PKT_ptr_ptr tx_add_ptr_ptr; | ||
215 | PKT_ptr_ptr tx_remove_ptr_ptr; | ||
216 | } s1; | ||
217 | struct { | ||
218 | ushort *tx_start_ptr; | ||
219 | ushort *tx_end_ptr; | ||
220 | ushort *tx_add_ptr; | ||
221 | ushort *tx_remove_ptr; | ||
222 | } s2; | ||
223 | } u3; | ||
224 | #endif | ||
225 | 139 | ||
226 | #ifdef RTA | ||
227 | ushort rx_pkts_added; | ||
228 | ushort rx_pkts_removed; | ||
229 | Q_BUF_ptr rx_q_start; /* Start of the Q list chain */ | ||
230 | short num_rx_q_bufs; /* Number of Q buffers in the chain */ | ||
231 | PKT_ptr_ptr rx_add; /* Add a new Packet here */ | ||
232 | Q_BUF_ptr rx_add_qb; /* Pointer to the add Q buf */ | ||
233 | PKT_ptr_ptr rx_add_st_qbb; /* Pointer to start of the Q's buf */ | ||
234 | PKT_ptr_ptr rx_add_end_qbb; /* Pointer to the end of the Q's buf */ | ||
235 | PKT_ptr_ptr rx_remove; /* Remove a Packet here */ | ||
236 | Q_BUF_ptr rx_remove_qb; /* Pointer to the remove Q buf */ | ||
237 | PKT_ptr_ptr rx_remove_st_qbb; /* Pointer to the start of the Q buf */ | ||
238 | PKT_ptr_ptr rx_remove_end_qbb; /* Pointer to the end of the Q buf */ | ||
239 | #endif | ||
240 | #ifdef INKERNEL | ||
241 | PKT_ptr_ptr rx_start; | 140 | PKT_ptr_ptr rx_start; |
242 | PKT_ptr_ptr rx_end; | 141 | PKT_ptr_ptr rx_end; |
243 | PKT_ptr_ptr rx_add; | 142 | PKT_ptr_ptr rx_add; |
244 | PKT_ptr_ptr rx_remove; | 143 | PKT_ptr_ptr rx_remove; |
245 | #endif | ||
246 | #ifdef HOST | ||
247 | union { | ||
248 | struct { | ||
249 | PKT_ptr_ptr rx_start_ptr_ptr; | ||
250 | PKT_ptr_ptr rx_end_ptr_ptr; | ||
251 | PKT_ptr_ptr rx_add_ptr_ptr; | ||
252 | PKT_ptr_ptr rx_remove_ptr_ptr; | ||
253 | } s1; | ||
254 | struct { | ||
255 | ushort *rx_start_ptr; | ||
256 | ushort *rx_end_ptr; | ||
257 | ushort *rx_add_ptr; | ||
258 | ushort *rx_remove_ptr; | ||
259 | } s2; | ||
260 | } u4; | ||
261 | #endif | ||
262 | |||
263 | #ifdef RTA /* some fields for the remotes */ | ||
264 | ushort flush_count; /* Count of write flushes */ | ||
265 | ushort txmode; /* Modes for tx */ | ||
266 | ushort rxmode; /* Modes for rx */ | ||
267 | ushort portmode; /* Generic modes */ | ||
268 | ushort column; /* TAB3 column count */ | ||
269 | ushort tx_subscript; /* (TX) Subscript into data field */ | ||
270 | ushort rx_subscript; /* (RX) Subscript into data field */ | ||
271 | PKT_ptr rx_incomplete; /* Hold an incomplete packet here */ | ||
272 | ushort modem_bits; /* Modem bits to mask */ | ||
273 | ushort lastModem; /* Modem control lines. */ | ||
274 | ushort addr; /* Address for sub commands */ | ||
275 | ushort MonitorTstate; /* TRUE if monitoring tstop */ | ||
276 | #endif | ||
277 | 144 | ||
278 | }; | 145 | }; |
279 | 146 | ||
diff --git a/drivers/char/rio/pkt.h b/drivers/char/rio/pkt.h index 882fd429ac2e..7011e52e82db 100644 --- a/drivers/char/rio/pkt.h +++ b/drivers/char/rio/pkt.h | |||
@@ -70,39 +70,12 @@ | |||
70 | #define CONTROL_DATA_WNDW (DATA_WNDW << 8) | 70 | #define CONTROL_DATA_WNDW (DATA_WNDW << 8) |
71 | 71 | ||
72 | struct PKT { | 72 | struct PKT { |
73 | #ifdef INKERNEL | ||
74 | BYTE dest_unit; /* Destination Unit Id */ | 73 | BYTE dest_unit; /* Destination Unit Id */ |
75 | BYTE dest_port; /* Destination POrt */ | 74 | BYTE dest_port; /* Destination POrt */ |
76 | BYTE src_unit; /* Source Unit Id */ | 75 | BYTE src_unit; /* Source Unit Id */ |
77 | BYTE src_port; /* Source POrt */ | 76 | BYTE src_port; /* Source POrt */ |
78 | #else | ||
79 | union { | ||
80 | ushort destination; /* Complete destination */ | ||
81 | struct { | ||
82 | unsigned char unit; /* Destination unit */ | ||
83 | unsigned char port; /* Destination port */ | ||
84 | } s1; | ||
85 | } u1; | ||
86 | union { | ||
87 | ushort source; /* Complete source */ | ||
88 | struct { | ||
89 | unsigned char unit; /* Source unit */ | ||
90 | unsigned char port; /* Source port */ | ||
91 | } s2; | ||
92 | } u2; | ||
93 | #endif | ||
94 | #ifdef INKERNEL | ||
95 | BYTE len; | 77 | BYTE len; |
96 | BYTE control; | 78 | BYTE control; |
97 | #else | ||
98 | union { | ||
99 | ushort control; | ||
100 | struct { | ||
101 | unsigned char len; | ||
102 | unsigned char control; | ||
103 | } s3; | ||
104 | } u3; | ||
105 | #endif | ||
106 | BYTE data[PKT_MAX_DATA_LEN]; | 79 | BYTE data[PKT_MAX_DATA_LEN]; |
107 | /* Actual data :-) */ | 80 | /* Actual data :-) */ |
108 | WORD csum; /* C-SUM */ | 81 | WORD csum; /* C-SUM */ |
diff --git a/drivers/char/rio/qbuf.h b/drivers/char/rio/qbuf.h index acd9e8e5307d..391ffc335535 100644 --- a/drivers/char/rio/qbuf.h +++ b/drivers/char/rio/qbuf.h | |||
@@ -46,11 +46,7 @@ static char *_rio_qbuf_h_sccs = "@(#)qbuf.h 1.1"; | |||
46 | 46 | ||
47 | 47 | ||
48 | 48 | ||
49 | #ifdef HOST | ||
50 | #define PKTS_PER_BUFFER 1 | ||
51 | #else | ||
52 | #define PKTS_PER_BUFFER (220 / PKT_LENGTH) | 49 | #define PKTS_PER_BUFFER (220 / PKT_LENGTH) |
53 | #endif | ||
54 | 50 | ||
55 | typedef struct Q_BUF Q_BUF; | 51 | typedef struct Q_BUF Q_BUF; |
56 | struct Q_BUF { | 52 | struct Q_BUF { |
diff --git a/drivers/char/rio/riotypes.h b/drivers/char/rio/riotypes.h index 9b67e2468bec..46084d5c7e98 100644 --- a/drivers/char/rio/riotypes.h +++ b/drivers/char/rio/riotypes.h | |||
@@ -43,9 +43,6 @@ | |||
43 | #endif | 43 | #endif |
44 | #endif | 44 | #endif |
45 | 45 | ||
46 | #ifdef INKERNEL | ||
47 | |||
48 | #if !defined(MIPSAT) | ||
49 | typedef unsigned short NUMBER_ptr; | 46 | typedef unsigned short NUMBER_ptr; |
50 | typedef unsigned short WORD_ptr; | 47 | typedef unsigned short WORD_ptr; |
51 | typedef unsigned short BYTE_ptr; | 48 | typedef unsigned short BYTE_ptr; |
@@ -65,69 +62,6 @@ typedef unsigned short RUP_ptr; | |||
65 | typedef unsigned short short_ptr; | 62 | typedef unsigned short short_ptr; |
66 | typedef unsigned short u_short_ptr; | 63 | typedef unsigned short u_short_ptr; |
67 | typedef unsigned short ushort_ptr; | 64 | typedef unsigned short ushort_ptr; |
68 | #else | ||
69 | /* MIPSAT types */ | ||
70 | typedef char RIO_POINTER[8]; | ||
71 | typedef RIO_POINTER NUMBER_ptr; | ||
72 | typedef RIO_POINTER WORD_ptr; | ||
73 | typedef RIO_POINTER BYTE_ptr; | ||
74 | typedef RIO_POINTER char_ptr; | ||
75 | typedef RIO_POINTER Channel_ptr; | ||
76 | typedef RIO_POINTER FREE_LIST_ptr_ptr; | ||
77 | typedef RIO_POINTER FREE_LIST_ptr; | ||
78 | typedef RIO_POINTER LPB_ptr; | ||
79 | typedef RIO_POINTER Process_ptr; | ||
80 | typedef RIO_POINTER PHB_ptr; | ||
81 | typedef RIO_POINTER PKT_ptr; | ||
82 | typedef RIO_POINTER PKT_ptr_ptr; | ||
83 | typedef RIO_POINTER Q_BUF_ptr; | ||
84 | typedef RIO_POINTER Q_BUF_ptr_ptr; | ||
85 | typedef RIO_POINTER ROUTE_STR_ptr; | ||
86 | typedef RIO_POINTER RUP_ptr; | ||
87 | typedef RIO_POINTER short_ptr; | ||
88 | typedef RIO_POINTER u_short_ptr; | ||
89 | typedef RIO_POINTER ushort_ptr; | ||
90 | #endif | ||
91 | |||
92 | #else /* not INKERNEL */ | ||
93 | typedef unsigned char BYTE; | ||
94 | typedef unsigned short WORD; | ||
95 | typedef unsigned long DWORD; | ||
96 | typedef short NUMBER; | ||
97 | typedef short *NUMBER_ptr; | ||
98 | typedef unsigned short *WORD_ptr; | ||
99 | typedef unsigned char *BYTE_ptr; | ||
100 | typedef unsigned char uchar; | ||
101 | typedef unsigned short ushort; | ||
102 | typedef unsigned int uint; | ||
103 | typedef unsigned long ulong; | ||
104 | typedef unsigned char u_char; | ||
105 | typedef unsigned short u_short; | ||
106 | typedef unsigned int u_int; | ||
107 | typedef unsigned long u_long; | ||
108 | typedef unsigned short ERROR; | ||
109 | typedef unsigned long ID; | ||
110 | typedef char *char_ptr; | ||
111 | typedef Channel *Channel_ptr; | ||
112 | typedef struct FREE_LIST *FREE_LIST_ptr; | ||
113 | typedef struct FREE_LIST **FREE_LIST_ptr_ptr; | ||
114 | typedef struct LPB *LPB_ptr; | ||
115 | typedef struct Process *Process_ptr; | ||
116 | typedef struct PHB *PHB_ptr; | ||
117 | typedef struct PKT *PKT_ptr; | ||
118 | typedef struct PKT **PKT_ptr_ptr; | ||
119 | typedef struct Q_BUF *Q_BUF_ptr; | ||
120 | typedef struct Q_BUF **Q_BUF_ptr_ptr; | ||
121 | typedef struct ROUTE_STR *ROUTE_STR_ptr; | ||
122 | typedef struct RUP *RUP_ptr; | ||
123 | typedef short *short_ptr; | ||
124 | typedef u_short *u_short_ptr; | ||
125 | typedef ushort *ushort_ptr; | ||
126 | typedef struct PKT PKT; | ||
127 | typedef struct LPB LPB; | ||
128 | typedef struct RUP RUP; | ||
129 | #endif | ||
130 | |||
131 | 65 | ||
132 | #endif /* __riotypes__ */ | 66 | #endif /* __riotypes__ */ |
133 | 67 | ||
diff --git a/drivers/char/rio/rup.h b/drivers/char/rio/rup.h index 8d44fec91dd5..f74f67c6f702 100644 --- a/drivers/char/rio/rup.h +++ b/drivers/char/rio/rup.h | |||
@@ -43,12 +43,7 @@ | |||
43 | #endif | 43 | #endif |
44 | #endif | 44 | #endif |
45 | 45 | ||
46 | #if defined( HOST ) || defined( INKERNEL ) | ||
47 | #define MAX_RUP ((short) 16) | 46 | #define MAX_RUP ((short) 16) |
48 | #endif | ||
49 | #ifdef RTA | ||
50 | #define MAX_RUP ((short) 1) | ||
51 | #endif | ||
52 | 47 | ||
53 | #define PKTS_PER_RUP ((short) 2) /* They are always used in pairs */ | 48 | #define PKTS_PER_RUP ((short) 2) /* They are always used in pairs */ |
54 | 49 | ||
diff --git a/drivers/char/rio/sam.h b/drivers/char/rio/sam.h index 31494054b213..6f754e19015d 100644 --- a/drivers/char/rio/sam.h +++ b/drivers/char/rio/sam.h | |||
@@ -43,10 +43,6 @@ | |||
43 | #endif | 43 | #endif |
44 | 44 | ||
45 | 45 | ||
46 | #if !defined( HOST ) && !defined( INKERNEL ) | ||
47 | #define RTA 1 | ||
48 | #endif | ||
49 | |||
50 | #define NUM_FREE_LIST_UNITS 500 | 46 | #define NUM_FREE_LIST_UNITS 500 |
51 | 47 | ||
52 | #ifndef FALSE | 48 | #ifndef FALSE |
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c index 0949dcef0697..7edc6a4dbdc4 100644 --- a/drivers/char/rocket.c +++ b/drivers/char/rocket.c | |||
@@ -433,7 +433,7 @@ static void rp_do_receive(struct r_port *info, | |||
433 | count += ToRecv; | 433 | count += ToRecv; |
434 | } | 434 | } |
435 | /* Push the data up to the tty layer */ | 435 | /* Push the data up to the tty layer */ |
436 | ld->receive_buf(tty, cbuf, fbuf, count); | 436 | ld->receive_buf(tty, chead, fhead, count); |
437 | done: | 437 | done: |
438 | tty_ldisc_deref(ld); | 438 | tty_ldisc_deref(ld); |
439 | } | 439 | } |
diff --git a/drivers/char/sx.c b/drivers/char/sx.c index 64bf89cb574f..c2490e270f1f 100644 --- a/drivers/char/sx.c +++ b/drivers/char/sx.c | |||
@@ -931,7 +931,7 @@ static int sx_set_real_termios (void *ptr) | |||
931 | case CS6:sx_write_channel_byte (port, hi_mask, 0x3f);break; | 931 | case CS6:sx_write_channel_byte (port, hi_mask, 0x3f);break; |
932 | case CS5:sx_write_channel_byte (port, hi_mask, 0x1f);break; | 932 | case CS5:sx_write_channel_byte (port, hi_mask, 0x1f);break; |
933 | default: | 933 | default: |
934 | printk (KERN_INFO "sx: Invalid wordsize: %d\n", CFLAG & CSIZE); | 934 | printk (KERN_INFO "sx: Invalid wordsize: %u\n", CFLAG & CSIZE); |
935 | break; | 935 | break; |
936 | } | 936 | } |
937 | 937 | ||
@@ -958,7 +958,7 @@ static int sx_set_real_termios (void *ptr) | |||
958 | } else { | 958 | } else { |
959 | set_bit(TTY_HW_COOK_IN, &port->gs.tty->flags); | 959 | set_bit(TTY_HW_COOK_IN, &port->gs.tty->flags); |
960 | } | 960 | } |
961 | sx_dprintk (SX_DEBUG_TERMIOS, "iflags: %x(%d) ", | 961 | sx_dprintk (SX_DEBUG_TERMIOS, "iflags: %x(%d) ", |
962 | port->gs.tty->termios->c_iflag, | 962 | port->gs.tty->termios->c_iflag, |
963 | I_OTHER(port->gs.tty)); | 963 | I_OTHER(port->gs.tty)); |
964 | 964 | ||
@@ -973,7 +973,7 @@ static int sx_set_real_termios (void *ptr) | |||
973 | } else { | 973 | } else { |
974 | clear_bit(TTY_HW_COOK_OUT, &port->gs.tty->flags); | 974 | clear_bit(TTY_HW_COOK_OUT, &port->gs.tty->flags); |
975 | } | 975 | } |
976 | sx_dprintk (SX_DEBUG_TERMIOS, "oflags: %x(%d)\n", | 976 | sx_dprintk (SX_DEBUG_TERMIOS, "oflags: %x(%d)\n", |
977 | port->gs.tty->termios->c_oflag, | 977 | port->gs.tty->termios->c_oflag, |
978 | O_OTHER(port->gs.tty)); | 978 | O_OTHER(port->gs.tty)); |
979 | /* port->c_dcd = sx_get_CD (port); */ | 979 | /* port->c_dcd = sx_get_CD (port); */ |
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index eb8b5be4e249..076e07c1da38 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c | |||
@@ -253,6 +253,7 @@ static void tty_buffer_free_all(struct tty_struct *tty) | |||
253 | 253 | ||
254 | static void tty_buffer_init(struct tty_struct *tty) | 254 | static void tty_buffer_init(struct tty_struct *tty) |
255 | { | 255 | { |
256 | spin_lock_init(&tty->buf.lock); | ||
256 | tty->buf.head = NULL; | 257 | tty->buf.head = NULL; |
257 | tty->buf.tail = NULL; | 258 | tty->buf.tail = NULL; |
258 | tty->buf.free = NULL; | 259 | tty->buf.free = NULL; |
@@ -266,6 +267,7 @@ static struct tty_buffer *tty_buffer_alloc(size_t size) | |||
266 | p->used = 0; | 267 | p->used = 0; |
267 | p->size = size; | 268 | p->size = size; |
268 | p->next = NULL; | 269 | p->next = NULL; |
270 | p->active = 0; | ||
269 | p->char_buf_ptr = (char *)(p->data); | 271 | p->char_buf_ptr = (char *)(p->data); |
270 | p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size; | 272 | p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size; |
271 | /* printk("Flip create %p\n", p); */ | 273 | /* printk("Flip create %p\n", p); */ |
@@ -312,25 +314,36 @@ static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size) | |||
312 | 314 | ||
313 | int tty_buffer_request_room(struct tty_struct *tty, size_t size) | 315 | int tty_buffer_request_room(struct tty_struct *tty, size_t size) |
314 | { | 316 | { |
315 | struct tty_buffer *b = tty->buf.tail, *n; | 317 | struct tty_buffer *b, *n; |
316 | int left = 0; | 318 | int left; |
319 | unsigned long flags; | ||
320 | |||
321 | spin_lock_irqsave(&tty->buf.lock, flags); | ||
317 | 322 | ||
318 | /* OPTIMISATION: We could keep a per tty "zero" sized buffer to | 323 | /* OPTIMISATION: We could keep a per tty "zero" sized buffer to |
319 | remove this conditional if its worth it. This would be invisible | 324 | remove this conditional if its worth it. This would be invisible |
320 | to the callers */ | 325 | to the callers */ |
321 | if(b != NULL) | 326 | if ((b = tty->buf.tail) != NULL) { |
322 | left = b->size - b->used; | 327 | left = b->size - b->used; |
323 | if(left >= size) | 328 | b->active = 1; |
324 | return size; | 329 | } else |
325 | /* This is the slow path - looking for new buffers to use */ | 330 | left = 0; |
326 | n = tty_buffer_find(tty, size); | 331 | |
327 | if(n == NULL) | 332 | if (left < size) { |
328 | return left; | 333 | /* This is the slow path - looking for new buffers to use */ |
329 | if(b != NULL) | 334 | if ((n = tty_buffer_find(tty, size)) != NULL) { |
330 | b->next = n; | 335 | if (b != NULL) { |
331 | else | 336 | b->next = n; |
332 | tty->buf.head = n; | 337 | b->active = 0; |
333 | tty->buf.tail = n; | 338 | } else |
339 | tty->buf.head = n; | ||
340 | tty->buf.tail = n; | ||
341 | n->active = 1; | ||
342 | } else | ||
343 | size = left; | ||
344 | } | ||
345 | |||
346 | spin_unlock_irqrestore(&tty->buf.lock, flags); | ||
334 | return size; | 347 | return size; |
335 | } | 348 | } |
336 | 349 | ||
@@ -396,10 +409,12 @@ EXPORT_SYMBOL_GPL(tty_insert_flip_string_flags); | |||
396 | int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size) | 409 | int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size) |
397 | { | 410 | { |
398 | int space = tty_buffer_request_room(tty, size); | 411 | int space = tty_buffer_request_room(tty, size); |
399 | struct tty_buffer *tb = tty->buf.tail; | 412 | if (likely(space)) { |
400 | *chars = tb->char_buf_ptr + tb->used; | 413 | struct tty_buffer *tb = tty->buf.tail; |
401 | memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space); | 414 | *chars = tb->char_buf_ptr + tb->used; |
402 | tb->used += space; | 415 | memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space); |
416 | tb->used += space; | ||
417 | } | ||
403 | return space; | 418 | return space; |
404 | } | 419 | } |
405 | 420 | ||
@@ -416,10 +431,12 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string); | |||
416 | int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size) | 431 | int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size) |
417 | { | 432 | { |
418 | int space = tty_buffer_request_room(tty, size); | 433 | int space = tty_buffer_request_room(tty, size); |
419 | struct tty_buffer *tb = tty->buf.tail; | 434 | if (likely(space)) { |
420 | *chars = tb->char_buf_ptr + tb->used; | 435 | struct tty_buffer *tb = tty->buf.tail; |
421 | *flags = tb->flag_buf_ptr + tb->used; | 436 | *chars = tb->char_buf_ptr + tb->used; |
422 | tb->used += space; | 437 | *flags = tb->flag_buf_ptr + tb->used; |
438 | tb->used += space; | ||
439 | } | ||
423 | return space; | 440 | return space; |
424 | } | 441 | } |
425 | 442 | ||
@@ -2747,20 +2764,20 @@ static void flush_to_ldisc(void *private_) | |||
2747 | schedule_delayed_work(&tty->buf.work, 1); | 2764 | schedule_delayed_work(&tty->buf.work, 1); |
2748 | goto out; | 2765 | goto out; |
2749 | } | 2766 | } |
2750 | spin_lock_irqsave(&tty->read_lock, flags); | 2767 | spin_lock_irqsave(&tty->buf.lock, flags); |
2751 | while((tbuf = tty->buf.head) != NULL) { | 2768 | while((tbuf = tty->buf.head) != NULL && !tbuf->active) { |
2752 | tty->buf.head = tbuf->next; | 2769 | tty->buf.head = tbuf->next; |
2753 | if (tty->buf.head == NULL) | 2770 | if (tty->buf.head == NULL) |
2754 | tty->buf.tail = NULL; | 2771 | tty->buf.tail = NULL; |
2755 | spin_unlock_irqrestore(&tty->read_lock, flags); | 2772 | spin_unlock_irqrestore(&tty->buf.lock, flags); |
2756 | /* printk("Process buffer %p for %d\n", tbuf, tbuf->used); */ | 2773 | /* printk("Process buffer %p for %d\n", tbuf, tbuf->used); */ |
2757 | disc->receive_buf(tty, tbuf->char_buf_ptr, | 2774 | disc->receive_buf(tty, tbuf->char_buf_ptr, |
2758 | tbuf->flag_buf_ptr, | 2775 | tbuf->flag_buf_ptr, |
2759 | tbuf->used); | 2776 | tbuf->used); |
2760 | spin_lock_irqsave(&tty->read_lock, flags); | 2777 | spin_lock_irqsave(&tty->buf.lock, flags); |
2761 | tty_buffer_free(tty, tbuf); | 2778 | tty_buffer_free(tty, tbuf); |
2762 | } | 2779 | } |
2763 | spin_unlock_irqrestore(&tty->read_lock, flags); | 2780 | spin_unlock_irqrestore(&tty->buf.lock, flags); |
2764 | out: | 2781 | out: |
2765 | tty_ldisc_deref(disc); | 2782 | tty_ldisc_deref(disc); |
2766 | } | 2783 | } |
@@ -2852,6 +2869,12 @@ EXPORT_SYMBOL(tty_get_baud_rate); | |||
2852 | 2869 | ||
2853 | void tty_flip_buffer_push(struct tty_struct *tty) | 2870 | void tty_flip_buffer_push(struct tty_struct *tty) |
2854 | { | 2871 | { |
2872 | unsigned long flags; | ||
2873 | spin_lock_irqsave(&tty->buf.lock, flags); | ||
2874 | if (tty->buf.tail != NULL) | ||
2875 | tty->buf.tail->active = 0; | ||
2876 | spin_unlock_irqrestore(&tty->buf.lock, flags); | ||
2877 | |||
2855 | if (tty->low_latency) | 2878 | if (tty->low_latency) |
2856 | flush_to_ldisc((void *) tty); | 2879 | flush_to_ldisc((void *) tty); |
2857 | else | 2880 | else |
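Note: the tty_io.c hunks above replace tty->read_lock with a dedicated tty->buf.lock and add a per-buffer "active" flag, so flush_to_ldisc() never hands a buffer that a driver is still filling to the line discipline, and tty_flip_buffer_push() clears the flag when the driver is done. A minimal sketch of that handshake, using simplified stand-in types (not the real struct tty_struct/tty_buffer) and kernel locking primitives:

struct flip_buf {
	struct flip_buf *next;
	int used;
	int active;			/* set while a driver is still writing */
	char data[256];
};

struct flip_queue {
	spinlock_t lock;		/* plays the role of tty->buf.lock */
	struct flip_buf *head, *tail;
};

/* Consumer: only completed (inactive) buffers reach the line discipline. */
static void flip_flush(struct flip_queue *q, void (*rx)(char *buf, int len))
{
	struct flip_buf *b;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	while ((b = q->head) != NULL && !b->active) {
		q->head = b->next;
		if (q->head == NULL)
			q->tail = NULL;
		spin_unlock_irqrestore(&q->lock, flags);
		rx(b->data, b->used);	/* run the ldisc outside the lock */
		spin_lock_irqsave(&q->lock, flags);
		kfree(b);		/* completed buffer can now be freed */
	}
	spin_unlock_irqrestore(&q->lock, flags);
}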
diff --git a/drivers/char/watchdog/sbc_epx_c3.c b/drivers/char/watchdog/sbc_epx_c3.c index 951764614ebf..7a4dfb95d087 100644 --- a/drivers/char/watchdog/sbc_epx_c3.c +++ b/drivers/char/watchdog/sbc_epx_c3.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/notifier.h> | 25 | #include <linux/notifier.h> |
26 | #include <linux/reboot.h> | 26 | #include <linux/reboot.h> |
27 | #include <linux/init.h> | 27 | #include <linux/init.h> |
28 | #include <linux/ioport.h> | ||
28 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
29 | #include <asm/io.h> | 30 | #include <asm/io.h> |
30 | 31 | ||
@@ -181,11 +182,14 @@ static int __init watchdog_init(void) | |||
181 | { | 182 | { |
182 | int ret; | 183 | int ret; |
183 | 184 | ||
185 | if (!request_region(EPXC3_WATCHDOG_CTL_REG, 2, "epxc3_watchdog")) | ||
186 | return -EBUSY; | ||
187 | |||
184 | ret = register_reboot_notifier(&epx_c3_notifier); | 188 | ret = register_reboot_notifier(&epx_c3_notifier); |
185 | if (ret) { | 189 | if (ret) { |
186 | printk(KERN_ERR PFX "cannot register reboot notifier " | 190 | printk(KERN_ERR PFX "cannot register reboot notifier " |
187 | "(err=%d)\n", ret); | 191 | "(err=%d)\n", ret); |
188 | return ret; | 192 | goto out; |
189 | } | 193 | } |
190 | 194 | ||
191 | ret = misc_register(&epx_c3_miscdev); | 195 | ret = misc_register(&epx_c3_miscdev); |
@@ -193,18 +197,23 @@ static int __init watchdog_init(void) | |||
193 | printk(KERN_ERR PFX "cannot register miscdev on minor=%d " | 197 | printk(KERN_ERR PFX "cannot register miscdev on minor=%d " |
194 | "(err=%d)\n", WATCHDOG_MINOR, ret); | 198 | "(err=%d)\n", WATCHDOG_MINOR, ret); |
195 | unregister_reboot_notifier(&epx_c3_notifier); | 199 | unregister_reboot_notifier(&epx_c3_notifier); |
196 | return ret; | 200 | goto out; |
197 | } | 201 | } |
198 | 202 | ||
199 | printk(banner); | 203 | printk(banner); |
200 | 204 | ||
201 | return 0; | 205 | return 0; |
206 | |||
207 | out: | ||
208 | release_region(EPXC3_WATCHDOG_CTL_REG, 2); | ||
209 | return ret; | ||
202 | } | 210 | } |
203 | 211 | ||
204 | static void __exit watchdog_exit(void) | 212 | static void __exit watchdog_exit(void) |
205 | { | 213 | { |
206 | misc_deregister(&epx_c3_miscdev); | 214 | misc_deregister(&epx_c3_miscdev); |
207 | unregister_reboot_notifier(&epx_c3_notifier); | 215 | unregister_reboot_notifier(&epx_c3_notifier); |
216 | release_region(EPXC3_WATCHDOG_CTL_REG, 2); | ||
208 | } | 217 | } |
209 | 218 | ||
210 | module_init(watchdog_init); | 219 | module_init(watchdog_init); |
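Note: the watchdog_init() change above follows the usual acquire-then-roll-back pattern: claim the I/O region first, release it on every failure path, and release it again in module exit. A sketch of that shape with placeholder names (IO_BASE, example_notifier and example_miscdev are illustrative, not the driver's real symbols):

static int __init example_init(void)
{
	int ret;

	if (!request_region(IO_BASE, 2, "example"))
		return -EBUSY;			/* ports already claimed */

	ret = register_reboot_notifier(&example_notifier);
	if (ret)
		goto out_region;

	ret = misc_register(&example_miscdev);
	if (ret)
		goto out_notifier;

	return 0;

out_notifier:
	unregister_reboot_notifier(&example_notifier);
out_region:
	release_region(IO_BASE, 2);		/* undo in reverse order */
	return ret;
}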
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 4819e7fc00dd..d94331c1e5b0 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig | |||
@@ -46,7 +46,7 @@ config EDAC_MM_EDAC | |||
46 | 46 | ||
47 | config EDAC_AMD76X | 47 | config EDAC_AMD76X |
48 | tristate "AMD 76x (760, 762, 768)" | 48 | tristate "AMD 76x (760, 762, 768)" |
49 | depends on EDAC_MM_EDAC && PCI | 49 | depends on EDAC_MM_EDAC && PCI && X86_32 |
50 | help | 50 | help |
51 | Support for error detection and correction on the AMD 76x | 51 | Support for error detection and correction on the AMD 76x |
52 | series of chipsets used with the Athlon processor. | 52 | series of chipsets used with the Athlon processor. |
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c index 770a5a633079..c454ded2b060 100644 --- a/drivers/edac/e752x_edac.c +++ b/drivers/edac/e752x_edac.c | |||
@@ -1039,10 +1039,10 @@ MODULE_DEVICE_TABLE(pci, e752x_pci_tbl); | |||
1039 | 1039 | ||
1040 | 1040 | ||
1041 | static struct pci_driver e752x_driver = { | 1041 | static struct pci_driver e752x_driver = { |
1042 | name: BS_MOD_STR, | 1042 | .name = BS_MOD_STR, |
1043 | probe: e752x_init_one, | 1043 | .probe = e752x_init_one, |
1044 | remove: __devexit_p(e752x_remove_one), | 1044 | .remove = __devexit_p(e752x_remove_one), |
1045 | id_table: e752x_pci_tbl, | 1045 | .id_table = e752x_pci_tbl, |
1046 | }; | 1046 | }; |
1047 | 1047 | ||
1048 | 1048 | ||
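Note: the e752x hunk above only changes initializer syntax: the GNU-specific "field: value" form is replaced by C99 designated initializers, the portable spelling preferred in the kernel. A self-contained illustration (struct and names are made up):

struct ops {
	const char *name;
	int (*probe)(void);
};

static int example_probe(void) { return 0; }

/* old GCC extension, now discouraged:  { name: "example", probe: example_probe } */
static struct ops example_ops = {
	.name  = "example",	/* C99 designated initializers */
	.probe = example_probe,
};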
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 4be9bd0a1267..b10ee4698b1d 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c | |||
@@ -14,7 +14,6 @@ | |||
14 | 14 | ||
15 | 15 | ||
16 | #include <linux/config.h> | 16 | #include <linux/config.h> |
17 | #include <linux/version.h> | ||
18 | #include <linux/module.h> | 17 | #include <linux/module.h> |
19 | #include <linux/proc_fs.h> | 18 | #include <linux/proc_fs.h> |
20 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index 1c81174595b3..d633081fa4c5 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig | |||
@@ -52,9 +52,9 @@ config IDE | |||
52 | 52 | ||
53 | if IDE | 53 | if IDE |
54 | 54 | ||
55 | config IDE_MAX_HWIFS | 55 | config IDE_MAX_HWIFS |
56 | int "Max IDE interfaces" | 56 | int "Max IDE interfaces" |
57 | depends on ALPHA || SUPERH | 57 | depends on ALPHA || SUPERH || IA64 |
58 | default 4 | 58 | default 4 |
59 | help | 59 | help |
60 | This is the maximum number of IDE hardware interfaces that will | 60 | This is the maximum number of IDE hardware interfaces that will |
@@ -162,8 +162,8 @@ config BLK_DEV_IDECS | |||
162 | tristate "PCMCIA IDE support" | 162 | tristate "PCMCIA IDE support" |
163 | depends on PCMCIA | 163 | depends on PCMCIA |
164 | help | 164 | help |
165 | Support for outboard IDE disks, tape drives, and CD-ROM drives | 165 | Support for Compact Flash cards, outboard IDE disks, tape drives, |
166 | connected through a PCMCIA card. | 166 | and CD-ROM drives connected through a PCMCIA card. |
167 | 167 | ||
168 | config BLK_DEV_IDECD | 168 | config BLK_DEV_IDECD |
169 | tristate "Include IDE/ATAPI CDROM support" | 169 | tristate "Include IDE/ATAPI CDROM support" |
@@ -267,7 +267,7 @@ config IDE_TASK_IOCTL | |||
267 | help | 267 | help |
268 | This is a direct raw access to the media. It is a complex but | 268 | This is a direct raw access to the media. It is a complex but |
269 | elegant solution to test and validate the domain of the hardware and | 269 | elegant solution to test and validate the domain of the hardware and |
270 | perform below the driver data recover if needed. This is the most | 270 | perform below the driver data recovery if needed. This is the most |
271 | basic form of media-forensics. | 271 | basic form of media-forensics. |
272 | 272 | ||
273 | If you are unsure, say N here. | 273 | If you are unsure, say N here. |
@@ -525,7 +525,7 @@ config BLK_DEV_CS5520 | |||
525 | tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)" | 525 | tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)" |
526 | depends on EXPERIMENTAL | 526 | depends on EXPERIMENTAL |
527 | help | 527 | help |
528 | Include support for PIO tuning an virtual DMA on the Cyrix MediaGX | 528 | Include support for PIO tuning and virtual DMA on the Cyrix MediaGX |
529 | 5510/5520 chipset. This will automatically be detected and | 529 | 5510/5520 chipset. This will automatically be detected and |
530 | configured if found. | 530 | configured if found. |
531 | 531 | ||
@@ -662,7 +662,7 @@ config PDC202XX_BURST | |||
662 | 662 | ||
663 | It was originally designed for the PDC20246/Ultra33, whose BIOS will | 663 | It was originally designed for the PDC20246/Ultra33, whose BIOS will |
664 | only setup UDMA on the first two PDC20246 cards. It has also been | 664 | only setup UDMA on the first two PDC20246 cards. It has also been |
665 | used succesfully on a PDC20265/Ultra100, allowing use of UDMA modes | 665 | used successfully on a PDC20265/Ultra100, allowing use of UDMA modes |
666 | when the PDC20265 BIOS has been disabled (for faster boot up). | 666 | when the PDC20265 BIOS has been disabled (for faster boot up). |
667 | 667 | ||
668 | Please read the comments at the top of | 668 | Please read the comments at the top of |
@@ -673,13 +673,6 @@ config PDC202XX_BURST | |||
673 | config BLK_DEV_PDC202XX_NEW | 673 | config BLK_DEV_PDC202XX_NEW |
674 | tristate "PROMISE PDC202{68|69|70|71|75|76|77} support" | 674 | tristate "PROMISE PDC202{68|69|70|71|75|76|77} support" |
675 | 675 | ||
676 | # FIXME - probably wants to be one for old and for new | ||
677 | config PDC202XX_FORCE | ||
678 | bool "Enable controller even if disabled by BIOS" | ||
679 | depends on BLK_DEV_PDC202XX_NEW | ||
680 | help | ||
681 | Enable the PDC202xx controller even if it has been disabled in the BIOS setup. | ||
682 | |||
683 | config BLK_DEV_SVWKS | 676 | config BLK_DEV_SVWKS |
684 | tristate "ServerWorks OSB4/CSB5/CSB6 chipsets support" | 677 | tristate "ServerWorks OSB4/CSB5/CSB6 chipsets support" |
685 | help | 678 | help |
@@ -722,7 +715,7 @@ config BLK_DEV_SIS5513 | |||
722 | config BLK_DEV_SLC90E66 | 715 | config BLK_DEV_SLC90E66 |
723 | tristate "SLC90E66 chipset support" | 716 | tristate "SLC90E66 chipset support" |
724 | help | 717 | help |
725 | This driver ensures (U)DMA support for Victroy66 SouthBridges for | 718 | This driver ensures (U)DMA support for Victory66 SouthBridges for |
726 | SMsC with Intel NorthBridges. This is an Ultra66 based chipset. | 719 | SMsC with Intel NorthBridges. This is an Ultra66 based chipset. |
727 | The nice thing about it is that you can mix Ultra/DMA/PIO devices | 720 | The nice thing about it is that you can mix Ultra/DMA/PIO devices |
728 | and it will handle timing cycles. Since this is an improved | 721 | and it will handle timing cycles. Since this is an improved |
@@ -1060,7 +1053,7 @@ config IDEDMA_IVB | |||
1060 | in that mode with an 80c ribbon. | 1053 | in that mode with an 80c ribbon. |
1061 | 1054 | ||
1062 | If you are experiencing compatibility or performance problems, you | 1055 | If you are experiencing compatibility or performance problems, you |
1063 | MAY try to answering Y here. However, it does not necessarily solve | 1056 | MAY try to answer Y here. However, it does not necessarily solve |
1064 | any of your problems, it could even cause more of them. | 1057 | any of your problems, it could even cause more of them. |
1065 | 1058 | ||
1066 | It is normally safe to answer Y; however, the default is N. | 1059 | It is normally safe to answer Y; however, the default is N. |
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index ca25f9e3d0f4..6c60a9d2afd8 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c | |||
@@ -776,7 +776,7 @@ static void update_ordered(ide_drive_t *drive) | |||
776 | ide_id_has_flush_cache_ext(id)); | 776 | ide_id_has_flush_cache_ext(id)); |
777 | 777 | ||
778 | printk(KERN_INFO "%s: cache flushes %ssupported\n", | 778 | printk(KERN_INFO "%s: cache flushes %ssupported\n", |
779 | drive->name, barrier ? "" : "not"); | 779 | drive->name, barrier ? "" : "not "); |
780 | 780 | ||
781 | if (barrier) { | 781 | if (barrier) { |
782 | ordered = QUEUE_ORDERED_DRAIN_FLUSH; | 782 | ordered = QUEUE_ORDERED_DRAIN_FLUSH; |
@@ -889,11 +889,7 @@ static void idedisk_setup (ide_drive_t *drive) | |||
889 | if (drive->id_read == 0) | 889 | if (drive->id_read == 0) |
890 | return; | 890 | return; |
891 | 891 | ||
892 | /* | 892 | if (drive->removable) { |
893 | * CompactFlash cards and their brethern look just like hard drives | ||
894 | * to us, but they are removable and don't have a doorlock mechanism. | ||
895 | */ | ||
896 | if (drive->removable && !(drive->is_flash)) { | ||
897 | /* | 893 | /* |
898 | * Removable disks (eg. SYQUEST); ignore 'WD' drives | 894 | * Removable disks (eg. SYQUEST); ignore 'WD' drives |
899 | */ | 895 | */ |
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 8d50df4526a4..c01615dec202 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c | |||
@@ -55,8 +55,8 @@ | |||
55 | #include <asm/io.h> | 55 | #include <asm/io.h> |
56 | #include <asm/bitops.h> | 56 | #include <asm/bitops.h> |
57 | 57 | ||
58 | int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate, | 58 | static int __ide_end_request(ide_drive_t *drive, struct request *rq, |
59 | int nr_sectors) | 59 | int uptodate, int nr_sectors) |
60 | { | 60 | { |
61 | int ret = 1; | 61 | int ret = 1; |
62 | 62 | ||
@@ -91,7 +91,6 @@ int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate, | |||
91 | 91 | ||
92 | return ret; | 92 | return ret; |
93 | } | 93 | } |
94 | EXPORT_SYMBOL(__ide_end_request); | ||
95 | 94 | ||
96 | /** | 95 | /** |
97 | * ide_end_request - complete an IDE I/O | 96 | * ide_end_request - complete an IDE I/O |
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c index af7af958ab3e..b72dde70840a 100644 --- a/drivers/ide/ide-iops.c +++ b/drivers/ide/ide-iops.c | |||
@@ -1243,6 +1243,7 @@ int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout) | |||
1243 | */ | 1243 | */ |
1244 | if (stat == 0xff) | 1244 | if (stat == 0xff) |
1245 | return -ENODEV; | 1245 | return -ENODEV; |
1246 | touch_softlockup_watchdog(); | ||
1246 | } | 1247 | } |
1247 | return -EBUSY; | 1248 | return -EBUSY; |
1248 | } | 1249 | } |
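Note: the ide_wait_not_busy() change above touches the soft-lockup watchdog from inside the polling loop so a long busy-wait is not misreported as a hung CPU. A skeleton of that kind of loop (read_status() is a placeholder for the real register read):

	while (time_before(jiffies, timeout)) {
		u8 stat = read_status();	/* placeholder register read */
		if (!(stat & BUSY_STAT))
			return 0;		/* drive is ready */
		touch_softlockup_watchdog();	/* tell the detector we are making progress */
	}
	return -EBUSY;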
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index e7425546b4b1..427d1c204174 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
@@ -125,45 +125,6 @@ static void ide_disk_init_mult_count(ide_drive_t *drive) | |||
125 | } | 125 | } |
126 | 126 | ||
127 | /** | 127 | /** |
128 | * drive_is_flashcard - check for compact flash | ||
129 | * @drive: drive to check | ||
130 | * | ||
131 | * CompactFlash cards and their brethern pretend to be removable | ||
132 | * hard disks, except: | ||
133 | * (1) they never have a slave unit, and | ||
134 | * (2) they don't have doorlock mechanisms. | ||
135 | * This test catches them, and is invoked elsewhere when setting | ||
136 | * appropriate config bits. | ||
137 | * | ||
138 | * FIXME: This treatment is probably applicable for *all* PCMCIA (PC CARD) | ||
139 | * devices, so in linux 2.3.x we should change this to just treat all | ||
140 | * PCMCIA drives this way, and get rid of the model-name tests below | ||
141 | * (too big of an interface change for 2.4.x). | ||
142 | * At that time, we might also consider parameterizing the timeouts and | ||
143 | * retries, since these are MUCH faster than mechanical drives. -M.Lord | ||
144 | */ | ||
145 | |||
146 | static inline int drive_is_flashcard (ide_drive_t *drive) | ||
147 | { | ||
148 | struct hd_driveid *id = drive->id; | ||
149 | |||
150 | if (drive->removable) { | ||
151 | if (id->config == 0x848a) return 1; /* CompactFlash */ | ||
152 | if (!strncmp(id->model, "KODAK ATA_FLASH", 15) /* Kodak */ | ||
153 | || !strncmp(id->model, "Hitachi CV", 10) /* Hitachi */ | ||
154 | || !strncmp(id->model, "SunDisk SDCFB", 13) /* old SanDisk */ | ||
155 | || !strncmp(id->model, "SanDisk SDCFB", 13) /* SanDisk */ | ||
156 | || !strncmp(id->model, "HAGIWARA HPC", 12) /* Hagiwara */ | ||
157 | || !strncmp(id->model, "LEXAR ATA_FLASH", 15) /* Lexar */ | ||
158 | || !strncmp(id->model, "ATA_FLASH", 9)) /* Simple Tech */ | ||
159 | { | ||
160 | return 1; /* yes, it is a flash memory card */ | ||
161 | } | ||
162 | } | ||
163 | return 0; /* no, it is not a flash memory card */ | ||
164 | } | ||
165 | |||
166 | /** | ||
167 | * do_identify - identify a drive | 128 | * do_identify - identify a drive |
168 | * @drive: drive to identify | 129 | * @drive: drive to identify |
169 | * @cmd: command used | 130 | * @cmd: command used |
@@ -278,13 +239,17 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd) | |||
278 | /* | 239 | /* |
279 | * Not an ATAPI device: looks like a "regular" hard disk | 240 | * Not an ATAPI device: looks like a "regular" hard disk |
280 | */ | 241 | */ |
281 | if (id->config & (1<<7)) | 242 | |
243 | /* | ||
244 | * 0x848a = CompactFlash device | ||
245 | * These are *not* removable in Linux definition of the term | ||
246 | */ | ||
247 | |||
248 | if ((id->config != 0x848a) && (id->config & (1<<7))) | ||
282 | drive->removable = 1; | 249 | drive->removable = 1; |
283 | 250 | ||
284 | if (drive_is_flashcard(drive)) | ||
285 | drive->is_flash = 1; | ||
286 | drive->media = ide_disk; | 251 | drive->media = ide_disk; |
287 | printk("%s DISK drive\n", (drive->is_flash) ? "CFA" : "ATA" ); | 252 | printk("%s DISK drive\n", (id->config == 0x848a) ? "CFA" : "ATA" ); |
288 | QUIRK_LIST(drive); | 253 | QUIRK_LIST(drive); |
289 | return; | 254 | return; |
290 | 255 | ||
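Note: the ide-probe.c change above replaces the model-string table with a single check of ATA IDENTIFY word 0: CompactFlash devices report 0x848a there and, although they set the removable-media bit, are treated as fixed disks. A small stand-in (struct hd_id is a simplified substitute for struct hd_driveid):

struct hd_id {
	unsigned short config;		/* IDENTIFY DEVICE word 0 */
};

static int is_cfa(const struct hd_id *id)
{
	return id->config == 0x848a;	/* CFA signature word */
}

static int is_removable_disk(const struct hd_id *id)
{
	/* bit 7 of word 0 flags removable media, but CFA cards set it too */
	return !is_cfa(id) && (id->config & (1 << 7));
}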
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index afeb02bbb722..b2cc43702f65 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c | |||
@@ -242,7 +242,6 @@ static void init_hwif_data(ide_hwif_t *hwif, unsigned int index) | |||
242 | drive->name[2] = 'a' + (index * MAX_DRIVES) + unit; | 242 | drive->name[2] = 'a' + (index * MAX_DRIVES) + unit; |
243 | drive->max_failures = IDE_DEFAULT_MAX_FAILURES; | 243 | drive->max_failures = IDE_DEFAULT_MAX_FAILURES; |
244 | drive->using_dma = 0; | 244 | drive->using_dma = 0; |
245 | drive->is_flash = 0; | ||
246 | drive->vdma = 0; | 245 | drive->vdma = 0; |
247 | INIT_LIST_HEAD(&drive->list); | 246 | INIT_LIST_HEAD(&drive->list); |
248 | init_completion(&drive->gendev_rel_comp); | 247 | init_completion(&drive->gendev_rel_comp); |
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c index a21b1e11eef4..c743e68c33aa 100644 --- a/drivers/ide/pci/aec62xx.c +++ b/drivers/ide/pci/aec62xx.c | |||
@@ -262,6 +262,21 @@ static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev, const ch | |||
262 | else | 262 | else |
263 | pci_set_drvdata(dev, (void *) aec6xxx_34_base); | 263 | pci_set_drvdata(dev, (void *) aec6xxx_34_base); |
264 | 264 | ||
265 | /* These are necessary to get AEC6280 Macintosh cards to work */ | ||
266 | if ((dev->device == PCI_DEVICE_ID_ARTOP_ATP865) || | ||
267 | (dev->device == PCI_DEVICE_ID_ARTOP_ATP865R)) { | ||
268 | u8 reg49h = 0, reg4ah = 0; | ||
269 | /* Clear reset and test bits. */ | ||
270 | pci_read_config_byte(dev, 0x49, ®49h); | ||
271 | pci_write_config_byte(dev, 0x49, reg49h & ~0x30); | ||
272 | /* Enable chip interrupt output. */ | ||
273 | pci_read_config_byte(dev, 0x4a, ®4ah); | ||
274 | pci_write_config_byte(dev, 0x4a, reg4ah & ~0x01); | ||
275 | /* Enable burst mode. */ | ||
276 | pci_read_config_byte(dev, 0x4a, ®4ah); | ||
277 | pci_write_config_byte(dev, 0x4a, reg4ah | 0x80); | ||
278 | } | ||
279 | |||
265 | return dev->irq; | 280 | return dev->irq; |
266 | } | 281 | } |
267 | 282 | ||
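Note: the ATP865/ATP865R fix above is three read-modify-write cycles on PCI config bytes 0x49 and 0x4a. The same pattern can be factored into a small helper (hypothetical, not part of the driver):

static void pci_rmw_config_byte(struct pci_dev *dev, int where, u8 clear, u8 set)
{
	u8 val = 0;

	pci_read_config_byte(dev, where, &val);
	pci_write_config_byte(dev, where, (val & ~clear) | set);
}

/* e.g. clear the reset/test bits:    pci_rmw_config_byte(dev, 0x49, 0x30, 0);
 *      enable the interrupt output:  pci_rmw_config_byte(dev, 0x4a, 0x01, 0);
 *      enable burst mode:            pci_rmw_config_byte(dev, 0x4a, 0, 0x80);  */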
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c index 7b589d948bf9..940bdd4c5784 100644 --- a/drivers/ide/pci/hpt366.c +++ b/drivers/ide/pci/hpt366.c | |||
@@ -1288,6 +1288,10 @@ static void __devinit hpt37x_clocking(ide_hwif_t *hwif) | |||
1288 | goto init_hpt37X_done; | 1288 | goto init_hpt37X_done; |
1289 | } | 1289 | } |
1290 | } | 1290 | } |
1291 | if (!pci_get_drvdata(dev)) { | ||
1292 | printk("No Clock Stabilization!!!\n"); | ||
1293 | return; | ||
1294 | } | ||
1291 | pll_recal: | 1295 | pll_recal: |
1292 | if (adjust & 1) | 1296 | if (adjust & 1) |
1293 | pll -= (adjust >> 1); | 1297 | pll -= (adjust >> 1); |
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c index 108fda83fea4..38f41b377ff6 100644 --- a/drivers/ide/pci/it821x.c +++ b/drivers/ide/pci/it821x.c | |||
@@ -733,7 +733,7 @@ static void __devinit it8212_disable_raid(struct pci_dev *dev) | |||
733 | 733 | ||
734 | pci_write_config_dword(dev,0x4C, 0x02040204); | 734 | pci_write_config_dword(dev,0x4C, 0x02040204); |
735 | pci_write_config_byte(dev, 0x42, 0x36); | 735 | pci_write_config_byte(dev, 0x42, 0x36); |
736 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0); | 736 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20); |
737 | } | 737 | } |
738 | 738 | ||
739 | static unsigned int __devinit init_chipset_it821x(struct pci_dev *dev, const char *name) | 739 | static unsigned int __devinit init_chipset_it821x(struct pci_dev *dev, const char *name) |
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c index fe06ebb0e5bf..acd63173199b 100644 --- a/drivers/ide/pci/pdc202xx_new.c +++ b/drivers/ide/pci/pdc202xx_new.c | |||
@@ -420,9 +420,6 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = { | |||
420 | .init_hwif = init_hwif_pdc202new, | 420 | .init_hwif = init_hwif_pdc202new, |
421 | .channels = 2, | 421 | .channels = 2, |
422 | .autodma = AUTODMA, | 422 | .autodma = AUTODMA, |
423 | #ifndef CONFIG_PDC202XX_FORCE | ||
424 | .enablebits = {{0x50,0x02,0x02}, {0x50,0x04,0x04}}, | ||
425 | #endif | ||
426 | .bootable = OFF_BOARD, | 423 | .bootable = OFF_BOARD, |
427 | },{ /* 3 */ | 424 | },{ /* 3 */ |
428 | .name = "PDC20271", | 425 | .name = "PDC20271", |
@@ -447,9 +444,6 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = { | |||
447 | .init_hwif = init_hwif_pdc202new, | 444 | .init_hwif = init_hwif_pdc202new, |
448 | .channels = 2, | 445 | .channels = 2, |
449 | .autodma = AUTODMA, | 446 | .autodma = AUTODMA, |
450 | #ifndef CONFIG_PDC202XX_FORCE | ||
451 | .enablebits = {{0x50,0x02,0x02}, {0x50,0x04,0x04}}, | ||
452 | #endif | ||
453 | .bootable = OFF_BOARD, | 447 | .bootable = OFF_BOARD, |
454 | },{ /* 6 */ | 448 | },{ /* 6 */ |
455 | .name = "PDC20277", | 449 | .name = "PDC20277", |
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c index ad9d95817f95..6f8f8645b02c 100644 --- a/drivers/ide/pci/pdc202xx_old.c +++ b/drivers/ide/pci/pdc202xx_old.c | |||
@@ -786,9 +786,6 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = { | |||
786 | .init_dma = init_dma_pdc202xx, | 786 | .init_dma = init_dma_pdc202xx, |
787 | .channels = 2, | 787 | .channels = 2, |
788 | .autodma = AUTODMA, | 788 | .autodma = AUTODMA, |
789 | #ifndef CONFIG_PDC202XX_FORCE | ||
790 | .enablebits = {{0x50,0x02,0x02}, {0x50,0x04,0x04}}, | ||
791 | #endif | ||
792 | .bootable = OFF_BOARD, | 789 | .bootable = OFF_BOARD, |
793 | .extra = 16, | 790 | .extra = 16, |
794 | },{ /* 1 */ | 791 | },{ /* 1 */ |
@@ -799,9 +796,6 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = { | |||
799 | .init_dma = init_dma_pdc202xx, | 796 | .init_dma = init_dma_pdc202xx, |
800 | .channels = 2, | 797 | .channels = 2, |
801 | .autodma = AUTODMA, | 798 | .autodma = AUTODMA, |
802 | #ifndef CONFIG_PDC202XX_FORCE | ||
803 | .enablebits = {{0x50,0x02,0x02}, {0x50,0x04,0x04}}, | ||
804 | #endif | ||
805 | .bootable = OFF_BOARD, | 799 | .bootable = OFF_BOARD, |
806 | .extra = 48, | 800 | .extra = 48, |
807 | .flags = IDEPCI_FLAG_FORCE_PDC, | 801 | .flags = IDEPCI_FLAG_FORCE_PDC, |
@@ -813,9 +807,6 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = { | |||
813 | .init_dma = init_dma_pdc202xx, | 807 | .init_dma = init_dma_pdc202xx, |
814 | .channels = 2, | 808 | .channels = 2, |
815 | .autodma = AUTODMA, | 809 | .autodma = AUTODMA, |
816 | #ifndef CONFIG_PDC202XX_FORCE | ||
817 | .enablebits = {{0x50,0x02,0x02}, {0x50,0x04,0x04}}, | ||
818 | #endif | ||
819 | .bootable = OFF_BOARD, | 810 | .bootable = OFF_BOARD, |
820 | .extra = 48, | 811 | .extra = 48, |
821 | },{ /* 3 */ | 812 | },{ /* 3 */ |
@@ -826,9 +817,6 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = { | |||
826 | .init_dma = init_dma_pdc202xx, | 817 | .init_dma = init_dma_pdc202xx, |
827 | .channels = 2, | 818 | .channels = 2, |
828 | .autodma = AUTODMA, | 819 | .autodma = AUTODMA, |
829 | #ifndef CONFIG_PDC202XX_FORCE | ||
830 | .enablebits = {{0x50,0x02,0x02}, {0x50,0x04,0x04}}, | ||
831 | #endif | ||
832 | .bootable = OFF_BOARD, | 820 | .bootable = OFF_BOARD, |
833 | .extra = 48, | 821 | .extra = 48, |
834 | .flags = IDEPCI_FLAG_FORCE_PDC, | 822 | .flags = IDEPCI_FLAG_FORCE_PDC, |
@@ -840,9 +828,6 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = { | |||
840 | .init_dma = init_dma_pdc202xx, | 828 | .init_dma = init_dma_pdc202xx, |
841 | .channels = 2, | 829 | .channels = 2, |
842 | .autodma = AUTODMA, | 830 | .autodma = AUTODMA, |
843 | #ifndef CONFIG_PDC202XX_FORCE | ||
844 | .enablebits = {{0x50,0x02,0x02}, {0x50,0x04,0x04}}, | ||
845 | #endif | ||
846 | .bootable = OFF_BOARD, | 831 | .bootable = OFF_BOARD, |
847 | .extra = 48, | 832 | .extra = 48, |
848 | } | 833 | } |
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c index b3e77df63cef..e9b83e1a3028 100644 --- a/drivers/ide/pci/piix.c +++ b/drivers/ide/pci/piix.c | |||
@@ -135,6 +135,7 @@ static u8 piix_ratemask (ide_drive_t *drive) | |||
135 | case PCI_DEVICE_ID_INTEL_ICH6_19: | 135 | case PCI_DEVICE_ID_INTEL_ICH6_19: |
136 | case PCI_DEVICE_ID_INTEL_ICH7_21: | 136 | case PCI_DEVICE_ID_INTEL_ICH7_21: |
137 | case PCI_DEVICE_ID_INTEL_ESB2_18: | 137 | case PCI_DEVICE_ID_INTEL_ESB2_18: |
138 | case PCI_DEVICE_ID_INTEL_ICH8_6: | ||
138 | mode = 3; | 139 | mode = 3; |
139 | break; | 140 | break; |
140 | /* UDMA 66 capable */ | 141 | /* UDMA 66 capable */ |
@@ -449,6 +450,7 @@ static unsigned int __devinit init_chipset_piix (struct pci_dev *dev, const char | |||
449 | case PCI_DEVICE_ID_INTEL_ICH6_19: | 450 | case PCI_DEVICE_ID_INTEL_ICH6_19: |
450 | case PCI_DEVICE_ID_INTEL_ICH7_21: | 451 | case PCI_DEVICE_ID_INTEL_ICH7_21: |
451 | case PCI_DEVICE_ID_INTEL_ESB2_18: | 452 | case PCI_DEVICE_ID_INTEL_ESB2_18: |
453 | case PCI_DEVICE_ID_INTEL_ICH8_6: | ||
452 | { | 454 | { |
453 | unsigned int extra = 0; | 455 | unsigned int extra = 0; |
454 | pci_read_config_dword(dev, 0x54, &extra); | 456 | pci_read_config_dword(dev, 0x54, &extra); |
@@ -575,6 +577,7 @@ static ide_pci_device_t piix_pci_info[] __devinitdata = { | |||
575 | /* 21 */ DECLARE_PIIX_DEV("ICH7"), | 577 | /* 21 */ DECLARE_PIIX_DEV("ICH7"), |
576 | /* 22 */ DECLARE_PIIX_DEV("ICH4"), | 578 | /* 22 */ DECLARE_PIIX_DEV("ICH4"), |
577 | /* 23 */ DECLARE_PIIX_DEV("ESB2"), | 579 | /* 23 */ DECLARE_PIIX_DEV("ESB2"), |
580 | /* 24 */ DECLARE_PIIX_DEV("ICH8M"), | ||
578 | }; | 581 | }; |
579 | 582 | ||
580 | /** | 583 | /** |
@@ -651,6 +654,7 @@ static struct pci_device_id piix_pci_tbl[] = { | |||
651 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 21}, | 654 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 21}, |
652 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 22}, | 655 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 22}, |
653 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_18, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 23}, | 656 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_18, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 23}, |
657 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 24}, | ||
654 | { 0, }, | 658 | { 0, }, |
655 | }; | 659 | }; |
656 | MODULE_DEVICE_TABLE(pci, piix_pci_tbl); | 660 | MODULE_DEVICE_TABLE(pci, piix_pci_tbl); |
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h index 1b85ce166af8..11fe537e2f6f 100644 --- a/drivers/isdn/hisax/hisax.h +++ b/drivers/isdn/hisax/hisax.h | |||
@@ -216,7 +216,7 @@ struct Layer1 { | |||
216 | #define GROUP_TEI 127 | 216 | #define GROUP_TEI 127 |
217 | #define TEI_SAPI 63 | 217 | #define TEI_SAPI 63 |
218 | #define CTRL_SAPI 0 | 218 | #define CTRL_SAPI 0 |
219 | #define PACKET_NOACK 250 | 219 | #define PACKET_NOACK 7 |
220 | 220 | ||
221 | /* Layer2 Flags */ | 221 | /* Layer2 Flags */ |
222 | 222 | ||
diff --git a/drivers/isdn/sc/ioctl.c b/drivers/isdn/sc/ioctl.c index 3314a5a19854..94c9afb7017c 100644 --- a/drivers/isdn/sc/ioctl.c +++ b/drivers/isdn/sc/ioctl.c | |||
@@ -71,14 +71,14 @@ int sc_ioctl(int card, scs_ioctl *data) | |||
71 | /* | 71 | /* |
72 | * Get the SRec from user space | 72 | * Get the SRec from user space |
73 | */ | 73 | */ |
74 | if (copy_from_user(srec, data->dataptr, sizeof(srec))) { | 74 | if (copy_from_user(srec, data->dataptr, SCIOC_SRECSIZE)) { |
75 | kfree(rcvmsg); | 75 | kfree(rcvmsg); |
76 | kfree(srec); | 76 | kfree(srec); |
77 | return -EFAULT; | 77 | return -EFAULT; |
78 | } | 78 | } |
79 | 79 | ||
80 | status = send_and_receive(card, CMPID, cmReqType2, cmReqClass0, cmReqLoadProc, | 80 | status = send_and_receive(card, CMPID, cmReqType2, cmReqClass0, cmReqLoadProc, |
81 | 0, sizeof(srec), srec, rcvmsg, SAR_TIMEOUT); | 81 | 0, SCIOC_SRECSIZE, srec, rcvmsg, SAR_TIMEOUT); |
82 | kfree(rcvmsg); | 82 | kfree(rcvmsg); |
83 | kfree(srec); | 83 | kfree(srec); |
84 | 84 | ||
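Note: the sc/ioctl.c fix above addresses a sizeof-of-a-pointer bug: srec is allocated with kmalloc(SCIOC_SRECSIZE, ...), so sizeof(srec) yields the size of the pointer (4 or 8 bytes), not the size of the buffer. A minimal illustration with made-up names:

#define BUF_SIZE 49

static int copy_record(char *dst, const void __user *src)
{
	/* WRONG: sizeof(dst) is sizeof(char *), i.e. 4 or 8 */
	/* if (copy_from_user(dst, src, sizeof(dst))) ... */

	/* RIGHT: pass the real allocation size */
	if (copy_from_user(dst, src, BUF_SIZE))
		return -EFAULT;
	return 0;
}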
diff --git a/drivers/md/md.c b/drivers/md/md.c index 653d4dcbee23..d05e3125d298 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -1024,7 +1024,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) | |||
1024 | rdev-> sb_size = (rdev->sb_size | bmask)+1; | 1024 | rdev-> sb_size = (rdev->sb_size | bmask)+1; |
1025 | 1025 | ||
1026 | if (refdev == 0) | 1026 | if (refdev == 0) |
1027 | return 1; | 1027 | ret = 1; |
1028 | else { | 1028 | else { |
1029 | __u64 ev1, ev2; | 1029 | __u64 ev1, ev2; |
1030 | struct mdp_superblock_1 *refsb = | 1030 | struct mdp_superblock_1 *refsb = |
@@ -1044,7 +1044,9 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) | |||
1044 | ev2 = le64_to_cpu(refsb->events); | 1044 | ev2 = le64_to_cpu(refsb->events); |
1045 | 1045 | ||
1046 | if (ev1 > ev2) | 1046 | if (ev1 > ev2) |
1047 | return 1; | 1047 | ret = 1; |
1048 | else | ||
1049 | ret = 0; | ||
1048 | } | 1050 | } |
1049 | if (minor_version) | 1051 | if (minor_version) |
1050 | rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2; | 1052 | rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2; |
@@ -1058,7 +1060,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) | |||
1058 | 1060 | ||
1059 | if (le32_to_cpu(sb->size) > rdev->size*2) | 1061 | if (le32_to_cpu(sb->size) > rdev->size*2) |
1060 | return -EINVAL; | 1062 | return -EINVAL; |
1061 | return 0; | 1063 | return ret; |
1062 | } | 1064 | } |
1063 | 1065 | ||
1064 | static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) | 1066 | static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) |
@@ -1081,7 +1083,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1081 | mddev->size = le64_to_cpu(sb->size)/2; | 1083 | mddev->size = le64_to_cpu(sb->size)/2; |
1082 | mddev->events = le64_to_cpu(sb->events); | 1084 | mddev->events = le64_to_cpu(sb->events); |
1083 | mddev->bitmap_offset = 0; | 1085 | mddev->bitmap_offset = 0; |
1084 | mddev->default_bitmap_offset = 1024; | 1086 | mddev->default_bitmap_offset = 1024 >> 9; |
1085 | 1087 | ||
1086 | mddev->recovery_cp = le64_to_cpu(sb->resync_offset); | 1088 | mddev->recovery_cp = le64_to_cpu(sb->resync_offset); |
1087 | memcpy(mddev->uuid, sb->set_uuid, 16); | 1089 | memcpy(mddev->uuid, sb->set_uuid, 16); |
@@ -1162,7 +1164,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) | |||
1162 | sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors); | 1164 | sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors); |
1163 | 1165 | ||
1164 | sb->raid_disks = cpu_to_le32(mddev->raid_disks); | 1166 | sb->raid_disks = cpu_to_le32(mddev->raid_disks); |
1165 | sb->size = cpu_to_le64(mddev->size); | 1167 | sb->size = cpu_to_le64(mddev->size<<1); |
1166 | 1168 | ||
1167 | if (mddev->bitmap && mddev->bitmap_file == NULL) { | 1169 | if (mddev->bitmap && mddev->bitmap_file == NULL) { |
1168 | sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset); | 1170 | sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset); |
@@ -2942,6 +2944,8 @@ static int get_array_info(mddev_t * mddev, void __user * arg) | |||
2942 | info.ctime = mddev->ctime; | 2944 | info.ctime = mddev->ctime; |
2943 | info.level = mddev->level; | 2945 | info.level = mddev->level; |
2944 | info.size = mddev->size; | 2946 | info.size = mddev->size; |
2947 | if (info.size != mddev->size) /* overflow */ | ||
2948 | info.size = -1; | ||
2945 | info.nr_disks = nr; | 2949 | info.nr_disks = nr; |
2946 | info.raid_disks = mddev->raid_disks; | 2950 | info.raid_disks = mddev->raid_disks; |
2947 | info.md_minor = mddev->md_minor; | 2951 | info.md_minor = mddev->md_minor; |
@@ -3523,7 +3527,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) | |||
3523 | ) | 3527 | ) |
3524 | return -EINVAL; | 3528 | return -EINVAL; |
3525 | /* Check there is only one change */ | 3529 | /* Check there is only one change */ |
3526 | if (mddev->size != info->size) cnt++; | 3530 | if (info->size >= 0 && mddev->size != info->size) cnt++; |
3527 | if (mddev->raid_disks != info->raid_disks) cnt++; | 3531 | if (mddev->raid_disks != info->raid_disks) cnt++; |
3528 | if (mddev->layout != info->layout) cnt++; | 3532 | if (mddev->layout != info->layout) cnt++; |
3529 | if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++; | 3533 | if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++; |
@@ -3540,7 +3544,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) | |||
3540 | else | 3544 | else |
3541 | return mddev->pers->reconfig(mddev, info->layout, -1); | 3545 | return mddev->pers->reconfig(mddev, info->layout, -1); |
3542 | } | 3546 | } |
3543 | if (mddev->size != info->size) | 3547 | if (info->size >= 0 && mddev->size != info->size) |
3544 | rv = update_size(mddev, info->size); | 3548 | rv = update_size(mddev, info->size); |
3545 | 3549 | ||
3546 | if (mddev->raid_disks != info->raid_disks) | 3550 | if (mddev->raid_disks != info->raid_disks) |
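Note: the get_array_info()/update_array_info() changes above guard against truncation: the ioctl's size field is a narrower int than mddev->size, so the value is copied and compared back, and -1 serves as a "too large to report" sentinel that update_array_info() then ignores. The same idea in isolation (types are simplified stand-ins):

static int report_size_kb(unsigned long long size_kb)
{
	int out = size_kb;		/* narrower ioctl field */

	if (out != size_kb)		/* value did not survive the copy */
		out = -1;		/* sentinel: size too large to report */
	return out;
}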
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index d03f99cf4b7d..678f4dbbea1d 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -372,7 +372,7 @@ out_free_conf: | |||
372 | kfree(conf); | 372 | kfree(conf); |
373 | mddev->private = NULL; | 373 | mddev->private = NULL; |
374 | out: | 374 | out: |
375 | return 1; | 375 | return -ENOMEM; |
376 | } | 376 | } |
377 | 377 | ||
378 | static int raid0_stop (mddev_t *mddev) | 378 | static int raid0_stop (mddev_t *mddev) |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 9130d051b474..ab90a6d12020 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -565,6 +565,8 @@ rb_out: | |||
565 | 565 | ||
566 | if (disk >= 0 && (rdev=rcu_dereference(conf->mirrors[disk].rdev))!= NULL) | 566 | if (disk >= 0 && (rdev=rcu_dereference(conf->mirrors[disk].rdev))!= NULL) |
567 | atomic_inc(&conf->mirrors[disk].rdev->nr_pending); | 567 | atomic_inc(&conf->mirrors[disk].rdev->nr_pending); |
568 | else | ||
569 | disk = -1; | ||
568 | rcu_read_unlock(); | 570 | rcu_read_unlock(); |
569 | 571 | ||
570 | return disk; | 572 | return disk; |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 25976bfb6f9c..2dba305daf3c 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -350,7 +350,8 @@ static void shrink_stripes(raid5_conf_t *conf) | |||
350 | while (drop_one_stripe(conf)) | 350 | while (drop_one_stripe(conf)) |
351 | ; | 351 | ; |
352 | 352 | ||
353 | kmem_cache_destroy(conf->slab_cache); | 353 | if (conf->slab_cache) |
354 | kmem_cache_destroy(conf->slab_cache); | ||
354 | conf->slab_cache = NULL; | 355 | conf->slab_cache = NULL; |
355 | } | 356 | } |
356 | 357 | ||
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c index ed2abb2e2e2d..cd477ebf2ee4 100644 --- a/drivers/md/raid6main.c +++ b/drivers/md/raid6main.c | |||
@@ -366,7 +366,8 @@ static void shrink_stripes(raid6_conf_t *conf) | |||
366 | while (drop_one_stripe(conf)) | 366 | while (drop_one_stripe(conf)) |
367 | ; | 367 | ; |
368 | 368 | ||
369 | kmem_cache_destroy(conf->slab_cache); | 369 | if (conf->slab_cache) |
370 | kmem_cache_destroy(conf->slab_cache); | ||
370 | conf->slab_cache = NULL; | 371 | conf->slab_cache = NULL; |
371 | } | 372 | } |
372 | 373 | ||
diff --git a/drivers/message/i2o/core.h b/drivers/message/i2o/core.h index 90628562851e..184974cc734d 100644 --- a/drivers/message/i2o/core.h +++ b/drivers/message/i2o/core.h | |||
@@ -60,4 +60,7 @@ extern void i2o_iop_remove(struct i2o_controller *); | |||
60 | #define I2O_IN_PORT 0x40 | 60 | #define I2O_IN_PORT 0x40 |
61 | #define I2O_OUT_PORT 0x44 | 61 | #define I2O_OUT_PORT 0x44 |
62 | 62 | ||
63 | /* Motorola/Freescale specific register offset */ | ||
64 | #define I2O_MOTOROLA_PORT_OFFSET 0x10400 | ||
65 | |||
63 | #define I2O_IRQ_OUTBOUND_POST 0x00000008 | 66 | #define I2O_IRQ_OUTBOUND_POST 0x00000008 |
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c index d698d7709c31..4f1515cae5dc 100644 --- a/drivers/message/i2o/pci.c +++ b/drivers/message/i2o/pci.c | |||
@@ -88,6 +88,11 @@ static int __devinit i2o_pci_alloc(struct i2o_controller *c) | |||
88 | struct device *dev = &pdev->dev; | 88 | struct device *dev = &pdev->dev; |
89 | int i; | 89 | int i; |
90 | 90 | ||
91 | if (pci_request_regions(pdev, OSM_DESCRIPTION)) { | ||
92 | printk(KERN_ERR "%s: device already claimed\n", c->name); | ||
93 | return -ENODEV; | ||
94 | } | ||
95 | |||
91 | for (i = 0; i < 6; i++) { | 96 | for (i = 0; i < 6; i++) { |
92 | /* Skip I/O spaces */ | 97 | /* Skip I/O spaces */ |
93 | if (!(pci_resource_flags(pdev, i) & IORESOURCE_IO)) { | 98 | if (!(pci_resource_flags(pdev, i) & IORESOURCE_IO)) { |
@@ -163,6 +168,24 @@ static int __devinit i2o_pci_alloc(struct i2o_controller *c) | |||
163 | c->in_port = c->base.virt + I2O_IN_PORT; | 168 | c->in_port = c->base.virt + I2O_IN_PORT; |
164 | c->out_port = c->base.virt + I2O_OUT_PORT; | 169 | c->out_port = c->base.virt + I2O_OUT_PORT; |
165 | 170 | ||
171 | /* Motorola/Freescale chip does not follow spec */ | ||
172 | if (pdev->vendor == PCI_VENDOR_ID_MOTOROLA && pdev->device == 0x18c0) { | ||
173 | /* Check if CPU is enabled */ | ||
174 | if (be32_to_cpu(readl(c->base.virt + 0x10000)) & 0x10000000) { | ||
175 | printk(KERN_INFO "%s: MPC82XX needs CPU running to " | ||
176 | "service I2O.\n", c->name); | ||
177 | i2o_pci_free(c); | ||
178 | return -ENODEV; | ||
179 | } else { | ||
180 | c->irq_status += I2O_MOTOROLA_PORT_OFFSET; | ||
181 | c->irq_mask += I2O_MOTOROLA_PORT_OFFSET; | ||
182 | c->in_port += I2O_MOTOROLA_PORT_OFFSET; | ||
183 | c->out_port += I2O_MOTOROLA_PORT_OFFSET; | ||
184 | printk(KERN_INFO "%s: MPC82XX workarounds activated.\n", | ||
185 | c->name); | ||
186 | } | ||
187 | } | ||
188 | |||
166 | if (i2o_dma_alloc(dev, &c->status, 8, GFP_KERNEL)) { | 189 | if (i2o_dma_alloc(dev, &c->status, 8, GFP_KERNEL)) { |
167 | i2o_pci_free(c); | 190 | i2o_pci_free(c); |
168 | return -ENOMEM; | 191 | return -ENOMEM; |
@@ -298,7 +321,7 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev, | |||
298 | struct i2o_controller *c; | 321 | struct i2o_controller *c; |
299 | int rc; | 322 | int rc; |
300 | struct pci_dev *i960 = NULL; | 323 | struct pci_dev *i960 = NULL; |
301 | int pci_dev_busy = 0; | 324 | int enabled = pdev->is_enabled; |
302 | 325 | ||
303 | printk(KERN_INFO "i2o: Checking for PCI I2O controllers...\n"); | 326 | printk(KERN_INFO "i2o: Checking for PCI I2O controllers...\n"); |
304 | 327 | ||
@@ -308,16 +331,12 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev, | |||
308 | return -ENODEV; | 331 | return -ENODEV; |
309 | } | 332 | } |
310 | 333 | ||
311 | if ((rc = pci_enable_device(pdev))) { | 334 | if (!enabled) |
312 | printk(KERN_WARNING "i2o: couldn't enable device %s\n", | 335 | if ((rc = pci_enable_device(pdev))) { |
313 | pci_name(pdev)); | 336 | printk(KERN_WARNING "i2o: couldn't enable device %s\n", |
314 | return rc; | 337 | pci_name(pdev)); |
315 | } | 338 | return rc; |
316 | 339 | } | |
317 | if (pci_request_regions(pdev, OSM_DESCRIPTION)) { | ||
318 | printk(KERN_ERR "i2o: device already claimed\n"); | ||
319 | return -ENODEV; | ||
320 | } | ||
321 | 340 | ||
322 | if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { | 341 | if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { |
323 | printk(KERN_WARNING "i2o: no suitable DMA found for %s\n", | 342 | printk(KERN_WARNING "i2o: no suitable DMA found for %s\n", |
@@ -395,9 +414,7 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev, | |||
395 | 414 | ||
396 | if ((rc = i2o_pci_alloc(c))) { | 415 | if ((rc = i2o_pci_alloc(c))) { |
397 | printk(KERN_ERR "%s: DMA / IO allocation for I2O controller " | 416 | printk(KERN_ERR "%s: DMA / IO allocation for I2O controller " |
398 | " failed\n", c->name); | 417 | "failed\n", c->name); |
399 | if (rc == -ENODEV) | ||
400 | pci_dev_busy = 1; | ||
401 | goto free_controller; | 418 | goto free_controller; |
402 | } | 419 | } |
403 | 420 | ||
@@ -425,7 +442,7 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev, | |||
425 | i2o_iop_free(c); | 442 | i2o_iop_free(c); |
426 | 443 | ||
427 | disable: | 444 | disable: |
428 | if (!pci_dev_busy) | 445 | if (!enabled) |
429 | pci_disable_device(pdev); | 446 | pci_disable_device(pdev); |
430 | 447 | ||
431 | return rc; | 448 | return rc; |
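Note: the i2o_pci_probe() rework above also adopts an "only undo what you did" rule: it records whether the device was already enabled and only calls pci_disable_device() on the error path if this probe enabled it. The shape of that pattern (do_setup() is a placeholder for the real probe work; is_enabled is the field this kernel era exposes):

static int example_probe(struct pci_dev *pdev)
{
	int enabled = pdev->is_enabled;
	int rc = 0;

	if (!enabled) {
		rc = pci_enable_device(pdev);
		if (rc)
			return rc;
	}

	rc = do_setup(pdev);			/* placeholder */
	if (rc && !enabled)
		pci_disable_device(pdev);	/* only undo our own enable */
	return rc;
}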
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c index 701620b6baed..8b3784e2de89 100644 --- a/drivers/mtd/maps/dc21285.c +++ b/drivers/mtd/maps/dc21285.c | |||
@@ -110,8 +110,9 @@ static void dc21285_copy_to_32(struct map_info *map, unsigned long to, const voi | |||
110 | { | 110 | { |
111 | while (len > 0) { | 111 | while (len > 0) { |
112 | map_word d; | 112 | map_word d; |
113 | d.x[0] = *((uint32_t*)from)++; | 113 | d.x[0] = *((uint32_t*)from); |
114 | dc21285_write32(map, d, to); | 114 | dc21285_write32(map, d, to); |
115 | from += 4; | ||
115 | to += 4; | 116 | to += 4; |
116 | len -= 4; | 117 | len -= 4; |
117 | } | 118 | } |
@@ -121,8 +122,9 @@ static void dc21285_copy_to_16(struct map_info *map, unsigned long to, const voi | |||
121 | { | 122 | { |
122 | while (len > 0) { | 123 | while (len > 0) { |
123 | map_word d; | 124 | map_word d; |
124 | d.x[0] = *((uint16_t*)from)++; | 125 | d.x[0] = *((uint16_t*)from); |
125 | dc21285_write16(map, d, to); | 126 | dc21285_write16(map, d, to); |
127 | from += 2; | ||
126 | to += 2; | 128 | to += 2; |
127 | len -= 2; | 129 | len -= 2; |
128 | } | 130 | } |
@@ -131,8 +133,9 @@ static void dc21285_copy_to_16(struct map_info *map, unsigned long to, const voi | |||
131 | static void dc21285_copy_to_8(struct map_info *map, unsigned long to, const void *from, ssize_t len) | 133 | static void dc21285_copy_to_8(struct map_info *map, unsigned long to, const void *from, ssize_t len) |
132 | { | 134 | { |
133 | map_word d; | 135 | map_word d; |
134 | d.x[0] = *((uint8_t*)from)++; | 136 | d.x[0] = *((uint8_t*)from); |
135 | dc21285_write8(map, d, to); | 137 | dc21285_write8(map, d, to); |
138 | from++; | ||
136 | to++; | 139 | to++; |
137 | len--; | 140 | len--; |
138 | } | 141 | } |
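Note: the dc21285 change above removes the construct *((uint32_t *)from)++, which increments the result of a cast; a cast is not an lvalue in ISO C, and newer GCC releases reject the old extension. The portable equivalent reads the value first and advances the cursor separately, for example:

static uint32_t next_word(const void **cursor)
{
	uint32_t d = *(const uint32_t *)*cursor;		/* read the word */

	*cursor = (const char *)*cursor + sizeof(uint32_t);	/* then advance */
	return d;
}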
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index 7488ee7f7caf..7f47124f118d 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c | |||
@@ -753,9 +753,11 @@ enum tx_desc_status { | |||
753 | enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 }; | 753 | enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 }; |
754 | 754 | ||
755 | struct vortex_extra_stats { | 755 | struct vortex_extra_stats { |
756 | unsigned long tx_deferred; | 756 | unsigned long tx_deferred; |
757 | unsigned long tx_multiple_collisions; | 757 | unsigned long tx_max_collisions; |
758 | unsigned long rx_bad_ssd; | 758 | unsigned long tx_multiple_collisions; |
759 | unsigned long tx_single_collisions; | ||
760 | unsigned long rx_bad_ssd; | ||
759 | }; | 761 | }; |
760 | 762 | ||
761 | struct vortex_private { | 763 | struct vortex_private { |
@@ -863,12 +865,14 @@ static struct { | |||
863 | const char str[ETH_GSTRING_LEN]; | 865 | const char str[ETH_GSTRING_LEN]; |
864 | } ethtool_stats_keys[] = { | 866 | } ethtool_stats_keys[] = { |
865 | { "tx_deferred" }, | 867 | { "tx_deferred" }, |
868 | { "tx_max_collisions" }, | ||
866 | { "tx_multiple_collisions" }, | 869 | { "tx_multiple_collisions" }, |
870 | { "tx_single_collisions" }, | ||
867 | { "rx_bad_ssd" }, | 871 | { "rx_bad_ssd" }, |
868 | }; | 872 | }; |
869 | 873 | ||
870 | /* number of ETHTOOL_GSTATS u64's */ | 874 | /* number of ETHTOOL_GSTATS u64's */ |
871 | #define VORTEX_NUM_STATS 3 | 875 | #define VORTEX_NUM_STATS 5 |
872 | 876 | ||
873 | static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, | 877 | static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, |
874 | int chip_idx, int card_idx); | 878 | int chip_idx, int card_idx); |
@@ -2108,9 +2112,12 @@ vortex_error(struct net_device *dev, int status) | |||
2108 | iowrite8(0, ioaddr + TxStatus); | 2112 | iowrite8(0, ioaddr + TxStatus); |
2109 | if (tx_status & 0x30) { /* txJabber or txUnderrun */ | 2113 | if (tx_status & 0x30) { /* txJabber or txUnderrun */ |
2110 | do_tx_reset = 1; | 2114 | do_tx_reset = 1; |
2111 | } else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) { /* maxCollisions */ | 2115 | } else if (tx_status & 0x08) { /* maxCollisions */ |
2112 | do_tx_reset = 1; | 2116 | vp->xstats.tx_max_collisions++; |
2113 | reset_mask = 0x0108; /* Reset interface logic, but not download logic */ | 2117 | if (vp->drv_flags & MAX_COLLISION_RESET) { |
2118 | do_tx_reset = 1; | ||
2119 | reset_mask = 0x0108; /* Reset interface logic, but not download logic */ | ||
2120 | } | ||
2114 | } else { /* Merely re-enable the transmitter. */ | 2121 | } else { /* Merely re-enable the transmitter. */ |
2115 | iowrite16(TxEnable, ioaddr + EL3_CMD); | 2122 | iowrite16(TxEnable, ioaddr + EL3_CMD); |
2116 | } | 2123 | } |
@@ -2926,7 +2933,6 @@ static void update_stats(void __iomem *ioaddr, struct net_device *dev) | |||
2926 | EL3WINDOW(6); | 2933 | EL3WINDOW(6); |
2927 | vp->stats.tx_carrier_errors += ioread8(ioaddr + 0); | 2934 | vp->stats.tx_carrier_errors += ioread8(ioaddr + 0); |
2928 | vp->stats.tx_heartbeat_errors += ioread8(ioaddr + 1); | 2935 | vp->stats.tx_heartbeat_errors += ioread8(ioaddr + 1); |
2929 | vp->stats.collisions += ioread8(ioaddr + 3); | ||
2930 | vp->stats.tx_window_errors += ioread8(ioaddr + 4); | 2936 | vp->stats.tx_window_errors += ioread8(ioaddr + 4); |
2931 | vp->stats.rx_fifo_errors += ioread8(ioaddr + 5); | 2937 | vp->stats.rx_fifo_errors += ioread8(ioaddr + 5); |
2932 | vp->stats.tx_packets += ioread8(ioaddr + 6); | 2938 | vp->stats.tx_packets += ioread8(ioaddr + 6); |
@@ -2939,10 +2945,15 @@ static void update_stats(void __iomem *ioaddr, struct net_device *dev) | |||
2939 | vp->stats.tx_bytes += ioread16(ioaddr + 12); | 2945 | vp->stats.tx_bytes += ioread16(ioaddr + 12); |
2940 | /* Extra stats for get_ethtool_stats() */ | 2946 | /* Extra stats for get_ethtool_stats() */ |
2941 | vp->xstats.tx_multiple_collisions += ioread8(ioaddr + 2); | 2947 | vp->xstats.tx_multiple_collisions += ioread8(ioaddr + 2); |
2948 | vp->xstats.tx_single_collisions += ioread8(ioaddr + 3); | ||
2942 | vp->xstats.tx_deferred += ioread8(ioaddr + 8); | 2949 | vp->xstats.tx_deferred += ioread8(ioaddr + 8); |
2943 | EL3WINDOW(4); | 2950 | EL3WINDOW(4); |
2944 | vp->xstats.rx_bad_ssd += ioread8(ioaddr + 12); | 2951 | vp->xstats.rx_bad_ssd += ioread8(ioaddr + 12); |
2945 | 2952 | ||
2953 | vp->stats.collisions = vp->xstats.tx_multiple_collisions | ||
2954 | + vp->xstats.tx_single_collisions | ||
2955 | + vp->xstats.tx_max_collisions; | ||
2956 | |||
2946 | { | 2957 | { |
2947 | u8 up = ioread8(ioaddr + 13); | 2958 | u8 up = ioread8(ioaddr + 13); |
2948 | vp->stats.rx_bytes += (up & 0x0f) << 16; | 2959 | vp->stats.rx_bytes += (up & 0x0f) << 16; |
@@ -3036,8 +3047,10 @@ static void vortex_get_ethtool_stats(struct net_device *dev, | |||
3036 | spin_unlock_irqrestore(&vp->lock, flags); | 3047 | spin_unlock_irqrestore(&vp->lock, flags); |
3037 | 3048 | ||
3038 | data[0] = vp->xstats.tx_deferred; | 3049 | data[0] = vp->xstats.tx_deferred; |
3039 | data[1] = vp->xstats.tx_multiple_collisions; | 3050 | data[1] = vp->xstats.tx_max_collisions; |
3040 | data[2] = vp->xstats.rx_bad_ssd; | 3051 | data[2] = vp->xstats.tx_multiple_collisions; |
3052 | data[3] = vp->xstats.tx_single_collisions; | ||
3053 | data[4] = vp->xstats.rx_bad_ssd; | ||
3041 | } | 3054 | } |
3042 | 3055 | ||
3043 | 3056 | ||
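Note: the 3c59x changes above add two collision counters, and the ethtool strings table, VORTEX_NUM_STATS, and the data[] array filled by vortex_get_ethtool_stats() all have to stay index-for-index in sync. One way to keep that alignment explicit (illustrative only, not how the driver is actually written):

enum { ST_TX_DEFERRED, ST_TX_MAX_COLL, ST_TX_MULTI_COLL,
       ST_TX_SINGLE_COLL, ST_RX_BAD_SSD, ST_COUNT };

static const char stat_keys[ST_COUNT][ETH_GSTRING_LEN] = {
	[ST_TX_DEFERRED]    = "tx_deferred",
	[ST_TX_MAX_COLL]    = "tx_max_collisions",
	[ST_TX_MULTI_COLL]  = "tx_multiple_collisions",
	[ST_TX_SINGLE_COLL] = "tx_single_collisions",
	[ST_RX_BAD_SSD]     = "rx_bad_ssd",
};
/* ...and get_ethtool_stats() fills data[ST_TX_DEFERRED], data[ST_TX_MAX_COLL], ... */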
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig index f605dea57224..f63c387976cf 100644 --- a/drivers/parport/Kconfig +++ b/drivers/parport/Kconfig | |||
@@ -90,6 +90,15 @@ config PARPORT_ARC | |||
90 | depends on ARM && PARPORT | 90 | depends on ARM && PARPORT |
91 | select PARPORT_NOT_PC | 91 | select PARPORT_NOT_PC |
92 | 92 | ||
93 | config PARPORT_IP32 | ||
94 | tristate "SGI IP32 builtin port (EXPERIMENTAL)" | ||
95 | depends on SGI_IP32 && PARPORT && EXPERIMENTAL | ||
96 | select PARPORT_NOT_PC | ||
97 | help | ||
98 | Say Y here if you need support for the parallel port on | ||
99 | SGI O2 machines. This code is also available as a module (say M), | ||
100 | called parport_ip32. If in doubt, saying N is the safe plan. | ||
101 | |||
93 | config PARPORT_AMIGA | 102 | config PARPORT_AMIGA |
94 | tristate "Amiga builtin port" | 103 | tristate "Amiga builtin port" |
95 | depends on AMIGA && PARPORT | 104 | depends on AMIGA && PARPORT |
diff --git a/drivers/parport/Makefile b/drivers/parport/Makefile index 5372212bb9d9..a19de35f8de2 100644 --- a/drivers/parport/Makefile +++ b/drivers/parport/Makefile | |||
@@ -17,3 +17,4 @@ obj-$(CONFIG_PARPORT_MFC3) += parport_mfc3.o | |||
17 | obj-$(CONFIG_PARPORT_ATARI) += parport_atari.o | 17 | obj-$(CONFIG_PARPORT_ATARI) += parport_atari.o |
18 | obj-$(CONFIG_PARPORT_SUNBPP) += parport_sunbpp.o | 18 | obj-$(CONFIG_PARPORT_SUNBPP) += parport_sunbpp.o |
19 | obj-$(CONFIG_PARPORT_GSC) += parport_gsc.o | 19 | obj-$(CONFIG_PARPORT_GSC) += parport_gsc.o |
20 | obj-$(CONFIG_PARPORT_IP32) += parport_ip32.o | ||
diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c index 5b887ba5aaf9..690b239ad3a7 100644 --- a/drivers/parport/ieee1284.c +++ b/drivers/parport/ieee1284.c | |||
@@ -61,10 +61,10 @@ static void timeout_waiting_on_port (unsigned long cookie) | |||
61 | * set to zero, it returns immediately. | 61 | * set to zero, it returns immediately. |
62 | * | 62 | * |
63 | * If an interrupt occurs before the timeout period elapses, this | 63 | * If an interrupt occurs before the timeout period elapses, this |
64 | * function returns one immediately. If it times out, it returns | 64 | * function returns zero immediately. If it times out, it returns |
65 | * a value greater than zero. An error code less than zero | 65 | * one. An error code less than zero indicates an error (most |
66 | * indicates an error (most likely a pending signal), and the | 66 | * likely a pending signal), and the calling code should finish |
67 | * calling code should finish what it's doing as soon as it can. | 67 | * what it's doing as soon as it can. |
68 | */ | 68 | */ |
69 | 69 | ||
70 | int parport_wait_event (struct parport *port, signed long timeout) | 70 | int parport_wait_event (struct parport *port, signed long timeout) |
@@ -110,7 +110,7 @@ int parport_wait_event (struct parport *port, signed long timeout) | |||
110 | * | 110 | * |
111 | * If the status lines take on the desired values before the | 111 | * If the status lines take on the desired values before the |
112 | * timeout period elapses, parport_poll_peripheral() returns zero | 112 | * timeout period elapses, parport_poll_peripheral() returns zero |
113 | * immediately. A zero return value greater than zero indicates | 113 | * immediately. A return value greater than zero indicates |
114 | * a timeout. An error code (less than zero) indicates an error, | 114 | * a timeout. An error code (less than zero) indicates an error, |
115 | * most likely a signal that arrived, and the caller should | 115 | * most likely a signal that arrived, and the caller should |
116 | * finish what it is doing as soon as possible. | 116 | * finish what it is doing as soon as possible. |
diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c new file mode 100644 index 000000000000..46e06e596d73 --- /dev/null +++ b/drivers/parport/parport_ip32.c | |||
@@ -0,0 +1,2253 @@ | |||
1 | /* Low-level parallel port routines for built-in port on SGI IP32 | ||
2 | * | ||
3 | * Author: Arnaud Giersch <arnaud.giersch@free.fr> | ||
4 | * | ||
5 | * Based on parport_pc.c by | ||
6 | * Phil Blundell, Tim Waugh, Jose Renau, David Campbell, | ||
7 | * Andrea Arcangeli, et al. | ||
8 | * | ||
9 | * Thanks to Ilya A. Volynets-Evenbakh for his help. | ||
10 | * | ||
11 | * Copyright (C) 2005, 2006 Arnaud Giersch. | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify it | ||
14 | * under the terms of the GNU General Public License as published by the Free | ||
15 | * Software Foundation; either version 2 of the License, or (at your option) | ||
16 | * any later version. | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
19 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
21 | * more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License along | ||
24 | * with this program; if not, write to the Free Software Foundation, Inc., 59 | ||
25 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
26 | */ | ||
27 | |||
28 | /* Current status: | ||
29 | * | ||
30 | * Basic SPP and PS2 modes are supported. | ||
31 | * Support for parallel port IRQ is present. | ||
32 | * Hardware SPP (a.k.a. compatibility), EPP, and ECP modes are | ||
33 | * supported. | ||
34 | * SPP/ECP FIFO can be driven in PIO or DMA mode. PIO mode can work with | ||
35 | * or without interrupt support. | ||
36 | * | ||
37 | * Hardware ECP mode is not fully implemented (ecp_read_data and | ||
38 | * ecp_write_addr are actually missing). | ||
39 | * | ||
40 | * To do: | ||
41 | * | ||
42 | * Fully implement ECP mode. | ||
43 | * EPP and ECP modes need to be tested. I currently do not own any | ||
44 | * peripheral supporting these extended modes, and cannot test them. | ||
45 | * If DMA mode works well, decide if support for PIO FIFO modes should be | ||
46 | * dropped. | ||
47 | * Use the io{read,write} family of functions when they become available | ||
48 | * in the linux-mips.org tree. Note: the MIPS-specific functions readsb() | ||
49 | * and writesb() are to be replaced by ioread8_rep() and iowrite8_rep() | ||
50 | * respectively. | ||
51 | */ | ||
52 | |||
53 | /* The built-in parallel port on the SGI O2 workstation (a.k.a. IP32) is an | ||
54 | * IEEE 1284 parallel port driven by a Texas Instruments TL16PIR552PH chip[1]. | ||
55 | * This chip supports SPP, bidirectional, EPP and ECP modes. It has a 16 byte | ||
56 | * FIFO buffer and supports DMA transfers. | ||
57 | * | ||
58 | * [1] http://focus.ti.com/docs/prod/folders/print/tl16pir552.html | ||
59 | * | ||
60 | * Theoretically, we could simply use the parport_pc module. It is however | ||
61 | * not so simple. The parport_pc code assumes that the parallel port | ||
62 | * registers are port-mapped. On the O2, they are memory-mapped. | ||
63 | * Furthermore, each register is replicated on 256 consecutive addresses (as | ||
64 | * it is for the built-in serial ports on the same chip). | ||
65 | */ | ||
66 | |||
67 | /*--- Some configuration defines ---------------------------------------*/ | ||
68 | |||
69 | /* DEBUG_PARPORT_IP32 | ||
70 | * 0 disable debug | ||
71 | * 1 standard level: pr_debug1 is enabled | ||
72 | * 2 parport_ip32_dump_state is enabled | ||
73 | * >=3 verbose level: pr_debug is enabled | ||
74 | */ | ||
75 | #if !defined(DEBUG_PARPORT_IP32) | ||
76 | # define DEBUG_PARPORT_IP32 0 /* 0 (disabled) for production */ | ||
77 | #endif | ||
78 | |||
79 | /*----------------------------------------------------------------------*/ | ||
80 | |||
81 | /* Setup DEBUG macros. This is done before any includes, just in case we | ||
82 | * activate pr_debug() with DEBUG_PARPORT_IP32 >= 3. | ||
83 | */ | ||
84 | #if DEBUG_PARPORT_IP32 == 1 | ||
85 | # warning DEBUG_PARPORT_IP32 == 1 | ||
86 | #elif DEBUG_PARPORT_IP32 == 2 | ||
87 | # warning DEBUG_PARPORT_IP32 == 2 | ||
88 | #elif DEBUG_PARPORT_IP32 >= 3 | ||
89 | # warning DEBUG_PARPORT_IP32 >= 3 | ||
90 | # if !defined(DEBUG) | ||
91 | # define DEBUG /* enable pr_debug() in kernel.h */ | ||
92 | # endif | ||
93 | #endif | ||
94 | |||
95 | #include <linux/completion.h> | ||
96 | #include <linux/delay.h> | ||
97 | #include <linux/dma-mapping.h> | ||
98 | #include <linux/err.h> | ||
99 | #include <linux/init.h> | ||
100 | #include <linux/interrupt.h> | ||
101 | #include <linux/jiffies.h> | ||
102 | #include <linux/kernel.h> | ||
103 | #include <linux/module.h> | ||
104 | #include <linux/parport.h> | ||
105 | #include <linux/sched.h> | ||
106 | #include <linux/spinlock.h> | ||
107 | #include <linux/stddef.h> | ||
108 | #include <linux/types.h> | ||
109 | #include <asm/io.h> | ||
110 | #include <asm/ip32/ip32_ints.h> | ||
111 | #include <asm/ip32/mace.h> | ||
112 | |||
113 | /*--- Global variables -------------------------------------------------*/ | ||
114 | |||
115 | /* Verbose probing on by default for debugging. */ | ||
116 | #if DEBUG_PARPORT_IP32 >= 1 | ||
117 | # define DEFAULT_VERBOSE_PROBING 1 | ||
118 | #else | ||
119 | # define DEFAULT_VERBOSE_PROBING 0 | ||
120 | #endif | ||
121 | |||
122 | /* Default prefix for printk */ | ||
123 | #define PPIP32 "parport_ip32: " | ||
124 | |||
125 | /* | ||
126 | * These are the module parameters: | ||
127 | * @features: bit mask of features to enable/disable | ||
128 | * (all enabled by default) | ||
129 | * @verbose_probing: log chit-chat during initialization | ||
130 | */ | ||
131 | #define PARPORT_IP32_ENABLE_IRQ (1U << 0) | ||
132 | #define PARPORT_IP32_ENABLE_DMA (1U << 1) | ||
133 | #define PARPORT_IP32_ENABLE_SPP (1U << 2) | ||
134 | #define PARPORT_IP32_ENABLE_EPP (1U << 3) | ||
135 | #define PARPORT_IP32_ENABLE_ECP (1U << 4) | ||
136 | static unsigned int features = ~0U; | ||
137 | static int verbose_probing = DEFAULT_VERBOSE_PROBING; | ||
138 | |||
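A minimal worked example (not part of the patch): the @features mask is built from the PARPORT_IP32_ENABLE_* bits defined above. Assuming the variable is exposed as a module parameter further down in the file, as the comment describes, a mask that keeps everything except DMA would be composed like this:

	/* sketch only: every feature except DMA,
	 * i.e. 0x01 | 0x04 | 0x08 | 0x10 == 0x1d */
	unsigned int no_dma_features = PARPORT_IP32_ENABLE_IRQ
				     | PARPORT_IP32_ENABLE_SPP
				     | PARPORT_IP32_ENABLE_EPP
				     | PARPORT_IP32_ENABLE_ECP;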
139 | /* We do not support more than one port. */ | ||
140 | static struct parport *this_port = NULL; | ||
141 | |||
142 | /* Timing constants for FIFO modes. */ | ||
143 | #define FIFO_NFAULT_TIMEOUT 100 /* milliseconds */ | ||
144 | #define FIFO_POLLING_INTERVAL 50 /* microseconds */ | ||
145 | |||
146 | /*--- I/O register definitions -----------------------------------------*/ | ||
147 | |||
148 | /** | ||
149 | * struct parport_ip32_regs - virtual addresses of parallel port registers | ||
150 | * @data: Data Register | ||
151 | * @dsr: Device Status Register | ||
152 | * @dcr: Device Control Register | ||
153 | * @eppAddr: EPP Address Register | ||
154 | * @eppData0: EPP Data Register 0 | ||
155 | * @eppData1: EPP Data Register 1 | ||
156 | * @eppData2: EPP Data Register 2 | ||
157 | * @eppData3: EPP Data Register 3 | ||
158 | * @ecpAFifo: ECP Address FIFO | ||
159 | * @fifo: General FIFO register. The same address is used for: | ||
160 | * - cFifo, the Parallel Port DATA FIFO | ||
161 | * - ecpDFifo, the ECP Data FIFO | ||
162 | * - tFifo, the ECP Test FIFO | ||
163 | * @cnfgA: Configuration Register A | ||
164 | * @cnfgB: Configuration Register B | ||
165 | * @ecr: Extended Control Register | ||
166 | */ | ||
167 | struct parport_ip32_regs { | ||
168 | void __iomem *data; | ||
169 | void __iomem *dsr; | ||
170 | void __iomem *dcr; | ||
171 | void __iomem *eppAddr; | ||
172 | void __iomem *eppData0; | ||
173 | void __iomem *eppData1; | ||
174 | void __iomem *eppData2; | ||
175 | void __iomem *eppData3; | ||
176 | void __iomem *ecpAFifo; | ||
177 | void __iomem *fifo; | ||
178 | void __iomem *cnfgA; | ||
179 | void __iomem *cnfgB; | ||
180 | void __iomem *ecr; | ||
181 | }; | ||
182 | |||
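A minimal sketch (not part of the patch) of the layout described in the introductory comment: the registers are memory-mapped and each one is replicated on 256 consecutive addresses, so consecutive registers sit 256 bytes apart and a &struct parport_ip32_regs can be filled in from a single remapped base address. The function name and register offsets below are illustrative assumptions, not values taken from the TL16PIR552 datasheet.

	/* sketch only: derive register addresses from a base pointer,
	 * assuming a 256-byte (1 << 8) stride between registers and the
	 * conventional data/status/control ordering */
	static void parport_ip32_fill_regs_sketch(struct parport_ip32_regs *regs,
						  void __iomem *base)
	{
		regs->data = base + (0 << 8);	/* Data Register */
		regs->dsr  = base + (1 << 8);	/* Device Status Register */
		regs->dcr  = base + (2 << 8);	/* Device Control Register */
		/* ...the remaining registers follow the same pattern */
	}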
183 | /* Device Status Register */ | ||
184 | #define DSR_nBUSY (1U << 7) /* PARPORT_STATUS_BUSY */ | ||
185 | #define DSR_nACK (1U << 6) /* PARPORT_STATUS_ACK */ | ||
186 | #define DSR_PERROR (1U << 5) /* PARPORT_STATUS_PAPEROUT */ | ||
187 | #define DSR_SELECT (1U << 4) /* PARPORT_STATUS_SELECT */ | ||
188 | #define DSR_nFAULT (1U << 3) /* PARPORT_STATUS_ERROR */ | ||
189 | #define DSR_nPRINT (1U << 2) /* specific to TL16PIR552 */ | ||
190 | /* #define DSR_reserved (1U << 1) */ | ||
191 | #define DSR_TIMEOUT (1U << 0) /* EPP timeout */ | ||
192 | |||
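The comments next to the DSR bits above note a bit-for-bit correspondence with the generic PARPORT_STATUS_* masks from <linux/parport.h>; this is why parport_ip32_read_status() further down can return the raw DSR value. As an illustration only (a hypothetical helper, not part of the patch), an explicit translation would read:

	/* sketch only: explicit DSR -> PARPORT_STATUS_* mapping; since the
	 * bit positions match, this is effectively an identity on the
	 * status bits */
	static inline unsigned char parport_ip32_dsr_to_status(unsigned int dsr)
	{
		unsigned char status = 0;

		if (dsr & DSR_nBUSY)
			status |= PARPORT_STATUS_BUSY;
		if (dsr & DSR_nACK)
			status |= PARPORT_STATUS_ACK;
		if (dsr & DSR_PERROR)
			status |= PARPORT_STATUS_PAPEROUT;
		if (dsr & DSR_SELECT)
			status |= PARPORT_STATUS_SELECT;
		if (dsr & DSR_nFAULT)
			status |= PARPORT_STATUS_ERROR;
		return status;
	}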
193 | /* Device Control Register */ | ||
194 | /* #define DCR_reserved (1U << 7) | (1U << 6) */ | ||
195 | #define DCR_DIR (1U << 5) /* direction */ | ||
196 | #define DCR_IRQ (1U << 4) /* interrupt on nAck */ | ||
197 | #define DCR_SELECT (1U << 3) /* PARPORT_CONTROL_SELECT */ | ||
198 | #define DCR_nINIT (1U << 2) /* PARPORT_CONTROL_INIT */ | ||
199 | #define DCR_AUTOFD (1U << 1) /* PARPORT_CONTROL_AUTOFD */ | ||
200 | #define DCR_STROBE (1U << 0) /* PARPORT_CONTROL_STROBE */ | ||
201 | |||
202 | /* ECP Configuration Register A */ | ||
203 | #define CNFGA_IRQ (1U << 7) | ||
204 | #define CNFGA_ID_MASK ((1U << 6) | (1U << 5) | (1U << 4)) | ||
205 | #define CNFGA_ID_SHIFT 4 | ||
206 | #define CNFGA_ID_16 (00U << CNFGA_ID_SHIFT) | ||
207 | #define CNFGA_ID_8 (01U << CNFGA_ID_SHIFT) | ||
208 | #define CNFGA_ID_32 (02U << CNFGA_ID_SHIFT) | ||
209 | /* #define CNFGA_reserved (1U << 3) */ | ||
210 | #define CNFGA_nBYTEINTRANS (1U << 2) | ||
211 | #define CNFGA_PWORDLEFT ((1U << 1) | (1U << 0)) | ||
212 | |||
213 | /* ECP Configuration Register B */ | ||
214 | #define CNFGB_COMPRESS (1U << 7) | ||
215 | #define CNFGB_INTRVAL (1U << 6) | ||
216 | #define CNFGB_IRQ_MASK ((1U << 5) | (1U << 4) | (1U << 3)) | ||
217 | #define CNFGB_IRQ_SHIFT 3 | ||
218 | #define CNFGB_DMA_MASK ((1U << 2) | (1U << 1) | (1U << 0)) | ||
219 | #define CNFGB_DMA_SHIFT 0 | ||
220 | |||
221 | /* Extended Control Register */ | ||
222 | #define ECR_MODE_MASK ((1U << 7) | (1U << 6) | (1U << 5)) | ||
223 | #define ECR_MODE_SHIFT 5 | ||
224 | #define ECR_MODE_SPP (00U << ECR_MODE_SHIFT) | ||
225 | #define ECR_MODE_PS2 (01U << ECR_MODE_SHIFT) | ||
226 | #define ECR_MODE_PPF (02U << ECR_MODE_SHIFT) | ||
227 | #define ECR_MODE_ECP (03U << ECR_MODE_SHIFT) | ||
228 | #define ECR_MODE_EPP (04U << ECR_MODE_SHIFT) | ||
229 | /* #define ECR_MODE_reserved (05U << ECR_MODE_SHIFT) */ | ||
230 | #define ECR_MODE_TST (06U << ECR_MODE_SHIFT) | ||
231 | #define ECR_MODE_CFG (07U << ECR_MODE_SHIFT) | ||
232 | #define ECR_nERRINTR (1U << 4) | ||
233 | #define ECR_DMAEN (1U << 3) | ||
234 | #define ECR_SERVINTR (1U << 2) | ||
235 | #define ECR_F_FULL (1U << 1) | ||
236 | #define ECR_F_EMPTY (1U << 0) | ||
237 | |||
238 | /*--- Private data -----------------------------------------------------*/ | ||
239 | |||
240 | /** | ||
241 | * enum parport_ip32_irq_mode - operation mode of interrupt handler | ||
242 | * @PARPORT_IP32_IRQ_FWD: forward interrupt to the upper parport layer | ||
243 | * @PARPORT_IP32_IRQ_HERE: interrupt is handled locally | ||
244 | */ | ||
245 | enum parport_ip32_irq_mode { PARPORT_IP32_IRQ_FWD, PARPORT_IP32_IRQ_HERE }; | ||
246 | |||
247 | /** | ||
248 | * struct parport_ip32_private - private stuff for &struct parport | ||
249 | * @regs: register addresses | ||
250 | * @dcr_cache: cached contents of DCR | ||
251 | * @dcr_writable: bit mask of writable DCR bits | ||
252 | * @pword: number of bytes per PWord | ||
253 | * @fifo_depth: number of PWords that FIFO will hold | ||
254 | * @readIntrThreshold: minimum number of PWords we can read | ||
255 | * if we get an interrupt | ||
256 | * @writeIntrThreshold: minimum number of PWords we can write | ||
257 | * if we get an interrupt | ||
258 | * @irq_mode: operation mode of interrupt handler for this port | ||
259 | * @irq_complete: mutex used to wait for an interrupt to occur | ||
260 | */ | ||
261 | struct parport_ip32_private { | ||
262 | struct parport_ip32_regs regs; | ||
263 | unsigned int dcr_cache; | ||
264 | unsigned int dcr_writable; | ||
265 | unsigned int pword; | ||
266 | unsigned int fifo_depth; | ||
267 | unsigned int readIntrThreshold; | ||
268 | unsigned int writeIntrThreshold; | ||
269 | enum parport_ip32_irq_mode irq_mode; | ||
270 | struct completion irq_complete; | ||
271 | }; | ||
272 | |||
273 | /*--- Debug code -------------------------------------------------------*/ | ||
274 | |||
275 | /* | ||
276 | * pr_debug1 - print debug messages | ||
277 | * | ||
278 | * This is like pr_debug(), but is defined for %DEBUG_PARPORT_IP32 >= 1 | ||
279 | */ | ||
280 | #if DEBUG_PARPORT_IP32 >= 1 | ||
281 | # define pr_debug1(...) printk(KERN_DEBUG __VA_ARGS__) | ||
282 | #else /* DEBUG_PARPORT_IP32 < 1 */ | ||
283 | # define pr_debug1(...) do { } while (0) | ||
284 | #endif | ||
285 | |||
286 | /* | ||
287 | * pr_trace, pr_trace1 - trace function calls | ||
288 | * @p: pointer to &struct parport | ||
289 | * @fmt: printk format string | ||
290 | * @...: parameters for format string | ||
291 | * | ||
292 | * Macros used to trace function calls. The given string is formatted after | ||
293 | * the function name. pr_trace() uses pr_debug(), and pr_trace1() uses | ||
294 | * pr_debug1(). __pr_trace() is the low-level macro and is not to be used | ||
295 | * directly. | ||
296 | */ | ||
297 | #define __pr_trace(pr, p, fmt, ...) \ | ||
298 | pr("%s: %s" fmt "\n", \ | ||
299 | ({ const struct parport *__p = (p); \ | ||
300 | __p ? __p->name : "parport_ip32"; }), \ | ||
301 | __func__ , ##__VA_ARGS__) | ||
302 | #define pr_trace(p, fmt, ...) __pr_trace(pr_debug, p, fmt , ##__VA_ARGS__) | ||
303 | #define pr_trace1(p, fmt, ...) __pr_trace(pr_debug1, p, fmt , ##__VA_ARGS__) | ||
304 | |||
305 | /* | ||
306 | * __pr_probe, pr_probe - print message if @verbose_probing is true | ||
307 | * @p: pointer to &struct parport | ||
308 | * @fmt: printk format string | ||
309 | * @...: parameters for format string | ||
310 | * | ||
311 | * For new lines, use pr_probe(). Use __pr_probe() for continued lines. | ||
312 | */ | ||
313 | #define __pr_probe(...) \ | ||
314 | do { if (verbose_probing) printk(__VA_ARGS__); } while (0) | ||
315 | #define pr_probe(p, fmt, ...) \ | ||
316 | __pr_probe(KERN_INFO PPIP32 "0x%lx: " fmt, (p)->base , ##__VA_ARGS__) | ||
317 | |||
318 | /* | ||
319 | * parport_ip32_dump_state - print register status of parport | ||
320 | * @p: pointer to &struct parport | ||
321 | * @str: string to add in message | ||
322 | * @show_ecp_config: shall we dump ECP configuration registers too? | ||
323 | * | ||
324 | * This function is only here for debugging purposes, and should be used with | ||
325 | * care. Reading the parallel port registers may have undesired side effects. | ||
326 | * Especially if @show_ecp_config is true, the parallel port is reset. | ||
327 | * This function is only defined if %DEBUG_PARPORT_IP32 >= 2. | ||
328 | */ | ||
329 | #if DEBUG_PARPORT_IP32 >= 2 | ||
330 | static void parport_ip32_dump_state(struct parport *p, char *str, | ||
331 | unsigned int show_ecp_config) | ||
332 | { | ||
333 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
334 | unsigned int i; | ||
335 | |||
336 | printk(KERN_DEBUG PPIP32 "%s: state (%s):\n", p->name, str); | ||
337 | { | ||
338 | static const char ecr_modes[8][4] = {"SPP", "PS2", "PPF", | ||
339 | "ECP", "EPP", "???", | ||
340 | "TST", "CFG"}; | ||
341 | unsigned int ecr = readb(priv->regs.ecr); | ||
342 | printk(KERN_DEBUG PPIP32 " ecr=0x%02x", ecr); | ||
343 | printk(" %s", | ||
344 | ecr_modes[(ecr & ECR_MODE_MASK) >> ECR_MODE_SHIFT]); | ||
345 | if (ecr & ECR_nERRINTR) | ||
346 | printk(",nErrIntrEn"); | ||
347 | if (ecr & ECR_DMAEN) | ||
348 | printk(",dmaEn"); | ||
349 | if (ecr & ECR_SERVINTR) | ||
350 | printk(",serviceIntr"); | ||
351 | if (ecr & ECR_F_FULL) | ||
352 | printk(",f_full"); | ||
353 | if (ecr & ECR_F_EMPTY) | ||
354 | printk(",f_empty"); | ||
355 | printk("\n"); | ||
356 | } | ||
357 | if (show_ecp_config) { | ||
358 | unsigned int oecr, cnfgA, cnfgB; | ||
359 | oecr = readb(priv->regs.ecr); | ||
360 | writeb(ECR_MODE_PS2, priv->regs.ecr); | ||
361 | writeb(ECR_MODE_CFG, priv->regs.ecr); | ||
362 | cnfgA = readb(priv->regs.cnfgA); | ||
363 | cnfgB = readb(priv->regs.cnfgB); | ||
364 | writeb(ECR_MODE_PS2, priv->regs.ecr); | ||
365 | writeb(oecr, priv->regs.ecr); | ||
366 | printk(KERN_DEBUG PPIP32 " cnfgA=0x%02x", cnfgA); | ||
367 | printk(" ISA-%s", (cnfgA & CNFGA_IRQ) ? "Level" : "Pulses"); | ||
368 | switch (cnfgA & CNFGA_ID_MASK) { | ||
369 | case CNFGA_ID_8: | ||
370 | printk(",8 bits"); | ||
371 | break; | ||
372 | case CNFGA_ID_16: | ||
373 | printk(",16 bits"); | ||
374 | break; | ||
375 | case CNFGA_ID_32: | ||
376 | printk(",32 bits"); | ||
377 | break; | ||
378 | default: | ||
379 | printk(",unknown ID"); | ||
380 | break; | ||
381 | } | ||
382 | if (!(cnfgA & CNFGA_nBYTEINTRANS)) | ||
383 | printk(",ByteInTrans"); | ||
384 | if ((cnfgA & CNFGA_ID_MASK) != CNFGA_ID_8) | ||
385 | printk(",%d byte%s left", cnfgA & CNFGA_PWORDLEFT, | ||
386 | ((cnfgA & CNFGA_PWORDLEFT) > 1) ? "s" : ""); | ||
387 | printk("\n"); | ||
388 | printk(KERN_DEBUG PPIP32 " cnfgB=0x%02x", cnfgB); | ||
389 | printk(" irq=%u,dma=%u", | ||
390 | (cnfgB & CNFGB_IRQ_MASK) >> CNFGB_IRQ_SHIFT, | ||
391 | (cnfgB & CNFGB_DMA_MASK) >> CNFGB_DMA_SHIFT); | ||
392 | printk(",intrValue=%d", !!(cnfgB & CNFGB_INTRVAL)); | ||
393 | if (cnfgB & CNFGB_COMPRESS) | ||
394 | printk(",compress"); | ||
395 | printk("\n"); | ||
396 | } | ||
397 | for (i = 0; i < 2; i++) { | ||
398 | unsigned int dcr = i ? priv->dcr_cache : readb(priv->regs.dcr); | ||
399 | printk(KERN_DEBUG PPIP32 " dcr(%s)=0x%02x", | ||
400 | i ? "soft" : "hard", dcr); | ||
401 | printk(" %s", (dcr & DCR_DIR) ? "rev" : "fwd"); | ||
402 | if (dcr & DCR_IRQ) | ||
403 | printk(",ackIntEn"); | ||
404 | if (!(dcr & DCR_SELECT)) | ||
405 | printk(",nSelectIn"); | ||
406 | if (dcr & DCR_nINIT) | ||
407 | printk(",nInit"); | ||
408 | if (!(dcr & DCR_AUTOFD)) | ||
409 | printk(",nAutoFD"); | ||
410 | if (!(dcr & DCR_STROBE)) | ||
411 | printk(",nStrobe"); | ||
412 | printk("\n"); | ||
413 | } | ||
414 | #define sep (f++ ? ',' : ' ') | ||
415 | { | ||
416 | unsigned int f = 0; | ||
417 | unsigned int dsr = readb(priv->regs.dsr); | ||
418 | printk(KERN_DEBUG PPIP32 " dsr=0x%02x", dsr); | ||
419 | if (!(dsr & DSR_nBUSY)) | ||
420 | printk("%cBusy", sep); | ||
421 | if (dsr & DSR_nACK) | ||
422 | printk("%cnAck", sep); | ||
423 | if (dsr & DSR_PERROR) | ||
424 | printk("%cPError", sep); | ||
425 | if (dsr & DSR_SELECT) | ||
426 | printk("%cSelect", sep); | ||
427 | if (dsr & DSR_nFAULT) | ||
428 | printk("%cnFault", sep); | ||
429 | if (!(dsr & DSR_nPRINT)) | ||
430 | printk("%c(Print)", sep); | ||
431 | if (dsr & DSR_TIMEOUT) | ||
432 | printk("%cTimeout", sep); | ||
433 | printk("\n"); | ||
434 | } | ||
435 | #undef sep | ||
436 | } | ||
437 | #else /* DEBUG_PARPORT_IP32 < 2 */ | ||
438 | #define parport_ip32_dump_state(...) do { } while (0) | ||
439 | #endif | ||
440 | |||
441 | /* | ||
442 | * CHECK_EXTRA_BITS - track and log extra bits | ||
443 | * @p: pointer to &struct parport | ||
444 | * @b: byte to inspect | ||
445 | * @m: bit mask of authorized bits | ||
446 | * | ||
447 | * This is used to track and log extra bits that should not be there in | ||
448 | * parport_ip32_write_control() and parport_ip32_frob_control(). It is only | ||
449 | * defined if %DEBUG_PARPORT_IP32 >= 1. | ||
450 | */ | ||
451 | #if DEBUG_PARPORT_IP32 >= 1 | ||
452 | #define CHECK_EXTRA_BITS(p, b, m) \ | ||
453 | do { \ | ||
454 | unsigned int __b = (b), __m = (m); \ | ||
455 | if (__b & ~__m) \ | ||
456 | pr_debug1(PPIP32 "%s: extra bits in %s(%s): " \ | ||
457 | "0x%02x/0x%02x\n", \ | ||
458 | (p)->name, __func__, #b, __b, __m); \ | ||
459 | } while (0) | ||
460 | #else /* DEBUG_PARPORT_IP32 < 1 */ | ||
461 | #define CHECK_EXTRA_BITS(...) do { } while (0) | ||
462 | #endif | ||
463 | |||
464 | /*--- IP32 parallel port DMA operations --------------------------------*/ | ||
465 | |||
466 | /** | ||
467 | * struct parport_ip32_dma_data - private data needed for DMA operation | ||
468 | * @dir: DMA direction (from or to device) | ||
469 | * @buf: buffer physical address | ||
470 | * @len: buffer length | ||
471 | * @next: address of next bytes to DMA transfer | ||
472 | * @left: number of bytes remaining | ||
473 | * @ctx: next context to write (0: context_a; 1: context_b) | ||
474 | * @irq_on: are the DMA IRQs currently enabled? | ||
475 | * @lock: spinlock to protect access to the structure | ||
476 | */ | ||
477 | struct parport_ip32_dma_data { | ||
478 | enum dma_data_direction dir; | ||
479 | dma_addr_t buf; | ||
480 | dma_addr_t next; | ||
481 | size_t len; | ||
482 | size_t left; | ||
483 | unsigned int ctx; | ||
484 | unsigned int irq_on; | ||
485 | spinlock_t lock; | ||
486 | }; | ||
487 | static struct parport_ip32_dma_data parport_ip32_dma; | ||
488 | |||
489 | /** | ||
490 | * parport_ip32_dma_setup_context - setup next DMA context | ||
491 | * @limit: maximum data size for the context | ||
492 | * | ||
493 | * The alignment constraints must be verified in the calling function, and the | ||
494 | * parameter @limit must be set accordingly. | ||
495 | */ | ||
496 | static void parport_ip32_dma_setup_context(unsigned int limit) | ||
497 | { | ||
498 | unsigned long flags; | ||
499 | |||
500 | spin_lock_irqsave(&parport_ip32_dma.lock, flags); | ||
501 | if (parport_ip32_dma.left > 0) { | ||
502 | /* Note: ctxreg is "volatile" here only because | ||
503 | * mace->perif.ctrl.parport.context_a and context_b are | ||
504 | * "volatile". */ | ||
505 | volatile u64 __iomem *ctxreg = (parport_ip32_dma.ctx == 0) ? | ||
506 | &mace->perif.ctrl.parport.context_a : | ||
507 | &mace->perif.ctrl.parport.context_b; | ||
508 | u64 count; | ||
509 | u64 ctxval; | ||
510 | if (parport_ip32_dma.left <= limit) { | ||
511 | count = parport_ip32_dma.left; | ||
512 | ctxval = MACEPAR_CONTEXT_LASTFLAG; | ||
513 | } else { | ||
514 | count = limit; | ||
515 | ctxval = 0; | ||
516 | } | ||
517 | |||
518 | pr_trace(NULL, | ||
519 | "(%u): 0x%04x:0x%04x, %u -> %u%s", | ||
520 | limit, | ||
521 | (unsigned int)parport_ip32_dma.buf, | ||
522 | (unsigned int)parport_ip32_dma.next, | ||
523 | (unsigned int)count, | ||
524 | parport_ip32_dma.ctx, ctxval ? "*" : ""); | ||
525 | |||
526 | ctxval |= parport_ip32_dma.next & | ||
527 | MACEPAR_CONTEXT_BASEADDR_MASK; | ||
528 | ctxval |= ((count - 1) << MACEPAR_CONTEXT_DATALEN_SHIFT) & | ||
529 | MACEPAR_CONTEXT_DATALEN_MASK; | ||
530 | writeq(ctxval, ctxreg); | ||
531 | parport_ip32_dma.next += count; | ||
532 | parport_ip32_dma.left -= count; | ||
533 | parport_ip32_dma.ctx ^= 1U; | ||
534 | } | ||
535 | /* If there is nothing more to send, disable IRQs to avoid facing | ||
536 | * an IRQ storm which can lock the machine. Disable them | ||
537 | * only once. */ | ||
538 | if (parport_ip32_dma.left == 0 && parport_ip32_dma.irq_on) { | ||
539 | pr_debug(PPIP32 "IRQ off (ctx)\n"); | ||
540 | disable_irq_nosync(MACEISA_PAR_CTXA_IRQ); | ||
541 | disable_irq_nosync(MACEISA_PAR_CTXB_IRQ); | ||
542 | parport_ip32_dma.irq_on = 0; | ||
543 | } | ||
544 | spin_unlock_irqrestore(&parport_ip32_dma.lock, flags); | ||
545 | } | ||
546 | |||
547 | /** | ||
548 | * parport_ip32_dma_interrupt - DMA interrupt handler | ||
549 | * @irq: interrupt number | ||
550 | * @dev_id: unused | ||
551 | * @regs: pointer to &struct pt_regs | ||
552 | */ | ||
553 | static irqreturn_t parport_ip32_dma_interrupt(int irq, void *dev_id, | ||
554 | struct pt_regs *regs) | ||
555 | { | ||
556 | if (parport_ip32_dma.left) | ||
557 | pr_trace(NULL, "(%d): ctx=%d", irq, parport_ip32_dma.ctx); | ||
558 | parport_ip32_dma_setup_context(MACEPAR_CONTEXT_DATA_BOUND); | ||
559 | return IRQ_HANDLED; | ||
560 | } | ||
561 | |||
562 | #if DEBUG_PARPORT_IP32 | ||
563 | static irqreturn_t parport_ip32_merr_interrupt(int irq, void *dev_id, | ||
564 | struct pt_regs *regs) | ||
565 | { | ||
566 | pr_trace1(NULL, "(%d)", irq); | ||
567 | return IRQ_HANDLED; | ||
568 | } | ||
569 | #endif | ||
570 | |||
571 | /** | ||
572 | * parport_ip32_dma_start - begins a DMA transfer | ||
573 | * @dir: DMA direction: DMA_TO_DEVICE or DMA_FROM_DEVICE | ||
574 | * @addr: pointer to data buffer | ||
575 | * @count: buffer size | ||
576 | * | ||
577 | * Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be | ||
578 | * correctly balanced. | ||
579 | */ | ||
580 | static int parport_ip32_dma_start(enum dma_data_direction dir, | ||
581 | void *addr, size_t count) | ||
582 | { | ||
583 | unsigned int limit; | ||
584 | u64 ctrl; | ||
585 | |||
586 | pr_trace(NULL, "(%d, %lu)", dir, (unsigned long)count); | ||
587 | |||
588 | /* FIXME - add support for DMA_FROM_DEVICE. In this case, the buffer | ||
589 | * must be 64-byte aligned. */ | ||
590 | BUG_ON(dir != DMA_TO_DEVICE); | ||
591 | |||
592 | /* Reset DMA controller */ | ||
593 | ctrl = MACEPAR_CTLSTAT_RESET; | ||
594 | writeq(ctrl, &mace->perif.ctrl.parport.cntlstat); | ||
595 | |||
596 | /* DMA IRQs should normally be enabled */ | ||
597 | if (!parport_ip32_dma.irq_on) { | ||
598 | WARN_ON(1); | ||
599 | enable_irq(MACEISA_PAR_CTXA_IRQ); | ||
600 | enable_irq(MACEISA_PAR_CTXB_IRQ); | ||
601 | parport_ip32_dma.irq_on = 1; | ||
602 | } | ||
603 | |||
604 | /* Prepare DMA pointers */ | ||
605 | parport_ip32_dma.dir = dir; | ||
606 | parport_ip32_dma.buf = dma_map_single(NULL, addr, count, dir); | ||
607 | parport_ip32_dma.len = count; | ||
608 | parport_ip32_dma.next = parport_ip32_dma.buf; | ||
609 | parport_ip32_dma.left = parport_ip32_dma.len; | ||
610 | parport_ip32_dma.ctx = 0; | ||
611 | |||
612 | /* Setup DMA direction and first two contexts */ | ||
613 | ctrl = (dir == DMA_TO_DEVICE) ? 0 : MACEPAR_CTLSTAT_DIRECTION; | ||
614 | writeq(ctrl, &mace->perif.ctrl.parport.cntlstat); | ||
615 | /* Single transfer should not cross a 4K page boundary */ | ||
616 | limit = MACEPAR_CONTEXT_DATA_BOUND - | ||
617 | (parport_ip32_dma.next & (MACEPAR_CONTEXT_DATA_BOUND - 1)); | ||
618 | parport_ip32_dma_setup_context(limit); | ||
619 | parport_ip32_dma_setup_context(MACEPAR_CONTEXT_DATA_BOUND); | ||
620 | |||
621 | /* Real start of DMA transfer */ | ||
622 | ctrl |= MACEPAR_CTLSTAT_ENABLE; | ||
623 | writeq(ctrl, &mace->perif.ctrl.parport.cntlstat); | ||
624 | |||
625 | return 0; | ||
626 | } | ||
627 | |||
628 | /** | ||
629 | * parport_ip32_dma_stop - ends a running DMA transfer | ||
630 | * | ||
631 | * Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be | ||
632 | * correctly balanced. | ||
633 | */ | ||
634 | static void parport_ip32_dma_stop(void) | ||
635 | { | ||
636 | u64 ctx_a; | ||
637 | u64 ctx_b; | ||
638 | u64 ctrl; | ||
639 | u64 diag; | ||
640 | size_t res[2]; /* {[0] = res_a, [1] = res_b} */ | ||
641 | |||
642 | pr_trace(NULL, "()"); | ||
643 | |||
644 | /* Disable IRQs */ | ||
645 | spin_lock_irq(&parport_ip32_dma.lock); | ||
646 | if (parport_ip32_dma.irq_on) { | ||
647 | pr_debug(PPIP32 "IRQ off (stop)\n"); | ||
648 | disable_irq_nosync(MACEISA_PAR_CTXA_IRQ); | ||
649 | disable_irq_nosync(MACEISA_PAR_CTXB_IRQ); | ||
650 | parport_ip32_dma.irq_on = 0; | ||
651 | } | ||
652 | spin_unlock_irq(&parport_ip32_dma.lock); | ||
653 | /* Force IRQ synchronization, even if the IRQs were disabled | ||
654 | * elsewhere. */ | ||
655 | synchronize_irq(MACEISA_PAR_CTXA_IRQ); | ||
656 | synchronize_irq(MACEISA_PAR_CTXB_IRQ); | ||
657 | |||
658 | /* Stop DMA transfer */ | ||
659 | ctrl = readq(&mace->perif.ctrl.parport.cntlstat); | ||
660 | ctrl &= ~MACEPAR_CTLSTAT_ENABLE; | ||
661 | writeq(ctrl, &mace->perif.ctrl.parport.cntlstat); | ||
662 | |||
663 | /* Adjust residue (parport_ip32_dma.left) */ | ||
664 | ctx_a = readq(&mace->perif.ctrl.parport.context_a); | ||
665 | ctx_b = readq(&mace->perif.ctrl.parport.context_b); | ||
666 | ctrl = readq(&mace->perif.ctrl.parport.cntlstat); | ||
667 | diag = readq(&mace->perif.ctrl.parport.diagnostic); | ||
668 | res[0] = (ctrl & MACEPAR_CTLSTAT_CTXA_VALID) ? | ||
669 | 1 + ((ctx_a & MACEPAR_CONTEXT_DATALEN_MASK) >> | ||
670 | MACEPAR_CONTEXT_DATALEN_SHIFT) : | ||
671 | 0; | ||
672 | res[1] = (ctrl & MACEPAR_CTLSTAT_CTXB_VALID) ? | ||
673 | 1 + ((ctx_b & MACEPAR_CONTEXT_DATALEN_MASK) >> | ||
674 | MACEPAR_CONTEXT_DATALEN_SHIFT) : | ||
675 | 0; | ||
676 | if (diag & MACEPAR_DIAG_DMACTIVE) | ||
677 | res[(diag & MACEPAR_DIAG_CTXINUSE) != 0] = | ||
678 | 1 + ((diag & MACEPAR_DIAG_CTRMASK) >> | ||
679 | MACEPAR_DIAG_CTRSHIFT); | ||
680 | parport_ip32_dma.left += res[0] + res[1]; | ||
681 | |||
682 | /* Reset DMA controller, and re-enable IRQs */ | ||
683 | ctrl = MACEPAR_CTLSTAT_RESET; | ||
684 | writeq(ctrl, &mace->perif.ctrl.parport.cntlstat); | ||
685 | pr_debug(PPIP32 "IRQ on (stop)\n"); | ||
686 | enable_irq(MACEISA_PAR_CTXA_IRQ); | ||
687 | enable_irq(MACEISA_PAR_CTXB_IRQ); | ||
688 | parport_ip32_dma.irq_on = 1; | ||
689 | |||
690 | dma_unmap_single(NULL, parport_ip32_dma.buf, parport_ip32_dma.len, | ||
691 | parport_ip32_dma.dir); | ||
692 | } | ||
693 | |||
694 | /** | ||
695 | * parport_ip32_dma_get_residue - get residue from last DMA transfer | ||
696 | * | ||
697 | * Returns the number of bytes remaining from last DMA transfer. | ||
698 | */ | ||
699 | static inline size_t parport_ip32_dma_get_residue(void) | ||
700 | { | ||
701 | return parport_ip32_dma.left; | ||
702 | } | ||
703 | |||
704 | /** | ||
705 | * parport_ip32_dma_register - initialize DMA engine | ||
706 | * | ||
707 | * Returns zero for success. | ||
708 | */ | ||
709 | static int parport_ip32_dma_register(void) | ||
710 | { | ||
711 | int err; | ||
712 | |||
713 | spin_lock_init(&parport_ip32_dma.lock); | ||
714 | parport_ip32_dma.irq_on = 1; | ||
715 | |||
716 | /* Reset DMA controller */ | ||
717 | writeq(MACEPAR_CTLSTAT_RESET, &mace->perif.ctrl.parport.cntlstat); | ||
718 | |||
719 | /* Request IRQs */ | ||
720 | err = request_irq(MACEISA_PAR_CTXA_IRQ, parport_ip32_dma_interrupt, | ||
721 | 0, "parport_ip32", NULL); | ||
722 | if (err) | ||
723 | goto fail_a; | ||
724 | err = request_irq(MACEISA_PAR_CTXB_IRQ, parport_ip32_dma_interrupt, | ||
725 | 0, "parport_ip32", NULL); | ||
726 | if (err) | ||
727 | goto fail_b; | ||
728 | #if DEBUG_PARPORT_IP32 | ||
729 | /* FIXME - what is this IRQ for? */ | ||
730 | err = request_irq(MACEISA_PAR_MERR_IRQ, parport_ip32_merr_interrupt, | ||
731 | 0, "parport_ip32", NULL); | ||
732 | if (err) | ||
733 | goto fail_merr; | ||
734 | #endif | ||
735 | return 0; | ||
736 | |||
737 | #if DEBUG_PARPORT_IP32 | ||
738 | fail_merr: | ||
739 | free_irq(MACEISA_PAR_CTXB_IRQ, NULL); | ||
740 | #endif | ||
741 | fail_b: | ||
742 | free_irq(MACEISA_PAR_CTXA_IRQ, NULL); | ||
743 | fail_a: | ||
744 | return err; | ||
745 | } | ||
746 | |||
747 | /** | ||
748 | * parport_ip32_dma_unregister - release and free resources for DMA engine | ||
749 | */ | ||
750 | static void parport_ip32_dma_unregister(void) | ||
751 | { | ||
752 | #if DEBUG_PARPORT_IP32 | ||
753 | free_irq(MACEISA_PAR_MERR_IRQ, NULL); | ||
754 | #endif | ||
755 | free_irq(MACEISA_PAR_CTXB_IRQ, NULL); | ||
756 | free_irq(MACEISA_PAR_CTXA_IRQ, NULL); | ||
757 | } | ||
758 | |||
759 | /*--- Interrupt handlers and associates --------------------------------*/ | ||
760 | |||
761 | /** | ||
762 | * parport_ip32_wakeup - wakes up code waiting for an interrupt | ||
763 | * @p: pointer to &struct parport | ||
764 | */ | ||
765 | static inline void parport_ip32_wakeup(struct parport *p) | ||
766 | { | ||
767 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
768 | complete(&priv->irq_complete); | ||
769 | } | ||
770 | |||
771 | /** | ||
772 | * parport_ip32_interrupt - interrupt handler | ||
773 | * @irq: interrupt number | ||
774 | * @dev_id: pointer to &struct parport | ||
775 | * @regs: pointer to &struct pt_regs | ||
776 | * | ||
777 | * Caught interrupts are forwarded to the upper parport layer if irq_mode is | ||
778 | * %PARPORT_IP32_IRQ_FWD. | ||
779 | */ | ||
780 | static irqreturn_t parport_ip32_interrupt(int irq, void *dev_id, | ||
781 | struct pt_regs *regs) | ||
782 | { | ||
783 | struct parport * const p = dev_id; | ||
784 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
785 | enum parport_ip32_irq_mode irq_mode = priv->irq_mode; | ||
786 | switch (irq_mode) { | ||
787 | case PARPORT_IP32_IRQ_FWD: | ||
788 | parport_generic_irq(irq, p, regs); | ||
789 | break; | ||
790 | case PARPORT_IP32_IRQ_HERE: | ||
791 | parport_ip32_wakeup(p); | ||
792 | break; | ||
793 | } | ||
794 | return IRQ_HANDLED; | ||
795 | } | ||
796 | |||
797 | /*--- Some utility function to manipulate ECR register -----------------*/ | ||
798 | |||
799 | /** | ||
800 | * parport_ip32_read_econtrol - read contents of the ECR register | ||
801 | * @p: pointer to &struct parport | ||
802 | */ | ||
803 | static inline unsigned int parport_ip32_read_econtrol(struct parport *p) | ||
804 | { | ||
805 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
806 | return readb(priv->regs.ecr); | ||
807 | } | ||
808 | |||
809 | /** | ||
810 | * parport_ip32_write_econtrol - write new contents to the ECR register | ||
811 | * @p: pointer to &struct parport | ||
812 | * @c: new value to write | ||
813 | */ | ||
814 | static inline void parport_ip32_write_econtrol(struct parport *p, | ||
815 | unsigned int c) | ||
816 | { | ||
817 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
818 | writeb(c, priv->regs.ecr); | ||
819 | } | ||
820 | |||
821 | /** | ||
822 | * parport_ip32_frob_econtrol - change bits from the ECR register | ||
823 | * @p: pointer to &struct parport | ||
824 | * @mask: bit mask of bits to change | ||
825 | * @val: new value for changed bits | ||
826 | * | ||
827 | * Read from the ECR, mask out the bits in @mask, exclusive-or with the bits | ||
828 | * in @val, and write the result to the ECR. | ||
829 | */ | ||
830 | static inline void parport_ip32_frob_econtrol(struct parport *p, | ||
831 | unsigned int mask, | ||
832 | unsigned int val) | ||
833 | { | ||
834 | unsigned int c; | ||
835 | c = (parport_ip32_read_econtrol(p) & ~mask) ^ val; | ||
836 | parport_ip32_write_econtrol(p, c); | ||
837 | } | ||
838 | |||
839 | /** | ||
840 | * parport_ip32_set_mode - change mode of ECP port | ||
841 | * @p: pointer to &struct parport | ||
842 | * @mode: new mode to write in ECR | ||
843 | * | ||
844 | * ECR is reset in a sane state (interrupts and DMA disabled), and placed in | ||
845 | * mode @mode. Go through PS2 mode if needed. | ||
846 | */ | ||
847 | static void parport_ip32_set_mode(struct parport *p, unsigned int mode) | ||
848 | { | ||
849 | unsigned int omode; | ||
850 | |||
851 | mode &= ECR_MODE_MASK; | ||
852 | omode = parport_ip32_read_econtrol(p) & ECR_MODE_MASK; | ||
853 | |||
854 | if (!(mode == ECR_MODE_SPP || mode == ECR_MODE_PS2 | ||
855 | || omode == ECR_MODE_SPP || omode == ECR_MODE_PS2)) { | ||
856 | /* We have to go through PS2 mode */ | ||
857 | unsigned int ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR; | ||
858 | parport_ip32_write_econtrol(p, ecr); | ||
859 | } | ||
860 | parport_ip32_write_econtrol(p, mode | ECR_nERRINTR | ECR_SERVINTR); | ||
861 | } | ||
862 | |||
863 | /*--- Basic functions needed for parport -------------------------------*/ | ||
864 | |||
865 | /** | ||
866 | * parport_ip32_read_data - return current contents of the DATA register | ||
867 | * @p: pointer to &struct parport | ||
868 | */ | ||
869 | static inline unsigned char parport_ip32_read_data(struct parport *p) | ||
870 | { | ||
871 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
872 | return readb(priv->regs.data); | ||
873 | } | ||
874 | |||
875 | /** | ||
876 | * parport_ip32_write_data - set new contents for the DATA register | ||
877 | * @p: pointer to &struct parport | ||
878 | * @d: new value to write | ||
879 | */ | ||
880 | static inline void parport_ip32_write_data(struct parport *p, unsigned char d) | ||
881 | { | ||
882 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
883 | writeb(d, priv->regs.data); | ||
884 | } | ||
885 | |||
886 | /** | ||
887 | * parport_ip32_read_status - return current contents of the DSR register | ||
888 | * @p: pointer to &struct parport | ||
889 | */ | ||
890 | static inline unsigned char parport_ip32_read_status(struct parport *p) | ||
891 | { | ||
892 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
893 | return readb(priv->regs.dsr); | ||
894 | } | ||
895 | |||
896 | /** | ||
897 | * __parport_ip32_read_control - return cached contents of the DCR register | ||
898 | * @p: pointer to &struct parport | ||
899 | */ | ||
900 | static inline unsigned int __parport_ip32_read_control(struct parport *p) | ||
901 | { | ||
902 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
903 | return priv->dcr_cache; /* use soft copy */ | ||
904 | } | ||
905 | |||
906 | /** | ||
907 | * __parport_ip32_write_control - set new contents for the DCR register | ||
908 | * @p: pointer to &struct parport | ||
909 | * @c: new value to write | ||
910 | */ | ||
911 | static inline void __parport_ip32_write_control(struct parport *p, | ||
912 | unsigned int c) | ||
913 | { | ||
914 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
915 | CHECK_EXTRA_BITS(p, c, priv->dcr_writable); | ||
916 | c &= priv->dcr_writable; /* only writable bits */ | ||
917 | writeb(c, priv->regs.dcr); | ||
918 | priv->dcr_cache = c; /* update soft copy */ | ||
919 | } | ||
920 | |||
921 | /** | ||
922 | * __parport_ip32_frob_control - change bits from the DCR register | ||
923 | * @p: pointer to &struct parport | ||
924 | * @mask: bit mask of bits to change | ||
925 | * @val: new value for changed bits | ||
926 | * | ||
927 | * This is equivalent to reading from the DCR, masking out the bits in @mask, | ||
928 | * exclusive-or'ing with the bits in @val, and writing the result to the DCR. | ||
929 | * Actually, the cached contents of the DCR are used. | ||
930 | */ | ||
931 | static inline void __parport_ip32_frob_control(struct parport *p, | ||
932 | unsigned int mask, | ||
933 | unsigned int val) | ||
934 | { | ||
935 | unsigned int c; | ||
936 | c = (__parport_ip32_read_control(p) & ~mask) ^ val; | ||
937 | __parport_ip32_write_control(p, c); | ||
938 | } | ||
939 | |||
940 | /** | ||
941 | * parport_ip32_read_control - return cached contents of the DCR register | ||
942 | * @p: pointer to &struct parport | ||
943 | * | ||
944 | * The return value is masked so as to only return the value of %DCR_STROBE, | ||
945 | * %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT. | ||
946 | */ | ||
947 | static inline unsigned char parport_ip32_read_control(struct parport *p) | ||
948 | { | ||
949 | const unsigned int rm = | ||
950 | DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT; | ||
951 | return __parport_ip32_read_control(p) & rm; | ||
952 | } | ||
953 | |||
954 | /** | ||
955 | * parport_ip32_write_control - set new contents for the DCR register | ||
956 | * @p: pointer to &struct parport | ||
957 | * @c: new value to write | ||
958 | * | ||
959 | * The value is masked so as to only change the value of %DCR_STROBE, | ||
960 | * %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT. | ||
961 | */ | ||
962 | static inline void parport_ip32_write_control(struct parport *p, | ||
963 | unsigned char c) | ||
964 | { | ||
965 | const unsigned int wm = | ||
966 | DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT; | ||
967 | CHECK_EXTRA_BITS(p, c, wm); | ||
968 | __parport_ip32_frob_control(p, wm, c & wm); | ||
969 | } | ||
970 | |||
971 | /** | ||
972 | * parport_ip32_frob_control - change bits from the DCR register | ||
973 | * @p: pointer to &struct parport | ||
974 | * @mask: bit mask of bits to change | ||
975 | * @val: new value for changed bits | ||
976 | * | ||
977 | * This differs from __parport_ip32_frob_control() in that it only allows | ||
978 | * changing the value of %DCR_STROBE, %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT. | ||
979 | */ | ||
980 | static inline unsigned char parport_ip32_frob_control(struct parport *p, | ||
981 | unsigned char mask, | ||
982 | unsigned char val) | ||
983 | { | ||
984 | const unsigned int wm = | ||
985 | DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT; | ||
986 | CHECK_EXTRA_BITS(p, mask, wm); | ||
987 | CHECK_EXTRA_BITS(p, val, wm); | ||
988 | __parport_ip32_frob_control(p, mask & wm, val & wm); | ||
989 | return parport_ip32_read_control(p); | ||
990 | } | ||
991 | |||
992 | /** | ||
993 | * parport_ip32_disable_irq - disable interrupts on the rising edge of nACK | ||
994 | * @p: pointer to &struct parport | ||
995 | */ | ||
996 | static inline void parport_ip32_disable_irq(struct parport *p) | ||
997 | { | ||
998 | __parport_ip32_frob_control(p, DCR_IRQ, 0); | ||
999 | } | ||
1000 | |||
1001 | /** | ||
1002 | * parport_ip32_enable_irq - enable interrupts on the rising edge of nACK | ||
1003 | * @p: pointer to &struct parport | ||
1004 | */ | ||
1005 | static inline void parport_ip32_enable_irq(struct parport *p) | ||
1006 | { | ||
1007 | __parport_ip32_frob_control(p, DCR_IRQ, DCR_IRQ); | ||
1008 | } | ||
1009 | |||
1010 | /** | ||
1011 | * parport_ip32_data_forward - enable host-to-peripheral communications | ||
1012 | * @p: pointer to &struct parport | ||
1013 | * | ||
1014 | * Enable the data line drivers, for 8-bit host-to-peripheral communications. | ||
1015 | */ | ||
1016 | static inline void parport_ip32_data_forward(struct parport *p) | ||
1017 | { | ||
1018 | __parport_ip32_frob_control(p, DCR_DIR, 0); | ||
1019 | } | ||
1020 | |||
1021 | /** | ||
1022 | * parport_ip32_data_reverse - enable peripheral-to-host communications | ||
1023 | * @p: pointer to &struct parport | ||
1024 | * | ||
1025 | * Place the data bus in a high impedance state, if @p->modes has the | ||
1026 | * PARPORT_MODE_TRISTATE bit set. | ||
1027 | */ | ||
1028 | static inline void parport_ip32_data_reverse(struct parport *p) | ||
1029 | { | ||
1030 | __parport_ip32_frob_control(p, DCR_DIR, DCR_DIR); | ||
1031 | } | ||
1032 | |||
1033 | /** | ||
1034 | * parport_ip32_init_state - for core parport code | ||
1035 | * @dev: pointer to &struct pardevice | ||
1036 | * @s: pointer to &struct parport_state to initialize | ||
1037 | */ | ||
1038 | static void parport_ip32_init_state(struct pardevice *dev, | ||
1039 | struct parport_state *s) | ||
1040 | { | ||
1041 | s->u.ip32.dcr = DCR_SELECT | DCR_nINIT; | ||
1042 | s->u.ip32.ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR; | ||
1043 | } | ||
1044 | |||
1045 | /** | ||
1046 | * parport_ip32_save_state - for core parport code | ||
1047 | * @p: pointer to &struct parport | ||
1048 | * @s: pointer to &struct parport_state to save state to | ||
1049 | */ | ||
1050 | static void parport_ip32_save_state(struct parport *p, | ||
1051 | struct parport_state *s) | ||
1052 | { | ||
1053 | s->u.ip32.dcr = __parport_ip32_read_control(p); | ||
1054 | s->u.ip32.ecr = parport_ip32_read_econtrol(p); | ||
1055 | } | ||
1056 | |||
1057 | /** | ||
1058 | * parport_ip32_restore_state - for core parport code | ||
1059 | * @p: pointer to &struct parport | ||
1060 | * @s: pointer to &struct parport_state to restore state from | ||
1061 | */ | ||
1062 | static void parport_ip32_restore_state(struct parport *p, | ||
1063 | struct parport_state *s) | ||
1064 | { | ||
1065 | parport_ip32_set_mode(p, s->u.ip32.ecr & ECR_MODE_MASK); | ||
1066 | parport_ip32_write_econtrol(p, s->u.ip32.ecr); | ||
1067 | __parport_ip32_write_control(p, s->u.ip32.dcr); | ||
1068 | } | ||
1069 | |||
1070 | /*--- EPP mode functions -----------------------------------------------*/ | ||
1071 | |||
1072 | /** | ||
1073 | * parport_ip32_clear_epp_timeout - clear Timeout bit in EPP mode | ||
1074 | * @p: pointer to &struct parport | ||
1075 | * | ||
1076 | * Returns 1 if the Timeout bit is clear, and 0 otherwise. | ||
1077 | */ | ||
1078 | static unsigned int parport_ip32_clear_epp_timeout(struct parport *p) | ||
1079 | { | ||
1080 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
1081 | unsigned int cleared; | ||
1082 | |||
1083 | if (!(parport_ip32_read_status(p) & DSR_TIMEOUT)) | ||
1084 | cleared = 1; | ||
1085 | else { | ||
1086 | unsigned int r; | ||
1087 | /* To clear the timeout, some chips require a double read */ | ||
1088 | parport_ip32_read_status(p); | ||
1089 | r = parport_ip32_read_status(p); | ||
1090 | /* Some reset by writing 1 */ | ||
1091 | writeb(r | DSR_TIMEOUT, priv->regs.dsr); | ||
1092 | /* Others by writing 0 */ | ||
1093 | writeb(r & ~DSR_TIMEOUT, priv->regs.dsr); | ||
1094 | |||
1095 | r = parport_ip32_read_status(p); | ||
1096 | cleared = !(r & DSR_TIMEOUT); | ||
1097 | } | ||
1098 | |||
1099 | pr_trace(p, "(): %s", cleared ? "cleared" : "failed"); | ||
1100 | return cleared; | ||
1101 | } | ||
1102 | |||
1103 | /** | ||
1104 | * parport_ip32_epp_read - generic EPP read function | ||
1105 | * @eppreg: I/O register to read from | ||
1106 | * @p: pointer to &struct parport | ||
1107 | * @buf: buffer to store read data | ||
1108 | * @len: length of buffer @buf | ||
1109 | * @flags: may be PARPORT_EPP_FAST | ||
1110 | */ | ||
1111 | static size_t parport_ip32_epp_read(void __iomem *eppreg, | ||
1112 | struct parport *p, void *buf, | ||
1113 | size_t len, int flags) | ||
1114 | { | ||
1115 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
1116 | size_t got; | ||
1117 | parport_ip32_set_mode(p, ECR_MODE_EPP); | ||
1118 | parport_ip32_data_reverse(p); | ||
1119 | parport_ip32_write_control(p, DCR_nINIT); | ||
1120 | if ((flags & PARPORT_EPP_FAST) && (len > 1)) { | ||
1121 | readsb(eppreg, buf, len); | ||
1122 | if (readb(priv->regs.dsr) & DSR_TIMEOUT) { | ||
1123 | parport_ip32_clear_epp_timeout(p); | ||
1124 | return -EIO; | ||
1125 | } | ||
1126 | got = len; | ||
1127 | } else { | ||
1128 | u8 *bufp = buf; | ||
1129 | for (got = 0; got < len; got++) { | ||
1130 | *bufp++ = readb(eppreg); | ||
1131 | if (readb(priv->regs.dsr) & DSR_TIMEOUT) { | ||
1132 | parport_ip32_clear_epp_timeout(p); | ||
1133 | break; | ||
1134 | } | ||
1135 | } | ||
1136 | } | ||
1137 | parport_ip32_data_forward(p); | ||
1138 | parport_ip32_set_mode(p, ECR_MODE_PS2); | ||
1139 | return got; | ||
1140 | } | ||
1141 | |||
1142 | /** | ||
1143 | * parport_ip32_epp_write - generic EPP write function | ||
1144 | * @eppreg: I/O register to write to | ||
1145 | * @p: pointer to &struct parport | ||
1146 | * @buf: buffer of data to write | ||
1147 | * @len: length of buffer @buf | ||
1148 | * @flags: may be PARPORT_EPP_FAST | ||
1149 | */ | ||
1150 | static size_t parport_ip32_epp_write(void __iomem *eppreg, | ||
1151 | struct parport *p, const void *buf, | ||
1152 | size_t len, int flags) | ||
1153 | { | ||
1154 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
1155 | size_t written; | ||
1156 | parport_ip32_set_mode(p, ECR_MODE_EPP); | ||
1157 | parport_ip32_data_forward(p); | ||
1158 | parport_ip32_write_control(p, DCR_nINIT); | ||
1159 | if ((flags & PARPORT_EPP_FAST) && (len > 1)) { | ||
1160 | writesb(eppreg, buf, len); | ||
1161 | if (readb(priv->regs.dsr) & DSR_TIMEOUT) { | ||
1162 | parport_ip32_clear_epp_timeout(p); | ||
1163 | return -EIO; | ||
1164 | } | ||
1165 | written = len; | ||
1166 | } else { | ||
1167 | const u8 *bufp = buf; | ||
1168 | for (written = 0; written < len; written++) { | ||
1169 | writeb(*bufp++, eppreg); | ||
1170 | if (readb(priv->regs.dsr) & DSR_TIMEOUT) { | ||
1171 | parport_ip32_clear_epp_timeout(p); | ||
1172 | break; | ||
1173 | } | ||
1174 | } | ||
1175 | } | ||
1176 | parport_ip32_set_mode(p, ECR_MODE_PS2); | ||
1177 | return written; | ||
1178 | } | ||
1179 | |||
1180 | /** | ||
1181 | * parport_ip32_epp_read_data - read a block of data in EPP mode | ||
1182 | * @p: pointer to &struct parport | ||
1183 | * @buf: buffer to store read data | ||
1184 | * @len: length of buffer @buf | ||
1185 | * @flags: may be PARPORT_EPP_FAST | ||
1186 | */ | ||
1187 | static size_t parport_ip32_epp_read_data(struct parport *p, void *buf, | ||
1188 | size_t len, int flags) | ||
1189 | { | ||
1190 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
1191 | return parport_ip32_epp_read(priv->regs.eppData0, p, buf, len, flags); | ||
1192 | } | ||
1193 | |||
1194 | /** | ||
1195 | * parport_ip32_epp_write_data - write a block of data in EPP mode | ||
1196 | * @p: pointer to &struct parport | ||
1197 | * @buf: buffer of data to write | ||
1198 | * @len: length of buffer @buf | ||
1199 | * @flags: may be PARPORT_EPP_FAST | ||
1200 | */ | ||
1201 | static size_t parport_ip32_epp_write_data(struct parport *p, const void *buf, | ||
1202 | size_t len, int flags) | ||
1203 | { | ||
1204 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
1205 | return parport_ip32_epp_write(priv->regs.eppData0, p, buf, len, flags); | ||
1206 | } | ||
1207 | |||
1208 | /** | ||
1209 | * parport_ip32_epp_read_addr - read a block of addresses in EPP mode | ||
1210 | * @p: pointer to &struct parport | ||
1211 | * @buf: buffer to store read data | ||
1212 | * @len: length of buffer @buf | ||
1213 | * @flags: may be PARPORT_EPP_FAST | ||
1214 | */ | ||
1215 | static size_t parport_ip32_epp_read_addr(struct parport *p, void *buf, | ||
1216 | size_t len, int flags) | ||
1217 | { | ||
1218 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
1219 | return parport_ip32_epp_read(priv->regs.eppAddr, p, buf, len, flags); | ||
1220 | } | ||
1221 | |||
1222 | /** | ||
1223 | * parport_ip32_epp_write_addr - write a block of addresses in EPP mode | ||
1224 | * @p: pointer to &struct parport | ||
1225 | * @buf: buffer of data to write | ||
1226 | * @len: length of buffer @buf | ||
1227 | * @flags: may be PARPORT_EPP_FAST | ||
1228 | */ | ||
1229 | static size_t parport_ip32_epp_write_addr(struct parport *p, const void *buf, | ||
1230 | size_t len, int flags) | ||
1231 | { | ||
1232 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
1233 | return parport_ip32_epp_write(priv->regs.eppAddr, p, buf, len, flags); | ||
1234 | } | ||
1235 | |||
1236 | /*--- ECP mode functions (FIFO) ----------------------------------------*/ | ||
1237 | |||
1238 | /** | ||
1239 | * parport_ip32_fifo_wait_break - check if the waiting function should return | ||
1240 | * @p: pointer to &struct parport | ||
1241 | * @expire: timeout expiring date, in jiffies | ||
1242 | * | ||
1243 | * parport_ip32_fifo_wait_break() checks if the waiting function should return | ||
1244 | * immediately or not. The break conditions are: | ||
1245 | * - expired timeout; | ||
1246 | * - a pending signal; | ||
1247 | * - nFault asserted low. | ||
1248 | * This function also calls cond_resched(). | ||
1249 | */ | ||
1250 | static unsigned int parport_ip32_fifo_wait_break(struct parport *p, | ||
1251 | unsigned long expire) | ||
1252 | { | ||
1253 | cond_resched(); | ||
1254 | if (time_after(jiffies, expire)) { | ||
1255 | pr_debug1(PPIP32 "%s: FIFO write timed out\n", p->name); | ||
1256 | return 1; | ||
1257 | } | ||
1258 | if (signal_pending(current)) { | ||
1259 | pr_debug1(PPIP32 "%s: Signal pending\n", p->name); | ||
1260 | return 1; | ||
1261 | } | ||
1262 | if (!(parport_ip32_read_status(p) & DSR_nFAULT)) { | ||
1263 | pr_debug1(PPIP32 "%s: nFault asserted low\n", p->name); | ||
1264 | return 1; | ||
1265 | } | ||
1266 | return 0; | ||
1267 | } | ||
1268 | |||
1269 | /** | ||
1270 | * parport_ip32_fwp_wait_polling - wait for FIFO to empty (polling) | ||
1271 | * @p: pointer to &struct parport | ||
1272 | * | ||
1273 | * Returns the number of bytes that can safely be written to the FIFO. A | ||
1274 | * return value of zero means that the calling function should terminate as | ||
1275 | * fast as possible. | ||
1276 | */ | ||
1277 | static unsigned int parport_ip32_fwp_wait_polling(struct parport *p) | ||
1278 | { | ||
1279 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
1280 | struct parport * const physport = p->physport; | ||
1281 | unsigned long expire; | ||
1282 | unsigned int count; | ||
1283 | unsigned int ecr; | ||
1284 | |||
1285 | expire = jiffies + physport->cad->timeout; | ||
1286 | count = 0; | ||
1287 | while (1) { | ||
1288 | if (parport_ip32_fifo_wait_break(p, expire)) | ||
1289 | break; | ||
1290 | |||
1291 | /* Check FIFO state. We do nothing when the FIFO is neither full | ||
1292 | * nor empty. It appears that the FIFO full bit is not always | ||
1293 | * reliable: the FIFO state is sometimes wrongly reported, and | ||
1294 | * the chip gets confused if we give it another byte. */ | ||
1295 | ecr = parport_ip32_read_econtrol(p); | ||
1296 | if (ecr & ECR_F_EMPTY) { | ||
1297 | /* FIFO is empty, fill it up */ | ||
1298 | count = priv->fifo_depth; | ||
1299 | break; | ||
1300 | } | ||
1301 | |||
1302 | /* Wait a moment... */ | ||
1303 | udelay(FIFO_POLLING_INTERVAL); | ||
1304 | } /* while (1) */ | ||
1305 | |||
1306 | return count; | ||
1307 | } | ||
1308 | |||
1309 | /** | ||
1310 | * parport_ip32_fwp_wait_interrupt - wait for FIFO to empty (interrupt-driven) | ||
1311 | * @p: pointer to &struct parport | ||
1312 | * | ||
1313 | * Returns the number of bytes that can safely be written to the FIFO. A | ||
1314 | * return value of zero means that the calling function should terminate as | ||
1315 | * fast as possible. | ||
1316 | */ | ||
1317 | static unsigned int parport_ip32_fwp_wait_interrupt(struct parport *p) | ||
1318 | { | ||
1319 | static unsigned int lost_interrupt = 0; | ||
1320 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
1321 | struct parport * const physport = p->physport; | ||
1322 | unsigned long nfault_timeout; | ||
1323 | unsigned long expire; | ||
1324 | unsigned int count; | ||
1325 | unsigned int ecr; | ||
1326 | |||
1327 | nfault_timeout = min((unsigned long)physport->cad->timeout, | ||
1328 | msecs_to_jiffies(FIFO_NFAULT_TIMEOUT)); | ||
1329 | expire = jiffies + physport->cad->timeout; | ||
1330 | count = 0; | ||
1331 | while (1) { | ||
1332 | if (parport_ip32_fifo_wait_break(p, expire)) | ||
1333 | break; | ||
1334 | |||
1335 | /* Initialize mutex used to take interrupts into account */ | ||
1336 | INIT_COMPLETION(priv->irq_complete); | ||
1337 | |||
1338 | /* Enable serviceIntr */ | ||
1339 | parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0); | ||
1340 | |||
1341 | /* Enabling serviceIntr while the FIFO is empty does not | ||
1342 | * always generate an interrupt, so check for emptiness | ||
1343 | * now. */ | ||
1344 | ecr = parport_ip32_read_econtrol(p); | ||
1345 | if (!(ecr & ECR_F_EMPTY)) { | ||
1346 | /* FIFO is not empty: wait for an interrupt or a | ||
1347 | * timeout to occur */ | ||
1348 | wait_for_completion_interruptible_timeout( | ||
1349 | &priv->irq_complete, nfault_timeout); | ||
1350 | ecr = parport_ip32_read_econtrol(p); | ||
1351 | if ((ecr & ECR_F_EMPTY) && !(ecr & ECR_SERVINTR) | ||
1352 | && !lost_interrupt) { | ||
1353 | printk(KERN_WARNING PPIP32 | ||
1354 | "%s: lost interrupt in %s\n", | ||
1355 | p->name, __func__); | ||
1356 | lost_interrupt = 1; | ||
1357 | } | ||
1358 | } | ||
1359 | |||
1360 | /* Disable serviceIntr */ | ||
1361 | parport_ip32_frob_econtrol(p, ECR_SERVINTR, ECR_SERVINTR); | ||
1362 | |||
1363 | /* Check FIFO state */ | ||
1364 | if (ecr & ECR_F_EMPTY) { | ||
1365 | /* FIFO is empty, fill it up */ | ||
1366 | count = priv->fifo_depth; | ||
1367 | break; | ||
1368 | } else if (ecr & ECR_SERVINTR) { | ||
1369 | /* FIFO is not empty, but we know that we can safely push | ||
1370 | * writeIntrThreshold bytes into it */ | ||
1371 | count = priv->writeIntrThreshold; | ||
1372 | break; | ||
1373 | } | ||
1374 | /* FIFO is not empty, and we did not get any interrupt. | ||
1375 | * Either it's time to check for nFault, or a signal is | ||
1376 | * pending. This is verified in | ||
1377 | * parport_ip32_fifo_wait_break(), so we continue the loop. */ | ||
1378 | } /* while (1) */ | ||
1379 | |||
1380 | return count; | ||
1381 | } | ||
1382 | |||
1383 | /** | ||
1384 | * parport_ip32_fifo_write_block_pio - write a block of data (PIO mode) | ||
1385 | * @p: pointer to &struct parport | ||
1386 | * @buf: buffer of data to write | ||
1387 | * @len: length of buffer @buf | ||
1388 | * | ||
1389 | * Uses PIO to write the contents of the buffer @buf into the parallel port | ||
1390 | * FIFO. Returns the number of bytes that were actually written. It can work | ||
1391 | * with or without the help of interrupts. The parallel port must be | ||
1392 | * correctly initialized before calling parport_ip32_fifo_write_block_pio(). | ||
1393 | */ | ||
1394 | static size_t parport_ip32_fifo_write_block_pio(struct parport *p, | ||
1395 | const void *buf, size_t len) | ||
1396 | { | ||
1397 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
1398 | const u8 *bufp = buf; | ||
1399 | size_t left = len; | ||
1400 | |||
1401 | priv->irq_mode = PARPORT_IP32_IRQ_HERE; | ||
1402 | |||
1403 | while (left > 0) { | ||
1404 | unsigned int count; | ||
1405 | |||
1406 | count = (p->irq == PARPORT_IRQ_NONE) ? | ||
1407 | parport_ip32_fwp_wait_polling(p) : | ||
1408 | parport_ip32_fwp_wait_interrupt(p); | ||
1409 | if (count == 0) | ||
1410 | break; /* Transmission should be stopped */ | ||
1411 | if (count > left) | ||
1412 | count = left; | ||
1413 | if (count == 1) { | ||
1414 | writeb(*bufp, priv->regs.fifo); | ||
1415 | bufp++, left--; | ||
1416 | } else { | ||
1417 | writesb(priv->regs.fifo, bufp, count); | ||
1418 | bufp += count, left -= count; | ||
1419 | } | ||
1420 | } | ||
1421 | |||
1422 | priv->irq_mode = PARPORT_IP32_IRQ_FWD; | ||
1423 | |||
1424 | return len - left; | ||
1425 | } | ||
1426 | |||
1427 | /** | ||
1428 | * parport_ip32_fifo_write_block_dma - write a block of data (DMA mode) | ||
1429 | * @p: pointer to &struct parport | ||
1430 | * @buf: buffer of data to write | ||
1431 | * @len: length of buffer @buf | ||
1432 | * | ||
1433 | * Uses DMA to write the contents of the buffer @buf into the parallel port | ||
1434 | * FIFO. Returns the number of bytes that were actually written. The | ||
1435 | * parallel port must be correctly initialized before calling | ||
1436 | * parport_ip32_fifo_write_block_dma(). | ||
1437 | */ | ||
1438 | static size_t parport_ip32_fifo_write_block_dma(struct parport *p, | ||
1439 | const void *buf, size_t len) | ||
1440 | { | ||
1441 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
1442 | struct parport * const physport = p->physport; | ||
1443 | unsigned long nfault_timeout; | ||
1444 | unsigned long expire; | ||
1445 | size_t written; | ||
1446 | unsigned int ecr; | ||
1447 | |||
1448 | priv->irq_mode = PARPORT_IP32_IRQ_HERE; | ||
1449 | |||
1450 | parport_ip32_dma_start(DMA_TO_DEVICE, (void *)buf, len); | ||
1451 | INIT_COMPLETION(priv->irq_complete); | ||
1452 | parport_ip32_frob_econtrol(p, ECR_DMAEN | ECR_SERVINTR, ECR_DMAEN); | ||
1453 | |||
1454 | nfault_timeout = min((unsigned long)physport->cad->timeout, | ||
1455 | msecs_to_jiffies(FIFO_NFAULT_TIMEOUT)); | ||
1456 | expire = jiffies + physport->cad->timeout; | ||
1457 | while (1) { | ||
1458 | if (parport_ip32_fifo_wait_break(p, expire)) | ||
1459 | break; | ||
1460 | wait_for_completion_interruptible_timeout(&priv->irq_complete, | ||
1461 | nfault_timeout); | ||
1462 | ecr = parport_ip32_read_econtrol(p); | ||
1463 | if (ecr & ECR_SERVINTR) | ||
1464 | break; /* DMA transfer just finished */ | ||
1465 | } | ||
1466 | parport_ip32_dma_stop(); | ||
1467 | written = len - parport_ip32_dma_get_residue(); | ||
1468 | |||
1469 | priv->irq_mode = PARPORT_IP32_IRQ_FWD; | ||
1470 | |||
1471 | return written; | ||
1472 | } | ||
1473 | |||
1474 | /** | ||
1475 | * parport_ip32_fifo_write_block - write a block of data | ||
1476 | * @p: pointer to &struct parport | ||
1477 | * @buf: buffer of data to write | ||
1478 | * @len: length of buffer @buf | ||
1479 | * | ||
1480 | * Uses PIO or DMA to write the contents of the buffer @buf into the parallel | ||
1481 | * port @p FIFO. Returns the number of bytes that were actually written. | ||
1482 | */ | ||
1483 | static size_t parport_ip32_fifo_write_block(struct parport *p, | ||
1484 | const void *buf, size_t len) | ||
1485 | { | ||
1486 | size_t written = 0; | ||
1487 | if (len) | ||
1488 | /* FIXME - Maybe some threshold value should be set for @len | ||
1489 | * under which we revert to PIO mode? */ | ||
1490 | written = (p->modes & PARPORT_MODE_DMA) ? | ||
1491 | parport_ip32_fifo_write_block_dma(p, buf, len) : | ||
1492 | parport_ip32_fifo_write_block_pio(p, buf, len); | ||
1493 | return written; | ||
1494 | } | ||
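One way the FIXME above could be handled, sketched with a purely hypothetical threshold constant (the name and value are illustrative, not part of the patch); small transfers would then skip the DMA setup cost:

    #define PARPORT_IP32_DMA_THRESHOLD 64    /* hypothetical cut-over, in bytes */

    static size_t demo_fifo_write_block(struct parport *p,
                                        const void *buf, size_t len)
    {
        if (!len)
            return 0;
        /* Short buffers are cheaper to push by PIO than to set up for DMA. */
        if ((p->modes & PARPORT_MODE_DMA) &&
            len >= PARPORT_IP32_DMA_THRESHOLD)
            return parport_ip32_fifo_write_block_dma(p, buf, len);
        return parport_ip32_fifo_write_block_pio(p, buf, len);
    }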
1495 | |||
1496 | /** | ||
1497 | * parport_ip32_drain_fifo - wait for FIFO to empty | ||
1498 | * @p: pointer to &struct parport | ||
1499 | * @timeout: timeout, in jiffies | ||
1500 | * | ||
1501 | * This function waits for the FIFO to empty. It returns 1 when the FIFO is | ||
1502 | * empty, or 0 if the timeout @timeout is reached first or a signal is pending. | ||
1503 | */ | ||
1504 | static unsigned int parport_ip32_drain_fifo(struct parport *p, | ||
1505 | unsigned long timeout) | ||
1506 | { | ||
1507 | unsigned long expire = jiffies + timeout; | ||
1508 | unsigned int polling_interval; | ||
1509 | unsigned int counter; | ||
1510 | |||
1511 | /* Busy wait for approx. 200us */ | ||
1512 | for (counter = 0; counter < 40; counter++) { | ||
1513 | if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY) | ||
1514 | break; | ||
1515 | if (time_after(jiffies, expire)) | ||
1516 | break; | ||
1517 | if (signal_pending(current)) | ||
1518 | break; | ||
1519 | udelay(5); | ||
1520 | } | ||
1521 | /* Poll slowly. Polling interval starts with 1 millisecond, and is | ||
1522 | * increased exponentially until 128. */ | ||
1523 | polling_interval = 1; /* msecs */ | ||
1524 | while (!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY)) { | ||
1525 | if (time_after_eq(jiffies, expire)) | ||
1526 | break; | ||
1527 | msleep_interruptible(polling_interval); | ||
1528 | if (signal_pending(current)) | ||
1529 | break; | ||
1530 | if (polling_interval < 128) | ||
1531 | polling_interval *= 2; | ||
1532 | } | ||
1533 | |||
1534 | return !!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY); | ||
1535 | } | ||
1536 | |||
1537 | /** | ||
1538 | * parport_ip32_get_fifo_residue - reset FIFO | ||
1539 | * @p: pointer to &struct parport | ||
1540 | * @mode: current operation mode (ECR_MODE_PPF or ECR_MODE_ECP) | ||
1541 | * | ||
1542 | * This function resets FIFO, and returns the number of bytes remaining in it. | ||
1543 | */ | ||
1544 | static unsigned int parport_ip32_get_fifo_residue(struct parport *p, | ||
1545 | unsigned int mode) | ||
1546 | { | ||
1547 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
1548 | unsigned int residue; | ||
1549 | unsigned int cnfga; | ||
1550 | |||
1551 | /* FIXME - We are missing one byte if the printer is off-line. I | ||
1552 | * don't know how to detect this. It looks like the full bit is not | ||
1553 | * always reliable. For the moment, the problem is avoided in most | ||
1554 | * cases by testing for BUSY in parport_ip32_compat_write_data(). | ||
1555 | */ | ||
1556 | if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY) | ||
1557 | residue = 0; | ||
1558 | else { | ||
1559 | pr_debug1(PPIP32 "%s: FIFO is stuck\n", p->name); | ||
1560 | |||
1561 | /* Stop all transfers. | ||
1562 | * | ||
1563 | * Microsoft's document instructs to drive DCR_STROBE to 0, | ||
1564 | * but it doesn't work (at least in Compatibility mode, not | ||
1565 | * tested in ECP mode). Switching directly to Test mode (as | ||
1566 | * in parport_pc) is not an option: it confuses the port, and | ||
1567 | * ECP service interrupts no longer work after that. A | ||
1568 | * hard reset is then needed to revert to a sane state. | ||
1569 | * | ||
1570 | * Let's hope that the FIFO is really stuck and that the | ||
1571 | * peripheral doesn't wake up now. | ||
1572 | */ | ||
1573 | parport_ip32_frob_control(p, DCR_STROBE, 0); | ||
1574 | |||
1575 | /* Fill up FIFO */ | ||
1576 | for (residue = priv->fifo_depth; residue > 0; residue--) { | ||
1577 | if (parport_ip32_read_econtrol(p) & ECR_F_FULL) | ||
1578 | break; | ||
1579 | writeb(0x00, priv->regs.fifo); | ||
1580 | } | ||
1581 | } | ||
1582 | if (residue) | ||
1583 | pr_debug1(PPIP32 "%s: %d PWord%s left in FIFO\n", | ||
1584 | p->name, residue, | ||
1585 | (residue == 1) ? " was" : "s were"); | ||
1586 | |||
1587 | /* Now reset the FIFO */ | ||
1588 | parport_ip32_set_mode(p, ECR_MODE_PS2); | ||
1589 | |||
1590 | /* Host recovery for ECP mode */ | ||
1591 | if (mode == ECR_MODE_ECP) { | ||
1592 | parport_ip32_data_reverse(p); | ||
1593 | parport_ip32_frob_control(p, DCR_nINIT, 0); | ||
1594 | if (parport_wait_peripheral(p, DSR_PERROR, 0)) | ||
1595 | pr_debug1(PPIP32 "%s: PError timeout 1 in %s\n", | ||
1596 | p->name, __func__); | ||
1597 | parport_ip32_frob_control(p, DCR_STROBE, DCR_STROBE); | ||
1598 | parport_ip32_frob_control(p, DCR_nINIT, DCR_nINIT); | ||
1599 | if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR)) | ||
1600 | pr_debug1(PPIP32 "%s: PError timeout 2 in %s\n", | ||
1601 | p->name, __func__); | ||
1602 | } | ||
1603 | |||
1604 | /* Adjust residue if needed */ | ||
1605 | parport_ip32_set_mode(p, ECR_MODE_CFG); | ||
1606 | cnfga = readb(priv->regs.cnfgA); | ||
1607 | if (!(cnfga & CNFGA_nBYTEINTRANS)) { | ||
1608 | pr_debug1(PPIP32 "%s: cnfgA contains 0x%02x\n", | ||
1609 | p->name, cnfga); | ||
1610 | pr_debug1(PPIP32 "%s: Accounting for extra byte\n", | ||
1611 | p->name); | ||
1612 | residue++; | ||
1613 | } | ||
1614 | |||
1615 | /* Don't care about partial PWords since we do not support | ||
1616 | * PWord != 1 byte. */ | ||
1617 | |||
1618 | /* Back to forward PS2 mode. */ | ||
1619 | parport_ip32_set_mode(p, ECR_MODE_PS2); | ||
1620 | parport_ip32_data_forward(p); | ||
1621 | |||
1622 | return residue; | ||
1623 | } | ||
1624 | |||
1625 | /** | ||
1626 | * parport_ip32_compat_write_data - write a block of data in SPP mode | ||
1627 | * @p: pointer to &struct parport | ||
1628 | * @buf: buffer of data to write | ||
1629 | * @len: length of buffer @buf | ||
1630 | * @flags: ignored | ||
1631 | */ | ||
1632 | static size_t parport_ip32_compat_write_data(struct parport *p, | ||
1633 | const void *buf, size_t len, | ||
1634 | int flags) | ||
1635 | { | ||
1636 | static unsigned int ready_before = 1; | ||
1637 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
1638 | struct parport * const physport = p->physport; | ||
1639 | size_t written = 0; | ||
1640 | |||
1641 | /* Special case: a timeout of zero means we cannot call schedule(). | ||
1642 | * Also if O_NONBLOCK is set then use the default implementation. */ | ||
1643 | if (physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK) | ||
1644 | return parport_ieee1284_write_compat(p, buf, len, flags); | ||
1645 | |||
1646 | /* Reset FIFO, go in forward mode, and disable ackIntEn */ | ||
1647 | parport_ip32_set_mode(p, ECR_MODE_PS2); | ||
1648 | parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT); | ||
1649 | parport_ip32_data_forward(p); | ||
1650 | parport_ip32_disable_irq(p); | ||
1651 | parport_ip32_set_mode(p, ECR_MODE_PPF); | ||
1652 | physport->ieee1284.phase = IEEE1284_PH_FWD_DATA; | ||
1653 | |||
1654 | /* Wait for peripheral to become ready */ | ||
1655 | if (parport_wait_peripheral(p, DSR_nBUSY | DSR_nFAULT, | ||
1656 | DSR_nBUSY | DSR_nFAULT)) { | ||
1657 | /* Avoid flooding the logs */ | ||
1658 | if (ready_before) | ||
1659 | printk(KERN_INFO PPIP32 "%s: not ready in %s\n", | ||
1660 | p->name, __func__); | ||
1661 | ready_before = 0; | ||
1662 | goto stop; | ||
1663 | } | ||
1664 | ready_before = 1; | ||
1665 | |||
1666 | written = parport_ip32_fifo_write_block(p, buf, len); | ||
1667 | |||
1668 | /* Wait for the FIFO to empty. Timeout is proportional to FIFO depth. */ | ||
1669 | parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth); | ||
1670 | |||
1671 | /* Check for a potential residue */ | ||
1672 | written -= parport_ip32_get_fifo_residue(p, ECR_MODE_PPF); | ||
1673 | |||
1674 | /* Then, wait for BUSY to get low. */ | ||
1675 | if (parport_wait_peripheral(p, DSR_nBUSY, DSR_nBUSY)) | ||
1676 | printk(KERN_DEBUG PPIP32 "%s: BUSY timeout in %s\n", | ||
1677 | p->name, __func__); | ||
1678 | |||
1679 | stop: | ||
1680 | /* Reset FIFO */ | ||
1681 | parport_ip32_set_mode(p, ECR_MODE_PS2); | ||
1682 | physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE; | ||
1683 | |||
1684 | return written; | ||
1685 | } | ||
1686 | |||
1687 | /* | ||
1688 | * FIXME - Insert here parport_ip32_ecp_read_data(). | ||
1689 | */ | ||
1690 | |||
1691 | /** | ||
1692 | * parport_ip32_ecp_write_data - write a block of data in ECP mode | ||
1693 | * @p: pointer to &struct parport | ||
1694 | * @buf: buffer of data to write | ||
1695 | * @len: length of buffer @buf | ||
1696 | * @flags: ignored | ||
1697 | */ | ||
1698 | static size_t parport_ip32_ecp_write_data(struct parport *p, | ||
1699 | const void *buf, size_t len, | ||
1700 | int flags) | ||
1701 | { | ||
1702 | static unsigned int ready_before = 1; | ||
1703 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
1704 | struct parport * const physport = p->physport; | ||
1705 | size_t written = 0; | ||
1706 | |||
1707 | /* Special case: a timeout of zero means we cannot call schedule(). | ||
1708 | * Also if O_NONBLOCK is set then use the default implementation. */ | ||
1709 | if (physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK) | ||
1710 | return parport_ieee1284_ecp_write_data(p, buf, len, flags); | ||
1711 | |||
1712 | /* Negotiate to forward mode if necessary. */ | ||
1713 | if (physport->ieee1284.phase != IEEE1284_PH_FWD_IDLE) { | ||
1714 | /* Event 47: Set nInit high. */ | ||
1715 | parport_ip32_frob_control(p, DCR_nINIT | DCR_AUTOFD, | ||
1716 | DCR_nINIT | DCR_AUTOFD); | ||
1717 | |||
1718 | /* Event 49: PError goes high. */ | ||
1719 | if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR)) { | ||
1720 | printk(KERN_DEBUG PPIP32 "%s: PError timeout in %s\n", | ||
1721 | p->name, __func__); | ||
1722 | physport->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN; | ||
1723 | return 0; | ||
1724 | } | ||
1725 | } | ||
1726 | |||
1727 | /* Reset FIFO, go in forward mode, and disable ackIntEn */ | ||
1728 | parport_ip32_set_mode(p, ECR_MODE_PS2); | ||
1729 | parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT); | ||
1730 | parport_ip32_data_forward(p); | ||
1731 | parport_ip32_disable_irq(p); | ||
1732 | parport_ip32_set_mode(p, ECR_MODE_ECP); | ||
1733 | physport->ieee1284.phase = IEEE1284_PH_FWD_DATA; | ||
1734 | |||
1735 | /* Wait for peripheral to become ready */ | ||
1736 | if (parport_wait_peripheral(p, DSR_nBUSY | DSR_nFAULT, | ||
1737 | DSR_nBUSY | DSR_nFAULT)) { | ||
1738 | /* Avoid flooding the logs */ | ||
1739 | if (ready_before) | ||
1740 | printk(KERN_INFO PPIP32 "%s: not ready in %s\n", | ||
1741 | p->name, __func__); | ||
1742 | ready_before = 0; | ||
1743 | goto stop; | ||
1744 | } | ||
1745 | ready_before = 1; | ||
1746 | |||
1747 | written = parport_ip32_fifo_write_block(p, buf, len); | ||
1748 | |||
1749 | /* Wait for the FIFO to empty. Timeout is proportional to FIFO depth. */ | ||
1750 | parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth); | ||
1751 | |||
1752 | /* Check for a potential residue */ | ||
1753 | written -= parport_ip32_get_fifo_residue(p, ECR_MODE_ECP); | ||
1754 | |||
1755 | /* Then, wait for BUSY to get low. */ | ||
1756 | if (parport_wait_peripheral(p, DSR_nBUSY, DSR_nBUSY)) | ||
1757 | printk(KERN_DEBUG PPIP32 "%s: BUSY timeout in %s\n", | ||
1758 | p->name, __func__); | ||
1759 | |||
1760 | stop: | ||
1761 | /* Reset FIFO */ | ||
1762 | parport_ip32_set_mode(p, ECR_MODE_PS2); | ||
1763 | physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE; | ||
1764 | |||
1765 | return written; | ||
1766 | } | ||
1767 | |||
1768 | /* | ||
1769 | * FIXME - Insert here parport_ip32_ecp_write_addr(). | ||
1770 | */ | ||
1771 | |||
1772 | /*--- Default parport operations ---------------------------------------*/ | ||
1773 | |||
1774 | static __initdata struct parport_operations parport_ip32_ops = { | ||
1775 | .write_data = parport_ip32_write_data, | ||
1776 | .read_data = parport_ip32_read_data, | ||
1777 | |||
1778 | .write_control = parport_ip32_write_control, | ||
1779 | .read_control = parport_ip32_read_control, | ||
1780 | .frob_control = parport_ip32_frob_control, | ||
1781 | |||
1782 | .read_status = parport_ip32_read_status, | ||
1783 | |||
1784 | .enable_irq = parport_ip32_enable_irq, | ||
1785 | .disable_irq = parport_ip32_disable_irq, | ||
1786 | |||
1787 | .data_forward = parport_ip32_data_forward, | ||
1788 | .data_reverse = parport_ip32_data_reverse, | ||
1789 | |||
1790 | .init_state = parport_ip32_init_state, | ||
1791 | .save_state = parport_ip32_save_state, | ||
1792 | .restore_state = parport_ip32_restore_state, | ||
1793 | |||
1794 | .epp_write_data = parport_ieee1284_epp_write_data, | ||
1795 | .epp_read_data = parport_ieee1284_epp_read_data, | ||
1796 | .epp_write_addr = parport_ieee1284_epp_write_addr, | ||
1797 | .epp_read_addr = parport_ieee1284_epp_read_addr, | ||
1798 | |||
1799 | .ecp_write_data = parport_ieee1284_ecp_write_data, | ||
1800 | .ecp_read_data = parport_ieee1284_ecp_read_data, | ||
1801 | .ecp_write_addr = parport_ieee1284_ecp_write_addr, | ||
1802 | |||
1803 | .compat_write_data = parport_ieee1284_write_compat, | ||
1804 | .nibble_read_data = parport_ieee1284_read_nibble, | ||
1805 | .byte_read_data = parport_ieee1284_read_byte, | ||
1806 | |||
1807 | .owner = THIS_MODULE, | ||
1808 | }; | ||
1809 | |||
1810 | /*--- Device detection -------------------------------------------------*/ | ||
1811 | |||
1812 | /** | ||
1813 | * parport_ip32_ecp_supported - check for an ECP port | ||
1814 | * @p: pointer to the &parport structure | ||
1815 | * | ||
1816 | * Returns 1 if an ECP port is found, and 0 otherwise. This function actually | ||
1817 | * checks if an Extended Control Register seems to be present. On successful | ||
1818 | * return, the port is placed in SPP mode. | ||
1819 | */ | ||
1820 | static __init unsigned int parport_ip32_ecp_supported(struct parport *p) | ||
1821 | { | ||
1822 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
1823 | unsigned int ecr; | ||
1824 | |||
1825 | ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR; | ||
1826 | writeb(ecr, priv->regs.ecr); | ||
1827 | if (readb(priv->regs.ecr) != (ecr | ECR_F_EMPTY)) | ||
1828 | goto fail; | ||
1829 | |||
1830 | pr_probe(p, "Found working ECR register\n"); | ||
1831 | parport_ip32_set_mode(p, ECR_MODE_SPP); | ||
1832 | parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT); | ||
1833 | return 1; | ||
1834 | |||
1835 | fail: | ||
1836 | pr_probe(p, "ECR register not found\n"); | ||
1837 | return 0; | ||
1838 | } | ||
1839 | |||
1840 | /** | ||
1841 | * parport_ip32_fifo_supported - check for FIFO parameters | ||
1842 | * @p: pointer to the &parport structure | ||
1843 | * | ||
1844 | * Check for FIFO parameters of an Extended Capabilities Port. Returns 1 on | ||
1845 | * success, and 0 otherwise. Adjusts FIFO parameters in the parport structure. | ||
1846 | * On return, the port is placed in SPP mode. | ||
1847 | */ | ||
1848 | static __init unsigned int parport_ip32_fifo_supported(struct parport *p) | ||
1849 | { | ||
1850 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
1851 | unsigned int configa, configb; | ||
1852 | unsigned int pword; | ||
1853 | unsigned int i; | ||
1854 | |||
1855 | /* Configuration mode */ | ||
1856 | parport_ip32_set_mode(p, ECR_MODE_CFG); | ||
1857 | configa = readb(priv->regs.cnfgA); | ||
1858 | configb = readb(priv->regs.cnfgB); | ||
1859 | |||
1860 | /* Find out PWord size */ | ||
1861 | switch (configa & CNFGA_ID_MASK) { | ||
1862 | case CNFGA_ID_8: | ||
1863 | pword = 1; | ||
1864 | break; | ||
1865 | case CNFGA_ID_16: | ||
1866 | pword = 2; | ||
1867 | break; | ||
1868 | case CNFGA_ID_32: | ||
1869 | pword = 4; | ||
1870 | break; | ||
1871 | default: | ||
1872 | pr_probe(p, "Unknown implementation ID: 0x%0x\n", | ||
1873 | (configa & CNFGA_ID_MASK) >> CNFGA_ID_SHIFT); | ||
1874 | goto fail; | ||
1875 | break; | ||
1876 | } | ||
1877 | if (pword != 1) { | ||
1878 | pr_probe(p, "Unsupported PWord size: %u\n", pword); | ||
1879 | goto fail; | ||
1880 | } | ||
1881 | priv->pword = pword; | ||
1882 | pr_probe(p, "PWord is %u bits\n", 8 * priv->pword); | ||
1883 | |||
1884 | /* Check for compression support */ | ||
1885 | writeb(configb | CNFGB_COMPRESS, priv->regs.cnfgB); | ||
1886 | if (readb(priv->regs.cnfgB) & CNFGB_COMPRESS) | ||
1887 | pr_probe(p, "Hardware compression detected (unsupported)\n"); | ||
1888 | writeb(configb & ~CNFGB_COMPRESS, priv->regs.cnfgB); | ||
1889 | |||
1890 | /* Reset FIFO and go in test mode (no interrupt, no DMA) */ | ||
1891 | parport_ip32_set_mode(p, ECR_MODE_TST); | ||
1892 | |||
1893 | /* FIFO must be empty now */ | ||
1894 | if (!(readb(priv->regs.ecr) & ECR_F_EMPTY)) { | ||
1895 | pr_probe(p, "FIFO not reset\n"); | ||
1896 | goto fail; | ||
1897 | } | ||
1898 | |||
1899 | /* Find out FIFO depth. */ | ||
1900 | priv->fifo_depth = 0; | ||
1901 | for (i = 0; i < 1024; i++) { | ||
1902 | if (readb(priv->regs.ecr) & ECR_F_FULL) { | ||
1903 | /* FIFO full */ | ||
1904 | priv->fifo_depth = i; | ||
1905 | break; | ||
1906 | } | ||
1907 | writeb((u8)i, priv->regs.fifo); | ||
1908 | } | ||
1909 | if (i >= 1024) { | ||
1910 | pr_probe(p, "Can't fill FIFO\n"); | ||
1911 | goto fail; | ||
1912 | } | ||
1913 | if (!priv->fifo_depth) { | ||
1914 | pr_probe(p, "Can't get FIFO depth\n"); | ||
1915 | goto fail; | ||
1916 | } | ||
1917 | pr_probe(p, "FIFO is %u PWords deep\n", priv->fifo_depth); | ||
1918 | |||
1919 | /* Enable interrupts */ | ||
1920 | parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0); | ||
1921 | |||
1922 | /* Find out writeIntrThreshold: number of PWords we know we can write | ||
1923 | * if we get an interrupt. */ | ||
1924 | priv->writeIntrThreshold = 0; | ||
1925 | for (i = 0; i < priv->fifo_depth; i++) { | ||
1926 | if (readb(priv->regs.fifo) != (u8)i) { | ||
1927 | pr_probe(p, "Invalid data in FIFO\n"); | ||
1928 | goto fail; | ||
1929 | } | ||
1930 | if (!priv->writeIntrThreshold | ||
1931 | && readb(priv->regs.ecr) & ECR_SERVINTR) | ||
1932 | /* writeIntrThreshold reached */ | ||
1933 | priv->writeIntrThreshold = i + 1; | ||
1934 | if (i + 1 < priv->fifo_depth | ||
1935 | && readb(priv->regs.ecr) & ECR_F_EMPTY) { | ||
1936 | /* FIFO empty before the last byte? */ | ||
1937 | pr_probe(p, "Data lost in FIFO\n"); | ||
1938 | goto fail; | ||
1939 | } | ||
1940 | } | ||
1941 | if (!priv->writeIntrThreshold) { | ||
1942 | pr_probe(p, "Can't get writeIntrThreshold\n"); | ||
1943 | goto fail; | ||
1944 | } | ||
1945 | pr_probe(p, "writeIntrThreshold is %u\n", priv->writeIntrThreshold); | ||
1946 | |||
1947 | /* FIFO must be empty now */ | ||
1948 | if (!(readb(priv->regs.ecr) & ECR_F_EMPTY)) { | ||
1949 | pr_probe(p, "Can't empty FIFO\n"); | ||
1950 | goto fail; | ||
1951 | } | ||
1952 | |||
1953 | /* Reset FIFO */ | ||
1954 | parport_ip32_set_mode(p, ECR_MODE_PS2); | ||
1955 | /* Set reverse direction (must be in PS2 mode) */ | ||
1956 | parport_ip32_data_reverse(p); | ||
1957 | /* Test FIFO, no interrupt, no DMA */ | ||
1958 | parport_ip32_set_mode(p, ECR_MODE_TST); | ||
1959 | /* Enable interrupts */ | ||
1960 | parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0); | ||
1961 | |||
1962 | /* Find out readIntrThreshold: number of PWords we can read if we get | ||
1963 | * an interrupt. */ | ||
1964 | priv->readIntrThreshold = 0; | ||
1965 | for (i = 0; i < priv->fifo_depth; i++) { | ||
1966 | writeb(0xaa, priv->regs.fifo); | ||
1967 | if (readb(priv->regs.ecr) & ECR_SERVINTR) { | ||
1968 | /* readIntrThreshold reached */ | ||
1969 | priv->readIntrThreshold = i + 1; | ||
1970 | break; | ||
1971 | } | ||
1972 | } | ||
1973 | if (!priv->readIntrThreshold) { | ||
1974 | pr_probe(p, "Can't get readIntrThreshold\n"); | ||
1975 | goto fail; | ||
1976 | } | ||
1977 | pr_probe(p, "readIntrThreshold is %u\n", priv->readIntrThreshold); | ||
1978 | |||
1979 | /* Reset ECR */ | ||
1980 | parport_ip32_set_mode(p, ECR_MODE_PS2); | ||
1981 | parport_ip32_data_forward(p); | ||
1982 | parport_ip32_set_mode(p, ECR_MODE_SPP); | ||
1983 | return 1; | ||
1984 | |||
1985 | fail: | ||
1986 | priv->fifo_depth = 0; | ||
1987 | parport_ip32_set_mode(p, ECR_MODE_SPP); | ||
1988 | return 0; | ||
1989 | } | ||
1990 | |||
1991 | /*--- Initialization code ----------------------------------------------*/ | ||
1992 | |||
1993 | /** | ||
1994 | * parport_ip32_make_isa_registers - compute (ISA) register addresses | ||
1995 | * @regs: pointer to &struct parport_ip32_regs to fill | ||
1996 | * @base: base address of standard and EPP registers | ||
1997 | * @base_hi: base address of ECP registers | ||
1998 | * @regshift: how much to shift register offset by | ||
1999 | * | ||
2000 | * Compute register addresses, according to the ISA standard. The addresses | ||
2001 | * of the standard and EPP registers are computed from address @base. The | ||
2002 | * addresses of the ECP registers are computed from address @base_hi. | ||
2003 | */ | ||
2004 | static void __init | ||
2005 | parport_ip32_make_isa_registers(struct parport_ip32_regs *regs, | ||
2006 | void __iomem *base, void __iomem *base_hi, | ||
2007 | unsigned int regshift) | ||
2008 | { | ||
2009 | #define r_base(offset) ((u8 __iomem *)base + ((offset) << regshift)) | ||
2010 | #define r_base_hi(offset) ((u8 __iomem *)base_hi + ((offset) << regshift)) | ||
2011 | *regs = (struct parport_ip32_regs){ | ||
2012 | .data = r_base(0), | ||
2013 | .dsr = r_base(1), | ||
2014 | .dcr = r_base(2), | ||
2015 | .eppAddr = r_base(3), | ||
2016 | .eppData0 = r_base(4), | ||
2017 | .eppData1 = r_base(5), | ||
2018 | .eppData2 = r_base(6), | ||
2019 | .eppData3 = r_base(7), | ||
2020 | .ecpAFifo = r_base(0), | ||
2021 | .fifo = r_base_hi(0), | ||
2022 | .cnfgA = r_base_hi(0), | ||
2023 | .cnfgB = r_base_hi(1), | ||
2024 | .ecr = r_base_hi(2) | ||
2025 | }; | ||
2026 | #undef r_base_hi | ||
2027 | #undef r_base | ||
2028 | } | ||
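A worked example of the layout this produces: with regshift = 8, the value passed by the probe routine below, consecutive registers end up 0x100 bytes apart:

    /*
     * With regshift = 8:
     *   data = base    + (0 << 8) = base    + 0x000
     *   dsr  = base    + (1 << 8) = base    + 0x100
     *   dcr  = base    + (2 << 8) = base    + 0x200
     *   fifo = base_hi + (0 << 8) = base_hi + 0x000
     *   ecr  = base_hi + (2 << 8) = base_hi + 0x200
     */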
2029 | |||
2030 | /** | ||
2031 | * parport_ip32_probe_port - probe and register IP32 built-in parallel port | ||
2032 | * | ||
2033 | * Returns the newly allocated &parport structure. On error, an error code is | ||
2034 | * encoded in the return value with the ERR_PTR function. | ||
2035 | */ | ||
2036 | static __init struct parport *parport_ip32_probe_port(void) | ||
2037 | { | ||
2038 | struct parport_ip32_regs regs; | ||
2039 | struct parport_ip32_private *priv = NULL; | ||
2040 | struct parport_operations *ops = NULL; | ||
2041 | struct parport *p = NULL; | ||
2042 | int err; | ||
2043 | |||
2044 | parport_ip32_make_isa_registers(®s, &mace->isa.parallel, | ||
2045 | &mace->isa.ecp1284, 8 /* regshift */); | ||
2046 | |||
2047 | ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL); | ||
2048 | priv = kmalloc(sizeof(struct parport_ip32_private), GFP_KERNEL); | ||
2049 | p = parport_register_port(0, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, ops); | ||
2050 | if (ops == NULL || priv == NULL || p == NULL) { | ||
2051 | err = -ENOMEM; | ||
2052 | goto fail; | ||
2053 | } | ||
2054 | p->base = MACE_BASE + offsetof(struct sgi_mace, isa.parallel); | ||
2055 | p->base_hi = MACE_BASE + offsetof(struct sgi_mace, isa.ecp1284); | ||
2056 | p->private_data = priv; | ||
2057 | |||
2058 | *ops = parport_ip32_ops; | ||
2059 | *priv = (struct parport_ip32_private){ | ||
2060 | .regs = regs, | ||
2061 | .dcr_writable = DCR_DIR | DCR_SELECT | DCR_nINIT | | ||
2062 | DCR_AUTOFD | DCR_STROBE, | ||
2063 | .irq_mode = PARPORT_IP32_IRQ_FWD, | ||
2064 | }; | ||
2065 | init_completion(&priv->irq_complete); | ||
2066 | |||
2067 | /* Probe port. */ | ||
2068 | if (!parport_ip32_ecp_supported(p)) { | ||
2069 | err = -ENODEV; | ||
2070 | goto fail; | ||
2071 | } | ||
2072 | parport_ip32_dump_state(p, "begin init", 0); | ||
2073 | |||
2074 | /* We found what looks like a working ECR register. Simply assume | ||
2075 | * that all modes are correctly supported. Enable basic modes. */ | ||
2076 | p->modes = PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT; | ||
2077 | p->modes |= PARPORT_MODE_TRISTATE; | ||
2078 | |||
2079 | if (!parport_ip32_fifo_supported(p)) { | ||
2080 | printk(KERN_WARNING PPIP32 | ||
2081 | "%s: error: FIFO disabled\n", p->name); | ||
2082 | /* Disable hardware modes depending on a working FIFO. */ | ||
2083 | features &= ~PARPORT_IP32_ENABLE_SPP; | ||
2084 | features &= ~PARPORT_IP32_ENABLE_ECP; | ||
2085 | /* DMA is not needed if FIFO is not supported. */ | ||
2086 | features &= ~PARPORT_IP32_ENABLE_DMA; | ||
2087 | } | ||
2088 | |||
2089 | /* Request IRQ */ | ||
2090 | if (features & PARPORT_IP32_ENABLE_IRQ) { | ||
2091 | int irq = MACEISA_PARALLEL_IRQ; | ||
2092 | if (request_irq(irq, parport_ip32_interrupt, 0, p->name, p)) { | ||
2093 | printk(KERN_WARNING PPIP32 | ||
2094 | "%s: error: IRQ disabled\n", p->name); | ||
2095 | /* DMA cannot work without interrupts. */ | ||
2096 | features &= ~PARPORT_IP32_ENABLE_DMA; | ||
2097 | } else { | ||
2098 | pr_probe(p, "Interrupt support enabled\n"); | ||
2099 | p->irq = irq; | ||
2100 | priv->dcr_writable |= DCR_IRQ; | ||
2101 | } | ||
2102 | } | ||
2103 | |||
2104 | /* Allocate DMA resources */ | ||
2105 | if (features & PARPORT_IP32_ENABLE_DMA) { | ||
2106 | if (parport_ip32_dma_register()) | ||
2107 | printk(KERN_WARNING PPIP32 | ||
2108 | "%s: error: DMA disabled\n", p->name); | ||
2109 | else { | ||
2110 | pr_probe(p, "DMA support enabled\n"); | ||
2111 | p->dma = 0; /* arbitrary value != PARPORT_DMA_NONE */ | ||
2112 | p->modes |= PARPORT_MODE_DMA; | ||
2113 | } | ||
2114 | } | ||
2115 | |||
2116 | if (features & PARPORT_IP32_ENABLE_SPP) { | ||
2117 | /* Enable compatibility FIFO mode */ | ||
2118 | p->ops->compat_write_data = parport_ip32_compat_write_data; | ||
2119 | p->modes |= PARPORT_MODE_COMPAT; | ||
2120 | pr_probe(p, "Hardware support for SPP mode enabled\n"); | ||
2121 | } | ||
2122 | if (features & PARPORT_IP32_ENABLE_EPP) { | ||
2123 | /* Set up access functions to use EPP hardware. */ | ||
2124 | p->ops->epp_read_data = parport_ip32_epp_read_data; | ||
2125 | p->ops->epp_write_data = parport_ip32_epp_write_data; | ||
2126 | p->ops->epp_read_addr = parport_ip32_epp_read_addr; | ||
2127 | p->ops->epp_write_addr = parport_ip32_epp_write_addr; | ||
2128 | p->modes |= PARPORT_MODE_EPP; | ||
2129 | pr_probe(p, "Hardware support for EPP mode enabled\n"); | ||
2130 | } | ||
2131 | if (features & PARPORT_IP32_ENABLE_ECP) { | ||
2132 | /* Enable ECP FIFO mode */ | ||
2133 | p->ops->ecp_write_data = parport_ip32_ecp_write_data; | ||
2134 | /* FIXME - not implemented */ | ||
2135 | /* p->ops->ecp_read_data = parport_ip32_ecp_read_data; */ | ||
2136 | /* p->ops->ecp_write_addr = parport_ip32_ecp_write_addr; */ | ||
2137 | p->modes |= PARPORT_MODE_ECP; | ||
2138 | pr_probe(p, "Hardware support for ECP mode enabled\n"); | ||
2139 | } | ||
2140 | |||
2141 | /* Initialize the port with sensible values */ | ||
2142 | parport_ip32_set_mode(p, ECR_MODE_PS2); | ||
2143 | parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT); | ||
2144 | parport_ip32_data_forward(p); | ||
2145 | parport_ip32_disable_irq(p); | ||
2146 | parport_ip32_write_data(p, 0x00); | ||
2147 | parport_ip32_dump_state(p, "end init", 0); | ||
2148 | |||
2149 | /* Print out what we found */ | ||
2150 | printk(KERN_INFO "%s: SGI IP32 at 0x%lx (0x%lx)", | ||
2151 | p->name, p->base, p->base_hi); | ||
2152 | if (p->irq != PARPORT_IRQ_NONE) | ||
2153 | printk(", irq %d", p->irq); | ||
2154 | printk(" ["); | ||
2155 | #define printmode(x) if (p->modes & PARPORT_MODE_##x) \ | ||
2156 | printk("%s%s", f++ ? "," : "", #x) | ||
2157 | { | ||
2158 | unsigned int f = 0; | ||
2159 | printmode(PCSPP); | ||
2160 | printmode(TRISTATE); | ||
2161 | printmode(COMPAT); | ||
2162 | printmode(EPP); | ||
2163 | printmode(ECP); | ||
2164 | printmode(DMA); | ||
2165 | } | ||
2166 | #undef printmode | ||
2167 | printk("]\n"); | ||
2168 | |||
2169 | parport_announce_port(p); | ||
2170 | return p; | ||
2171 | |||
2172 | fail: | ||
2173 | if (p) | ||
2174 | parport_put_port(p); | ||
2175 | kfree(priv); | ||
2176 | kfree(ops); | ||
2177 | return ERR_PTR(err); | ||
2178 | } | ||
2179 | |||
2180 | /** | ||
2181 | * parport_ip32_unregister_port - unregister a parallel port | ||
2182 | * @p: pointer to the &struct parport | ||
2183 | * | ||
2184 | * Unregisters a parallel port and frees previously allocated resources | ||
2185 | * (memory, IRQ, ...). | ||
2186 | */ | ||
2187 | static __exit void parport_ip32_unregister_port(struct parport *p) | ||
2188 | { | ||
2189 | struct parport_ip32_private * const priv = p->physport->private_data; | ||
2190 | struct parport_operations *ops = p->ops; | ||
2191 | |||
2192 | parport_remove_port(p); | ||
2193 | if (p->modes & PARPORT_MODE_DMA) | ||
2194 | parport_ip32_dma_unregister(); | ||
2195 | if (p->irq != PARPORT_IRQ_NONE) | ||
2196 | free_irq(p->irq, p); | ||
2197 | parport_put_port(p); | ||
2198 | kfree(priv); | ||
2199 | kfree(ops); | ||
2200 | } | ||
2201 | |||
2202 | /** | ||
2203 | * parport_ip32_init - module initialization function | ||
2204 | */ | ||
2205 | static int __init parport_ip32_init(void) | ||
2206 | { | ||
2207 | pr_info(PPIP32 "SGI IP32 built-in parallel port driver v0.6\n"); | ||
2208 | pr_debug1(PPIP32 "Compiled on %s, %s\n", __DATE__, __TIME__); | ||
2209 | this_port = parport_ip32_probe_port(); | ||
2210 | return IS_ERR(this_port) ? PTR_ERR(this_port) : 0; | ||
2211 | } | ||
2212 | |||
2213 | /** | ||
2214 | * parport_ip32_exit - module termination function | ||
2215 | */ | ||
2216 | static void __exit parport_ip32_exit(void) | ||
2217 | { | ||
2218 | parport_ip32_unregister_port(this_port); | ||
2219 | } | ||
2220 | |||
2221 | /*--- Module stuff -----------------------------------------------------*/ | ||
2222 | |||
2223 | MODULE_AUTHOR("Arnaud Giersch <arnaud.giersch@free.fr>"); | ||
2224 | MODULE_DESCRIPTION("SGI IP32 built-in parallel port driver"); | ||
2225 | MODULE_LICENSE("GPL"); | ||
2226 | MODULE_VERSION("0.6"); /* update in parport_ip32_init() too */ | ||
2227 | |||
2228 | module_init(parport_ip32_init); | ||
2229 | module_exit(parport_ip32_exit); | ||
2230 | |||
2231 | module_param(verbose_probing, bool, S_IRUGO); | ||
2232 | MODULE_PARM_DESC(verbose_probing, "Log chit-chat during initialization"); | ||
2233 | |||
2234 | module_param(features, uint, S_IRUGO); | ||
2235 | MODULE_PARM_DESC(features, | ||
2236 | "Bit mask of features to enable" | ||
2237 | ", bit 0: IRQ support" | ||
2238 | ", bit 1: DMA support" | ||
2239 | ", bit 2: hardware SPP mode" | ||
2240 | ", bit 3: hardware EPP mode" | ||
2241 | ", bit 4: hardware ECP mode"); | ||
2242 | |||
2243 | /*--- Inform (X)Emacs about preferred coding style ---------------------*/ | ||
2244 | /* | ||
2245 | * Local Variables: | ||
2246 | * mode: c | ||
2247 | * c-file-style: "linux" | ||
2248 | * indent-tabs-mode: t | ||
2249 | * tab-width: 8 | ||
2250 | * fill-column: 78 | ||
2251 | * ispell-local-dictionary: "american" | ||
2252 | * End: | ||
2253 | */ | ||
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c index 76dd077e3184..166de3507780 100644 --- a/drivers/parport/parport_serial.c +++ b/drivers/parport/parport_serial.c | |||
@@ -329,9 +329,9 @@ static int __devinit parport_register (struct pci_dev *dev, | |||
329 | 329 | ||
330 | if (priv->num_par == ARRAY_SIZE (priv->port)) { | 330 | if (priv->num_par == ARRAY_SIZE (priv->port)) { |
331 | printk (KERN_WARNING | 331 | printk (KERN_WARNING |
332 | "parport_serial: %s: only %u parallel ports " | 332 | "parport_serial: %s: only %zu parallel ports " |
333 | "supported (%d reported)\n", pci_name (dev), | 333 | "supported (%d reported)\n", pci_name (dev), |
334 | ARRAY_SIZE (priv->port), card->numports); | 334 | ARRAY_SIZE(priv->port), card->numports); |
335 | break; | 335 | break; |
336 | } | 336 | } |
337 | 337 | ||
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c index b62aee8de3cb..ea83b70e0de2 100644 --- a/drivers/parport/probe.c +++ b/drivers/parport/probe.c | |||
@@ -199,7 +199,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer, | |||
199 | 199 | ||
200 | if (port->physport->ieee1284.phase != IEEE1284_PH_HBUSY_DAVAIL) { | 200 | if (port->physport->ieee1284.phase != IEEE1284_PH_HBUSY_DAVAIL) { |
201 | if (belen != len) { | 201 | if (belen != len) { |
202 | printk (KERN_DEBUG "%s: Device ID was %d bytes" | 202 | printk (KERN_DEBUG "%s: Device ID was %zd bytes" |
203 | " while device told it would be %d" | 203 | " while device told it would be %d" |
204 | " bytes\n", | 204 | " bytes\n", |
205 | port->name, len, belen); | 205 | port->name, len, belen); |
@@ -214,7 +214,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer, | |||
214 | if (buffer[len-1] == ';') { | 214 | if (buffer[len-1] == ';') { |
215 | printk (KERN_DEBUG "%s: Device ID reading stopped" | 215 | printk (KERN_DEBUG "%s: Device ID reading stopped" |
216 | " before device told data not available. " | 216 | " before device told data not available. " |
217 | "Current idlen %d of %d, len bytes %02X %02X\n", | 217 | "Current idlen %u of %u, len bytes %02X %02X\n", |
218 | port->name, current_idlen, numidlens, | 218 | port->name, current_idlen, numidlens, |
219 | length[0], length[1]); | 219 | length[0], length[1]); |
220 | goto done; | 220 | goto done; |
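Both printk hunks above are format-specifier fixes: ARRAY_SIZE() evaluates to a size_t and a byte count kept in a ssize_t is signed, so they take the %zu and %zd length modifiers; plain %u/%d trigger -Wformat warnings on 64-bit builds. A small self-contained illustration (all names hypothetical):

    #include <linux/kernel.h>    /* printk, ARRAY_SIZE */
    #include <linux/types.h>     /* ssize_t */

    static void demo_size_formats(void)
    {
        int port[4];             /* stand-in for a fixed-size table */
        ssize_t len = -1;        /* e.g. the result of a failed read */

        /* %zu for size_t, %zd for ssize_t */
        printk(KERN_DEBUG "%zu slots, read %zd bytes\n",
               ARRAY_SIZE(port), len);
    }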
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig index 6f50cc9323d9..6912399d0937 100644 --- a/drivers/s390/block/Kconfig +++ b/drivers/s390/block/Kconfig | |||
@@ -55,13 +55,21 @@ config DASD_DIAG | |||
55 | Disks under VM. If you are not running under VM or unsure what it is, | 55 | Disks under VM. If you are not running under VM or unsure what it is, |
56 | say "N". | 56 | say "N". |
57 | 57 | ||
58 | config DASD_EER | ||
59 | tristate "Extended error reporting (EER)" | ||
60 | depends on DASD | ||
61 | help | ||
62 | This driver provides a character device interface to the | ||
63 | DASD extended error reporting facility. This is only needed if you want to | ||
64 | use applications written for the EER facility. | ||
65 | |||
58 | config DASD_CMB | 66 | config DASD_CMB |
59 | tristate "Compatibility interface for DASD channel measurement blocks" | 67 | tristate "Compatibility interface for DASD channel measurement blocks" |
60 | depends on DASD | 68 | depends on DASD |
61 | help | 69 | help |
62 | This driver provides an additional interface to the channel measurement | 70 | This driver provides an additional interface to the channel |
63 | facility, which is normally accessed though sysfs, with a set of | 71 | measurement facility, which is normally accessed though sysfs, with |
64 | ioctl functions specific to the dasd driver. | 72 | a set of ioctl functions specific to the dasd driver. |
65 | This is only needed if you want to use applications written for | 73 | This is only needed if you want to use applications written for |
66 | linux-2.4 dasd channel measurement facility interface. | 74 | linux-2.4 dasd channel measurement facility interface. |
67 | 75 | ||
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile index 58c6780134f7..0c0d871e8f51 100644 --- a/drivers/s390/block/Makefile +++ b/drivers/s390/block/Makefile | |||
@@ -5,6 +5,7 @@ | |||
5 | dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_9343_erp.o | 5 | dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_9343_erp.o |
6 | dasd_fba_mod-objs := dasd_fba.o dasd_3370_erp.o dasd_9336_erp.o | 6 | dasd_fba_mod-objs := dasd_fba.o dasd_3370_erp.o dasd_9336_erp.o |
7 | dasd_diag_mod-objs := dasd_diag.o | 7 | dasd_diag_mod-objs := dasd_diag.o |
8 | dasd_eer_mod-objs := dasd_eer.o | ||
8 | dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \ | 9 | dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \ |
9 | dasd_genhd.o dasd_erp.o | 10 | dasd_genhd.o dasd_erp.o |
10 | 11 | ||
@@ -13,5 +14,6 @@ obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o | |||
13 | obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o | 14 | obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o |
14 | obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o | 15 | obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o |
15 | obj-$(CONFIG_DASD_CMB) += dasd_cmb.o | 16 | obj-$(CONFIG_DASD_CMB) += dasd_cmb.o |
17 | obj-$(CONFIG_DASD_EER) += dasd_eer.o | ||
16 | obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o | 18 | obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o |
17 | obj-$(CONFIG_DCSSBLK) += dcssblk.o | 19 | obj-$(CONFIG_DCSSBLK) += dcssblk.o |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index abdf1ee633e7..08c88fcd8963 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
19 | #include <linux/buffer_head.h> | 19 | #include <linux/buffer_head.h> |
20 | #include <linux/hdreg.h> | 20 | #include <linux/hdreg.h> |
21 | #include <linux/notifier.h> | ||
21 | 22 | ||
22 | #include <asm/ccwdev.h> | 23 | #include <asm/ccwdev.h> |
23 | #include <asm/ebcdic.h> | 24 | #include <asm/ebcdic.h> |
@@ -57,6 +58,7 @@ static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); | |||
57 | static void dasd_flush_ccw_queue(struct dasd_device *, int); | 58 | static void dasd_flush_ccw_queue(struct dasd_device *, int); |
58 | static void dasd_tasklet(struct dasd_device *); | 59 | static void dasd_tasklet(struct dasd_device *); |
59 | static void do_kick_device(void *data); | 60 | static void do_kick_device(void *data); |
61 | static void dasd_disable_eer(struct dasd_device *device); | ||
60 | 62 | ||
61 | /* | 63 | /* |
62 | * SECTION: Operations on the device structure. | 64 | * SECTION: Operations on the device structure. |
@@ -151,6 +153,8 @@ dasd_state_new_to_known(struct dasd_device *device) | |||
151 | static inline void | 153 | static inline void |
152 | dasd_state_known_to_new(struct dasd_device * device) | 154 | dasd_state_known_to_new(struct dasd_device * device) |
153 | { | 155 | { |
156 | /* disable extended error reporting for this device */ | ||
157 | dasd_disable_eer(device); | ||
154 | /* Forget the discipline information. */ | 158 | /* Forget the discipline information. */ |
155 | device->discipline = NULL; | 159 | device->discipline = NULL; |
156 | device->state = DASD_STATE_NEW; | 160 | device->state = DASD_STATE_NEW; |
@@ -867,6 +871,9 @@ dasd_handle_state_change_pending(struct dasd_device *device) | |||
867 | struct dasd_ccw_req *cqr; | 871 | struct dasd_ccw_req *cqr; |
868 | struct list_head *l, *n; | 872 | struct list_head *l, *n; |
869 | 873 | ||
874 | /* first of all call extended error reporting */ | ||
875 | dasd_write_eer_trigger(DASD_EER_STATECHANGE, device, NULL); | ||
876 | |||
870 | device->stopped &= ~DASD_STOPPED_PENDING; | 877 | device->stopped &= ~DASD_STOPPED_PENDING; |
871 | 878 | ||
872 | /* restart all 'running' IO on queue */ | 879 | /* restart all 'running' IO on queue */ |
@@ -1086,6 +1093,19 @@ restart: | |||
1086 | } | 1093 | } |
1087 | goto restart; | 1094 | goto restart; |
1088 | } | 1095 | } |
1096 | |||
1097 | /* first of all call extended error reporting */ | ||
1098 | if (device->eer && cqr->status == DASD_CQR_FAILED) { | ||
1099 | dasd_write_eer_trigger(DASD_EER_FATALERROR, | ||
1100 | device, cqr); | ||
1101 | |||
1102 | /* restart request */ | ||
1103 | cqr->status = DASD_CQR_QUEUED; | ||
1104 | cqr->retries = 255; | ||
1105 | device->stopped |= DASD_STOPPED_QUIESCE; | ||
1106 | goto restart; | ||
1107 | } | ||
1108 | |||
1089 | /* Process finished ERP request. */ | 1109 | /* Process finished ERP request. */ |
1090 | if (cqr->refers) { | 1110 | if (cqr->refers) { |
1091 | __dasd_process_erp(device, cqr); | 1111 | __dasd_process_erp(device, cqr); |
@@ -1223,7 +1243,8 @@ __dasd_start_head(struct dasd_device * device) | |||
1223 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); | 1243 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); |
1224 | /* check FAILFAST */ | 1244 | /* check FAILFAST */ |
1225 | if (device->stopped & ~DASD_STOPPED_PENDING && | 1245 | if (device->stopped & ~DASD_STOPPED_PENDING && |
1226 | test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags)) { | 1246 | test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && |
1247 | (!device->eer)) { | ||
1227 | cqr->status = DASD_CQR_FAILED; | 1248 | cqr->status = DASD_CQR_FAILED; |
1228 | dasd_schedule_bh(device); | 1249 | dasd_schedule_bh(device); |
1229 | } | 1250 | } |
@@ -1965,6 +1986,9 @@ dasd_generic_notify(struct ccw_device *cdev, int event) | |||
1965 | switch (event) { | 1986 | switch (event) { |
1966 | case CIO_GONE: | 1987 | case CIO_GONE: |
1967 | case CIO_NO_PATH: | 1988 | case CIO_NO_PATH: |
1989 | /* first of all call extended error reporting */ | ||
1990 | dasd_write_eer_trigger(DASD_EER_NOPATH, device, NULL); | ||
1991 | |||
1968 | if (device->state < DASD_STATE_BASIC) | 1992 | if (device->state < DASD_STATE_BASIC) |
1969 | break; | 1993 | break; |
1970 | /* Device is active. We want to keep it. */ | 1994 | /* Device is active. We want to keep it. */ |
@@ -2022,6 +2046,51 @@ dasd_generic_auto_online (struct ccw_driver *dasd_discipline_driver) | |||
2022 | put_driver(drv); | 2046 | put_driver(drv); |
2023 | } | 2047 | } |
2024 | 2048 | ||
2049 | /* | ||
2050 | * notifications for extended error reports | ||
2051 | */ | ||
2052 | static struct notifier_block *dasd_eer_chain; | ||
2053 | |||
2054 | int | ||
2055 | dasd_register_eer_notifier(struct notifier_block *nb) | ||
2056 | { | ||
2057 | return notifier_chain_register(&dasd_eer_chain, nb); | ||
2058 | } | ||
2059 | |||
2060 | int | ||
2061 | dasd_unregister_eer_notifier(struct notifier_block *nb) | ||
2062 | { | ||
2063 | return notifier_chain_unregister(&dasd_eer_chain, nb); | ||
2064 | } | ||
2065 | |||
2066 | /* | ||
2067 | * Notify the registered error reporting module of a problem | ||
2068 | */ | ||
2069 | void | ||
2070 | dasd_write_eer_trigger(unsigned int id, struct dasd_device *device, | ||
2071 | struct dasd_ccw_req *cqr) | ||
2072 | { | ||
2073 | if (device->eer) { | ||
2074 | struct dasd_eer_trigger temp; | ||
2075 | temp.id = id; | ||
2076 | temp.device = device; | ||
2077 | temp.cqr = cqr; | ||
2078 | notifier_call_chain(&dasd_eer_chain, DASD_EER_TRIGGER, | ||
2079 | (void *)&temp); | ||
2080 | } | ||
2081 | } | ||
2082 | |||
2083 | /* | ||
2084 | * Tell the registered error reporting module to disable error reporting for | ||
2085 | * a given device and to cleanup any private data structures on that device. | ||
2086 | */ | ||
2087 | static void | ||
2088 | dasd_disable_eer(struct dasd_device *device) | ||
2089 | { | ||
2090 | notifier_call_chain(&dasd_eer_chain, DASD_EER_DISABLE, (void *)device); | ||
2091 | } | ||
2092 | |||
2093 | |||
2025 | static int __init | 2094 | static int __init |
2026 | dasd_init(void) | 2095 | dasd_init(void) |
2027 | { | 2096 | { |
@@ -2103,6 +2172,11 @@ EXPORT_SYMBOL_GPL(dasd_generic_set_online); | |||
2103 | EXPORT_SYMBOL_GPL(dasd_generic_set_offline); | 2172 | EXPORT_SYMBOL_GPL(dasd_generic_set_offline); |
2104 | EXPORT_SYMBOL_GPL(dasd_generic_auto_online); | 2173 | EXPORT_SYMBOL_GPL(dasd_generic_auto_online); |
2105 | 2174 | ||
2175 | EXPORT_SYMBOL(dasd_register_eer_notifier); | ||
2176 | EXPORT_SYMBOL(dasd_unregister_eer_notifier); | ||
2177 | EXPORT_SYMBOL(dasd_write_eer_trigger); | ||
2178 | |||
2179 | |||
2106 | /* | 2180 | /* |
2107 | * Overrides for Emacs so that we follow Linus's tabbing style. | 2181 | * Overrides for Emacs so that we follow Linus's tabbing style. |
2108 | * Emacs will notice this stuff at the end of the file and automatically | 2182 | * Emacs will notice this stuff at the end of the file and automatically |
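A skeleton of how a consumer could hook into the notifier chain exported above; the names demo_eer_notify and demo_eer_nb are hypothetical, and the DASD_EER_* constants and struct dasd_eer_trigger are assumed to come from the dasd_int.h changes elsewhere in this patch (the real consumer is the dasd_eer module added below):

    #include <linux/notifier.h>

    static int demo_eer_notify(struct notifier_block *self,
                               unsigned long action, void *data)
    {
        switch (action) {
        case DASD_EER_TRIGGER: {
            struct dasd_eer_trigger *t = data;
            /* t->id, t->device and t->cqr describe the event */
            break;
        }
        case DASD_EER_DISABLE:
            /* data is the struct dasd_device being shut down */
            break;
        }
        return NOTIFY_OK;
    }

    static struct notifier_block demo_eer_nb = {
        .notifier_call = demo_eer_notify,
    };

    /* paired with dasd_register_eer_notifier(&demo_eer_nb) at init time
     * and dasd_unregister_eer_notifier(&demo_eer_nb) at exit time */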
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index 4ee0f934e325..c811380b9079 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c | |||
@@ -1108,6 +1108,9 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense) | |||
1108 | case 0x0B: | 1108 | case 0x0B: |
1109 | DEV_MESSAGE(KERN_WARNING, device, "%s", | 1109 | DEV_MESSAGE(KERN_WARNING, device, "%s", |
1110 | "FORMAT F - Volume is suspended duplex"); | 1110 | "FORMAT F - Volume is suspended duplex"); |
1111 | /* call extended error reporting (EER) */ | ||
1112 | dasd_write_eer_trigger(DASD_EER_PPRCSUSPEND, device, | ||
1113 | erp->refers); | ||
1111 | break; | 1114 | break; |
1112 | case 0x0C: | 1115 | case 0x0C: |
1113 | DEV_MESSAGE(KERN_WARNING, device, "%s", | 1116 | DEV_MESSAGE(KERN_WARNING, device, "%s", |
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h index bc3823d35223..e15dd7978050 100644 --- a/drivers/s390/block/dasd_eckd.h +++ b/drivers/s390/block/dasd_eckd.h | |||
@@ -29,6 +29,7 @@ | |||
29 | #define DASD_ECKD_CCW_PSF 0x27 | 29 | #define DASD_ECKD_CCW_PSF 0x27 |
30 | #define DASD_ECKD_CCW_RSSD 0x3e | 30 | #define DASD_ECKD_CCW_RSSD 0x3e |
31 | #define DASD_ECKD_CCW_LOCATE_RECORD 0x47 | 31 | #define DASD_ECKD_CCW_LOCATE_RECORD 0x47 |
32 | #define DASD_ECKD_CCW_SNSS 0x54 | ||
32 | #define DASD_ECKD_CCW_DEFINE_EXTENT 0x63 | 33 | #define DASD_ECKD_CCW_DEFINE_EXTENT 0x63 |
33 | #define DASD_ECKD_CCW_WRITE_MT 0x85 | 34 | #define DASD_ECKD_CCW_WRITE_MT 0x85 |
34 | #define DASD_ECKD_CCW_READ_MT 0x86 | 35 | #define DASD_ECKD_CCW_READ_MT 0x86 |
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c new file mode 100644 index 000000000000..f70cd7716b24 --- /dev/null +++ b/drivers/s390/block/dasd_eer.c | |||
@@ -0,0 +1,1090 @@ | |||
1 | /* | ||
2 | * character device driver for extended error reporting | ||
3 | * | ||
4 | * | ||
5 | * Copyright (C) 2005 IBM Corporation | ||
6 | * extended error reporting for DASD ECKD devices | ||
7 | * Author(s): Stefan Weinhuber <wein@de.ibm.com> | ||
8 | * | ||
9 | */ | ||
10 | |||
11 | #include <linux/init.h> | ||
12 | #include <linux/fs.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/miscdevice.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/moduleparam.h> | ||
17 | #include <linux/device.h> | ||
18 | #include <linux/workqueue.h> | ||
19 | #include <linux/poll.h> | ||
20 | #include <linux/notifier.h> | ||
21 | |||
22 | #include <asm/uaccess.h> | ||
23 | #include <asm/semaphore.h> | ||
24 | #include <asm/atomic.h> | ||
25 | #include <asm/ebcdic.h> | ||
26 | |||
27 | #include "dasd_int.h" | ||
28 | #include "dasd_eckd.h" | ||
29 | |||
30 | |||
31 | MODULE_LICENSE("GPL"); | ||
32 | |||
33 | MODULE_AUTHOR("Stefan Weinhuber <wein@de.ibm.com>"); | ||
34 | MODULE_DESCRIPTION("DASD extended error reporting module"); | ||
35 | |||
36 | |||
37 | #ifdef PRINTK_HEADER | ||
38 | #undef PRINTK_HEADER | ||
39 | #endif /* PRINTK_HEADER */ | ||
40 | #define PRINTK_HEADER "dasd(eer):" | ||
41 | |||
42 | |||
43 | |||
44 | |||
45 | |||
46 | /*****************************************************************************/ | ||
47 | /* the internal buffer */ | ||
48 | /*****************************************************************************/ | ||
49 | |||
50 | /* | ||
51 | * The internal buffer is meant to store opaque blobs of data, so it doesn't | ||
52 | * know of higher level concepts like triggers. | ||
53 | * It consists of a number of pages that are used as a ringbuffer. Each data | ||
54 | * blob is stored in a simple record that consists of an integer, which | ||
55 | * contains the size of the following data, and the data bytes themselves. | ||
56 | * | ||
57 | * To allow for multiple independent readers we create one internal buffer | ||
58 | * each time the device is opened and destroy the buffer when the file is | ||
59 | * closed again. | ||
60 | * | ||
61 | * One record can be written to a buffer by using the functions | ||
62 | * - dasd_eer_start_record (one time per record to write the size to the buffer | ||
63 | * and reserve the space for the data) | ||
64 | * - dasd_eer_write_buffer (one or more times per record to write the data) | ||
65 | * The data can be written in several steps but you will have to compute | ||
66 | * the total size up front for the invocation of dasd_eer_start_record. | ||
67 | * If the ringbuffer is full, dasd_eer_start_record will remove the required | ||
68 | * number of old records. | ||
69 | * | ||
70 | * A record is typically read in two steps, first read the integer that | ||
71 | * specifies the size of the following data, then read the data. | ||
72 | * Both can be done by | ||
73 | * - dasd_eer_read_buffer | ||
74 | * | ||
75 | * For all mentioned functions you need to get the bufferlock first and keep it | ||
76 | * until a complete record is written or read. | ||
77 | */ | ||
78 | |||
79 | |||
80 | /* | ||
81 | * All information necessary to keep track of an internal buffer is kept in | ||
82 | * a struct eerbuffer. The buffer specific to a file pointer is stored in | ||
83 | * the private_data field of that file. To be able to write data to all | ||
84 | * existing buffers, each buffer is also added to the bufferlist. | ||
85 | * If the user doesn't want to read a complete record in one go, we have to | ||
86 | * keep track of the rest of the record. residual stores the number of bytes | ||
87 | * that are still to deliver. If the rest of the record is invalidated between | ||
88 | * two reads then residual will be set to -1 so that the next read will fail. | ||
89 | * All entries in the eerbuffer structure are protected with the bufferlock. | ||
90 | * To avoid races between writing to a buffer on the one side and creating | ||
91 | * and destroying buffers on the other side, the bufferlock must also be used | ||
92 | * to protect the bufferlist. | ||
93 | */ | ||
94 | |||
95 | struct eerbuffer { | ||
96 | struct list_head list; | ||
97 | char **buffer; | ||
98 | int buffersize; | ||
99 | int buffer_page_count; | ||
100 | int head; | ||
101 | int tail; | ||
102 | int residual; | ||
103 | }; | ||
104 | |||
105 | LIST_HEAD(bufferlist); | ||
106 | |||
107 | static spinlock_t bufferlock = SPIN_LOCK_UNLOCKED; | ||
108 | |||
109 | DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue); | ||
110 | |||
111 | /* | ||
112 | * How many free bytes are available in the buffer. | ||
113 | * needs to be called with bufferlock held | ||
114 | */ | ||
115 | static int | ||
116 | dasd_eer_get_free_bytes(struct eerbuffer *eerb) | ||
117 | { | ||
118 | if (eerb->head < eerb->tail) { | ||
119 | return eerb->tail - eerb->head - 1; | ||
120 | } else | ||
121 | return eerb->buffersize - eerb->head + eerb->tail -1; | ||
122 | } | ||
123 | |||
124 | /* | ||
125 | * How many bytes of buffer space are used. | ||
126 | * needs to be called with bufferlock held | ||
127 | */ | ||
128 | static int | ||
129 | dasd_eer_get_filled_bytes(struct eerbuffer *eerb) | ||
130 | { | ||
131 | |||
132 | if (eerb->head >= eerb->tail) { | ||
133 | return eerb->head - eerb->tail; | ||
134 | } else | ||
135 | return eerb->buffersize - eerb->tail + eerb->head; | ||
136 | } | ||
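A quick worked example of the two helpers above, assuming a two-page buffer (buffersize = 8192) with head = 100 and tail = 8000; one byte is always kept free so that head == tail can only mean "empty":

    /*
     *   free   = tail - head - 1          = 8000 - 100 - 1    = 7899
     *   filled = buffersize - tail + head = 8192 - 8000 + 100 =  292
     *   free + filled = buffersize - 1    = 8191
     */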
137 | |||
138 | /* | ||
139 | * The dasd_eer_write_buffer function just copies count bytes of data | ||
140 | * to the buffer. Make sure to call dasd_eer_start_record first, to | ||
141 | * make sure that enough free space is available. | ||
142 | * needs to be called with bufferlock held | ||
143 | */ | ||
144 | static void | ||
145 | dasd_eer_write_buffer(struct eerbuffer *eerb, int count, char *data) | ||
146 | { | ||
147 | |||
148 | unsigned long headindex,localhead; | ||
149 | unsigned long rest, len; | ||
150 | char *nextdata; | ||
151 | |||
152 | nextdata = data; | ||
153 | rest = count; | ||
154 | while (rest > 0) { | ||
155 | headindex = eerb->head / PAGE_SIZE; | ||
156 | localhead = eerb->head % PAGE_SIZE; | ||
157 | len = min(rest, (PAGE_SIZE - localhead)); | ||
158 | memcpy(eerb->buffer[headindex]+localhead, nextdata, len); | ||
159 | nextdata += len; | ||
160 | rest -= len; | ||
161 | eerb->head += len; | ||
162 | if ( eerb->head == eerb->buffersize ) | ||
163 | eerb->head = 0; /* wrap around */ | ||
164 | if (eerb->head > eerb->buffersize) { | ||
165 | MESSAGE(KERN_ERR, "%s", "runaway buffer head."); | ||
166 | BUG(); | ||
167 | } | ||
168 | } | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * needs to be called with bufferlock held | ||
173 | */ | ||
174 | static int | ||
175 | dasd_eer_read_buffer(struct eerbuffer *eerb, int count, char *data) | ||
176 | { | ||
177 | |||
178 | unsigned long tailindex,localtail; | ||
179 | unsigned long rest, len, finalcount; | ||
180 | char *nextdata; | ||
181 | |||
182 | finalcount = min(count, dasd_eer_get_filled_bytes(eerb)); | ||
183 | nextdata = data; | ||
184 | rest = finalcount; | ||
185 | while (rest > 0) { | ||
186 | tailindex = eerb->tail / PAGE_SIZE; | ||
187 | localtail = eerb->tail % PAGE_SIZE; | ||
188 | len = min(rest, (PAGE_SIZE - localtail)); | ||
189 | memcpy(nextdata, eerb->buffer[tailindex]+localtail, len); | ||
190 | nextdata += len; | ||
191 | rest -= len; | ||
192 | eerb->tail += len; | ||
193 | if ( eerb->tail == eerb->buffersize ) | ||
194 | eerb->tail = 0; /* wrap around */ | ||
195 | if (eerb->tail > eerb->buffersize) { | ||
196 | MESSAGE(KERN_ERR, "%s", "runaway buffer tail."); | ||
197 | BUG(); | ||
198 | } | ||
199 | } | ||
200 | return finalcount; | ||
201 | } | ||
202 | |||
203 | /* | ||
204 | * Whenever you want to write a blob of data to the internal buffer you | ||
205 | * have to start by using this function first. It will write the length | ||
206 | * of the new record to the buffer and, if necessary, remove old | ||
207 | * records to make room for the new one. | ||
208 | * needs to be called with bufferlock held | ||
209 | */ | ||
210 | static int | ||
211 | dasd_eer_start_record(struct eerbuffer *eerb, int count) | ||
212 | { | ||
213 | int tailcount; | ||
214 | if (count + sizeof(count) > eerb->buffersize) | ||
215 | return -ENOMEM; | ||
216 | while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) { | ||
217 | if (eerb->residual > 0) { | ||
218 | eerb->tail += eerb->residual; | ||
219 | if (eerb->tail >= eerb->buffersize) | ||
220 | eerb->tail -= eerb->buffersize; | ||
221 | eerb->residual = -1; | ||
222 | } | ||
223 | dasd_eer_read_buffer(eerb, sizeof(tailcount), | ||
224 | (char*)(&tailcount)); | ||
225 | eerb->tail += tailcount; | ||
226 | if (eerb->tail >= eerb->buffersize) | ||
227 | eerb->tail -= eerb->buffersize; | ||
228 | } | ||
229 | dasd_eer_write_buffer(eerb, sizeof(count), (char*)(&count)); | ||
230 | |||
231 | return 0; | ||
232 | }; | ||
233 | |||
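A record in the ring is framed as a native-endian length word followed by that many payload bytes; when space runs short, the loop above drops whole old records by reading the length stored at the tail and hopping over it. A simplified sketch of that framing on a flat buffer (wrap-around, locking and the residual handling omitted; names are illustrative only):

#include <stdio.h>
#include <string.h>

/* Append one record: a length word, then the payload (cf.
 * dasd_eer_start_record() plus dasd_eer_write_buffer()). */
static int put_record(char *buf, int pos, const char *data, int len)
{
	memcpy(buf + pos, &len, sizeof(len));
	memcpy(buf + pos + sizeof(len), data, len);
	return pos + sizeof(len) + len;
}

int main(void)
{
	char buf[256];
	int head = 0, tail = 0;
	int len;

	head = put_record(buf, head, "first record", 12);
	head = put_record(buf, head, "second", 6);

	/* Walk whole records the way the "drop oldest" loop does: read
	 * the length word at the tail, then hop over the payload. */
	while (tail < head) {
		memcpy(&len, buf + tail, sizeof(len));
		printf("record with %d payload bytes at offset %d\n", len, tail);
		tail += sizeof(len) + len;
	}
	return 0;
}

The kernel version performs the same hops with the modulo arithmetic of dasd_eer_read_buffer, so a record may wrap around the end of the ring and span page boundaries.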
234 | /* | ||
235 | * release pages that are not used anymore | ||
236 | */ | ||
237 | static void | ||
238 | dasd_eer_free_buffer_pages(char **buf, int no_pages) | ||
239 | { | ||
240 | int i; | ||
241 | |||
242 | for (i = 0; i < no_pages; ++i) { | ||
243 | free_page((unsigned long)buf[i]); | ||
244 | } | ||
245 | } | ||
246 | |||
247 | /* | ||
248 | * allocate a new set of memory pages | ||
249 | */ | ||
250 | static int | ||
251 | dasd_eer_allocate_buffer_pages(char **buf, int no_pages) | ||
252 | { | ||
253 | int i; | ||
254 | |||
255 | for (i = 0; i < no_pages; ++i) { | ||
256 | buf[i] = (char *) get_zeroed_page(GFP_KERNEL); | ||
257 | if (!buf[i]) { | ||
258 | dasd_eer_free_buffer_pages(buf, i); | ||
259 | return -ENOMEM; | ||
260 | } | ||
261 | } | ||
262 | return 0; | ||
263 | } | ||
264 | |||
265 | /* | ||
266 | * empty the buffer by resetting head and tail | ||
267 | * In case there is a half-read data blob in the buffer, we set residual | ||
268 | * to -1 to indicate that the remainder of the blob is lost. | ||
269 | */ | ||
270 | static void | ||
271 | dasd_eer_purge_buffer(struct eerbuffer *eerb) | ||
272 | { | ||
273 | unsigned long flags; | ||
274 | |||
275 | spin_lock_irqsave(&bufferlock, flags); | ||
276 | if (eerb->residual > 0) | ||
277 | eerb->residual = -1; | ||
278 | eerb->tail = 0; | ||
279 | eerb->head = 0; | ||
280 | spin_unlock_irqrestore(&bufferlock, flags); | ||
281 | } | ||
282 | |||
283 | /* | ||
284 | * set the size of the buffer, newsize is the new number of pages to be used | ||
285 | * we don't try to copy any data back and forth, so any resize will also purge | ||
286 | * the buffer | ||
287 | */ | ||
288 | static int | ||
289 | dasd_eer_resize_buffer(struct eerbuffer *eerb, int newsize) | ||
290 | { | ||
291 | int i, oldcount, reuse; | ||
292 | char **new; | ||
293 | char **old; | ||
294 | unsigned long flags; | ||
295 | |||
296 | if (newsize < 1) | ||
297 | return -EINVAL; | ||
298 | if (eerb->buffer_page_count == newsize) { | ||
299 | /* documented behaviour is that any successful invocation | ||
300 | * will purge all records */ | ||
301 | dasd_eer_purge_buffer(eerb); | ||
302 | return 0; | ||
303 | } | ||
304 | new = kmalloc(newsize*sizeof(char*), GFP_KERNEL); | ||
305 | if (!new) | ||
306 | return -ENOMEM; | ||
307 | |||
308 | reuse = min(eerb->buffer_page_count, newsize); | ||
309 | for (i = 0; i < reuse; ++i) { | ||
310 | new[i] = eerb->buffer[i]; | ||
311 | } | ||
312 | if (eerb->buffer_page_count < newsize) { | ||
313 | if (dasd_eer_allocate_buffer_pages( | ||
314 | &new[eerb->buffer_page_count], | ||
315 | newsize - eerb->buffer_page_count)) { | ||
316 | kfree(new); | ||
317 | return -ENOMEM; | ||
318 | } | ||
319 | } | ||
320 | |||
321 | spin_lock_irqsave(&bufferlock, flags); | ||
322 | old = eerb->buffer; | ||
323 | eerb->buffer = new; | ||
324 | if (eerb->residual > 0) | ||
325 | eerb->residual = -1; | ||
326 | eerb->tail = 0; | ||
327 | eerb->head = 0; | ||
328 | oldcount = eerb->buffer_page_count; | ||
329 | eerb->buffer_page_count = newsize; | ||
330 | spin_unlock_irqrestore(&bufferlock, flags); | ||
331 | |||
332 | if (oldcount > newsize) { | ||
333 | for (i = newsize; i < oldcount; ++i) { | ||
334 | free_page((unsigned long)old[i]); | ||
335 | } | ||
336 | } | ||
337 | kfree(old); | ||
338 | |||
339 | return 0; | ||
340 | } | ||
341 | |||
342 | |||
343 | /*****************************************************************************/ | ||
344 | /* The extended error reporting functionality */ | ||
345 | /*****************************************************************************/ | ||
346 | |||
347 | /* | ||
348 | * When a DASD device driver wants to report an error, it calls the | ||
349 | * function dasd_eer_write_trigger (via a notifier mechanism) and gives the | ||
350 | * respective trigger ID as a parameter. | ||
351 | * Currently there are four kinds of triggers: | ||
352 | * | ||
353 | * DASD_EER_FATALERROR: all kinds of unrecoverable I/O problems | ||
354 | * DASD_EER_PPRCSUSPEND: PPRC was suspended | ||
355 | * DASD_EER_NOPATH: There is no path to the device left. | ||
356 | * DASD_EER_STATECHANGE: The state of the device has changed. | ||
357 | * | ||
358 | * For the first three triggers all required information can be supplied by | ||
359 | * the caller. For these triggers a record is written by the function | ||
360 | * dasd_eer_write_standard_trigger. | ||
361 | * | ||
362 | * When dasd_eer_write_trigger is called to write a DASD_EER_STATECHANGE | ||
363 | * trigger, we have to gather the necessary sense data first. We cannot queue | ||
364 | * the necessary SNSS (sense subsystem status) request immediately, since we | ||
365 | * are likely to run in a deadlock situation. Instead, we schedule a | ||
366 | * work_struct that calls the function dasd_eer_sense_subsystem_status to | ||
367 | * create and start an SNSS request asynchronously. | ||
368 | * | ||
369 | * To avoid memory allocations at runtime, the necessary memory is allocated | ||
370 | * when the extended error reporting is enabled for a device (by | ||
371 | * dasd_eer_probe). There is one private eer data structure for each eer | ||
372 | * enabled DASD device. It contains memory for the work_struct, one SNSS cqr | ||
373 | * and a flags field that is used to coordinate the use of the cqr. The call | ||
374 | * to write a state change trigger can come in at any time, so we have one flag | ||
375 | * CQR_IN_USE that protects the cqr itself. When this flag indicates that the | ||
376 | * cqr is currently in use, dasd_eer_sense_subsystem_status cannot start a | ||
377 | * second request but sets the SNSS_REQUESTED flag instead. | ||
378 | * | ||
379 | * When the request is finished, the callback function dasd_eer_SNSS_cb | ||
380 | * is called. This function will invoke the function | ||
381 | * dasd_eer_write_SNSS_trigger to finally write the trigger. It will also | ||
382 | * check the SNSS_REQUESTED flag and if it is set it will call | ||
383 | * dasd_eer_sense_subsystem_status again. | ||
384 | * | ||
385 | * To avoid race conditions during the handling of these flags, they must | ||
386 | * be protected by the snsslock. | ||
387 | */ | ||
388 | |||
389 | struct dasd_eer_private { | ||
390 | struct dasd_ccw_req *cqr; | ||
391 | unsigned long flags; | ||
392 | struct work_struct worker; | ||
393 | }; | ||
394 | |||
395 | static void dasd_eer_destroy(struct dasd_device *device, | ||
396 | struct dasd_eer_private *eer); | ||
397 | static int | ||
398 | dasd_eer_write_trigger(struct dasd_eer_trigger *trigger); | ||
399 | static void dasd_eer_sense_subsystem_status(void *data); | ||
400 | static int dasd_eer_notify(struct notifier_block *self, | ||
401 | unsigned long action, void *data); | ||
402 | |||
403 | struct workqueue_struct *dasd_eer_workqueue; | ||
404 | |||
405 | #define SNSS_DATA_SIZE 44 | ||
406 | static spinlock_t snsslock = SPIN_LOCK_UNLOCKED; | ||
407 | |||
408 | #define DASD_EER_BUSID_SIZE 10 | ||
409 | struct dasd_eer_header { | ||
410 | __u32 total_size; | ||
411 | __u32 trigger; | ||
412 | __u64 tv_sec; | ||
413 | __u64 tv_usec; | ||
414 | char busid[DASD_EER_BUSID_SIZE]; | ||
415 | } __attribute__ ((packed)); | ||
416 | |||
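Every record a reader receives from the misc device starts with this header, followed by the sense data and a terminating 4-byte "EOR" marker. The sketch below is a hedged userspace mirror of that layout plus a decoder for one record; it assumes the reader runs on the same machine as the kernel (s390 is big-endian), so no byte swapping is done, and the dummy record built in main() is purely illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Userspace mirror of the packed header above (4+4+8+8+10 = 34 bytes). */
struct eer_header {
	uint32_t total_size;
	uint32_t trigger;
	uint64_t tv_sec;
	uint64_t tv_usec;
	char busid[10];
} __attribute__ ((packed));

/* Decode one record as delivered by read(): header, then
 * (total_size - sizeof(header) - 4) bytes of sense data, then "EOR". */
static void decode_record(const char *rec, int len)
{
	struct eer_header hdr;
	int sense_len;

	if (len < (int)sizeof(hdr))
		return;
	memcpy(&hdr, rec, sizeof(hdr));
	sense_len = hdr.total_size - sizeof(hdr) - 4;
	printf("trigger %u from %.10s at %llu.%06llu, %d sense bytes\n",
	       hdr.trigger, hdr.busid,
	       (unsigned long long)hdr.tv_sec,
	       (unsigned long long)hdr.tv_usec, sense_len);
}

int main(void)
{
	char rec[80];
	struct eer_header hdr = {
		.total_size = sizeof(struct eer_header) + 32 + 4,
		.trigger = 1,		/* DASD_EER_FATALERROR */
		.busid = "0.0.1234",	/* made-up bus id */
	};

	memset(rec, 0, sizeof(rec));
	memcpy(rec, &hdr, sizeof(hdr));
	memcpy(rec + sizeof(hdr) + 32, "EOR", 4);
	decode_record(rec, hdr.total_size);
	return 0;
}

If sizeof(struct eer_header) does not come out as 34 bytes, the mirror no longer matches the packed kernel definition above.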
417 | static struct notifier_block dasd_eer_nb = { | ||
418 | .notifier_call = dasd_eer_notify, | ||
419 | }; | ||
420 | |||
421 | /* | ||
422 | * flags for use with dasd_eer_private | ||
423 | */ | ||
424 | #define CQR_IN_USE 0 | ||
425 | #define SNSS_REQUESTED 1 | ||
426 | |||
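These two bits carry the coordination described in the comment block above: CQR_IN_USE marks the single preallocated SNSS request as busy, and SNSS_REQUESTED remembers that another state change came in while it was. A toy userspace model of that hand-off follows (a pthread mutex stands in for the snsslock; everything here is illustrative, not part of the patch; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;	/* "snsslock" */
static int in_use;	/* models CQR_IN_USE     */
static int requested;	/* models SNSS_REQUESTED */
static int started;

static void start_request(void)
{
	pthread_mutex_lock(&lock);
	if (in_use) {			/* request busy: just remember it */
		requested = 1;
		pthread_mutex_unlock(&lock);
		return;
	}
	in_use = 1;
	pthread_mutex_unlock(&lock);
	started++;			/* here the real code submits the cqr */
}

static void request_done(void)
{
	pthread_mutex_lock(&lock);
	in_use = 0;
	if (!requested) {
		pthread_mutex_unlock(&lock);
		return;
	}
	requested = 0;
	pthread_mutex_unlock(&lock);
	start_request();		/* a trigger arrived meanwhile: run again */
}

int main(void)
{
	start_request();	/* first trigger starts a request */
	start_request();	/* second trigger only sets "requested" */
	start_request();	/* third trigger is coalesced */
	request_done();		/* completion restarts exactly once */
	request_done();
	printf("requests started: %d (expected 2)\n", started);
	return 0;
}

However many triggers arrive while the request is in flight, the completion path restarts it at most once; that is all the SNSS_REQUESTED bit is for.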
427 | /* | ||
428 | * This function checks if extended error reporting is available for a given | ||
429 | * dasd_device. If yes, it creates and returns a struct dasd_eer_private, | ||
430 | * otherwise it returns an error pointer (-EPERM or -ENOMEM). | ||
431 | */ | ||
432 | struct dasd_eer_private * | ||
433 | dasd_eer_probe(struct dasd_device *device) | ||
434 | { | ||
435 | struct dasd_eer_private *private; | ||
436 | |||
437 | if (!(device && device->discipline | ||
438 | && !strcmp(device->discipline->name, "ECKD"))) { | ||
439 | return ERR_PTR(-EPERM); | ||
440 | } | ||
441 | /* allocate the private data structure */ | ||
442 | private = (struct dasd_eer_private *)kmalloc( | ||
443 | sizeof(struct dasd_eer_private), GFP_KERNEL); | ||
444 | if (!private) { | ||
445 | return ERR_PTR(-ENOMEM); | ||
446 | } | ||
447 | INIT_WORK(&private->worker, dasd_eer_sense_subsystem_status, | ||
448 | (void *)device); | ||
449 | private->cqr = dasd_kmalloc_request("ECKD", | ||
450 | 1 /* SNSS */ , | ||
451 | SNSS_DATA_SIZE , | ||
452 | device); | ||
453 | if (!private->cqr) { | ||
454 | kfree(private); | ||
455 | return ERR_PTR(-ENOMEM); | ||
456 | } | ||
457 | private->flags = 0; | ||
458 | return private; | ||
459 | }; | ||
460 | |||
461 | /* | ||
462 | * If our private SNSS request is queued, remove it from the | ||
463 | * dasd ccw queue so we can free the request's memory. | ||
464 | */ | ||
465 | static void | ||
466 | dasd_eer_dequeue_SNSS_request(struct dasd_device *device, | ||
467 | struct dasd_eer_private *eer) | ||
468 | { | ||
469 | struct list_head *lst, *nxt; | ||
470 | struct dasd_ccw_req *cqr, *erpcqr; | ||
471 | dasd_erp_fn_t erp_fn; | ||
472 | |||
473 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | ||
474 | list_for_each_safe(lst, nxt, &device->ccw_queue) { | ||
475 | cqr = list_entry(lst, struct dasd_ccw_req, list); | ||
476 | /* we are looking for two kinds of requests */ | ||
477 | /* first kind: our SNSS request: */ | ||
478 | if (cqr == eer->cqr) { | ||
479 | if (cqr->status == DASD_CQR_IN_IO) | ||
480 | device->discipline->term_IO(cqr); | ||
481 | list_del(&cqr->list); | ||
482 | break; | ||
483 | } | ||
484 | /* second kind: ERP requests for our SNSS request */ | ||
485 | if (cqr->refers) { | ||
486 | /* If this erp request chain ends in our cqr, then */ | ||
487 | /* call the erp_postaction to clean it up */ | ||
488 | erpcqr = cqr; | ||
489 | while (erpcqr->refers) { | ||
490 | erpcqr = erpcqr->refers; | ||
491 | } | ||
492 | if (erpcqr == eer->cqr) { | ||
493 | erp_fn = device->discipline->erp_postaction( | ||
494 | cqr); | ||
495 | erp_fn(cqr); | ||
496 | } | ||
497 | continue; | ||
498 | } | ||
499 | } | ||
500 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | ||
501 | } | ||
502 | |||
503 | /* | ||
504 | * This function dismantles a struct dasd_eer_private that was created by | ||
505 | * dasd_eer_probe. Since we want to free our private data structure, | ||
506 | * we must make sure that the memory is not in use anymore. | ||
507 | * We have to flush the work queue and remove a possible SNSS request | ||
508 | * from the dasd queue. | ||
509 | */ | ||
510 | static void | ||
511 | dasd_eer_destroy(struct dasd_device *device, struct dasd_eer_private *eer) | ||
512 | { | ||
513 | flush_workqueue(dasd_eer_workqueue); | ||
514 | dasd_eer_dequeue_SNSS_request(device, eer); | ||
515 | dasd_kfree_request(eer->cqr, device); | ||
516 | kfree(eer); | ||
517 | }; | ||
518 | |||
519 | /* | ||
520 | * enable the extended error reporting for a particular device | ||
521 | */ | ||
522 | static int | ||
523 | dasd_eer_enable_on_device(struct dasd_device *device) | ||
524 | { | ||
525 | void *eer; | ||
526 | if (!device) | ||
527 | return -ENODEV; | ||
528 | if (device->eer) | ||
529 | return 0; | ||
530 | if (!try_module_get(THIS_MODULE)) { | ||
531 | return -EINVAL; | ||
532 | } | ||
533 | eer = (void *)dasd_eer_probe(device); | ||
534 | if (IS_ERR(eer)) { | ||
535 | module_put(THIS_MODULE); | ||
536 | return PTR_ERR(eer); | ||
537 | } | ||
538 | device->eer = eer; | ||
539 | return 0; | ||
540 | } | ||
541 | |||
542 | /* | ||
543 | * disable the extended error reporting for a particular device | ||
544 | */ | ||
545 | static int | ||
546 | dasd_eer_disable_on_device(struct dasd_device *device) | ||
547 | { | ||
548 | struct dasd_eer_private *eer; | ||
549 | if (!device) | ||
550 | return -ENODEV; | ||
551 | eer = device->eer; | ||
552 | if (!eer) | ||
553 | return 0; | ||
554 | device->eer = NULL; | ||
555 | dasd_eer_destroy(device, eer); | ||
556 | module_put(THIS_MODULE); | ||
557 | |||
558 | return 0; | ||
559 | } | ||
560 | |||
561 | /* | ||
562 | * Set extended error reporting (eer) | ||
563 | * Note: This will be registered as a DASD ioctl, to be called on DASD devices. | ||
564 | */ | ||
565 | static int | ||
566 | dasd_ioctl_set_eer(struct block_device *bdev, int no, long args) | ||
567 | { | ||
568 | struct dasd_device *device; | ||
569 | int intval; | ||
570 | |||
571 | if (!capable(CAP_SYS_ADMIN)) | ||
572 | return -EACCES; | ||
573 | if (bdev != bdev->bd_contains) | ||
574 | /* Error-reporting is not allowed for partitions */ | ||
575 | return -EINVAL; | ||
576 | if (get_user(intval, (int __user *) args)) | ||
577 | return -EFAULT; | ||
578 | device = bdev->bd_disk->private_data; | ||
579 | if (device == NULL) | ||
580 | return -ENODEV; | ||
581 | |||
582 | intval = (intval != 0); | ||
583 | DEV_MESSAGE (KERN_DEBUG, device, | ||
584 | "set eer on device to %d", intval); | ||
585 | if (intval) | ||
586 | return dasd_eer_enable_on_device(device); | ||
587 | else | ||
588 | return dasd_eer_disable_on_device(device); | ||
589 | } | ||
590 | |||
591 | /* | ||
592 | * Get value of extended error reporting. | ||
593 | * Note: This will be registered as a DASD ioctl, to be called on DASD devices. | ||
594 | */ | ||
595 | static int | ||
596 | dasd_ioctl_get_eer(struct block_device *bdev, int no, long args) | ||
597 | { | ||
598 | struct dasd_device *device; | ||
599 | |||
600 | device = bdev->bd_disk->private_data; | ||
601 | if (device == NULL) | ||
602 | return -ENODEV; | ||
603 | return put_user((device->eer != NULL), (int __user *) args); | ||
604 | } | ||
605 | |||
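Together the two ioctls give an administrator a per-device switch. Below is a sketch of how a userspace tool might use them; the device node name and the header carrying the BIODASDEERSET/BIODASDEERGET numbers (assumed here to be <asm/dasd.h>) are installation details, not taken from this patch:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/dasd.h>	/* assumed location of the BIODASDEER* numbers */

int main(void)
{
	int fd, on = 1, state = -1;

	/* must be the whole device, not a partition, and needs CAP_SYS_ADMIN */
	fd = open("/dev/dasda", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BIODASDEERSET, &on) < 0)
		perror("BIODASDEERSET");
	if (ioctl(fd, BIODASDEERGET, &state) < 0)
		perror("BIODASDEERGET");
	else
		printf("extended error reporting is %s\n", state ? "on" : "off");
	close(fd);
	return 0;
}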
606 | /* | ||
607 | * The following function can be used for those triggers that have | ||
608 | * all necessary data available when the function is called. | ||
609 | * If the parameter cqr is not NULL, the chain of requests will be searched | ||
610 | * for valid sense data, and all valid sense data sets will be added to | ||
611 | * the trigger's data. | ||
612 | */ | ||
613 | static int | ||
614 | dasd_eer_write_standard_trigger(int trigger, struct dasd_device *device, | ||
615 | struct dasd_ccw_req *cqr) | ||
616 | { | ||
617 | struct dasd_ccw_req *temp_cqr; | ||
618 | int data_size; | ||
619 | struct timeval tv; | ||
620 | struct dasd_eer_header header; | ||
621 | unsigned long flags; | ||
622 | struct eerbuffer *eerb; | ||
623 | |||
624 | /* go through cqr chain and count the valid sense data sets */ | ||
625 | temp_cqr = cqr; | ||
626 | data_size = 0; | ||
627 | while (temp_cqr) { | ||
628 | if (temp_cqr->irb.esw.esw0.erw.cons) | ||
629 | data_size += 32; | ||
630 | temp_cqr = temp_cqr->refers; | ||
631 | } | ||
632 | |||
633 | header.total_size = sizeof(header) + data_size + 4; /* "EOR" */ | ||
634 | header.trigger = trigger; | ||
635 | do_gettimeofday(&tv); | ||
636 | header.tv_sec = tv.tv_sec; | ||
637 | header.tv_usec = tv.tv_usec; | ||
638 | strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE); | ||
639 | |||
640 | spin_lock_irqsave(&bufferlock, flags); | ||
641 | list_for_each_entry(eerb, &bufferlist, list) { | ||
642 | dasd_eer_start_record(eerb, header.total_size); | ||
643 | dasd_eer_write_buffer(eerb, sizeof(header), (char*)(&header)); | ||
644 | temp_cqr = cqr; | ||
645 | while (temp_cqr) { | ||
646 | if (temp_cqr->irb.esw.esw0.erw.cons) | ||
647 | dasd_eer_write_buffer(eerb, 32, temp_cqr->irb.ecw); | ||
648 | temp_cqr = temp_cqr->refers; | ||
649 | } | ||
650 | dasd_eer_write_buffer(eerb, 4,"EOR"); | ||
651 | } | ||
652 | spin_unlock_irqrestore(&bufferlock, flags); | ||
653 | |||
654 | wake_up_interruptible(&dasd_eer_read_wait_queue); | ||
655 | |||
656 | return 0; | ||
657 | } | ||
658 | |||
659 | /* | ||
660 | * This function writes a DASD_EER_STATECHANGE trigger. | ||
661 | */ | ||
662 | static void | ||
663 | dasd_eer_write_SNSS_trigger(struct dasd_device *device, | ||
664 | struct dasd_ccw_req *cqr) | ||
665 | { | ||
666 | int data_size; | ||
667 | int snss_rc; | ||
668 | struct timeval tv; | ||
669 | struct dasd_eer_header header; | ||
670 | unsigned long flags; | ||
671 | struct eerbuffer *eerb; | ||
672 | |||
673 | snss_rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0; | ||
674 | if (snss_rc) | ||
675 | data_size = 0; | ||
676 | else | ||
677 | data_size = SNSS_DATA_SIZE; | ||
678 | |||
679 | header.total_size = sizeof(header) + data_size + 4; /* "EOR" */ | ||
680 | header.trigger = DASD_EER_STATECHANGE; | ||
681 | do_gettimeofday(&tv); | ||
682 | header.tv_sec = tv.tv_sec; | ||
683 | header.tv_usec = tv.tv_usec; | ||
684 | strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE); | ||
685 | |||
686 | spin_lock_irqsave(&bufferlock, flags); | ||
687 | list_for_each_entry(eerb, &bufferlist, list) { | ||
688 | dasd_eer_start_record(eerb, header.total_size); | ||
689 | dasd_eer_write_buffer(eerb, sizeof(header),(char*)(&header)); | ||
690 | if (!snss_rc) | ||
691 | dasd_eer_write_buffer(eerb, SNSS_DATA_SIZE, cqr->data); | ||
692 | dasd_eer_write_buffer(eerb, 4,"EOR"); | ||
693 | } | ||
694 | spin_unlock_irqrestore(&bufferlock, flags); | ||
695 | |||
696 | wake_up_interruptible(&dasd_eer_read_wait_queue); | ||
697 | } | ||
698 | |||
699 | /* | ||
700 | * callback function for use with SNSS request | ||
701 | */ | ||
702 | static void | ||
703 | dasd_eer_SNSS_cb(struct dasd_ccw_req *cqr, void *data) | ||
704 | { | ||
705 | struct dasd_device *device; | ||
706 | struct dasd_eer_private *private; | ||
707 | unsigned long irqflags; | ||
708 | |||
709 | device = (struct dasd_device *)data; | ||
710 | private = (struct dasd_eer_private *)device->eer; | ||
711 | dasd_eer_write_SNSS_trigger(device, cqr); | ||
712 | spin_lock_irqsave(&snsslock, irqflags); | ||
713 | if(!test_and_clear_bit(SNSS_REQUESTED, &private->flags)) { | ||
714 | clear_bit(CQR_IN_USE, &private->flags); | ||
715 | spin_unlock_irqrestore(&snsslock, irqflags); | ||
716 | return; | ||
717 | }; | ||
718 | clear_bit(CQR_IN_USE, &private->flags); | ||
719 | spin_unlock_irqrestore(&snsslock, irqflags); | ||
720 | dasd_eer_sense_subsystem_status(device); | ||
721 | return; | ||
722 | } | ||
723 | |||
724 | /* | ||
725 | * clean a used cqr before using it again | ||
726 | */ | ||
727 | static void | ||
728 | dasd_eer_clean_SNSS_request(struct dasd_ccw_req *cqr) | ||
729 | { | ||
730 | struct ccw1 *cpaddr = cqr->cpaddr; | ||
731 | void *data = cqr->data; | ||
732 | |||
733 | memset(cqr, 0, sizeof(struct dasd_ccw_req)); | ||
734 | memset(cpaddr, 0, sizeof(struct ccw1)); | ||
735 | memset(data, 0, SNSS_DATA_SIZE); | ||
736 | cqr->cpaddr = cpaddr; | ||
737 | cqr->data = data; | ||
738 | strncpy((char *) &cqr->magic, "ECKD", 4); | ||
739 | ASCEBC((char *) &cqr->magic, 4); | ||
740 | set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); | ||
741 | } | ||
742 | |||
743 | /* | ||
744 | * build and start an SNSS request | ||
745 | * This function is called from a work queue so we have to | ||
746 | * pass the dasd_device pointer as a void pointer. | ||
747 | */ | ||
748 | static void | ||
749 | dasd_eer_sense_subsystem_status(void *data) | ||
750 | { | ||
751 | struct dasd_device *device; | ||
752 | struct dasd_eer_private *private; | ||
753 | struct dasd_ccw_req *cqr; | ||
754 | struct ccw1 *ccw; | ||
755 | unsigned long irqflags; | ||
756 | |||
757 | device = (struct dasd_device *)data; | ||
758 | private = (struct dasd_eer_private *)device->eer; | ||
759 | if (!private) /* device not eer enabled any more */ | ||
760 | return; | ||
761 | cqr = private->cqr; | ||
762 | spin_lock_irqsave(&snsslock, irqflags); | ||
763 | if(test_and_set_bit(CQR_IN_USE, &private->flags)) { | ||
764 | set_bit(SNSS_REQUESTED, &private->flags); | ||
765 | spin_unlock_irqrestore(&snsslock, irqflags); | ||
766 | return; | ||
767 | }; | ||
768 | spin_unlock_irqrestore(&snsslock, irqflags); | ||
769 | dasd_eer_clean_SNSS_request(cqr); | ||
770 | cqr->device = device; | ||
771 | cqr->retries = 255; | ||
772 | cqr->expires = 10 * HZ; | ||
773 | |||
774 | ccw = cqr->cpaddr; | ||
775 | ccw->cmd_code = DASD_ECKD_CCW_SNSS; | ||
776 | ccw->count = SNSS_DATA_SIZE; | ||
777 | ccw->flags = 0; | ||
778 | ccw->cda = (__u32)(addr_t)cqr->data; | ||
779 | |||
780 | cqr->buildclk = get_clock(); | ||
781 | cqr->status = DASD_CQR_FILLED; | ||
782 | cqr->callback = dasd_eer_SNSS_cb; | ||
783 | cqr->callback_data = (void *)device; | ||
784 | dasd_add_request_head(cqr); | ||
785 | |||
786 | return; | ||
787 | } | ||
788 | |||
789 | /* | ||
790 | * This function is called for all triggers. It calls the appropriate | ||
791 | * function that writes the actual trigger records. | ||
792 | */ | ||
793 | static int | ||
794 | dasd_eer_write_trigger(struct dasd_eer_trigger *trigger) | ||
795 | { | ||
796 | int rc; | ||
797 | struct dasd_eer_private *private = trigger->device->eer; | ||
798 | |||
799 | switch (trigger->id) { | ||
800 | case DASD_EER_FATALERROR: | ||
801 | case DASD_EER_PPRCSUSPEND: | ||
802 | rc = dasd_eer_write_standard_trigger( | ||
803 | trigger->id, trigger->device, trigger->cqr); | ||
804 | break; | ||
805 | case DASD_EER_NOPATH: | ||
806 | rc = dasd_eer_write_standard_trigger( | ||
807 | trigger->id, trigger->device, NULL); | ||
808 | break; | ||
809 | case DASD_EER_STATECHANGE: | ||
810 | if (queue_work(dasd_eer_workqueue, &private->worker)) { | ||
811 | rc = 0; | ||
812 | } else { | ||
813 | /* If the work_struct was already queued, it can't | ||
814 | * be queued again. But this is OK since we don't | ||
815 | * need to have it queued twice. | ||
816 | */ | ||
817 | rc = -EBUSY; | ||
818 | } | ||
819 | break; | ||
820 | default: /* unknown trigger, so we write it without any sense data */ | ||
821 | rc = dasd_eer_write_standard_trigger( | ||
822 | trigger->id, trigger->device, NULL); | ||
823 | break; | ||
824 | } | ||
825 | return rc; | ||
826 | } | ||
827 | |||
828 | /* | ||
829 | * This function is registered with the dasd device driver and gets called | ||
830 | * for all dasd eer notifications. | ||
831 | */ | ||
832 | static int dasd_eer_notify(struct notifier_block *self, | ||
833 | unsigned long action, void *data) | ||
834 | { | ||
835 | switch (action) { | ||
836 | case DASD_EER_DISABLE: | ||
837 | dasd_eer_disable_on_device((struct dasd_device *)data); | ||
838 | break; | ||
839 | case DASD_EER_TRIGGER: | ||
840 | dasd_eer_write_trigger((struct dasd_eer_trigger *)data); | ||
841 | break; | ||
842 | } | ||
843 | return NOTIFY_OK; | ||
844 | } | ||
845 | |||
846 | |||
847 | /*****************************************************************************/ | ||
848 | /* the device operations */ | ||
849 | /*****************************************************************************/ | ||
850 | |||
851 | /* | ||
852 | * On the one hand we need a lock to access our internal buffer, on the | ||
853 | * other hand copy_to_user can sleep. So we copy the data we have to | ||
854 | * transfer into a readbuffer, which is protected by the readbuffer_mutex. | ||
855 | */ | ||
856 | static char readbuffer[PAGE_SIZE]; | ||
857 | DECLARE_MUTEX(readbuffer_mutex); | ||
858 | |||
859 | |||
860 | static int | ||
861 | dasd_eer_open(struct inode *inp, struct file *filp) | ||
862 | { | ||
863 | struct eerbuffer *eerb; | ||
864 | unsigned long flags; | ||
865 | |||
866 | eerb = kmalloc(sizeof(struct eerbuffer), GFP_KERNEL); | ||
867 | if (!eerb) | ||
868 | return -ENOMEM; | ||
869 | eerb->head = eerb->tail = eerb->residual = 0; | ||
870 | eerb->buffer_page_count = 1; | ||
871 | eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE; | ||
872 | eerb->buffer = kmalloc(eerb->buffer_page_count*sizeof(char*), | ||
873 | GFP_KERNEL); | ||
874 | if (!eerb->buffer) | ||
875 | return -ENOMEM; | ||
876 | if (dasd_eer_allocate_buffer_pages(eerb->buffer, | ||
877 | eerb->buffer_page_count)) { | ||
878 | kfree(eerb->buffer); | ||
879 | return -ENOMEM; | ||
880 | } | ||
881 | filp->private_data = eerb; | ||
882 | spin_lock_irqsave(&bufferlock, flags); | ||
883 | list_add(&eerb->list, &bufferlist); | ||
884 | spin_unlock_irqrestore(&bufferlock, flags); | ||
885 | |||
886 | return nonseekable_open(inp,filp); | ||
887 | } | ||
888 | |||
889 | static int | ||
890 | dasd_eer_close(struct inode *inp, struct file *filp) | ||
891 | { | ||
892 | struct eerbuffer *eerb; | ||
893 | unsigned long flags; | ||
894 | |||
895 | eerb = (struct eerbuffer *)filp->private_data; | ||
896 | spin_lock_irqsave(&bufferlock, flags); | ||
897 | list_del(&eerb->list); | ||
898 | spin_unlock_irqrestore(&bufferlock, flags); | ||
899 | dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count); | ||
900 | kfree(eerb->buffer); | ||
901 | kfree(eerb); | ||
902 | |||
903 | return 0; | ||
904 | } | ||
905 | |||
906 | static long | ||
907 | dasd_eer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | ||
908 | { | ||
909 | int intval; | ||
910 | struct eerbuffer *eerb; | ||
911 | |||
912 | eerb = (struct eerbuffer *)filp->private_data; | ||
913 | switch (cmd) { | ||
914 | case DASD_EER_PURGE: | ||
915 | dasd_eer_purge_buffer(eerb); | ||
916 | return 0; | ||
917 | case DASD_EER_SETBUFSIZE: | ||
918 | if (get_user(intval, (int __user *)arg)) | ||
919 | return -EFAULT; | ||
920 | return dasd_eer_resize_buffer(eerb, intval); | ||
921 | default: | ||
922 | return -ENOIOCTLCMD; | ||
923 | } | ||
924 | } | ||
925 | |||
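A reader can tune or clear its private buffer through the same file descriptor it reads records from. The sketch below assumes the DASD_EER_SETBUFSIZE and DASD_EER_PURGE numbers are exported by the same header as above (their definitions are not part of this hunk); SETBUFSIZE takes a pointer to an int page count, PURGE takes no argument:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/dasd.h>	/* assumed to provide DASD_EER_SETBUFSIZE/PURGE */

int main(void)
{
	int fd, pages = 4;

	fd = open("/dev/dasd_eer", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/dasd_eer");
		return 1;
	}
	/* grow this reader's ring buffer to four pages (purges it, too) ... */
	if (ioctl(fd, DASD_EER_SETBUFSIZE, &pages) < 0)
		perror("DASD_EER_SETBUFSIZE");
	/* ... and drop everything currently queued for this reader */
	if (ioctl(fd, DASD_EER_PURGE) < 0)
		perror("DASD_EER_PURGE");
	close(fd);
	return 0;
}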
926 | static ssize_t | ||
927 | dasd_eer_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) | ||
928 | { | ||
929 | int tc,rc; | ||
930 | int tailcount,effective_count; | ||
931 | unsigned long flags; | ||
932 | struct eerbuffer *eerb; | ||
933 | |||
934 | eerb = (struct eerbuffer *)filp->private_data; | ||
935 | if(down_interruptible(&readbuffer_mutex)) | ||
936 | return -ERESTARTSYS; | ||
937 | |||
938 | spin_lock_irqsave(&bufferlock, flags); | ||
939 | |||
940 | if (eerb->residual < 0) { /* the remainder of this record */ | ||
941 | /* has been deleted */ | ||
942 | eerb->residual = 0; | ||
943 | spin_unlock_irqrestore(&bufferlock, flags); | ||
944 | up(&readbuffer_mutex); | ||
945 | return -EIO; | ||
946 | } else if (eerb->residual > 0) { | ||
947 | /* OK we still have a second half of a record to deliver */ | ||
948 | effective_count = min(eerb->residual, (int)count); | ||
949 | eerb->residual -= effective_count; | ||
950 | } else { | ||
951 | tc = 0; | ||
952 | while (!tc) { | ||
953 | tc = dasd_eer_read_buffer(eerb, | ||
954 | sizeof(tailcount), (char*)(&tailcount)); | ||
955 | if (!tc) { | ||
956 | /* no data available */ | ||
957 | spin_unlock_irqrestore(&bufferlock, flags); | ||
958 | up(&readbuffer_mutex); | ||
959 | if (filp->f_flags & O_NONBLOCK) | ||
960 | return -EAGAIN; | ||
961 | rc = wait_event_interruptible( | ||
962 | dasd_eer_read_wait_queue, | ||
963 | eerb->head != eerb->tail); | ||
964 | if (rc) { | ||
965 | return rc; | ||
966 | } | ||
967 | if(down_interruptible(&readbuffer_mutex)) | ||
968 | return -ERESTARTSYS; | ||
969 | spin_lock_irqsave(&bufferlock, flags); | ||
970 | } | ||
971 | } | ||
972 | WARN_ON(tc != sizeof(tailcount)); | ||
973 | effective_count = min(tailcount,(int)count); | ||
974 | eerb->residual = tailcount - effective_count; | ||
975 | } | ||
976 | |||
977 | tc = dasd_eer_read_buffer(eerb, effective_count, readbuffer); | ||
978 | WARN_ON(tc != effective_count); | ||
979 | |||
980 | spin_unlock_irqrestore(&bufferlock, flags); | ||
981 | |||
982 | if (copy_to_user(buf, readbuffer, effective_count)) { | ||
983 | up(&readbuffer_mutex); | ||
984 | return -EFAULT; | ||
985 | } | ||
986 | |||
987 | up(&readbuffer_mutex); | ||
988 | return effective_count; | ||
989 | } | ||
990 | |||
991 | static unsigned int | ||
992 | dasd_eer_poll (struct file *filp, poll_table *ptable) | ||
993 | { | ||
994 | unsigned int mask; | ||
995 | unsigned long flags; | ||
996 | struct eerbuffer *eerb; | ||
997 | |||
998 | eerb = (struct eerbuffer *)filp->private_data; | ||
999 | poll_wait(filp, &dasd_eer_read_wait_queue, ptable); | ||
1000 | spin_lock_irqsave(&bufferlock, flags); | ||
1001 | if (eerb->head != eerb->tail) | ||
1002 | mask = POLLIN | POLLRDNORM ; | ||
1003 | else | ||
1004 | mask = 0; | ||
1005 | spin_unlock_irqrestore(&bufferlock, flags); | ||
1006 | return mask; | ||
1007 | } | ||
1008 | |||
1009 | static struct file_operations dasd_eer_fops = { | ||
1010 | .open = &dasd_eer_open, | ||
1011 | .release = &dasd_eer_close, | ||
1012 | .unlocked_ioctl = &dasd_eer_ioctl, | ||
1013 | .compat_ioctl = &dasd_eer_ioctl, | ||
1014 | .read = &dasd_eer_read, | ||
1015 | .poll = &dasd_eer_poll, | ||
1016 | .owner = THIS_MODULE, | ||
1017 | }; | ||
1018 | |||
1019 | static struct miscdevice dasd_eer_dev = { | ||
1020 | .minor = MISC_DYNAMIC_MINOR, | ||
1021 | .name = "dasd_eer", | ||
1022 | .fops = &dasd_eer_fops, | ||
1023 | }; | ||
1024 | |||
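Putting the pieces together, a consumer opens /dev/dasd_eer, waits with poll() and gets one record per read() call (or the remainder of a record if the previous read used a buffer that was too small). The sketch below only reports record sizes; decoding works as in the header example further up. The EIO branch corresponds to residual == -1 in dasd_eer_read, i.e. the tail of a partially read record was purged or overwritten:

#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char rec[4096];		/* ample for one record: header + sense + "EOR" */
	struct pollfd pfd;
	ssize_t n;

	pfd.fd = open("/dev/dasd_eer", O_RDONLY);
	if (pfd.fd < 0) {
		perror("open /dev/dasd_eer");
		return 1;
	}
	pfd.events = POLLIN;

	for (;;) {
		if (poll(&pfd, 1, -1) < 0)
			break;
		n = read(pfd.fd, rec, sizeof(rec));
		if (n > 0) {
			printf("got a %zd byte record\n", n);
		} else if (n < 0 && errno == EIO) {
			/* the rest of a partially read record was dropped */
			fprintf(stderr, "lost the tail of a record\n");
		} else if (n < 0 && errno != EINTR) {
			perror("read");
			break;
		}
	}
	close(pfd.fd);
	return 0;
}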
1025 | |||
1026 | /*****************************************************************************/ | ||
1027 | /* Init and exit */ | ||
1028 | /*****************************************************************************/ | ||
1029 | |||
1030 | static int | ||
1031 | __init dasd_eer_init(void) | ||
1032 | { | ||
1033 | int rc; | ||
1034 | |||
1035 | dasd_eer_workqueue = create_singlethread_workqueue("dasd_eer"); | ||
1036 | if (!dasd_eer_workqueue) { | ||
1037 | MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not " | ||
1038 | "create workqueue"); | ||
1039 | rc = -ENOMEM; | ||
1040 | goto out; | ||
1041 | } | ||
1042 | |||
1043 | rc = dasd_register_eer_notifier(&dasd_eer_nb); | ||
1044 | if (rc) { | ||
1045 | MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not " | ||
1046 | "register error reporting"); | ||
1047 | goto queue; | ||
1048 | } | ||
1049 | |||
1050 | dasd_ioctl_no_register(THIS_MODULE, BIODASDEERSET, dasd_ioctl_set_eer); | ||
1051 | dasd_ioctl_no_register(THIS_MODULE, BIODASDEERGET, dasd_ioctl_get_eer); | ||
1052 | |||
1053 | /* we don't need our own character device, | ||
1054 | * so we just register as a misc device */ | ||
1055 | rc = misc_register(&dasd_eer_dev); | ||
1056 | if (rc) { | ||
1057 | MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not " | ||
1058 | "register misc device"); | ||
1059 | goto unregister; | ||
1060 | } | ||
1061 | |||
1062 | return 0; | ||
1063 | |||
1064 | unregister: | ||
1065 | dasd_unregister_eer_notifier(&dasd_eer_nb); | ||
1066 | dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERSET, | ||
1067 | dasd_ioctl_set_eer); | ||
1068 | dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERGET, | ||
1069 | dasd_ioctl_get_eer); | ||
1070 | queue: | ||
1071 | destroy_workqueue(dasd_eer_workqueue); | ||
1072 | out: | ||
1073 | return rc; | ||
1074 | |||
1075 | } | ||
1076 | module_init(dasd_eer_init); | ||
1077 | |||
1078 | static void | ||
1079 | __exit dasd_eer_exit(void) | ||
1080 | { | ||
1081 | dasd_unregister_eer_notifier(&dasd_eer_nb); | ||
1082 | dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERSET, | ||
1083 | dasd_ioctl_set_eer); | ||
1084 | dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERGET, | ||
1085 | dasd_ioctl_get_eer); | ||
1086 | destroy_workqueue(dasd_eer_workqueue); | ||
1087 | |||
1088 | WARN_ON(misc_deregister(&dasd_eer_dev) != 0); | ||
1089 | } | ||
1090 | module_exit(dasd_eer_exit); | ||
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index c20af9874500..d1b08fa13fd2 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h | |||
@@ -275,6 +275,34 @@ struct dasd_discipline { | |||
275 | 275 | ||
276 | extern struct dasd_discipline *dasd_diag_discipline_pointer; | 276 | extern struct dasd_discipline *dasd_diag_discipline_pointer; |
277 | 277 | ||
278 | |||
279 | /* | ||
280 | * Notification numbers for extended error reporting notifications: | ||
281 | * The DASD_EER_DISABLE notification is sent before a dasd_device (and its | ||
282 | * eer pointer) is freed. The error reporting module needs to do all necessary | ||
283 | * cleanup steps. | ||
284 | * The DASD_EER_TRIGGER notification sends the actual error reports (triggers). | ||
285 | */ | ||
286 | #define DASD_EER_DISABLE 0 | ||
287 | #define DASD_EER_TRIGGER 1 | ||
288 | |||
289 | /* Trigger IDs for extended error reporting DASD_EER_TRIGGER notification */ | ||
290 | #define DASD_EER_FATALERROR 1 | ||
291 | #define DASD_EER_NOPATH 2 | ||
292 | #define DASD_EER_STATECHANGE 3 | ||
293 | #define DASD_EER_PPRCSUSPEND 4 | ||
294 | |||
295 | /* | ||
296 | * The dasd_eer_trigger structure contains all data that we need to send | ||
297 | * along with a DASD_EER_TRIGGER notification. | ||
298 | */ | ||
299 | struct dasd_eer_trigger { | ||
300 | unsigned int id; | ||
301 | struct dasd_device *device; | ||
302 | struct dasd_ccw_req *cqr; | ||
303 | }; | ||
304 | |||
305 | |||
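For a caller inside the DASD driver the machinery boils down to one call per event: pass the trigger ID, the device and (optionally) the failed request to dasd_write_eer_trigger, declared further down in this hunk, which presumably packs them into a struct dasd_eer_trigger and sends the DASD_EER_TRIGGER notification. A hypothetical call site, not taken from the patch:

#include "dasd_int.h"	/* this header, for the declarations used below */

/* Hypothetical error-path helper in a discipline: report an
 * unrecoverable I/O error on device/cqr to any eer listener. */
static void example_report_fatal(struct dasd_device *device,
				 struct dasd_ccw_req *cqr)
{
	dasd_write_eer_trigger(DASD_EER_FATALERROR, device, cqr);
}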
278 | struct dasd_device { | 306 | struct dasd_device { |
279 | /* Block device stuff. */ | 307 | /* Block device stuff. */ |
280 | struct gendisk *gdp; | 308 | struct gendisk *gdp; |
@@ -288,6 +316,9 @@ struct dasd_device { | |||
288 | unsigned long flags; /* per device flags */ | 316 | unsigned long flags; /* per device flags */ |
289 | unsigned short features; /* copy of devmap-features (read-only!) */ | 317 | unsigned short features; /* copy of devmap-features (read-only!) */ |
290 | 318 | ||
319 | /* extended error reporting stuff (eer) */ | ||
320 | void *eer; | ||
321 | |||
291 | /* Device discipline stuff. */ | 322 | /* Device discipline stuff. */ |
292 | struct dasd_discipline *discipline; | 323 | struct dasd_discipline *discipline; |
293 | char *private; | 324 | char *private; |
@@ -488,6 +519,12 @@ int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *); | |||
488 | int dasd_generic_set_offline (struct ccw_device *cdev); | 519 | int dasd_generic_set_offline (struct ccw_device *cdev); |
489 | int dasd_generic_notify(struct ccw_device *, int); | 520 | int dasd_generic_notify(struct ccw_device *, int); |
490 | void dasd_generic_auto_online (struct ccw_driver *); | 521 | void dasd_generic_auto_online (struct ccw_driver *); |
522 | int dasd_register_eer_notifier(struct notifier_block *); | ||
523 | int dasd_unregister_eer_notifier(struct notifier_block *); | ||
524 | void dasd_write_eer_trigger(unsigned int, struct dasd_device *, | ||
525 | struct dasd_ccw_req *); | ||
526 | |||
527 | |||
491 | 528 | ||
492 | /* externals in dasd_devmap.c */ | 529 | /* externals in dasd_devmap.c */ |
493 | extern int dasd_max_devindex; | 530 | extern int dasd_max_devindex; |
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 44e4b4bb1c5a..3e75095f35d0 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h | |||
@@ -68,6 +68,6 @@ extern void *chsc_get_chp_desc(struct subchannel*, int); | |||
68 | 68 | ||
69 | extern int chsc_enable_facility(int); | 69 | extern int chsc_enable_facility(int); |
70 | 70 | ||
71 | #define to_channelpath(dev) container_of(dev, struct channel_path, dev) | 71 | #define to_channelpath(device) container_of(device, struct channel_path, dev) |
72 | 72 | ||
73 | #endif | 73 | #endif |
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index 0d38f0f2ae29..ee4265d7a8c9 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig | |||
@@ -892,20 +892,20 @@ config SERIAL_VR41XX_CONSOLE | |||
892 | a console on a serial port, say Y. Otherwise, say N. | 892 | a console on a serial port, say Y. Otherwise, say N. |
893 | 893 | ||
894 | config SERIAL_JSM | 894 | config SERIAL_JSM |
895 | tristate "Digi International NEO PCI Support" | 895 | tristate "Digi International NEO PCI Support" |
896 | depends on PCI && BROKEN | 896 | depends on PCI |
897 | select SERIAL_CORE | 897 | select SERIAL_CORE |
898 | help | 898 | help |
899 | This is a driver for Digi International's Neo series | 899 | This is a driver for Digi International's Neo series |
900 | of cards which provide multiple serial ports. You would need | 900 | of cards which provide multiple serial ports. You would need |
901 | something like this to connect more than two modems to your Linux | 901 | something like this to connect more than two modems to your Linux |
902 | box, for instance in order to become a dial-in server. This driver | 902 | box, for instance in order to become a dial-in server. This driver |
903 | supports PCI boards only. | 903 | supports PCI boards only. |
904 | If you have a card like this, say Y here and read the file | 904 | If you have a card like this, say Y here and read the file |
905 | <file:Documentation/jsm.txt>. | 905 | <file:Documentation/jsm.txt>. |
906 | 906 | ||
907 | To compile this driver as a module, choose M here: the | 907 | To compile this driver as a module, choose M here: the |
908 | module will be called jsm. | 908 | module will be called jsm. |
909 | 909 | ||
910 | config SERIAL_SGI_IOC4 | 910 | config SERIAL_SGI_IOC4 |
911 | tristate "SGI IOC4 controller serial support" | 911 | tristate "SGI IOC4 controller serial support" |
diff --git a/drivers/serial/jsm/jsm.h b/drivers/serial/jsm/jsm.h index 18753193f59b..dfc1e86d3aa1 100644 --- a/drivers/serial/jsm/jsm.h +++ b/drivers/serial/jsm/jsm.h | |||
@@ -380,7 +380,6 @@ struct neo_uart_struct { | |||
380 | extern struct uart_driver jsm_uart_driver; | 380 | extern struct uart_driver jsm_uart_driver; |
381 | extern struct board_ops jsm_neo_ops; | 381 | extern struct board_ops jsm_neo_ops; |
382 | extern int jsm_debug; | 382 | extern int jsm_debug; |
383 | extern int jsm_rawreadok; | ||
384 | 383 | ||
385 | /************************************************************************* | 384 | /************************************************************************* |
386 | * | 385 | * |
diff --git a/drivers/serial/jsm/jsm_driver.c b/drivers/serial/jsm/jsm_driver.c index 7e56c7824194..b1b66e71d281 100644 --- a/drivers/serial/jsm/jsm_driver.c +++ b/drivers/serial/jsm/jsm_driver.c | |||
@@ -49,11 +49,8 @@ struct uart_driver jsm_uart_driver = { | |||
49 | }; | 49 | }; |
50 | 50 | ||
51 | int jsm_debug; | 51 | int jsm_debug; |
52 | int jsm_rawreadok; | ||
53 | module_param(jsm_debug, int, 0); | 52 | module_param(jsm_debug, int, 0); |
54 | module_param(jsm_rawreadok, int, 0); | ||
55 | MODULE_PARM_DESC(jsm_debug, "Driver debugging level"); | 53 | MODULE_PARM_DESC(jsm_debug, "Driver debugging level"); |
56 | MODULE_PARM_DESC(jsm_rawreadok, "Bypass flip buffers on input"); | ||
57 | 54 | ||
58 | static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 55 | static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
59 | { | 56 | { |
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c index 6fa0d62d6f68..4d48b625cd3d 100644 --- a/drivers/serial/jsm/jsm_tty.c +++ b/drivers/serial/jsm/jsm_tty.c | |||
@@ -20,8 +20,10 @@ | |||
20 | * | 20 | * |
21 | * Contact Information: | 21 | * Contact Information: |
22 | * Scott H Kilau <Scott_Kilau@digi.com> | 22 | * Scott H Kilau <Scott_Kilau@digi.com> |
23 | * Wendy Xiong <wendyx@us.ltcfwd.linux.ibm.com> | 23 | * Ananda Venkatarman <mansarov@us.ibm.com> |
24 | * | 24 | * Modifications: |
25 | * 01/19/06: changed jsm_input routine to use the dynamically allocated | ||
26 | * tty_buffer changes. Contributors: Scott Kilau and Ananda V. | ||
25 | ***********************************************************************/ | 27 | ***********************************************************************/ |
26 | #include <linux/tty.h> | 28 | #include <linux/tty.h> |
27 | #include <linux/tty_flip.h> | 29 | #include <linux/tty_flip.h> |
@@ -497,16 +499,15 @@ void jsm_input(struct jsm_channel *ch) | |||
497 | { | 499 | { |
498 | struct jsm_board *bd; | 500 | struct jsm_board *bd; |
499 | struct tty_struct *tp; | 501 | struct tty_struct *tp; |
502 | struct tty_ldisc *ld; | ||
500 | u32 rmask; | 503 | u32 rmask; |
501 | u16 head; | 504 | u16 head; |
502 | u16 tail; | 505 | u16 tail; |
503 | int data_len; | 506 | int data_len; |
504 | unsigned long lock_flags; | 507 | unsigned long lock_flags; |
505 | int flip_len; | 508 | int flip_len = 0; |
506 | int len = 0; | 509 | int len = 0; |
507 | int n = 0; | 510 | int n = 0; |
508 | char *buf = NULL; | ||
509 | char *buf2 = NULL; | ||
510 | int s = 0; | 511 | int s = 0; |
511 | int i = 0; | 512 | int i = 0; |
512 | 513 | ||
@@ -574,56 +575,50 @@ void jsm_input(struct jsm_channel *ch) | |||
574 | 575 | ||
575 | /* | 576 | /* |
576 | * If the rxbuf is empty and we are not throttled, put as much | 577 | * If the rxbuf is empty and we are not throttled, put as much |
577 | * as we can directly into the linux TTY flip buffer. | 578 | * as we can directly into the linux TTY buffer. |
578 | * The jsm_rawreadok case takes advantage of carnal knowledge that | ||
579 | * the char_buf and the flag_buf are next to each other and | ||
580 | * are each of (2 * TTY_FLIPBUF_SIZE) size. | ||
581 | * | 579 | * |
582 | * NOTE: if(!tty->real_raw), the call to ldisc.receive_buf | ||
583 | *actually still uses the flag buffer, so you can't | ||
584 | *use it for input data | ||
585 | */ | 580 | */ |
586 | if (jsm_rawreadok) { | 581 | flip_len = TTY_FLIPBUF_SIZE; |
587 | if (tp->real_raw) | ||
588 | flip_len = MYFLIPLEN; | ||
589 | else | ||
590 | flip_len = 2 * TTY_FLIPBUF_SIZE; | ||
591 | } else | ||
592 | flip_len = TTY_FLIPBUF_SIZE - tp->flip.count; | ||
593 | 582 | ||
594 | len = min(data_len, flip_len); | 583 | len = min(data_len, flip_len); |
595 | len = min(len, (N_TTY_BUF_SIZE - 1) - tp->read_cnt); | 584 | len = min(len, (N_TTY_BUF_SIZE - 1) - tp->read_cnt); |
585 | ld = tty_ldisc_ref(tp); | ||
596 | 586 | ||
597 | if (len <= 0) { | 587 | /* |
598 | spin_unlock_irqrestore(&ch->ch_lock, lock_flags); | 588 | * If the DONT_FLIP flag is on, don't flush our buffer, and act |
599 | jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "jsm_input 1\n"); | 589 | * like the ld doesn't have any space to put the data right now. |
600 | return; | 590 | */ |
601 | } | 591 | if (test_bit(TTY_DONT_FLIP, &tp->flags)) |
592 | len = 0; | ||
602 | 593 | ||
603 | /* | 594 | /* |
604 | * If we're bypassing flip buffers on rx, we can blast it | 595 | * If we were unable to get a reference to the ld, |
605 | * right into the beginning of the buffer. | 596 | * don't flush our buffer, and act like the ld doesn't |
597 | * have any space to put the data right now. | ||
606 | */ | 598 | */ |
607 | if (jsm_rawreadok) { | 599 | if (!ld) { |
608 | if (tp->real_raw) { | 600 | len = 0; |
609 | if (ch->ch_flags & CH_FLIPBUF_IN_USE) { | ||
610 | jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, | ||
611 | "JSM - FLIPBUF in use. delaying input\n"); | ||
612 | spin_unlock_irqrestore(&ch->ch_lock, lock_flags); | ||
613 | return; | ||
614 | } | ||
615 | ch->ch_flags |= CH_FLIPBUF_IN_USE; | ||
616 | buf = ch->ch_bd->flipbuf; | ||
617 | buf2 = NULL; | ||
618 | } else { | ||
619 | buf = tp->flip.char_buf; | ||
620 | buf2 = tp->flip.flag_buf; | ||
621 | } | ||
622 | } else { | 601 | } else { |
623 | buf = tp->flip.char_buf_ptr; | 602 | /* |
624 | buf2 = tp->flip.flag_buf_ptr; | 603 | * If ld doesn't have a pointer to a receive_buf function, |
604 | * flush the data, then act like the ld doesn't have any | ||
605 | * space to put the data right now. | ||
606 | */ | ||
607 | if (!ld->receive_buf) { | ||
608 | ch->ch_r_head = ch->ch_r_tail; | ||
609 | len = 0; | ||
610 | } | ||
625 | } | 611 | } |
626 | 612 | ||
613 | if (len <= 0) { | ||
614 | spin_unlock_irqrestore(&ch->ch_lock, lock_flags); | ||
615 | jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "jsm_input 1\n"); | ||
616 | if (ld) | ||
617 | tty_ldisc_deref(ld); | ||
618 | return; | ||
619 | } | ||
620 | |||
621 | len = tty_buffer_request_room(tp, len); | ||
627 | n = len; | 622 | n = len; |
628 | 623 | ||
629 | /* | 624 | /* |
@@ -638,121 +633,47 @@ void jsm_input(struct jsm_channel *ch) | |||
638 | if (s <= 0) | 633 | if (s <= 0) |
639 | break; | 634 | break; |
640 | 635 | ||
641 | memcpy(buf, ch->ch_rqueue + tail, s); | 636 | /* |
642 | 637 | * If conditions are such that ld needs to see all | |
643 | /* buf2 is only set when port isn't raw */ | 638 | * UART errors, we will have to walk each character |
644 | if (buf2) | 639 | * and error byte and send them to the buffer one at |
645 | memcpy(buf2, ch->ch_equeue + tail, s); | 640 | * a time. |
646 | 641 | */ | |
647 | tail += s; | ||
648 | buf += s; | ||
649 | if (buf2) | ||
650 | buf2 += s; | ||
651 | n -= s; | ||
652 | /* Flip queue if needed */ | ||
653 | tail &= rmask; | ||
654 | } | ||
655 | 642 | ||
656 | /* | ||
657 | * In high performance mode, we don't have to update | ||
658 | * flag_buf or any of the counts or pointers into flip buf. | ||
659 | */ | ||
660 | if (!jsm_rawreadok) { | ||
661 | if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) { | 643 | if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) { |
662 | for (i = 0; i < len; i++) { | 644 | for (i = 0; i < s; i++) { |
663 | /* | 645 | /* |
664 | * Give the Linux ld the flags in the | 646 | * Give the Linux ld the flags in the |
665 | * format it likes. | 647 | * format it likes. |
666 | */ | 648 | */ |
667 | if (tp->flip.flag_buf_ptr[i] & UART_LSR_BI) | 649 | if (*(ch->ch_equeue +tail +i) & UART_LSR_BI) |
668 | tp->flip.flag_buf_ptr[i] = TTY_BREAK; | 650 | tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_BREAK); |
669 | else if (tp->flip.flag_buf_ptr[i] & UART_LSR_PE) | 651 | else if (*(ch->ch_equeue +tail +i) & UART_LSR_PE) |
670 | tp->flip.flag_buf_ptr[i] = TTY_PARITY; | 652 | tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_PARITY); |
671 | else if (tp->flip.flag_buf_ptr[i] & UART_LSR_FE) | 653 | else if (*(ch->ch_equeue +tail +i) & UART_LSR_FE) |
672 | tp->flip.flag_buf_ptr[i] = TTY_FRAME; | 654 | tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_FRAME); |
673 | else | 655 | else |
674 | tp->flip.flag_buf_ptr[i] = TTY_NORMAL; | 656 | tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_NORMAL); |
675 | } | 657 | } |
676 | } else { | 658 | } else { |
677 | memset(tp->flip.flag_buf_ptr, 0, len); | 659 | tty_insert_flip_string(tp, ch->ch_rqueue + tail, s) ; |
678 | } | 660 | } |
679 | 661 | tail += s; | |
680 | tp->flip.char_buf_ptr += len; | 662 | n -= s; |
681 | tp->flip.flag_buf_ptr += len; | 663 | /* Flip queue if needed */ |
682 | tp->flip.count += len; | 664 | tail &= rmask; |
683 | } | ||
684 | else if (!tp->real_raw) { | ||
685 | if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) { | ||
686 | for (i = 0; i < len; i++) { | ||
687 | /* | ||
688 | * Give the Linux ld the flags in the | ||
689 | * format it likes. | ||
690 | */ | ||
691 | if (tp->flip.flag_buf_ptr[i] & UART_LSR_BI) | ||
692 | tp->flip.flag_buf_ptr[i] = TTY_BREAK; | ||
693 | else if (tp->flip.flag_buf_ptr[i] & UART_LSR_PE) | ||
694 | tp->flip.flag_buf_ptr[i] = TTY_PARITY; | ||
695 | else if (tp->flip.flag_buf_ptr[i] & UART_LSR_FE) | ||
696 | tp->flip.flag_buf_ptr[i] = TTY_FRAME; | ||
697 | else | ||
698 | tp->flip.flag_buf_ptr[i] = TTY_NORMAL; | ||
699 | } | ||
700 | } else | ||
701 | memset(tp->flip.flag_buf, 0, len); | ||
702 | } | 665 | } |
703 | 666 | ||
704 | /* | 667 | ch->ch_r_tail = tail & rmask; |
705 | * If we're doing raw reads, jam it right into the | 668 | ch->ch_e_tail = tail & rmask; |
706 | * line disc bypassing the flip buffers. | 669 | jsm_check_queue_flow_control(ch); |
707 | */ | 670 | spin_unlock_irqrestore(&ch->ch_lock, lock_flags); |
708 | if (jsm_rawreadok) { | ||
709 | if (tp->real_raw) { | ||
710 | ch->ch_r_tail = tail & rmask; | ||
711 | ch->ch_e_tail = tail & rmask; | ||
712 | |||
713 | jsm_check_queue_flow_control(ch); | ||
714 | |||
715 | /* !!! WE *MUST* LET GO OF ALL LOCKS BEFORE CALLING RECEIVE BUF !!! */ | ||
716 | 671 | ||
717 | spin_unlock_irqrestore(&ch->ch_lock, lock_flags); | 672 | /* Tell the tty layer it's okay to "eat" the data now */
673 | tty_flip_buffer_push(tp); | ||
718 | 674 | ||
719 | jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, | 675 | if (ld) |
720 | "jsm_input. %d real_raw len:%d calling receive_buf for board %d\n", | 676 | tty_ldisc_deref(ld); |
721 | __LINE__, len, ch->ch_bd->boardnum); | ||
722 | tp->ldisc.receive_buf(tp, ch->ch_bd->flipbuf, NULL, len); | ||
723 | |||
724 | /* Allow use of channel flip buffer again */ | ||
725 | spin_lock_irqsave(&ch->ch_lock, lock_flags); | ||
726 | ch->ch_flags &= ~CH_FLIPBUF_IN_USE; | ||
727 | spin_unlock_irqrestore(&ch->ch_lock, lock_flags); | ||
728 | |||
729 | } else { | ||
730 | ch->ch_r_tail = tail & rmask; | ||
731 | ch->ch_e_tail = tail & rmask; | ||
732 | |||
733 | jsm_check_queue_flow_control(ch); | ||
734 | |||
735 | /* !!! WE *MUST* LET GO OF ALL LOCKS BEFORE CALLING RECEIVE BUF !!! */ | ||
736 | spin_unlock_irqrestore(&ch->ch_lock, lock_flags); | ||
737 | |||
738 | jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, | ||
739 | "jsm_input. %d not real_raw len:%d calling receive_buf for board %d\n", | ||
740 | __LINE__, len, ch->ch_bd->boardnum); | ||
741 | |||
742 | tp->ldisc.receive_buf(tp, tp->flip.char_buf, tp->flip.flag_buf, len); | ||
743 | } | ||
744 | } else { | ||
745 | ch->ch_r_tail = tail & rmask; | ||
746 | ch->ch_e_tail = tail & rmask; | ||
747 | |||
748 | jsm_check_queue_flow_control(ch); | ||
749 | |||
750 | spin_unlock_irqrestore(&ch->ch_lock, lock_flags); | ||
751 | |||
752 | jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, | ||
753 | "jsm_input. %d not jsm_read raw okay scheduling flip\n", __LINE__); | ||
754 | tty_schedule_flip(tp); | ||
755 | } | ||
756 | 677 | ||
757 | jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev, "finish\n"); | 678 | jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev, "finish\n"); |
758 | } | 679 | } |
diff --git a/drivers/serial/mcfserial.c b/drivers/serial/mcfserial.c index d957a3a9edf1..0ef648fa4b2d 100644 --- a/drivers/serial/mcfserial.c +++ b/drivers/serial/mcfserial.c | |||
@@ -350,8 +350,7 @@ static inline void receive_chars(struct mcf_serial *info) | |||
350 | } | 350 | } |
351 | tty_insert_flip_char(tty, ch, flag); | 351 | tty_insert_flip_char(tty, ch, flag); |
352 | } | 352 | } |
353 | 353 | tty_flip_buffer_push(tty); | |
354 | schedule_work(&tty->flip.work); | ||
355 | return; | 354 | return; |
356 | } | 355 | } |
357 | 356 | ||
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c index f6704688ee8c..5578a9dd04e8 100644 --- a/drivers/telephony/ixj.c +++ b/drivers/telephony/ixj.c | |||
@@ -3558,10 +3558,16 @@ static void ixj_write_frame(IXJ *j) | |||
3558 | } | 3558 | } |
3559 | /* Add word 0 to G.729 frames for the 8021. Right now we don't do VAD/CNG */ | 3559 | /* Add word 0 to G.729 frames for the 8021. Right now we don't do VAD/CNG */ |
3560 | if (j->play_codec == G729 && (cnt == 0 || cnt == 10 || cnt == 20)) { | 3560 | if (j->play_codec == G729 && (cnt == 0 || cnt == 10 || cnt == 20)) { |
3561 | if(j->write_buffer_rp + cnt == 0 && j->write_buffer_rp + cnt + 1 == 0 && j->write_buffer_rp + cnt + 2 == 0 && | 3561 | if (j->write_buffer_rp[cnt] == 0 && |
3562 | j->write_buffer_rp + cnt + 3 == 0 && j->write_buffer_rp + cnt + 4 == 0 && j->write_buffer_rp + cnt + 5 == 0 && | 3562 | j->write_buffer_rp[cnt + 1] == 0 && |
3563 | j->write_buffer_rp + cnt + 6 == 0 && j->write_buffer_rp + cnt + 7 == 0 && j->write_buffer_rp + cnt + 8 == 0 && | 3563 | j->write_buffer_rp[cnt + 2] == 0 && |
3564 | j->write_buffer_rp + cnt + 9 == 0) { | 3564 | j->write_buffer_rp[cnt + 3] == 0 && |
3565 | j->write_buffer_rp[cnt + 4] == 0 && | ||
3566 | j->write_buffer_rp[cnt + 5] == 0 && | ||
3567 | j->write_buffer_rp[cnt + 6] == 0 && | ||
3568 | j->write_buffer_rp[cnt + 7] == 0 && | ||
3569 | j->write_buffer_rp[cnt + 8] == 0 && | ||
3570 | j->write_buffer_rp[cnt + 9] == 0) { | ||
3565 | /* someone is trying to write silence lets make this a type 0 frame. */ | 3571 | /* someone is trying to write silence lets make this a type 0 frame. */ |
3566 | outb_p(0x00, j->DSPbase + 0x0C); | 3572 | outb_p(0x00, j->DSPbase + 0x0C); |
3567 | outb_p(0x00, j->DSPbase + 0x0D); | 3573 | outb_p(0x00, j->DSPbase + 0x0D); |